// SPDX-License-Identifier: ISC
/* Copyright (C) 2019 MediaTek Inc.
 *
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 *         Roy Luo <royluo@google.com>
 *         Felix Fietkau <nbd@nbd.name>
 *         Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include <linux/devcoredump.h>
#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7615.h"
#include "../trace.h"
#include "../dma.h"
#include "mt7615_trace.h"
#include "mac.h"
#include "mcu.h"

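/* RCPI is reported in 0.5 dB steps with a +110 dB offset,
 * i.e. rssi = rcpi / 2 - 110 dBm
 */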
#define to_rssi(field, rxv)	((FIELD_GET(field, rxv) - 220) / 2)

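/* Per-region DFS radar detection thresholds; each radar_pattern index
 * selects the hardware pattern slot for one regulatory radar type.
 */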
static const struct mt7615_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0, 17,  990, 5010, 1, 1 },
		[6] =  { 1, 0,  9, 32, 28, 0, 27,  615, 5010, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0, 27,  240,  445, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0, 42,  240,  510, 1, 1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 14, 2490, 3343, 0, 0, 12, 32, 28 },
		[10] = { 1, 1,  0,  0,  0, 0, 14, 2490, 3343, 0, 0, 15, 32, 24 },
		[11] = { 1, 1,  0,  0,  0, 0, 14,  823, 2510, 0, 0, 18, 32, 28 },
		[12] = { 1, 1,  0,  0,  0, 0, 14,  823, 2510, 0, 0, 27, 32, 24 },
	},
};

static const struct mt7615_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  9,  32, 28, 0, 13, 508, 3076, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 17, 140,  240, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 22, 190,  510, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 32, 190,  510, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 13, 323,  343, 1, 32 },
	},
};

static const struct mt7615_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8, 32, 28, 0, 13,  508, 3076, 1,  1 },
		[1] =  { 1, 0, 12, 32, 28, 0, 17,  140,  240, 1,  1 },
		[2] =  { 1, 0,  8, 32, 28, 0, 22,  190,  510, 1,  1 },
		[3] =  { 1, 0,  6, 32, 28, 0, 32,  190,  510, 1,  1 },
		[4] =  { 1, 0,  9, 32, 28, 0, 13,  323,  343, 1, 32 },
		[13] = { 1, 0,  8, 32, 28, 0, 14, 3836, 3856, 1,  1 },
		[14] = { 1, 0,  8, 32, 28, 0, 14, 3990, 4010, 1,  1 },
	},
};

static enum mt76_cipher_type
mt7615_mac_get_cipher(int cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		return MT_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		return MT_CIPHER_BIP_CMAC_128;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT_CIPHER_AES_CCMP;
	case WLAN_CIPHER_SUITE_CCMP_256:
		return MT_CIPHER_CCMP_256;
	case WLAN_CIPHER_SUITE_GCMP:
		return MT_CIPHER_GCMP;
	case WLAN_CIPHER_SUITE_GCMP_256:
		return MT_CIPHER_GCMP_256;
	case WLAN_CIPHER_SUITE_SMS4:
		return MT_CIPHER_WAPI;
	default:
		return MT_CIPHER_NONE;
	}
}

static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
					    u8 idx, bool unicast)
{
	struct mt7615_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= MT7615_WTBL_SIZE)
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

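	/* group-addressed frames are reported against the per-vif station
	 * entry rather than the transmitter's own wcid
	 */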
	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7615_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

void mt7615_mac_reset_counters(struct mt7615_dev *dev)
{
	int i;

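	/* the aggregation counters are clear-on-read; read them once on
	 * both bands to reset them
	 */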
	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		mt76_rr(dev, MT_TX_AGG_CNT(1, i));
	}

	memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
	dev->mt76.phy.survey_time = ktime_get_boottime();
	if (dev->mt76.phy2)
		dev->mt76.phy2->survey_time = ktime_get_boottime();

	/* reset airtime counters */
	mt76_rr(dev, MT_MIB_SDR9(0));
	mt76_rr(dev, MT_MIB_SDR9(1));

	mt76_rr(dev, MT_MIB_SDR36(0));
	mt76_rr(dev, MT_MIB_SDR36(1));

	mt76_rr(dev, MT_MIB_SDR37(0));
	mt76_rr(dev, MT_MIB_SDR37(1));

	mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}

void mt7615_mac_set_timing(struct mt7615_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 val, reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	int sifs, offset;
	bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (is_5ghz)
		sifs = 16;
	else
		sifs = 10;

	if (ext_phy) {
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       coverage_class);
		mt76_set(dev, MT_ARB_SCR,
			 MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
	} else {
		struct mt7615_phy *phy_ext = mt7615_ext_phy(dev);

		if (phy_ext)
			coverage_class = max_t(s16, phy_ext->coverage_class,
					       coverage_class);
		mt76_set(dev, MT_ARB_SCR,
			 MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
	}
	udelay(1);

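	/* each coverage class unit accounts for ~3 us of extra air
	 * propagation delay; extend the PLCP/CCA timeouts accordingly
	 */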
	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
	mt76_wr(dev, MT_TMAC_CDTR, cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR, ofdm + reg_offset);

	mt76_wr(dev, MT_TMAC_ICR(ext_phy),
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	if (phy->slottime < 20 || is_5ghz)
		val = MT7615_CFEND_RATE_DEFAULT;
	else
		val = MT7615_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
	if (ext_phy)
		mt76_clear(dev, MT_ARB_SCR,
			   MT_ARB_SCR_TX1_DISABLE | MT_ARB_SCR_RX1_DISABLE);
	else
		mt76_clear(dev, MT_ARB_SCR,
			   MT_ARB_SCR_TX0_DISABLE | MT_ARB_SCR_RX0_DISABLE);
}

static void
mt7615_get_status_freq_info(struct mt7615_dev *dev, struct mt76_phy *mphy,
			    struct mt76_rx_status *status, u8 chfreq)
{
	if (!test_bit(MT76_HW_SCANNING, &mphy->state) &&
	    !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) &&
	    !test_bit(MT76_STATE_ROC, &mphy->state)) {
		status->freq = mphy->chandef.chan->center_freq;
		status->band = mphy->chandef.chan->band;
		return;
	}

	status->band = chfreq <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
	status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
}

static void mt7615_mac_fill_tm_rx(struct mt7615_phy *phy, __le32 *rxv)
{
#ifdef CONFIG_NL80211_TESTMODE
	u32 rxv1 = le32_to_cpu(rxv[0]);
	u32 rxv3 = le32_to_cpu(rxv[2]);
	u32 rxv4 = le32_to_cpu(rxv[3]);
	u32 rxv5 = le32_to_cpu(rxv[4]);
	u8 cbw = FIELD_GET(MT_RXV1_FRAME_MODE, rxv1);
	u8 mode = FIELD_GET(MT_RXV1_TX_MODE, rxv1);
	s16 foe = FIELD_GET(MT_RXV5_FOE, rxv5);
	u32 foe_const = (BIT(cbw + 1) & 0xf) * 10000;

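	/* scale the raw frequency-offset estimate: CCK reports a
	 * sign-extended 11-bit value, other modes a 12-bit two's complement
	 * value whose scaling constant depends on the channel bandwidth
	 */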
	if (!mode) {
		/* CCK */
		foe &= ~BIT(11);
		foe *= 1000;
		foe >>= 11;
	} else {
		if (foe > 2048)
			foe -= 4096;

		foe = (foe * foe_const) >> 15;
	}

	phy->test.last_freq_offset = foe;
	phy->test.last_rcpi[0] = FIELD_GET(MT_RXV4_RCPI0, rxv4);
	phy->test.last_rcpi[1] = FIELD_GET(MT_RXV4_RCPI1, rxv4);
	phy->test.last_rcpi[2] = FIELD_GET(MT_RXV4_RCPI2, rxv4);
	phy->test.last_rcpi[3] = FIELD_GET(MT_RXV4_RCPI3, rxv4);
	phy->test.last_ib_rssi[0] = FIELD_GET(MT_RXV3_IB_RSSI, rxv3);
	phy->test.last_wb_rssi[0] = FIELD_GET(MT_RXV3_WB_RSSI, rxv3);
#endif
}

static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7615_phy *phy = &dev->phy;
	struct mt7615_phy *phy2 = dev->mt76.phy2 ? dev->mt76.phy2->priv : NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	__le32 *rxd = (__le32 *)skb->data;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
	bool unicast, hdr_trans, remove_pad, insert_ccmp_hdr = false;
	int phy_idx;
	int i, idx;
	u8 chfreq, amsdu_info, qos_ctl = 0;
	u16 seq_ctrl = 0;
	__le16 fc = 0;

	memset(status, 0, sizeof(*status));

	chfreq = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
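	/* match the reported channel against each PHY's operating channel;
	 * phy_idx < 0 defers the decision to the per-chain RSSI in rxd
	 * group 3 below
	 */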
	if (!phy2)
		phy_idx = 0;
	else if (phy2->chfreq == phy->chfreq)
		phy_idx = -1;
	else if (phy->chfreq == chfreq)
		phy_idx = 0;
	else if (phy2->chfreq == chfreq)
		phy_idx = 1;
	else
		phy_idx = -1;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
	hdr_trans = rxd1 & MT_RXD1_NORMAL_HDR_TRANS;
	status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		struct mt7615_sta *msta;

		msta = container_of(status->wcid, struct mt7615_sta, wcid);
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&msta->poll_list))
			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	if ((rxd0 & csum_mask) == csum_mask)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

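	/* the base rx descriptor is 4 dwords; optional descriptor groups
	 * (4, 1, 2, 3) follow and are consumed one by one below
	 */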
	rxd += 4;
	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD4_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD6_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
			      MT_RXD2_NORMAL_NON_AMPDU))) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg5 = le32_to_cpu(rxd[5]);

		/*
		 * If both PHYs are on the same channel and we don't have a WCID,
		 * we need to figure out which PHY this packet was received on.
		 * On the primary PHY, the noise value for the chains belonging to the
		 * second PHY will be set to the noise value of the last packet from
		 * that PHY.
		 */
		if (phy_idx < 0) {
			int first_chain = ffs(phy2->mt76->chainmask) - 1;

			phy_idx = ((rxdg5 >> (first_chain * 8)) & 0xff) == 0;
		}
	}

	if (phy_idx == 1 && phy2) {
		mphy = dev->mt76.phy2;
		phy = phy2;
		status->ext_phy = true;
	}

	if (!mt7615_firmware_offload(dev) && chfreq != phy->chfreq)
		return -EINVAL;

	mt7615_get_status_freq_info(dev, mphy, status, chfreq);
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (!sband->channels)
		return -EINVAL;

	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg0 = le32_to_cpu(rxd[0]);
		u32 rxdg1 = le32_to_cpu(rxd[1]);
		u32 rxdg3 = le32_to_cpu(rxd[3]);
		u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
		bool cck = false;

		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
		case MT_PHY_TYPE_CCK:
			cck = true;
			fallthrough;
		case MT_PHY_TYPE_OFDM:
			i = mt76_get_rate(&dev->mt76, sband, i, cck);
			break;
		case MT_PHY_TYPE_HT_GF:
		case MT_PHY_TYPE_HT:
			status->encoding = RX_ENC_HT;
			if (i > 31)
				return -EINVAL;
			break;
		case MT_PHY_TYPE_VHT:
			status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1;
			status->encoding = RX_ENC_VHT;
			break;
		default:
			return -EINVAL;
		}
		status->rate_idx = i;

		switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) {
		case MT_PHY_BW_20:
			break;
		case MT_PHY_BW_40:
			status->bw = RATE_INFO_BW_40;
			break;
		case MT_PHY_BW_80:
			status->bw = RATE_INFO_BW_80;
			break;
		case MT_PHY_BW_160:
			status->bw = RATE_INFO_BW_160;
			break;
		default:
			return -EINVAL;
		}

		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (rxdg0 & MT_RXV1_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3);
		status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
		status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
		status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
		status->signal = status->chain_signal[0];

		for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
			if (!(status->chains & BIT(i)))
				continue;

			status->signal = max(status->signal,
					     status->chain_signal[i]);
		}

		mt7615_mac_fill_tm_rx(mphy->priv, rxd);

		rxd += 6;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);

	amsdu_info = FIELD_GET(MT_RXD1_NORMAL_PAYLOAD_FORMAT, rxd1);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD1_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD1_LAST_AMSDU_FRAME;
		if (!hdr_trans) {
			memmove(skb->data + 2, skb->data,
				ieee80211_get_hdrlen_from_skb(skb));
			skb_pull(skb, 2);
		}
	}

	if (insert_ccmp_hdr && !hdr_trans) {
		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

		mt76_insert_ccmp_hdr(skb, key_id);
	}

	if (!hdr_trans) {
		hdr = (struct ieee80211_hdr *)skb->data;
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *ieee80211_get_qos_ctl(hdr);
		}
	} else {
		status->flag |= RX_FLAG_8023;
	}

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}

void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}
EXPORT_SYMBOL_GPL(mt7615_sta_ps);

static u16
mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
		       struct mt76_phy *mphy,
		       const struct ieee80211_tx_rate *rate,
		       bool stbc, u8 *bw)
{
	u8 phy, nss, rate_idx;
	u16 rateval = 0;

	*bw = 0;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		rate_idx = ieee80211_rate_get_vht_mcs(rate);
		nss = ieee80211_rate_get_vht_nss(rate);
		phy = MT_PHY_TYPE_VHT;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
		else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			*bw = 2;
		else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
			*bw = 3;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = mphy->chandef.chan->band;
		u16 val;

		nss = 1;
		r = &mphy->hw->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

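		/* hw_value packs the PHY mode in the upper byte and the
		 * rate index in the lower byte
		 */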
		phy = val >> 8;
		rate_idx = val & 0xff;
	}

	if (stbc && nss == 1) {
		nss++;
		rateval |= MT_TX_RATE_STBC;
	}

	rateval |= (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		    FIELD_PREP(MT_TX_RATE_MODE, phy) |
		    FIELD_PREP(MT_TX_RATE_NSS, nss - 1));

	return rateval;
}

int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta, int pid,
			  struct ieee80211_key_conf *key, bool beacon)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	struct ieee80211_vif *vif = info->control.vif;
	bool is_mmio = mt76_is_mmio(&dev->mt76);
	u32 val, sz_txd = is_mmio ? MT_TXD_SIZE : MT_USB_TXD_SIZE;
	struct mt76_phy *mphy = &dev->mphy;
	__le16 fc = hdr->frame_control;
	int tx_count = 8;
	u16 seqno = 0;

	if (vif) {
		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;

		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
	}

	if (sta) {
		struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;

		tx_count = msta->rate_count;
	}

	if (ext_phy && dev->mt76.phy2)
		mphy = dev->mt76.phy2;

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

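	/* map the frame to its LMAC queue: beacons and PS/mgmt frames use
	 * dedicated queues, data frames use the per-WMM-set AC queues
	 */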
	if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0;
	} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
		p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
		q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0;
	} else {
		p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
		q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
			mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
	      FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID,
			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
	      FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
	txwi[1] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD2_MULTICAST, multicast);
	if (key) {
		if (multicast && ieee80211_is_robust_mgmt_frame(skb) &&
		    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
			val |= MT_TXD2_BIP;
			txwi[3] = 0;
		} else {
			txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME);
		}
	} else {
		txwi[3] = 0;
	}
	txwi[2] = cpu_to_le32(val);

	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

	txwi[4] = 0;
	txwi[6] = 0;

	if (rate->idx >= 0 && rate->count &&
	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
		u8 bw;
		u16 rateval = mt7615_mac_tx_rate_val(dev, mphy, rate, stbc,
						     &bw);

		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_BW, bw) |
		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
		txwi[6] |= cpu_to_le32(val);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);

		if (info->flags & IEEE80211_TX_CTL_LDPC)
			txwi[6] |= cpu_to_le32(MT_TXD6_LDPC);

		if (!(rate->flags & (IEEE80211_TX_RC_MCS |
				     IEEE80211_TX_RC_VHT_MCS)))
			txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

		tx_count = rate->count;
	}

	if (!ieee80211_is_beacon(fc)) {
		struct ieee80211_hw *hw = mt76_hw(dev);

		val = MT_TXD5_TX_STATUS_HOST | FIELD_PREP(MT_TXD5_PID, pid);
		if (!ieee80211_hw_check(hw, SUPPORTS_PS))
			val |= MT_TXD5_SW_POWER_MGMT;
		txwi[5] = cpu_to_le32(val);
	} else {
		txwi[5] = 0;
		/* use maximum tx count for beacons */
		tx_count = 0x1f;
	}

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val |= MT_TXD3_SN_VALID |
		       FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
	}

	txwi[3] |= cpu_to_le32(val);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);

	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD7_SPE_IDX, 0x18);
	txwi[7] = cpu_to_le32(val);
	if (!is_mmio) {
		val = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) |
		      FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype);
		txwi[8] = cpu_to_le32(val);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt7615_mac_write_txwi);

static void
mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
{
	int i;

	for (i = 0; i < txp->nbuf; i++)
		dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
				 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
}

static void
mt7615_txp_skb_unmap_hw(struct mt76_dev *dev, struct mt7615_hw_txp *txp)
{
	u32 last_mask;
	int i;

	last_mask = is_mt7663(dev) ? MT_TXD_LEN_LAST : MT_TXD_LEN_MSDU_LAST;

	for (i = 0; i < ARRAY_SIZE(txp->ptr); i++) {
		struct mt7615_txp_ptr *ptr = &txp->ptr[i];
		bool last;
		u16 len;

		len = le16_to_cpu(ptr->len0);
		last = len & last_mask;
		len &= MT_TXD_LEN_MASK;
		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
				 DMA_TO_DEVICE);
		if (last)
			break;

		len = le16_to_cpu(ptr->len1);
		last = len & last_mask;
		len &= MT_TXD_LEN_MASK;
		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
				 DMA_TO_DEVICE);
		if (last)
			break;
	}
}

void mt7615_txp_skb_unmap(struct mt76_dev *dev,
			  struct mt76_txwi_cache *t)
{
	struct mt7615_txp_common *txp;

	txp = mt7615_txwi_to_txp(dev, t);
	if (is_mt7615(dev))
		mt7615_txp_skb_unmap_fw(dev, &txp->fw);
	else
		mt7615_txp_skb_unmap_hw(dev, &txp->hw);
}
EXPORT_SYMBOL_GPL(mt7615_txp_skb_unmap);

bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

void mt7615_mac_sta_poll(struct mt7615_dev *dev)
{
	static const u8 ac_to_tid[4] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	static const u8 hw_queue_map[] = {
		[IEEE80211_AC_BK] = 0,
		[IEEE80211_AC_BE] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};
	struct ieee80211_sta *sta;
	struct mt7615_sta *msta;
	u32 addr, tx_time[4], rx_time[4];
	struct list_head sta_poll_list;
	int i;

	INIT_LIST_HEAD(&sta_poll_list);
	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	while (!list_empty(&sta_poll_list)) {
		bool clear = false;

		msta = list_first_entry(&sta_poll_list, struct mt7615_sta,
					poll_list);
		list_del_init(&msta->poll_list);

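		/* WTBL DW19 onward holds the per-AC airtime counters, a
		 * tx/rx pair per AC; clear them once a value passes BIT(30)
		 * to avoid wrap-around
		 */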
		addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;

		for (i = 0; i < 4; i++, addr += 8) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;
		}

		if (clear) {
			mt7615_mac_wtbl_update(dev, msta->wcid.idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < 4; i++) {
			u32 tx_cur = tx_time[i];
			u32 rx_cur = rx_time[hw_queue_map[i]];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}
	}
}
EXPORT_SYMBOL_GPL(mt7615_mac_sta_poll);

static void
mt7615_mac_update_rate_desc(struct mt7615_phy *phy, struct mt7615_sta *sta,
			    struct ieee80211_tx_rate *probe_rate,
			    struct ieee80211_tx_rate *rates,
			    struct mt7615_rate_desc *rd)
{
	struct mt7615_dev *dev = phy->dev;
	struct mt76_phy *mphy = phy->mt76;
	struct ieee80211_tx_rate *ref;
	bool rateset, stbc = false;
	int n_rates = sta->n_rates;
	u8 bw, bw_prev;
	int i, j;

	for (i = n_rates; i < 4; i++)
		rates[i] = rates[n_rates - 1];

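	/* the hardware keeps two rate sets per station; BIT(0) of
	 * rate_set_tsf tracks the one currently in use, so update the
	 * inactive copy
	 */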
	rateset = !(sta->rate_set_tsf & BIT(0));
	memcpy(sta->rateset[rateset].rates, rates,
	       sizeof(sta->rateset[rateset].rates));
	if (probe_rate) {
		sta->rateset[rateset].probe_rate = *probe_rate;
		ref = &sta->rateset[rateset].probe_rate;
	} else {
		sta->rateset[rateset].probe_rate.idx = -1;
		ref = &sta->rateset[rateset].rates[0];
	}

	rates = sta->rateset[rateset].rates;
	for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
		/*
		 * We don't support switching between short and long GI
		 * within the rate set. For accurate tx status reporting, we
		 * need to make sure that flags match.
		 * For improved performance, avoid duplicate entries by
		 * decrementing the MCS index if necessary
		 */
		if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
			rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;

		for (j = 0; j < i; j++) {
			if (rates[i].idx != rates[j].idx)
				continue;
			if ((rates[i].flags ^ rates[j].flags) &
			    (IEEE80211_TX_RC_40_MHZ_WIDTH |
			     IEEE80211_TX_RC_80_MHZ_WIDTH |
			     IEEE80211_TX_RC_160_MHZ_WIDTH))
				continue;

			if (!rates[i].idx)
				continue;

			rates[i].idx--;
		}
	}

	rd->val[0] = mt7615_mac_tx_rate_val(dev, mphy, &rates[0], stbc, &bw);
	bw_prev = bw;

	if (probe_rate) {
		rd->probe_val = mt7615_mac_tx_rate_val(dev, mphy, probe_rate,
						       stbc, &bw);
		if (bw)
			rd->bw_idx = 1;
		else
			bw_prev = 0;
	} else {
		rd->probe_val = rd->val[0];
	}

	rd->val[1] = mt7615_mac_tx_rate_val(dev, mphy, &rates[1], stbc, &bw);
	if (bw_prev) {
		rd->bw_idx = 3;
		bw_prev = bw;
	}

	rd->val[2] = mt7615_mac_tx_rate_val(dev, mphy, &rates[2], stbc, &bw);
	if (bw_prev) {
		rd->bw_idx = 5;
		bw_prev = bw;
	}

	rd->val[3] = mt7615_mac_tx_rate_val(dev, mphy, &rates[3], stbc, &bw);
	if (bw_prev)
		rd->bw_idx = 7;

	rd->rateset = rateset;
	rd->bw = bw;
}

static int
mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta,
			     struct ieee80211_tx_rate *probe_rate,
			     struct ieee80211_tx_rate *rates)
{
	struct mt7615_dev *dev = phy->dev;
	struct mt7615_wtbl_rate_desc *wrd;

	if (work_pending(&dev->rate_work))
		return -EBUSY;

	wrd = kzalloc(sizeof(*wrd), GFP_ATOMIC);
	if (!wrd)
		return -ENOMEM;

	wrd->sta = sta;
	mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates,
				    &wrd->rate);
	list_add_tail(&wrd->node, &dev->wrd_head);
	queue_work(dev->mt76.wq, &dev->rate_work);

	return 0;
}

u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
{
	u32 addr, val, val2;
	u8 offset;

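	/* per-TID sequence numbers are packed as 12-bit fields starting at
	 * WTBL DW11; a field may straddle a register boundary
	 */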
	addr = mt7615_mac_wtbl_addr(dev, wcid) + 11 * 4;

	offset = tid * 12;
	addr += 4 * (offset / 32);
	offset %= 32;

	val = mt76_rr(dev, addr);
	val >>= offset;

	if (offset > 20) {
		addr += 4;
		val2 = mt76_rr(dev, addr);
		val |= val2 << (32 - offset);
	}

	return val & GENMASK(11, 0);
}

void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
			  struct ieee80211_tx_rate *probe_rate,
			  struct ieee80211_tx_rate *rates)
{
	int wcid = sta->wcid.idx, n_rates = sta->n_rates;
	struct mt7615_dev *dev = phy->dev;
	struct mt7615_rate_desc rd;
	u32 w5, w27, addr;
	u16 idx = sta->vif->mt76.omac_idx;

	if (!mt76_is_mmio(&dev->mt76)) {
		mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates);
		return;
	}

	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return;

	memset(&rd, 0, sizeof(struct mt7615_rate_desc));
	mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, &rd);

	addr = mt7615_mac_wtbl_addr(dev, wcid);
	w27 = mt76_rr(dev, addr + 27 * 4);
	w27 &= ~MT_WTBL_W27_CC_BW_SEL;
	w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rd.bw);

	w5 = mt76_rr(dev, addr + 5 * 4);
	w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE |
		MT_WTBL_W5_MPDU_OK_COUNT |
		MT_WTBL_W5_MPDU_FAIL_COUNT |
		MT_WTBL_W5_RATE_IDX);
	w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rd.bw) |
	      FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE,
			 rd.bw_idx ? rd.bw_idx - 1 : 7);

	mt76_wr(dev, MT_WTBL_RIUCR0, w5);

	mt76_wr(dev, MT_WTBL_RIUCR1,
		FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rd.probe_val) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rd.val[0]) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rd.val[1]));

	mt76_wr(dev, MT_WTBL_RIUCR2,
		FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rd.val[1] >> 8) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rd.val[1]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rd.val[2]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rd.val[2]));

	mt76_wr(dev, MT_WTBL_RIUCR3,
		FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rd.val[2] >> 4) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rd.val[3]) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rd.val[3]));

	mt76_wr(dev, MT_WTBL_UPDATE,
		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
		MT_WTBL_UPDATE_RATE_UPDATE |
		MT_WTBL_UPDATE_TX_COUNT_CLEAR);

	mt76_wr(dev, addr + 27 * 4, w27);

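	/* latch the current TSF so that tx status processing can tell
	 * which rate set a given frame was sent with
	 */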
	idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
	addr = idx > 1 ? MT_LPON_TCR2(idx) : MT_LPON_TCR0(idx);

	mt76_rmw(dev, addr, MT_LPON_TCR_MODE, MT_LPON_TCR_READ); /* TSF read */
	sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0);
	sta->rate_set_tsf |= rd.rateset;

	if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
		mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates;
	sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
	sta->rate_probe = !!probe_rate;
}
EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);

static int
mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key,
			   enum mt76_cipher_type cipher, u16 cipher_mask,
			   enum set_key_cmd cmd)
{
	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
	u8 data[32] = {};

	if (key->keylen > sizeof(data))
		return -EINVAL;

	mt76_rr_copy(dev, addr, data, sizeof(data));
	if (cmd == SET_KEY) {
		if (cipher == MT_CIPHER_TKIP) {
			/* Rx/Tx MIC keys are swapped */
			memcpy(data, key->key, 16);
			memcpy(data + 16, key->key + 24, 8);
			memcpy(data + 24, key->key + 16, 8);
		} else {
			if (cipher_mask == BIT(cipher))
				memcpy(data, key->key, key->keylen);
			else if (cipher != MT_CIPHER_BIP_CMAC_128)
				memcpy(data, key->key, 16);
			if (cipher == MT_CIPHER_BIP_CMAC_128)
				memcpy(data + 16, key->key, 16);
		}
	} else {
		if (cipher == MT_CIPHER_BIP_CMAC_128)
			memset(data + 16, 0, 16);
		else if (cipher_mask)
			memset(data, 0, 16);
		if (!cipher_mask)
			memset(data, 0, sizeof(data));
	}

	mt76_wr_copy(dev, addr, data, sizeof(data));

	return 0;
}

static int
mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
			  enum mt76_cipher_type cipher, u16 cipher_mask,
			  int keyidx, enum set_key_cmd cmd)
{
	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;

	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return -ETIMEDOUT;

	w0 = mt76_rr(dev, addr);
	w1 = mt76_rr(dev, addr + 4);

	if (cipher_mask)
		w0 |= MT_WTBL_W0_RX_KEY_VALID;
	else
		w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | MT_WTBL_W0_KEY_IDX);
	if (cipher_mask & BIT(MT_CIPHER_BIP_CMAC_128))
		w0 |= MT_WTBL_W0_RX_IK_VALID;
	else
		w0 &= ~MT_WTBL_W0_RX_IK_VALID;

	if (cmd == SET_KEY &&
	    (cipher != MT_CIPHER_BIP_CMAC_128 ||
	     cipher_mask == BIT(cipher))) {
		w0 &= ~MT_WTBL_W0_KEY_IDX;
		w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
	}

	mt76_wr(dev, MT_WTBL_RICR0, w0);
	mt76_wr(dev, MT_WTBL_RICR1, w1);

	if (!mt7615_mac_wtbl_update(dev, wcid->idx,
				    MT_WTBL_UPDATE_RXINFO_UPDATE))
		return -ETIMEDOUT;

	return 0;
}

static void
mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
			      enum mt76_cipher_type cipher, u16 cipher_mask,
			      enum set_key_cmd cmd)
{
	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);

	if (!cipher_mask) {
		mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
		return;
	}

	if (cmd != SET_KEY)
		return;

	if (cipher == MT_CIPHER_BIP_CMAC_128 &&
	    cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128))
		return;

	mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
		 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
}

int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
			      struct mt76_wcid *wcid,
			      struct ieee80211_key_conf *key,
			      enum set_key_cmd cmd)
{
	enum mt76_cipher_type cipher;
	u16 cipher_mask = wcid->cipher;
	int err;

	cipher = mt7615_mac_get_cipher(key->cipher);
	if (cipher == MT_CIPHER_NONE)
		return -EOPNOTSUPP;

	if (cmd == SET_KEY)
		cipher_mask |= BIT(cipher);
	else
		cipher_mask &= ~BIT(cipher);

	mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd);
	err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask,
					 cmd);
	if (err < 0)
		return err;

	err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask,
					key->keyidx, cmd);
	if (err < 0)
		return err;

	wcid->cipher = cipher_mask;

	return 0;
}

int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
			    struct mt76_wcid *wcid,
			    struct ieee80211_key_conf *key,
			    enum set_key_cmd cmd)
{
	int err;

	spin_lock_bh(&dev->mt76.lock);
	err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
	spin_unlock_bh(&dev->mt76.lock);

	return err;
}

static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
			    struct ieee80211_tx_info *info, __le32 *txs_data)
{
	struct ieee80211_supported_band *sband;
	struct mt7615_rate_set *rs;
	struct mt76_phy *mphy;
	int first_idx = 0, last_idx;
	int i, idx, count;
	bool fixed_rate, ack_timeout;
	bool ampdu, cck = false;
	bool rs_idx;
	u32 rate_set_tsf;
	u32 final_rate, final_rate_flags, final_nss, txs;

	txs = le32_to_cpu(txs_data[1]);
	ampdu = txs & MT_TXS1_AMPDU;

	txs = le32_to_cpu(txs_data[3]);
	count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
	last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);

	txs = le32_to_cpu(txs_data[0]);
	fixed_rate = txs & MT_TXS0_FIXED_RATE;
	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
		return false;

	if (txs & MT_TXS0_QUEUE_TIMEOUT)
		return false;

	if (!ack_timeout)
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;

	first_idx = max_t(int, 0, last_idx - (count - 1) / MT7615_RATE_RETRY);

	if (fixed_rate) {
		info->status.rates[0].count = count;
		i = 0;
		goto out;
	}

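	/* pick the rate set the frame was sent with: the TXS timestamp and
	 * rate_set_tsf are in TSF microseconds, so frames completed within
	 * ~1s of the last update used the current set, older ones the
	 * previous one
	 */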
	rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
	rs_idx = !((u32)(FIELD_GET(MT_TXS4_F0_TIMESTAMP, le32_to_cpu(txs_data[4])) -
			 rate_set_tsf) < 1000000);
	rs_idx ^= rate_set_tsf & BIT(0);
	rs = &sta->rateset[rs_idx];

	if (!first_idx && rs->probe_rate.idx >= 0) {
		info->status.rates[0] = rs->probe_rate;

		spin_lock_bh(&dev->mt76.lock);
		if (sta->rate_probe) {
			struct mt7615_phy *phy = &dev->phy;

			if (sta->wcid.ext_phy && dev->mt76.phy2)
				phy = dev->mt76.phy2->priv;

			mt7615_mac_set_rates(phy, sta, NULL, sta->rates);
		}
		spin_unlock_bh(&dev->mt76.lock);
	} else {
		info->status.rates[0] = rs->rates[first_idx / 2];
	}
	info->status.rates[0].count = 0;

	for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
		struct ieee80211_tx_rate *cur_rate;
		int cur_count;

		cur_rate = &rs->rates[idx / 2];
		cur_count = min_t(int, MT7615_RATE_RETRY, count);
		count -= cur_count;

		if (idx && (cur_rate->idx != info->status.rates[i].idx ||
			    cur_rate->flags != info->status.rates[i].flags)) {
			i++;
			if (i == ARRAY_SIZE(info->status.rates)) {
				i--;
				break;
			}

			info->status.rates[i] = *cur_rate;
			info->status.rates[i].count = 0;
		}

		info->status.rates[i].count += cur_count;
	}

out:
	final_rate_flags = info->status.rates[i].flags;

	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = &dev->mphy;
		if (sta->wcid.ext_phy && dev->mt76.phy2)
			mphy = dev->mt76.phy2;

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else
			sband = &mphy->sband_2g.sband;
		final_rate &= MT_TX_RATE_IDX;
		final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
					   cck);
		final_rate_flags = 0;
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		final_rate_flags |= IEEE80211_TX_RC_MCS;
		final_rate &= MT_TX_RATE_IDX;
		if (final_rate > 31)
			return false;
		break;
	case MT_PHY_TYPE_VHT:
		final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate);

		if ((final_rate & MT_TX_RATE_STBC) && final_nss)
			final_nss--;

		final_rate_flags |= IEEE80211_TX_RC_VHT_MCS;
		final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4);
		break;
	default:
		return false;
	}

	info->status.rates[i].idx = final_rate;
	info->status.rates[i].flags = final_rate_flags;

	return true;
}

static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
				   struct mt7615_sta *sta, int pid,
				   __le32 *txs_data)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (pid < MT_PACKET_ID_FIRST)
		return false;

	trace_mac_txdone(mdev, sta->wcid.idx, pid);

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
			info->status.rates[0].count = 0;
			info->status.rates[0].idx = -1;
		}

		mt76_tx_status_skb_done(mdev, skb, &list);
	}
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt7615_sta *msta = NULL;
	struct mt76_wcid *wcid;
	struct mt76_phy *mphy = &dev->mt76.phy;
	__le32 *txs_data = data;
	u32 txs;
	u8 wcidx;
	u8 pid;

	txs = le32_to_cpu(txs_data[0]);
	pid = FIELD_GET(MT_TXS0_PID, txs);
	txs = le32_to_cpu(txs_data[2]);
	wcidx = FIELD_GET(MT_TXS2_WCID, txs);

	if (pid == MT_PACKET_ID_NO_ACK)
		return;

	if (wcidx >= MT7615_WTBL_SIZE)
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7615_sta, wcid);
	sta = wcid_to_sta(wcid);

	spin_lock_bh(&dev->sta_poll_lock);
	if (list_empty(&msta->poll_list))
		list_add_tail(&msta->poll_list, &dev->sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
		goto out;

	if (wcidx >= MT7615_WTBL_STA || !sta)
		goto out;

	if (wcid->ext_phy && dev->mt76.phy2)
		mphy = dev->mt76.phy2;

	if (mt7615_fill_txs(dev, msta, &info, txs_data))
		ieee80211_tx_status_noskb(mphy->hw, sta, &info);

out:
	rcu_read_unlock();
}

static void
mt7615_txwi_free(struct mt7615_dev *dev, struct mt76_txwi_cache *txwi)
{
	struct mt76_dev *mdev = &dev->mt76;
	__le32 *txwi_data;
	u32 val;
	u8 wcid;

	mt7615_txp_skb_unmap(mdev, txwi);
	if (!txwi->skb)
		goto out;

	txwi_data = (__le32 *)mt76_get_txwi_ptr(mdev, txwi);
	val = le32_to_cpu(txwi_data[1]);
	wcid = FIELD_GET(MT_TXD1_WLAN_IDX, val);
	mt76_tx_complete_skb(mdev, wcid, txwi->skb);

out:
	txwi->skb = NULL;
	mt76_put_txwi(mdev, txwi);
}

static void
mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;

	trace_mac_tx_free(dev, token);
	txwi = mt76_token_put(mdev, token);
	if (!txwi)
		return;

	mt7615_txwi_free(dev, txwi);
}

static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
{
	struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
	u8 i, count;

	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	if (is_mt7615(&dev->mt76)) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	} else {
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
	}

	count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
	if (is_mt7615(&dev->mt76)) {
		__le16 *token = &free->token[0];

		for (i = 0; i < count; i++)
			mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i]));
	} else {
		__le32 *token = (__le32 *)&free->token[0];

		for (i = 0; i < count; i++)
			mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i]));
	}

	dev_kfree_skb(skb);

	rcu_read_lock();
	mt7615_mac_sta_poll(dev);
	rcu_read_unlock();

	mt76_worker_schedule(&dev->mt76.tx_worker);
}

void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;
	u16 flag;

	type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
	flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
	if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
		type = PKT_TYPE_NORMAL_MCU;

	switch (type) {
	case PKT_TYPE_TXS:
		for (rxd++; rxd + 7 <= end; rxd += 7)
			mt7615_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_TXRX_NOTIFY:
		mt7615_mac_tx_free(dev, skb);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7615_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_NORMAL_MCU:
	case PKT_TYPE_NORMAL:
		if (!mt7615_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}
EXPORT_SYMBOL_GPL(mt7615_queue_rx_skb);

static void
mt7615_mac_set_sensitivity(struct mt7615_phy *phy, int val, bool ofdm)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;

	if (is_mt7663(&dev->mt76)) {
		if (ofdm)
			mt76_rmw(dev, MT7663_WF_PHY_MIN_PRI_PWR(ext_phy),
				 MT_WF_PHY_PD_OFDM_MASK(0),
				 MT_WF_PHY_PD_OFDM(0, val));
		else
			mt76_rmw(dev, MT7663_WF_PHY_RXTD_CCK_PD(ext_phy),
				 MT_WF_PHY_PD_CCK_MASK(ext_phy),
				 MT_WF_PHY_PD_CCK(ext_phy, val));
		return;
	}

	if (ofdm)
		mt76_rmw(dev, MT_WF_PHY_MIN_PRI_PWR(ext_phy),
			 MT_WF_PHY_PD_OFDM_MASK(ext_phy),
			 MT_WF_PHY_PD_OFDM(ext_phy, val));
	else
		mt76_rmw(dev, MT_WF_PHY_RXTD_CCK_PD(ext_phy),
			 MT_WF_PHY_PD_CCK_MASK(ext_phy),
			 MT_WF_PHY_PD_CCK(ext_phy, val));
}

static void
mt7615_mac_set_default_sensitivity(struct mt7615_phy *phy)
{
	/* ofdm */
	mt7615_mac_set_sensitivity(phy, 0x13c, true);
	/* cck */
	mt7615_mac_set_sensitivity(phy, 0x92, false);

	phy->ofdm_sensitivity = -98;
	phy->cck_sensitivity = -110;
	phy->last_cca_adj = jiffies;
}

void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 reg, mask;

	mt7615_mutex_acquire(dev);

	if (phy->scs_en == enable)
		goto out;

	if (is_mt7663(&dev->mt76)) {
		reg = MT7663_WF_PHY_MIN_PRI_PWR(ext_phy);
		mask = MT_WF_PHY_PD_BLK(0);
	} else {
		reg = MT_WF_PHY_MIN_PRI_PWR(ext_phy);
		mask = MT_WF_PHY_PD_BLK(ext_phy);
	}

	if (enable) {
		mt76_set(dev, reg, mask);
		if (is_mt7622(&dev->mt76)) {
			mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7 << 8);
			mt76_set(dev, MT_MIB_M0_MISC_CR(0), 0x7);
		}
	} else {
		mt76_clear(dev, reg, mask);
	}

	mt7615_mac_set_default_sensitivity(phy);
	phy->scs_en = enable;

out:
	mt7615_mutex_release(dev);
}

void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy)
{
	u32 rxtd, reg;

	if (is_mt7663(&dev->mt76))
		reg = MT7663_WF_PHY_R0_PHYMUX_5;
	else
		reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);

	if (ext_phy)
		rxtd = MT_WF_PHY_RXTD2(10);
	else
		rxtd = MT_WF_PHY_RXTD(12);

	mt76_set(dev, rxtd, BIT(18) | BIT(29));
	mt76_set(dev, reg, 0x5 << 12);
}

void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 reg;

	if (is_mt7663(&dev->mt76))
		reg = MT7663_WF_PHY_R0_PHYMUX_5;
	else
		reg = MT_WF_PHY_R0_PHYMUX_5(ext_phy);

	/* reset PD and MDRDY counters */
	mt76_clear(dev, reg, GENMASK(22, 20));
	mt76_set(dev, reg, BIT(22) | BIT(20));
}

static void
mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy,
			      u32 rts_err_rate, bool ofdm)
{
	struct mt7615_dev *dev = phy->dev;
	int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck;
	bool ext_phy = phy != &dev->phy;
	u16 def_th = ofdm ? -98 : -110;
	bool update = false;
	s8 *sensitivity;
	int signal;

	sensitivity = ofdm ? &phy->ofdm_sensitivity : &phy->cck_sensitivity;
	signal = mt76_get_min_avg_rssi(&dev->mt76, ext_phy);
	if (!signal) {
		mt7615_mac_set_default_sensitivity(phy);
		return;
	}

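	/* many false CCA events: raise the PD threshold in 2 dB steps to
	 * shrink coverage; very few events, or a high RTS failure rate:
	 * step back toward the default threshold
	 */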
	signal = min(signal, -72);
	if (false_cca > 500) {
		if (rts_err_rate > MT_FRAC(40, 100))
			return;

		/* decrease coverage */
		if (*sensitivity == def_th && signal > -90) {
			*sensitivity = -90;
			update = true;
		} else if (*sensitivity + 2 < signal) {
			*sensitivity += 2;
			update = true;
		}
	} else if ((false_cca > 0 && false_cca < 50) ||
		   rts_err_rate > MT_FRAC(60, 100)) {
		/* increase coverage */
		if (*sensitivity - 2 >= def_th) {
			*sensitivity -= 2;
			update = true;
		}
	}

	if (*sensitivity > signal) {
		*sensitivity = signal;
		update = true;
	}

	if (update) {
		u16 val = ofdm ? *sensitivity * 2 + 512 : *sensitivity + 256;

		mt7615_mac_set_sensitivity(phy, val, ofdm);
		phy->last_cca_adj = jiffies;
	}
}

static void
mt7615_mac_scs_check(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	u32 val, rts_err_rate = 0;
	u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm;
	bool ext_phy = phy != &dev->phy;

	if (!phy->scs_en)
		return;

	if (is_mt7663(&dev->mt76))
		val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS0(ext_phy));
	else
		val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS0(ext_phy));
	pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val);
	pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val);

	if (is_mt7663(&dev->mt76))
		val = mt76_rr(dev, MT7663_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
	else
		val = mt76_rr(dev, MT_WF_PHY_R0_PHYCTRL_STS5(ext_phy));
	mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val);
	mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val);

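	/* false CCA events are preamble detections (PD) that never became
	 * a MAC-ready (MDRDY) frame
	 */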
	phy->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
	phy->false_cca_cck = pd_cck - mdrdy_cck;
	mt7615_mac_cca_stats_reset(phy);

	if (mib->rts_cnt + mib->rts_retries_cnt)
		rts_err_rate = MT_FRAC(mib->rts_retries_cnt,
				       mib->rts_cnt + mib->rts_retries_cnt);

	/* cck */
	mt7615_mac_adjust_sensitivity(phy, rts_err_rate, false);
	/* ofdm */
	mt7615_mac_adjust_sensitivity(phy, rts_err_rate, true);

	if (time_after(jiffies, phy->last_cca_adj + 10 * HZ))
		mt7615_mac_set_default_sensitivity(phy);
}

static u8
mt7615_phy_get_nf(struct mt7615_dev *dev, int idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	u32 reg, val, sum = 0, n = 0;
	int i;

	if (is_mt7663(&dev->mt76))
		reg = MT7663_WF_PHY_RXTD(20);
	else
		reg = idx ? MT_WF_PHY_RXTD2(17) : MT_WF_PHY_RXTD(20);

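	/* compute the weighted average over the noise-floor histogram;
	 * nf_power[] holds the per-bin power level in -dBm
	 */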
	for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
		val = mt76_rr(dev, reg);
		sum += val * nf_power[i];
		n += val;
	}

	if (!n)
		return 0;

	return sum / n;
}

static void
mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
{
	struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);
	struct mt7615_phy *phy = mphy->priv;
	struct mt76_channel_state *state;
	u64 busy_time, tx_time, rx_time, obss_time;
	u32 obss_reg = idx ? MT_WF_RMAC_MIB_TIME6 : MT_WF_RMAC_MIB_TIME5;
	int nf;

	busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
				   MT_MIB_SDR9_BUSY_MASK);
	tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
				 MT_MIB_SDR36_TXTIME_MASK);
	rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
				 MT_MIB_SDR37_RXTIME_MASK);
	obss_time = mt76_get_field(dev, obss_reg, MT_MIB_OBSSTIME_MASK);

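	/* track the noise floor as an exponential moving average with
	 * weight 1/16, stored in <<4 fixed point
	 */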
1860 nf = mt7615_phy_get_nf(dev, idx);
1861 if (!phy->noise)
1862 phy->noise = nf << 4;
1863 else if (nf)
1864 phy->noise += nf - (phy->noise >> 4);
1865
1866 state = mphy->chan_state;
1867 state->cc_busy += busy_time;
1868 state->cc_tx += tx_time;
1869 state->cc_rx += rx_time + obss_time;
1870 state->cc_bss_rx += rx_time;
1871 state->noise = -(phy->noise >> 4);
1872 }
1873
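/* Refresh survey data for both bands in one pass and clear the
 * hardware OBSS airtime counters so the next pass starts from zero.
 */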
static void mt7615_update_survey(struct mt7615_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	ktime_t cur_time;

	/* MT7615 can only update both PHYs simultaneously
	 * since some registers are shared across bands.
	 */

	mt7615_phy_update_channel(&mdev->phy, 0);
	if (mdev->phy2)
		mt7615_phy_update_channel(mdev->phy2, 1);

	cur_time = ktime_get_boottime();

	mt76_update_survey_active_time(&mdev->phy, cur_time);
	if (mdev->phy2)
		mt76_update_survey_active_time(mdev->phy2, cur_time);

	/* reset obss airtime */
	mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
}

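/* Wake the device (it may be in runtime-PM sleep), update the survey
 * counters and re-arm the power-save scheduling.
 */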
void mt7615_update_channel(struct mt76_phy *mphy)
{
	struct mt7615_dev *dev = container_of(mphy->dev, struct mt7615_dev, mt76);

	if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
		return;

	mt7615_update_survey(dev);
	mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
}
EXPORT_SYMBOL_GPL(mt7615_update_channel);

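/* Harvest MIB counters: FCS errors, A-MPDU PER (in per-mille), BA
 * misses, ACK/RTS failures and per-size TX aggregation histograms;
 * the second half of aggr_stats is reserved for the ext PHY.
 */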
static void
mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	bool ext_phy = phy != &dev->phy;
	int i, aggr;
	u32 val, val2;

	mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
					   MT_MIB_SDR3_FCS_ERR_MASK);

	val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy),
			     MT_MIB_AMPDU_MPDU_COUNT);
	if (val) {
		val2 = mt76_get_field(dev, MT_MIB_SDR15(ext_phy),
				      MT_MIB_AMPDU_ACK_COUNT);
		mib->aggr_per = 1000 * (val - val2) / val;
	}

	aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
	for (i = 0; i < 4; i++) {
		val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
		mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
		mib->ack_fail_cnt += FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK,
					       val);

		val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
		mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
		mib->rts_retries_cnt += FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK,
						  val);

		val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
		dev->mt76.aggr_stats[aggr++] += val & 0xffff;
		dev->mt76.aggr_stats[aggr++] += val >> 16;
	}
}

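/* Deferred wake-up from runtime PM: reclaim driver ownership from the
 * MCU, kick RX processing (NAPI, or the SDIO txrx worker), flush
 * deferred TX frames and restart the periodic MAC work before
 * unblocking the mac80211 queues.
 */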
void mt7615_pm_wake_work(struct work_struct *work)
{
	struct mt7615_dev *dev;
	struct mt76_phy *mphy;

	dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
						pm.wake_work);
	mphy = dev->phy.mt76;

	if (!mt7615_mcu_set_drv_ctrl(dev)) {
		struct mt76_dev *mdev = &dev->mt76;
		int i;

		if (mt76_is_sdio(mdev)) {
			mt76_worker_schedule(&mdev->sdio.txrx_worker);
		} else {
			mt76_for_each_q_rx(mdev, i)
				napi_schedule(&mdev->napi[i]);
			mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
			mt76_queue_tx_cleanup(dev, mdev->q_mcu[MT_MCUQ_WM],
					      false);
		}

		if (test_bit(MT76_STATE_RUNNING, &mphy->state)) {
			unsigned long timeout;

			timeout = mt7615_get_macwork_timeout(dev);
			ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
						     timeout);
		}
	}

	ieee80211_wake_queues(mphy->hw);
	wake_up(&dev->pm.wait);
}

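/* Runtime power-save timer: hand ownership back to the firmware once
 * the device has been idle for pm.idle_timeout; while scanning or
 * still active, just re-arm the work for the remaining time.
 */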
void mt7615_pm_power_save_work(struct work_struct *work)
{
	struct mt7615_dev *dev;
	unsigned long delta;

	dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
						pm.ps_work.work);

	delta = dev->pm.idle_timeout;
	if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) ||
	    test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
		goto out;

	if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
		delta = dev->pm.last_activity + delta - jiffies;
		goto out;
	}

	if (!mt7615_mcu_set_fw_ctrl(dev))
		return;
out:
	queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
}

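/* Periodic housekeeping: refresh survey data every run, and on every
 * fifth run also update the MIB statistics and run the smart carrier
 * sense check; the work then re-queues itself.
 */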
void mt7615_mac_work(struct work_struct *work)
{
	struct mt7615_phy *phy;
	struct mt76_phy *mphy;
	unsigned long timeout;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mt7615_mutex_acquire(phy->dev);

	mt7615_update_survey(phy->dev);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7615_mac_update_mib_stats(phy);
		mt7615_mac_scs_check(phy);
	}

	mt7615_mutex_release(phy->dev);

	mt76_tx_status_check(mphy->dev, false);

	timeout = mt7615_get_macwork_timeout(phy->dev);
	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, timeout);
}

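/* Release all pending TX tokens (e.g. on device reset or teardown)
 * and destroy the token IDR.
 */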
void mt7615_tx_token_put(struct mt7615_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id)
		mt7615_txwi_free(dev, txwi);
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}
EXPORT_SYMBOL_GPL(mt7615_tx_token_put);

static void mt7615_dfs_stop_radar_detector(struct mt7615_phy *phy)
{
	struct mt7615_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt7615_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt7615_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
}

static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
{
	int err;

	err = mt7615_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	return mt7615_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
				  MT_RX_SEL0, 1);
}

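/* Begin CAC on the operating channel and start the RDD detector on
 * the corresponding chain; 160 MHz and 80+80 channels also need the
 * second RX chain.
 */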
static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	/* start CAC */
	err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7615_dfs_start_rdd(dev, ext_phy);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(ext_phy);

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7615_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}

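/* Program the region-specific radar patterns and pulse thresholds into
 * the MCU; the long-pulse number defaults high (500) so that FCC-5
 * long-pulse detection is effectively disabled outside the FCC domain.
 */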
static int
mt7615_dfs_init_radar_specs(struct mt7615_phy *phy)
{
	const struct mt7615_dfs_radar_spec *radar_specs;
	struct mt7615_dev *dev = phy->dev;
	int err, i, lpn = 500;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		lpn = 8;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	/* avoid FCC radar detection in non-FCC region */
	err = mt7615_mcu_set_fcc5_lpn(dev, lpn);
	if (err < 0)
		return err;

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7615_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7615_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

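/* Reconcile the DFS state machine with the current channel: (re)load
 * the radar specs when the channel's DFS state changes, start
 * CAC/detection on radar channels that are not yet available, and fall
 * back to normal RX operation otherwise.
 */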
int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	if (is_mt7663(&dev->mt76))
		return 0;

	if (dev->mt76.region == NL80211_DFS_UNSET) {
		phy->dfs_state = -1;
		if (phy->rdd_state)
			goto stop;

		return 0;
	}

	if (test_bit(MT76_SCANNING, &phy->mt76->state))
		return 0;

	if (phy->dfs_state == chandef->chan->dfs_state)
		return 0;

	err = mt7615_dfs_init_radar_specs(phy);
	if (err < 0) {
		phy->dfs_state = -1;
		goto stop;
	}

	phy->dfs_state = chandef->chan->dfs_state;

	if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
		if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
			return mt7615_dfs_start_radar_detector(phy);

		return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
					  MT_RX_SEL0, 0);
	}

stop:
	err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7615_dfs_stop_radar_detector(phy);
	return 0;
}

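/* Toggle beacon filtering for a vif: other-BSS beacons are only
 * dropped in hardware while no local interface (AP/mesh/IBSS) is
 * beaconing on this PHY; requires firmware offload support.
 */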
int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy,
				 struct ieee80211_vif *vif,
				 bool enable)
{
	struct mt7615_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	if (!mt7615_firmware_offload(dev))
		return -EOPNOTSUPP;

	switch (vif->type) {
	case NL80211_IFTYPE_MONITOR:
		return 0;
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		if (enable)
			phy->n_beacon_vif++;
		else
			phy->n_beacon_vif--;
		fallthrough;
	default:
		break;
	}

	err = mt7615_mcu_set_bss_pm(dev, vif, !phy->n_beacon_vif);
	if (err)
		return err;

	if (phy->n_beacon_vif) {
		vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
		mt76_clear(dev, MT_WF_RFCR(ext_phy),
			   MT_WF_RFCR_DROP_OTHER_BEACON);
	} else {
		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
		mt76_set(dev, MT_WF_RFCR(ext_phy),
			 MT_WF_RFCR_DROP_OTHER_BEACON);
	}

	return 0;
}

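/* Firmware coredump: keep re-queueing until no new fragments have
 * arrived for four timeout periods, then concatenate the queued MCU
 * messages into one buffer and hand it to the devcoredump framework.
 */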
void mt7615_coredump_work(struct work_struct *work)
{
	struct mt7615_dev *dev;
	char *dump, *data;

	dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
						coredump.work.work);

	if (time_is_after_jiffies(dev->coredump.last_activity +
				  4 * MT76_CONNAC_COREDUMP_TIMEOUT)) {
		queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
				   MT76_CONNAC_COREDUMP_TIMEOUT);
		return;
	}

	dump = vzalloc(MT76_CONNAC_COREDUMP_SZ);
	data = dump;

	while (true) {
		struct sk_buff *skb;

		spin_lock_bh(&dev->mt76.lock);
		skb = __skb_dequeue(&dev->coredump.msg_list);
		spin_unlock_bh(&dev->mt76.lock);

		if (!skb)
			break;

		skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
		/* still drain the list if vzalloc failed, to free the skbs */
		if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
			dev_kfree_skb(skb);
			continue;
		}

		memcpy(data, skb->data, skb->len);
		data += skb->len;

		dev_kfree_skb(skb);
	}

	if (dump)
		dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
			      GFP_KERNEL);
}
