// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include "mt7915.h"
#include "../dma.h"
#include "mac.h"

static int
mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
{
	struct mt7915_dev *dev = phy->dev;

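	/* with WED active, TX rings live in the WED address space: mt7986
	 * offsets the ring base by the first TX queue id, PCIe devices
	 * switch to MT_WED_TX_RING_BASE; either way idx is rebased
	 */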
	if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
		if (is_mt7986(&dev->mt76))
			ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
		else
			ring_base = MT_WED_TX_RING_BASE;

		idx -= MT_TXQ_ID(0);
	}

	return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc, ring_base,
					  MT_WED_Q_TX(idx));
}

static int mt7915_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7915_dev *dev;

	dev = container_of(napi, struct mt7915_dev, mt76.tx_napi);

	mt76_connac_tx_cleanup(&dev->mt76);
	if (napi_complete_done(napi, 0))
		mt7915_irq_enable(dev, MT_INT_TX_DONE_MCU);

	return 0;
}

static void mt7915_dma_config(struct mt7915_dev *dev)
{
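/* record, for each logical queue, which WFDMA engine serves it, its
 * interrupt mask and its hardware queue id
 */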
#define Q_CONFIG(q, wfdma, int, id) do {		\
		if (wfdma)				\
			dev->wfdma_mask |= (1 << (q));	\
		dev->q_int_mask[(q)] = int;		\
		dev->q_id[(q)] = id;			\
	} while (0)

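/* remap the MCU/RX/TX queue enums onto the flat index space Q_CONFIG uses */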
#define MCUQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(q, (wfdma), (int), (id))
#define RXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
#define TXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__TXQ(q), (wfdma), (int), (id))

	if (is_mt7915(&dev->mt76)) {
		RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0,
			   MT7915_RXQ_BAND0);
		RXQ_CONFIG(MT_RXQ_MCU, WFDMA1, MT_INT_RX_DONE_WM,
			   MT7915_RXQ_MCU_WM);
		RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA1, MT_INT_RX_DONE_WA,
			   MT7915_RXQ_MCU_WA);
		RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1,
			   MT7915_RXQ_BAND1);
		RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA1, MT_INT_RX_DONE_WA_EXT,
			   MT7915_RXQ_MCU_WA_EXT);
		RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA1, MT_INT_RX_DONE_WA_MAIN,
			   MT7915_RXQ_MCU_WA);
		TXQ_CONFIG(0, WFDMA1, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
		TXQ_CONFIG(1, WFDMA1, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
		MCUQ_CONFIG(MT_MCUQ_WM, WFDMA1, MT_INT_TX_DONE_MCU_WM,
			    MT7915_TXQ_MCU_WM);
		MCUQ_CONFIG(MT_MCUQ_WA, WFDMA1, MT_INT_TX_DONE_MCU_WA,
			    MT7915_TXQ_MCU_WA);
		MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA1, MT_INT_TX_DONE_FWDL,
			    MT7915_TXQ_FWDL);
	} else {
		RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM,
			   MT7916_RXQ_MCU_WM);
		RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916,
			   MT7916_RXQ_MCU_WA_EXT);
		MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM,
			    MT7915_TXQ_MCU_WM);
		MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA_MT7916,
			    MT7915_TXQ_MCU_WA);
		MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL,
			    MT7915_TXQ_FWDL);

		if (is_mt7916(&dev->mt76) && mtk_wed_device_active(&dev->mt76.mmio.wed)) {
			RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_WED_RX_DONE_BAND0_MT7916,
				   MT7916_RXQ_BAND0);
			RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_WED_RX_DONE_WA_MT7916,
				   MT7916_RXQ_MCU_WA);
			RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_WED_RX_DONE_BAND1_MT7916,
				   MT7916_RXQ_BAND1);
			RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_WED_RX_DONE_WA_MAIN_MT7916,
				   MT7916_RXQ_MCU_WA_MAIN);
			TXQ_CONFIG(0, WFDMA0, MT_INT_WED_TX_DONE_BAND0,
				   MT7915_TXQ_BAND0);
			TXQ_CONFIG(1, WFDMA0, MT_INT_WED_TX_DONE_BAND1,
				   MT7915_TXQ_BAND1);
		} else {
			RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916,
				   MT7916_RXQ_BAND0);
			RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA,
				   MT7916_RXQ_MCU_WA);
			RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916,
				   MT7916_RXQ_BAND1);
			RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN_MT7916,
				   MT7916_RXQ_MCU_WA_MAIN);
			TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
				   MT7915_TXQ_BAND0);
			TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
				   MT7915_TXQ_BAND1);
		}
	}
}

static void __mt7915_dma_prefetch(struct mt7915_dev *dev, u32 ofs)
{
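/* EXT_CTRL layout: SRAM prefetch base in the high 16 bits, depth in the low 16 */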
#define PREFETCH(_base, _depth)	((_base) << 16 | (_depth))
	u32 base = 0;

	/* prefetch SRAM wrapping boundary for tx/rx ring. */
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x0, 0x4));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x40, 0x4));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x80, 0x4));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0xc0, 0x4));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x100, 0x4));

	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs,
		PREFETCH(0x140, 0x4));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs,
		PREFETCH(0x180, 0x4));
	if (!is_mt7915(&dev->mt76)) {
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs,
			PREFETCH(0x1c0, 0x4));
		base = 0x40;
	}
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1_WA) + ofs,
		PREFETCH(0x1c0 + base, 0x4));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs,
		PREFETCH(0x200 + base, 0x4));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1) + ofs,
		PREFETCH(0x240 + base, 0x4));

	/* for mt7915, the ring following the last used ring
	 * must also be initialized
	 */
	if (is_mt7915(&dev->mt76)) {
		ofs += 0x4;
		mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs,
			PREFETCH(0x140, 0x0));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1_WA) + ofs,
			PREFETCH(0x200 + base, 0x0));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1) + ofs,
			PREFETCH(0x280 + base, 0x0));
	}
}

void mt7915_dma_prefetch(struct mt7915_dev *dev)
{
	__mt7915_dma_prefetch(dev, 0);
	if (dev->hif2)
		__mt7915_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}

static void mt7915_dma_disable(struct mt7915_dev *dev, bool rst)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 hif1_ofs = 0;

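	/* hif2 mirrors the WFDMA register block at a fixed offset */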
	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset */
	if (rst) {
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);

		if (is_mt7915(mdev)) {
			mt76_clear(dev, MT_WFDMA1_RST,
				   MT_WFDMA1_RST_DMASHDL_ALL_RST |
				   MT_WFDMA1_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA1_RST,
				 MT_WFDMA1_RST_DMASHDL_ALL_RST |
				 MT_WFDMA1_RST_LOGIC_RST);
		}

		if (dev->hif2) {
			mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
				   MT_WFDMA0_RST_DMASHDL_ALL_RST |
				   MT_WFDMA0_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
				 MT_WFDMA0_RST_DMASHDL_ALL_RST |
				 MT_WFDMA0_RST_LOGIC_RST);

			if (is_mt7915(mdev)) {
				mt76_clear(dev, MT_WFDMA1_RST + hif1_ofs,
					   MT_WFDMA1_RST_DMASHDL_ALL_RST |
					   MT_WFDMA1_RST_LOGIC_RST);

				mt76_set(dev, MT_WFDMA1_RST + hif1_ofs,
					 MT_WFDMA1_RST_DMASHDL_ALL_RST |
					 MT_WFDMA1_RST_LOGIC_RST);
			}
		}
	}

	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (is_mt7915(mdev))
		mt76_clear(dev, MT_WFDMA1_GLO_CFG,
			   MT_WFDMA1_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA1_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (dev->hif2) {
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

		if (is_mt7915(mdev))
			mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
				   MT_WFDMA1_GLO_CFG_TX_DMA_EN |
				   MT_WFDMA1_GLO_CFG_RX_DMA_EN |
				   MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
				   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
				   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);
	}
}

static int mt7915_dma_enable(struct mt7915_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 hif1_ofs = 0;
	u32 irq_mask;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
	if (is_mt7915(mdev))
		mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
		if (is_mt7915(mdev))
			mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR + hif1_ofs, ~0);
	}

	/* disable delayed interrupts */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
	if (is_mt7915(mdev)) {
		mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
	} else {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);
	}

	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
		if (is_mt7915(mdev)) {
			mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0 +
				hif1_ofs, 0);
		} else {
			mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 +
				hif1_ofs, 0);
			mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 +
				hif1_ofs, 0);
		}
	}

	/* configure prefetch settings */
	mt7915_dma_prefetch(dev);

	/* hif wait WFDMA idle */
	mt76_set(dev, MT_WFDMA0_BUSY_ENA,
		 MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
		 MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
		 MT_WFDMA0_BUSY_ENA_RX_FIFO);

	if (is_mt7915(mdev))
		mt76_set(dev, MT_WFDMA1_BUSY_ENA,
			 MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA1_BUSY_ENA_RX_FIFO);

	if (dev->hif2) {
		mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);

		if (is_mt7915(mdev))
			mt76_set(dev, MT_WFDMA1_BUSY_ENA + hif1_ofs,
				 MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
				 MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
				 MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
	}

	mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
		  MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);

	/* set WFDMA Tx/Rx */
	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (is_mt7915(mdev))
		mt76_set(dev, MT_WFDMA1_GLO_CFG,
			 MT_WFDMA1_GLO_CFG_TX_DMA_EN |
			 MT_WFDMA1_GLO_CFG_RX_DMA_EN |
			 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
			 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);

	if (dev->hif2) {
		mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

		if (is_mt7915(mdev))
			mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
				 MT_WFDMA1_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA1_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);

		mt76_set(dev, MT_WFDMA_HOST_CONFIG,
			 MT_WFDMA_HOST_CONFIG_PDMA_BAND);
	}

	/* enable interrupts for TX/RX rings */
	irq_mask = MT_INT_RX_DONE_MCU |
		   MT_INT_TX_DONE_MCU |
		   MT_INT_MCU_CMD;

	if (!dev->phy.mt76->band_idx)
		irq_mask |= MT_INT_BAND0_RX_DONE;

	if (dev->dbdc_support || dev->phy.mt76->band_idx)
		irq_mask |= MT_INT_BAND1_RX_DONE;

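	/* WED also services the per-band TX-done interrupts; program the
	 * extended mask into the WED (or host, on mt7986) interrupt CSR
	 * before starting WED
	 */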
	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
		u32 wed_irq_mask = irq_mask;
		int ret;

		wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
		if (!is_mt7986(&dev->mt76))
			mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
		else
			mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);

		ret = mt7915_mcu_wed_enable_rx_stats(dev);
		if (ret)
			return ret;

		mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
	}

	mt7915_irq_enable(dev, irq_mask);

	return 0;
}

int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 wa_rx_base, wa_rx_idx;
	u32 hif1_ofs = 0;
	int ret;

	mt7915_dma_config(dev);

	mt76_dma_attach(&dev->mt76);

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	mt7915_dma_disable(dev, true);

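	/* with WED active on PCIe devices, steer hw TX rings 18/19 and the
	 * per-chip RX1 ring through the WED block; the mt7986 SoC needs no
	 * host remap
	 */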
	if (mtk_wed_device_active(&mdev->mmio.wed)) {
		if (!is_mt7986(mdev)) {
			u8 wed_control_rx1 = is_mt7915(mdev) ? 1 : 2;

			mt76_set(dev, MT_WFDMA_HOST_CONFIG,
				 MT_WFDMA_HOST_CONFIG_WED);
			mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
				FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
				FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
				FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1,
					   wed_control_rx1));
			if (is_mt7915(mdev))
				mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
					 MT_WFDMA0_EXT0_RXWB_KEEP);
		}
	} else {
		mt76_clear(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
	}

	/* init tx queue */
	ret = mt7915_init_tx_queues(&dev->phy,
				    MT_TXQ_ID(dev->phy.mt76->band_idx),
				    MT7915_TX_RING_SIZE,
				    MT_TXQ_RING_BASE(0));
	if (ret)
		return ret;

	if (phy2) {
		ret = mt7915_init_tx_queues(phy2,
					    MT_TXQ_ID(phy2->mt76->band_idx),
					    MT7915_TX_RING_SIZE,
					    MT_TXQ_RING_BASE(1));
		if (ret)
			return ret;
	}

	/* command to WM */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
				  MT_MCUQ_ID(MT_MCUQ_WM),
				  MT7915_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WM));
	if (ret)
		return ret;

	/* command to WA */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
				  MT_MCUQ_ID(MT_MCUQ_WA),
				  MT7915_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WA));
	if (ret)
		return ret;

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
				  MT_MCUQ_ID(MT_MCUQ_FWDL),
				  MT7915_TX_FWDL_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
	if (ret)
		return ret;

	/* event from WM */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT_RXQ_ID(MT_RXQ_MCU),
			       MT7915_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU));
	if (ret)
		return ret;

	/* event from WA */
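	/* on mt7915 with WED, the WA event ring doubles as the WED TXFREE
	 * ring and is allocated from the WED ring base
	 */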
	if (mtk_wed_device_active(&mdev->mmio.wed) && is_mt7915(mdev)) {
		wa_rx_base = MT_WED_RX_RING_BASE;
		wa_rx_idx = MT7915_RXQ_MCU_WA;
		dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
	} else {
		wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MCU_WA);
		wa_rx_idx = MT_RXQ_ID(MT_RXQ_MCU_WA);
	}
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE, wa_rx_base);
	if (ret)
		return ret;

	/* rx data queue for band0 */
	if (!dev->phy.mt76->band_idx) {
		if (mtk_wed_device_active(&mdev->mmio.wed) &&
		    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
			dev->mt76.q_rx[MT_RXQ_MAIN].flags =
				MT_WED_Q_RX(MT7915_RXQ_BAND0);
			dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
				       MT_RXQ_ID(MT_RXQ_MAIN),
				       MT7915_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MAIN));
		if (ret)
			return ret;
	}

	/* tx free notify event from WA for band0 */
	if (!is_mt7915(mdev)) {
		wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA);
		wa_rx_idx = MT_RXQ_ID(MT_RXQ_MAIN_WA);

		if (mtk_wed_device_active(&mdev->mmio.wed)) {
			mdev->q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
			if (is_mt7916(mdev)) {
				wa_rx_base = MT_WED_RX_RING_BASE;
				wa_rx_idx = MT7915_RXQ_MCU_WA;
			}
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
				       wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE, wa_rx_base);
		if (ret)
			return ret;
	}

	if (dev->dbdc_support || dev->phy.mt76->band_idx) {
		if (mtk_wed_device_active(&mdev->mmio.wed) &&
		    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
			dev->mt76.q_rx[MT_RXQ_BAND1].flags =
				MT_WED_Q_RX(MT7915_RXQ_BAND1);
			dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
		}

		/* rx data queue for band1 */
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
				       MT_RXQ_ID(MT_RXQ_BAND1),
				       MT7915_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs);
		if (ret)
			return ret;

		/* tx free notify event from WA for band1 */
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
				       MT_RXQ_ID(MT_RXQ_BAND1_WA),
				       MT7915_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs);
		if (ret)
			return ret;
	}

	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret < 0)
		return ret;

	netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7915_poll_tx);
	napi_enable(&dev->mt76.tx_napi);

	mt7915_dma_enable(dev);

	return 0;
}

static void mt7915_dma_wed_reset(struct mt7915_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;

	if (!test_bit(MT76_STATE_WED_RESET, &dev->mphy.state))
		return;

	complete(&mdev->mmio.wed_reset);

	if (!wait_for_completion_timeout(&dev->mt76.mmio.wed_reset_complete,
					 3 * HZ))
		dev_err(dev->mt76.dev, "wed reset complete timeout\n");
}

static void
mt7915_dma_reset_tx_queue(struct mt7915_dev *dev, struct mt76_queue *q)
{
	mt76_queue_reset(dev, q);
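	/* re-attach the freshly reset ring to WED */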
	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mt76_dma_wed_setup(&dev->mt76, q, true);
}

int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
{
	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	int i;

	/* clean up hw queues */
	for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (mphy_ext)
			mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	/* reset wfsys */
	if (force)
		mt7915_wfsys_reset(dev);

	if (mtk_wed_device_active(wed))
		mtk_wed_device_dma_reset(wed);

	mt7915_dma_disable(dev, force);
	mt7915_dma_wed_reset(dev);

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt7915_dma_reset_tx_queue(dev, dev->mphy.q_tx[i]);
		if (mphy_ext)
			mt7915_dma_reset_tx_queue(dev, mphy_ext->q_tx[i]);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i) {
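		/* the TXFREE ring is managed by WED; leave its reset to WED */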
		if (dev->mt76.q_rx[i].flags == MT_WED_Q_TXFREE)
			continue;

		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
	}

	mt76_tx_status_check(&dev->mt76, true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	if (mtk_wed_device_active(wed) && is_mt7915(&dev->mt76))
		mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
			 MT_WFDMA0_EXT0_RXWB_KEEP);

	mt7915_dma_enable(dev);

	return 0;
}

void mt7915_dma_cleanup(struct mt7915_dev *dev)
{
	mt7915_dma_disable(dev, true);

	mt76_dma_cleanup(&dev->mt76);
}