// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2022 MediaTek Inc.
 *
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 *	   Sujuan Chen <sujuan.chen@mediatek.com>
 */

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/of_irq.h>
#include <linux/bitfield.h>

#include "mtk_wed.h"
#include "mtk_wed_regs.h"
#include "mtk_wed_wo.h"

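/* Read a WO CCIF register through the syscon regmap; return ~0 if the
 * regmap access fails.
 */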
static u32
mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
{
	u32 val;

	if (regmap_read(wo->mmio.regs, reg, &val))
		val = ~0;

	return val;
}

static void
mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
{
	regmap_write(wo->mmio.regs, reg, val);
}

static u32
mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
{
	u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);

	return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
}

static void
mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
}

static void
mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
}

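/* Update the cached interrupt mask under mmio.lock: clear the bits in @mask,
 * set the bits in @val and, if @set is true, write the result to the CCIF
 * irq mask register.
 */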
static void
mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
{
	unsigned long flags;

	spin_lock_irqsave(&wo->mmio.lock, flags);
	wo->mmio.irq_mask &= ~mask;
	wo->mmio.irq_mask |= val;
	if (set)
		mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
	spin_unlock_irqrestore(&wo->mmio.lock, flags);
}

static void
mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
	tasklet_schedule(&wo->mmio.irq_tasklet);
}

static void
mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
{
	mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
}

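/* Notify the WO MCU that new tx descriptors have been queued: mark the tx
 * channel busy and ring the CCIF tx channel doorbell.
 */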
static void
mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
{
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
	mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
}

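/* Publish a new cpu index for @q: the wmb() makes sure the descriptor
 * updates are visible before the index register is written.
 */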
static void
mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
		      u32 val)
{
	wmb();
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
}

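/* Pop the next buffer from @q. Returns NULL if the queue is empty or the
 * next descriptor has not completed yet; with @flush set, descriptors are
 * force-completed so the queue can be drained. The buffer is unmapped and
 * its length (taken from the descriptor ctrl word) is stored in @len if
 * provided.
 */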
static void *
mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
		   bool flush)
{
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
	int index = (q->tail + 1) % q->n_desc;
	struct mtk_wed_wo_queue_entry *entry;
	struct mtk_wed_wo_queue_desc *desc;
	void *buf;

	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
	else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
		return NULL;

	q->tail = index;
	q->queued--;

	desc = &q->desc[index];
	entry = &q->entry[index];
	buf = entry->buf;
	if (len)
		*len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
				 le32_to_cpu(READ_ONCE(desc->ctrl)));
	if (buf)
		dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
				 DMA_FROM_DEVICE);
	entry->buf = NULL;

	return buf;
}

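/* Fill @q with page-fragment buffers until it is full. For rx queues the
 * descriptors are also initialized so the WO MCU can DMA into the new
 * buffers. Returns the number of buffers added.
 */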
static int
mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
			bool rx)
{
	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	int n_buf = 0;

	while (q->queued < q->n_desc) {
		struct mtk_wed_wo_queue_entry *entry;
		dma_addr_t addr;
		void *buf;

		buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
		if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		q->head = (q->head + 1) % q->n_desc;
		entry = &q->entry[q->head];
		entry->addr = addr;
		entry->len = q->buf_size;
		q->entry[q->head].buf = buf;

		if (rx) {
			struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
			u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
				   FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
					      entry->len);

			WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
			WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
		}
		q->queued++;
		n_buf++;
	}

	return n_buf;
}

static void
mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
{
	mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
	mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
}

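/* Drain all completed rx descriptors from @q, build an skb around each
 * buffer and dispatch it as either an mcu response event or an unsolicited
 * event. Consumed buffers are replenished and the new head is kicked to the
 * WO MCU.
 */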
static void
mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	for (;;) {
		struct mtk_wed_mcu_hdr *hdr;
		struct sk_buff *skb;
		void *data;
		u32 len;

		data = mtk_wed_wo_dequeue(wo, q, &len, false);
		if (!data)
			break;

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}

		__skb_put(skb, len);
		if (mtk_wed_mcu_check_msg(wo, skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		hdr = (struct mtk_wed_mcu_hdr *)skb->data;
		if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
			mtk_wed_mcu_rx_event(wo, skb);
		else
			mtk_wed_mcu_rx_unsolicited_event(wo, skb);
	}

	if (mtk_wed_wo_queue_refill(wo, q, true)) {
		u32 index = (q->head - 1) % q->n_desc;

		mtk_wed_wo_queue_kick(wo, q, index);
	}
}

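/* Hard irq handler: mask all WO interrupts and defer the actual processing
 * to the irq tasklet.
 */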
static irqreturn_t
mtk_wed_wo_irq_handler(int irq, void *data)
{
	struct mtk_wed_wo *wo = data;

	mtk_wed_wo_set_isr(wo, 0);
	tasklet_schedule(&wo->mmio.irq_tasklet);

	return IRQ_HANDLED;
}

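/* Tasklet bottom half: read the pending interrupt sources, filter them
 * against the cached irq mask, disable the ones being handled and process
 * the rx channel if it fired.
 */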
static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
{
	struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
	u32 intr, mask;

	/* disable interrupts */
	mtk_wed_wo_set_isr(wo, 0);

	intr = mtk_wed_wo_get_isr(wo);
	intr &= wo->mmio.irq_mask;
	mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
	mtk_wed_wo_irq_disable(wo, mask);

	if (intr & MTK_WED_WO_RXCH_INT_MASK) {
		mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
		mtk_wed_wo_rx_complete(wo);
	}
}

/* mtk wed wo hw queues */

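/* Allocate the DMA descriptor ring and the entry array for @q. Both
 * allocations are device managed, so they are released automatically when
 * the device goes away.
 */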
static int
mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
		       int n_desc, int buf_size, int index,
		       struct mtk_wed_wo_queue_regs *regs)
{
	q->regs = *regs;
	q->n_desc = n_desc;
	q->buf_size = buf_size;

	q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
				      &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	return 0;
}

static void
mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
	dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
			  q->desc_dma);
}

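/* Unmap and free every tx buffer in @q, then drop the page-fragment cache
 * backing the queue.
 */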
static void
mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	struct page *page;
	int i;

	for (i = 0; i < q->n_desc; i++) {
		struct mtk_wed_wo_queue_entry *entry = &q->entry[i];

		dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
				 DMA_TO_DEVICE);
		skb_free_frag(entry->buf);
		entry->buf = NULL;
	}

	if (!q->cache.va)
		return;

	page = virt_to_page(q->cache.va);
	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
	memset(&q->cache, 0, sizeof(q->cache));
}

static void
mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	struct page *page;

	for (;;) {
		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);

		if (!buf)
			break;

		skb_free_frag(buf);
	}

	if (!q->cache.va)
		return;

	page = virt_to_page(q->cache.va);
	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
	memset(&q->cache, 0, sizeof(q->cache));
}

static void
mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
	mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
	mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
}

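/* Copy @skb into the next free preallocated tx buffer of @q and hand the
 * descriptor over to the WO MCU. The skb is always consumed; -ENOMEM is
 * returned if the ring is full or the message does not fit in a buffer.
 */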
int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
			    struct sk_buff *skb)
{
	struct mtk_wed_wo_queue_entry *entry;
	struct mtk_wed_wo_queue_desc *desc;
	int ret = 0, index;
	u32 ctrl;

	q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
	index = (q->head + 1) % q->n_desc;
	if (q->tail == index) {
		ret = -ENOMEM;
		goto out;
	}

	entry = &q->entry[index];
	if (skb->len > entry->len) {
		ret = -ENOMEM;
		goto out;
	}

	desc = &q->desc[index];
	q->head = index;

	dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
				DMA_TO_DEVICE);
	memcpy(entry->buf, skb->data, skb->len);
	dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
				   DMA_TO_DEVICE);

	ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
	       MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
	WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

	mtk_wed_wo_queue_kick(wo, q, q->head);
	mtk_wed_wo_kickout(wo);
out:
	dev_kfree_skb(skb);

	return ret;
}

static int
mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
{
	return 0;
}

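/* Look up the CCIF regmap and interrupt via the "mediatek,wo-ccif" DT
 * phandle, request the irq and set up the tx/rx communication rings used
 * to exchange messages with the WO MCU.
 */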
static int
mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
{
	struct mtk_wed_wo_queue_regs regs;
	struct device_node *np;
	int ret;

	np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
	if (!np)
		return -ENODEV;

	wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR(wo->mmio.regs)) {
		ret = PTR_ERR(wo->mmio.regs);
		goto error_put;
	}

	wo->mmio.irq = irq_of_parse_and_map(np, 0);
	wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
	spin_lock_init(&wo->mmio.lock);
	tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);

	ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
			       mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
			       KBUILD_MODNAME, wo);
	if (ret)
		goto error;

	regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
	regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;

	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
				     &regs);
	if (ret)
		goto error;

	mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
	mtk_wed_wo_queue_reset(wo, &wo->q_tx);

	regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
	regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
	regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
	regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;

	ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
				     MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
				     &regs);
	if (ret)
		goto error;

	mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
	mtk_wed_wo_queue_reset(wo, &wo->q_rx);

	/* rx queue irqmask */
	mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);

	return 0;

error:
	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
error_put:
	of_node_put(np);
	return ret;
}

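/* Tear down the WO communication channel: quiesce interrupts, free the irq
 * and release the tx/rx rings and their buffers.
 */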
static void
mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
{
	/* disable interrupts */
	mtk_wed_wo_set_isr(wo, 0);

	tasklet_disable(&wo->mmio.irq_tasklet);

	disable_irq(wo->mmio.irq);
	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);

	mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
	mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
	mtk_wed_wo_queue_free(wo, &wo->q_tx);
	mtk_wed_wo_queue_free(wo, &wo->q_rx);
}

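/* Allocate the mtk_wed_wo context for @hw, bring up the CCIF communication
 * rings and interrupt, then initialise the WO MCU interface.
 */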
int mtk_wed_wo_init(struct mtk_wed_hw *hw)
{
	struct mtk_wed_wo *wo;
	int ret;

	wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
	if (!wo)
		return -ENOMEM;

	hw->wed_wo = wo;
	wo->hw = hw;

	ret = mtk_wed_wo_hardware_init(wo);
	if (ret)
		return ret;

	ret = mtk_wed_mcu_init(wo);
	if (ret)
		return ret;

	return mtk_wed_wo_exception_init(wo);
}

void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
{
	struct mtk_wed_wo *wo = hw->wed_wo;

	mtk_wed_wo_hw_deinit(wo);
}