// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_ring.c: Definition of functions for Rx/Tx rings. */

#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ptp.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

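/* Unmap a receive page's DMA mapping and drop the reference the ring
 * holds on it. The page is only returned to the allocator once no other
 * user (e.g. an skb fragment still in flight) holds a reference.
 */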
static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
	unsigned int len = PAGE_SIZE << rxpage->order;

	dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);

	/* Drop the ref for being in the ring. */
	__free_pages(rxpage->page, rxpage->order);
	rxpage->page = NULL;
}

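/* Allocate a page of the given order and map it for device writes
 * (DMA_FROM_DEVICE). On success the page, its DMA address and a zeroed
 * page-offset cursor are stored in @rxpage; otherwise the page is freed
 * and -ENOMEM is returned.
 */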
static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
			 struct device *dev)
{
	struct page *page;
	int ret = -ENOMEM;
	dma_addr_t daddr;

	page = dev_alloc_pages(order);
	if (unlikely(!page))
		goto err_exit;

	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
			     DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(dev, daddr)))
		goto free_page;

	rxpage->page = page;
	rxpage->daddr = daddr;
	rxpage->order = order;
	rxpage->pg_off = 0;

	return 0;

free_page:
	__free_pages(page, order);

err_exit:
	return ret;
}

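/* Make sure @rxbuf has a usable receive page. Three outcomes are
 * accounted in the rx stats:
 * - the page is shared with the stack but has room for another frame:
 *   advance the offset cursor (pg_flips);
 * - the page is shared and exhausted: drop our reference and fall
 *   through to allocate a fresh page (pg_losts);
 * - the ring is the sole owner: rewind the cursor and reuse the page
 *   in place (pg_reuses).
 */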
static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
			  int order)
{
	int ret;

	if (rxbuf->rxdata.page) {
		/* One means ring is the only user and can reuse */
		if (page_ref_count(rxbuf->rxdata.page) > 1) {
			/* Try reuse buffer */
			rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
			if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
			    (PAGE_SIZE << order)) {
				u64_stats_update_begin(&self->stats.rx.syncp);
				self->stats.rx.pg_flips++;
				u64_stats_update_end(&self->stats.rx.syncp);
			} else {
				/* Buffer exhausted. We have other users and
				 * should release this page and realloc
				 */
				aq_free_rxpage(&rxbuf->rxdata,
					       aq_nic_get_dev(self->aq_nic));
				u64_stats_update_begin(&self->stats.rx.syncp);
				self->stats.rx.pg_losts++;
				u64_stats_update_end(&self->stats.rx.syncp);
			}
		} else {
			rxbuf->rxdata.pg_off = 0;
			u64_stats_update_begin(&self->stats.rx.syncp);
			self->stats.rx.pg_reuses++;
			u64_stats_update_end(&self->stats.rx.syncp);
		}
	}

	if (!rxbuf->rxdata.page) {
		ret = aq_get_rxpage(&rxbuf->rxdata, order,
				    aq_nic_get_dev(self->aq_nic));
		if (ret) {
			u64_stats_update_begin(&self->stats.rx.syncp);
			self->stats.rx.alloc_fails++;
			u64_stats_update_end(&self->stats.rx.syncp);
		}
		return ret;
	}

	return 0;
}

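/* Common ring allocation: a host-side shadow array of buffer state
 * (buff_ring) plus a DMA-coherent area for the hardware descriptors
 * (dx_ring). On failure the partially allocated ring is torn down and
 * NULL is returned.
 */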
static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
				       struct aq_nic_s *aq_nic)
{
	int err = 0;

	self->buff_ring =
		kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);

	if (!self->buff_ring) {
		err = -ENOMEM;
		goto err_exit;
	}
	self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
					   self->size * self->dx_size,
					   &self->dx_ring_pa, GFP_KERNEL);
	if (!self->dx_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->txds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->rxds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
	self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
			       (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;

	if (aq_nic_cfg->rxpageorder > self->page_order)
		self->page_order = aq_nic_cfg->rxpageorder;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

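/* Allocate a ring used only for hardware-timestamped (PTP) receive.
 * No buff_ring shadow array is needed since the descriptors themselves
 * carry the timestamps; the descriptor area is sized as size * dx_size
 * plus AQ_CFG_RXDS_DEF extra bytes.
 */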
struct aq_ring_s *
aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
		      unsigned int idx, unsigned int size, unsigned int dx_size)
{
	struct device *dev = aq_nic_get_dev(aq_nic);
	size_t sz = size * dx_size + AQ_CFG_RXDS_DEF;

	memset(self, 0, sizeof(*self));

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = size;
	self->dx_size = dx_size;

	self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa,
					   GFP_KERNEL);
	if (!self->dx_ring) {
		aq_ring_free(self);
		return NULL;
	}

	return self;
}

int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type)
{
	self->hw_head = 0;
	self->sw_head = 0;
	self->sw_tail = 0;
	self->ring_type = ring_type;

	if (self->ring_type == ATL_RING_RX)
		u64_stats_init(&self->stats.rx.syncp);
	else
		u64_stats_init(&self->stats.tx.syncp);

	return 0;
}

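/* Return true if descriptor index @i lies strictly inside the circular
 * interval (@h, @t), taking ring wraparound into account.
 */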
static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
				       unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}

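/* Stop the matching netdev queue when the ring can no longer fit a
 * maximally fragmented skb, and wake it once enough descriptors have
 * been reclaimed. The gap between the two thresholds gives hysteresis
 * against rapid stop/wake toggling.
 */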
void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
	if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
		aq_ring_queue_stop(ring);
	else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
		aq_ring_queue_wake(ring);
}

void aq_ring_queue_wake(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (__netif_subqueue_stopped(ndev,
				     AQ_NIC_RING2QMAP(ring->aq_nic,
						      ring->idx))) {
		netif_wake_subqueue(ndev,
				    AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
		u64_stats_update_begin(&ring->stats.tx.syncp);
		ring->stats.tx.queue_restarts++;
		u64_stats_update_end(&ring->stats.tx.syncp);
	}
}

void aq_ring_queue_stop(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (!__netif_subqueue_stopped(ndev,
				      AQ_NIC_RING2QMAP(ring->aq_nic,
						       ring->idx)))
		netif_stop_subqueue(ndev,
				    AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
}

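/* Reclaim completed transmit descriptors, at most AQ_CFG_TX_CLEAN_BUDGET
 * per call. Start-of-packet buffers were mapped with dma_map_single(),
 * fragments with dma_map_page(), so they are unmapped accordingly; the
 * skb is accounted and freed when its end-of-packet descriptor is
 * reached. Returns true if the budget was not exhausted.
 */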
bool aq_ring_tx_clean(struct aq_ring_s *self)
{
	struct device *dev = aq_nic_get_dev(self->aq_nic);
	unsigned int budget;

	for (budget = AQ_CFG_TX_CLEAN_BUDGET;
	     budget && self->sw_head != self->hw_head; budget--) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop)) {
				if (!buff->is_eop &&
				    buff->eop_index != 0xffffU &&
				    (!aq_ring_dx_in_range(self->sw_head,
							  buff->eop_index,
							  self->hw_head)))
					break;

				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
			}
		}

		if (unlikely(buff->is_eop && buff->skb)) {
			u64_stats_update_begin(&self->stats.tx.syncp);
			++self->stats.tx.packets;
			self->stats.tx.bytes += buff->skb->len;
			u64_stats_update_end(&self->stats.tx.syncp);

			dev_kfree_skb_any(buff->skb);
			buff->skb = NULL;
		}
		buff->pa = 0U;
		buff->eop_index = 0xffffU;
		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}

	return !!budget;
}

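/* Translate the hardware checksum-offload bits into skb checksum state:
 * count an error, or mark the IP and L4 (TCP/UDP) checksums as verified
 * via __skb_incr_checksum_unnecessary(). Does nothing when RX checksum
 * offload is disabled on the netdev.
 */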
static void aq_rx_checksum(struct aq_ring_s *self,
			   struct aq_ring_buff_s *buff,
			   struct sk_buff *skb)
{
	if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
		return;

	if (unlikely(buff->is_cso_err)) {
		u64_stats_update_begin(&self->stats.rx.syncp);
		++self->stats.rx.errors;
		u64_stats_update_end(&self->stats.rx.syncp);
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
	if (buff->is_ip_cso) {
		__skb_incr_checksum_unnecessary(skb);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	if (buff->is_udp_cso || buff->is_tcp_cso)
		__skb_incr_checksum_unnecessary(skb);
}

#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
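/* NAPI receive processing. For multi-descriptor packets (RSC/LRO) the
 * whole descriptor chain must have been written back by hardware before
 * the packet is handed up; incomplete chains are left for a later poll.
 * The packet header is copied into a freshly allocated skb, and the
 * remaining payload is attached as page fragments, taking an extra page
 * reference for each fragment given to the stack.
 */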
int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
		     int *work_done,
		     int budget)
{
	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
	bool is_rsc_completed = true;
	int err = 0;

	for (; (self->sw_head != self->hw_head) && budget;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head),
	     --budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
		bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self);
		struct aq_ring_buff_s *buff_ = NULL;
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;
		unsigned int i = 0U;
		u16 hdr_len;

		if (buff->is_cleaned)
			continue;

		if (!buff->is_eop) {
			buff_ = buff;
			do {
				if (buff_->next >= self->size) {
					err = -EIO;
					goto err_exit;
				}
				next_ = buff_->next,
				buff_ = &self->buff_ring[next_];
				is_rsc_completed =
					aq_ring_dx_in_range(self->sw_head,
							    next_,
							    self->hw_head);

				if (unlikely(!is_rsc_completed))
					break;

				buff->is_error |= buff_->is_error;
				buff->is_cso_err |= buff_->is_cso_err;

			} while (!buff_->is_eop);

			if (!is_rsc_completed) {
				err = 0;
				goto err_exit;
			}
			if (buff->is_error ||
			    (buff->is_lro && buff->is_cso_err)) {
				buff_ = buff;
				do {
					if (buff_->next >= self->size) {
						err = -EIO;
						goto err_exit;
					}
					next_ = buff_->next,
					buff_ = &self->buff_ring[next_];

					buff_->is_cleaned = true;
				} while (!buff_->is_eop);

				u64_stats_update_begin(&self->stats.rx.syncp);
				++self->stats.rx.errors;
				u64_stats_update_end(&self->stats.rx.syncp);
				continue;
			}
		}

		if (buff->is_error) {
			u64_stats_update_begin(&self->stats.rx.syncp);
			++self->stats.rx.errors;
			u64_stats_update_end(&self->stats.rx.syncp);
			continue;
		}

		dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
					      buff->rxdata.daddr,
					      buff->rxdata.pg_off,
					      buff->len, DMA_FROM_DEVICE);

		skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			u64_stats_update_begin(&self->stats.rx.syncp);
			self->stats.rx.skb_alloc_fails++;
			u64_stats_update_end(&self->stats.rx.syncp);
			err = -ENOMEM;
			goto err_exit;
		}
		if (is_ptp_ring)
			buff->len -=
				aq_ptp_extract_ts(self->aq_nic, skb,
						  aq_buf_vaddr(&buff->rxdata),
						  buff->len);

		hdr_len = buff->len;
		if (hdr_len > AQ_CFG_RX_HDR_SIZE)
			hdr_len = eth_get_headlen(skb->dev,
						  aq_buf_vaddr(&buff->rxdata),
						  AQ_CFG_RX_HDR_SIZE);

		memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
		       ALIGN(hdr_len, sizeof(long)));

		if (buff->len - hdr_len > 0) {
			skb_add_rx_frag(skb, 0, buff->rxdata.page,
					buff->rxdata.pg_off + hdr_len,
					buff->len - hdr_len,
					AQ_CFG_RX_FRAME_MAX);
			page_ref_inc(buff->rxdata.page);
		}

		if (!buff->is_eop) {
			buff_ = buff;
			i = 1U;
			do {
				next_ = buff_->next;
				buff_ = &self->buff_ring[next_];

				dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
							      buff_->rxdata.daddr,
							      buff_->rxdata.pg_off,
							      buff_->len,
							      DMA_FROM_DEVICE);
				skb_add_rx_frag(skb, i++,
						buff_->rxdata.page,
						buff_->rxdata.pg_off,
						buff_->len,
						AQ_CFG_RX_FRAME_MAX);
				page_ref_inc(buff_->rxdata.page);
				buff_->is_cleaned = 1;

				buff->is_ip_cso &= buff_->is_ip_cso;
				buff->is_udp_cso &= buff_->is_udp_cso;
				buff->is_tcp_cso &= buff_->is_tcp_cso;
				buff->is_cso_err |= buff_->is_cso_err;

			} while (!buff_->is_eop);
		}

		if (buff->is_vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       buff->vlan_rx_tag);

		skb->protocol = eth_type_trans(skb, ndev);

		aq_rx_checksum(self, buff, skb);

		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);
		/* Send all PTP traffic to 0 queue */
		skb_record_rx_queue(skb,
				    is_ptp_ring ? 0
						: AQ_NIC_RING2QMAP(self->aq_nic,
								   self->idx));

		u64_stats_update_begin(&self->stats.rx.syncp);
		++self->stats.rx.packets;
		self->stats.rx.bytes += skb->len;
		u64_stats_update_end(&self->stats.rx.syncp);

		napi_gro_receive(napi, skb);
	}

err_exit:
	return err;
}

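/* Drain the hardware-timestamp ring: extract the timestamp carried in
 * each written-back descriptor and hand it to the PTP layer as a TX
 * timestamp. Compiled out when PTP clock support is not reachable.
 */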
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
{
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	while (self->sw_head != self->hw_head) {
		u64 ns;

		aq_nic->aq_hw_ops->extract_hwts(aq_nic->aq_hw,
						self->dx_ring +
						(self->sw_head * self->dx_size),
						self->dx_size, &ns);
		aq_ptp_tx_hwtstamp(aq_nic, ns);

		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}
#endif
}

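/* Refill the receive ring with pages. Refills are batched: nothing is
 * done until the number of free descriptors reaches the refill
 * threshold (AQ_CFG_RX_REFILL_THRES, capped at half the ring size), at
 * which point every free slot is populated.
 */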
int aq_ring_rx_fill(struct aq_ring_s *self)
{
	unsigned int page_order = self->page_order;
	struct aq_ring_buff_s *buff = NULL;
	int err = 0;
	int i = 0;

	if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
					   self->size / 2))
		return err;

	for (i = aq_ring_avail_dx(self); i--;
	     self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
		buff = &self->buff_ring[self->sw_tail];

		buff->flags = 0U;
		buff->len = AQ_CFG_RX_FRAME_MAX;

		err = aq_get_rxpages(self, buff, page_order);
		if (err)
			goto err_exit;

		buff->pa = aq_buf_daddr(&buff->rxdata);
		buff = NULL;
	}

err_exit:
	return err;
}

void aq_ring_rx_deinit(struct aq_ring_s *self)
{
	if (!self)
		return;

	for (; self->sw_head != self->sw_tail;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
	}
}

void aq_ring_free(struct aq_ring_s *self)
{
	if (!self)
		return;

	kfree(self->buff_ring);

	if (self->dx_ring)
		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
				  self->size * self->dx_size, self->dx_ring,
				  self->dx_ring_pa);
}

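/* Snapshot the per-ring counters into a flat array for ethtool. The
 * u64_stats fetch/retry loop repeats the copy if a writer updated the
 * counters concurrently, keeping the values consistent on 32-bit
 * systems. Returns the number of entries written, which must match the
 * corresponding ethtool stat-names table.
 */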
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
{
	unsigned int count;
	unsigned int start;

	if (self->ring_type == ATL_RING_RX) {
		/* This data should mimic aq_ethtool_queue_rx_stat_names structure */
		do {
			count = 0;
			start = u64_stats_fetch_begin_irq(&self->stats.rx.syncp);
			data[count] = self->stats.rx.packets;
			data[++count] = self->stats.rx.jumbo_packets;
			data[++count] = self->stats.rx.lro_packets;
			data[++count] = self->stats.rx.errors;
			data[++count] = self->stats.rx.alloc_fails;
			data[++count] = self->stats.rx.skb_alloc_fails;
			data[++count] = self->stats.rx.polls;
		} while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start));
	} else {
		/* This data should mimic aq_ethtool_queue_tx_stat_names structure */
		do {
			count = 0;
			start = u64_stats_fetch_begin_irq(&self->stats.tx.syncp);
			data[count] = self->stats.tx.packets;
			data[++count] = self->stats.tx.queue_restarts;
		} while (u64_stats_fetch_retry_irq(&self->stats.tx.syncp, start));
	}

	return ++count;
}