// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

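/* Thin pass-through wrappers around ionic_q_post() that keep the tx
 * and rx descriptor-posting call sites distinct.
 */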
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

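/* If a queue still has work posted but its doorbell hasn't been rung
 * within the deadline, ring it again so the device doesn't stall.
 * Returns true if the queue still has descriptors in flight.  The tx
 * variant takes the netdev tx lock to serialize with the xmit path;
 * the rx variant is only called from napi context, so no lock is needed.
 */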
bool ionic_txq_poke_doorbell(struct ionic_queue *q)
{
	unsigned long now, then, dif;
	struct netdev_queue *netdev_txq;
	struct net_device *netdev;

	netdev = q->lif->netdev;
	netdev_txq = netdev_get_tx_queue(netdev, q->index);

	HARD_TX_LOCK(netdev, netdev_txq, smp_processor_id());

	if (q->tail_idx == q->head_idx) {
		HARD_TX_UNLOCK(netdev, netdev_txq);
		return false;
	}

	now = READ_ONCE(jiffies);
	then = q->dbell_jiffies;
	dif = now - then;

	if (dif > q->dbell_deadline) {
		ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
				 q->dbval | q->head_idx);

		q->dbell_jiffies = now;
	}

	HARD_TX_UNLOCK(netdev, netdev_txq);

	return true;
}

bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
{
	unsigned long now, then, dif;

	/* no lock, called from rx napi or txrx napi, nothing else can fill */

	if (q->tail_idx == q->head_idx)
		return false;

	now = READ_ONCE(jiffies);
	then = q->dbell_jiffies;
	dif = now - then;

	if (dif > q->dbell_deadline) {
		ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
				 q->dbval | q->head_idx);

		q->dbell_jiffies = now;

		dif = 2 * q->dbell_deadline;
		if (dif > IONIC_RX_MAX_DOORBELL_DEADLINE)
			dif = IONIC_RX_MAX_DOORBELL_DEADLINE;

		q->dbell_deadline = dif;
	}

	return true;
}

static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}

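/* Allocate a full page for an rx buffer and DMA-map it for the device.
 * On success the page and its mapping are recorded in buf_info with a
 * zero page_offset; the page may later be split across several receives
 * by ionic_rx_buf_recycle().
 */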
static int ionic_rx_page_alloc(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_rx_stats *stats;
	struct device *dev;
	struct page *page;

	dev = q->dev;
	stats = q_to_rx_stats(q);

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
				    netdev->name, q->name);
		return -EINVAL;
	}

	page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
	if (unlikely(!page)) {
		net_err_ratelimited("%s: %s page alloc failed\n",
				    netdev->name, q->name);
		stats->alloc_err++;
		return -ENOMEM;
	}

	buf_info->dma_addr = dma_map_page(dev, page, 0,
					  IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
		__free_pages(page, 0);
		net_err_ratelimited("%s: %s dma map failed\n",
				    netdev->name, q->name);
		stats->dma_map_err++;
		return -EIO;
	}

	buf_info->page = page;
	buf_info->page_offset = 0;

	return 0;
}

static void ionic_rx_page_free(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct net_device *netdev = q->lif->netdev;
	struct device *dev = q->dev;

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in free\n",
				    netdev->name, q->name);
		return;
	}

	if (!buf_info->page)
		return;

	dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	__free_pages(buf_info->page, 0);
	buf_info->page = NULL;
}

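/* Decide whether an rx page can be reused for another buffer.  Pages
 * from pfmemalloc reserves or remote NUMA nodes are not recycled, and a
 * page is retired once the split offset reaches the end of the page.
 * When the page is kept, an extra reference is taken for the next use.
 */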
static bool ionic_rx_buf_recycle(struct ionic_queue *q,
				 struct ionic_buf_info *buf_info, u32 used)
{
	u32 size;

	/* don't re-use pages allocated in low-mem condition */
	if (page_is_pfmemalloc(buf_info->page))
		return false;

	/* don't re-use buffers from non-local numa nodes */
	if (page_to_nid(buf_info->page) != numa_mem_id())
		return false;

	size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
	buf_info->page_offset += size;
	if (buf_info->page_offset >= IONIC_PAGE_SIZE)
		return false;

	get_page(buf_info->page);

	return true;
}

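/* Build an skb from the rx completion by attaching the posted pages as
 * frags, syncing each fragment for the CPU and unmapping any buffer
 * that cannot be recycled.
 */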
static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	prefetchw(buf_info->page);

	skb = napi_get_frags(&q_to_qcq(q)->napi);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!buf_info->page)) {
			dev_kfree_skb(skb);
			return NULL;
		}

		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
		len -= frag_len;

		dma_sync_single_for_cpu(dev,
					buf_info->dma_addr + buf_info->page_offset,
					frag_len, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				buf_info->page, buf_info->page_offset, frag_len,
				IONIC_PAGE_SIZE);

		if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
			dma_unmap_page(dev, buf_info->dma_addr,
				       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
			buf_info->page = NULL;
		}

		buf_info++;

		i--;
	} while (i > 0);

	return skb;
}

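/* For small packets (len <= rx_copybreak) copy the data into a freshly
 * allocated linear skb so the rx page can stay in place for reuse.
 */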
static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	if (unlikely(!buf_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
	dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}

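/* Per-completion rx handler: builds the skb (copybreak or frags), then
 * fills in RSS hash, checksum, VLAN tag and optional hardware timestamp
 * before handing the packet to GRO.
 */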
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct ionic_rxq_comp *comp;
	struct sk_buff *skb;

	comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);

	stats = q_to_rx_stats(q);

	if (comp->status) {
		stats->dropped++;
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, comp);
	else
		skb = ionic_rx_frags(q, desc_info, comp);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
		stats->csum_complete++;
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
		__le64 *cq_desc_hwstamp;
		u64 hwstamp;

		cq_desc_hwstamp =
			cq_info->cq_desc +
			qcq->cq.desc_size -
			sizeof(struct ionic_rxq_comp) -
			IONIC_HWSTAMP_CQ_NEGOFFSET;

		hwstamp = le64_to_cpu(*cq_desc_hwstamp);

		if (hwstamp != IONIC_HWSTAMP_INVALID) {
			skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
			stats->hwstamp_valid++;
		} else {
			stats->hwstamp_invalid++;
		}
	}

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}

bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_comp *comp;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail_idx == q->head_idx)
		return false;

	if (q->tail_idx != le16_to_cpu(comp->comp_index))
		return false;

	desc_info = &q->info[q->tail_idx];
	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

	/* clean the related q entry, only one per qc completion */
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}

static inline void ionic_write_cmb_desc(struct ionic_queue *q,
					void __iomem *cmb_desc,
					void *desc)
{
	if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS)
		memcpy_toio(cmb_desc, desc, q->desc_size);
}

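/* Refill the rx ring once enough descriptors are free to be worth the
 * doorbell write.  Each receive of up to mtu + ETH_HLEN + VLAN_HLEN
 * bytes is split across the main descriptor and sg elements as needed;
 * afterward the doorbell is rung and the doorbell-check timer re-armed.
 */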
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_buf_info *buf_info;
	unsigned int fill_threshold;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int frag_len;
	unsigned int nfrags;
	unsigned int n_fill;
	unsigned int i, j;
	unsigned int len;

	n_fill = ionic_q_space_avail(q);

	fill_threshold = min_t(unsigned int, IONIC_RX_FILL_THRESHOLD,
			       q->num_descs / IONIC_RX_FILL_DIV);
	if (n_fill < fill_threshold)
		return;

	len = netdev->mtu + ETH_HLEN + VLAN_HLEN;

	for (i = n_fill; i; i--) {
		nfrags = 0;
		remain_len = len;
		desc_info = &q->info[q->head_idx];
		desc = desc_info->desc;
		buf_info = &desc_info->bufs[0];

		if (!buf_info->page) { /* alloc a new buffer? */
			if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
				desc->addr = 0;
				desc->len = 0;
				return;
			}
		}

		/* fill main descriptor - buf[0] */
		desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
		desc->len = cpu_to_le16(frag_len);
		remain_len -= frag_len;
		buf_info++;
		nfrags++;

		/* fill sg descriptors - buf[1..n] */
		sg_desc = desc_info->sg_desc;
		for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
			sg_elem = &sg_desc->elems[j];
			if (!buf_info->page) { /* alloc a new sg buffer? */
				if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
					sg_elem->addr = 0;
					sg_elem->len = 0;
					return;
				}
			}

			sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
			frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset);
			sg_elem->len = cpu_to_le16(frag_len);
			remain_len -= frag_len;
			buf_info++;
			nfrags++;
		}

		/* clear end sg element as a sentinel */
		if (j < q->max_sg_elems) {
			sg_elem = &sg_desc->elems[j];
			memset(sg_elem, 0, sizeof(*sg_elem));
		}

		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->nbufs = nfrags;

		ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);

		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
	}

	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
			 q->dbval | q->head_idx);

	q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
		  jiffies + IONIC_NAPI_DEADLINE);
}

void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	unsigned int i, j;

	for (i = 0; i < q->num_descs; i++) {
		desc_info = &q->info[i];
		for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
			buf_info = &desc_info->bufs[j];
			if (buf_info->page)
				ionic_rx_page_free(q, buf_info);
		}

		desc_info->nbufs = 0;
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	q->head_idx = 0;
	q->tail_idx = 0;
}

static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
{
	struct dim_sample dim_sample;
	struct ionic_lif *lif;
	unsigned int qi;
	u64 pkts, bytes;

	if (!qcq->intr.dim_coal_hw)
		return;

	lif = qcq->q.lif;
	qi = qcq->cq.bound_q->index;

	switch (napi_mode) {
	case IONIC_LIF_F_TX_DIM_INTR:
		pkts = lif->txqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes;
		break;
	case IONIC_LIF_F_RX_DIM_INTR:
		pkts = lif->rxqstats[qi].pkts;
		bytes = lif->rxqstats[qi].bytes;
		break;
	default:
		pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
		break;
	}

	dim_update_sample(qcq->cq.bound_intr->rearm_count,
			  pkts, bytes, &dim_sample);

	net_dim(&qcq->dim, dim_sample);
}

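/* NAPI poll handlers.  Tx and rx each have a dedicated handler for
 * split-interrupt configurations; ionic_txrx_napi() services both cqs
 * when they share an interrupt.  Each handler updates the interrupt
 * credits and, if no work was found, pokes the doorbell in case a
 * doorbell write was missed.
 */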
int ionic_tx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_tx_service, NULL, NULL);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	if (!work_done && ionic_txq_poke_doorbell(&qcq->q))
		mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);

	return work_done;
}

int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_rx_service, NULL, NULL);

	ionic_rx_fill(cq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	if (!work_done && ionic_rxq_poke_doorbell(&qcq->q))
		mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);

	return work_done;
}

int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *rxqcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_qcq *txqcq;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	bool resched = false;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txqcq = lif->txqcqs[qi];
	txcq = &lif->txqcqs[qi]->cq;

	tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
					ionic_tx_service, NULL, NULL);

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);

	ionic_rx_fill(rxcq->bound_q);

	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
		ionic_dim_update(rxqcq, 0);
		flags |= IONIC_INTR_CRED_UNMASK;
		rxcq->bound_intr->rearm_count++;
	}

	if (rx_work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q))
		resched = true;
	if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q))
		resched = true;
	if (resched)
		mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);

	return rx_work_done;
}

static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
	}
	return dma_addr;
}

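/* DMA-map the skb head and all frags into desc_info->bufs, with the
 * head mapping in bufs[0].  On a mapping failure, any mappings already
 * made are unwound before returning -EIO.
 */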
static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
			    struct ionic_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;
	unsigned int nfrags;
	skb_frag_t *frag;
	int frag_idx;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr)) {
		stats->dma_map_err++;
		return -EIO;
	}
	buf_info->dma_addr = dma_addr;
	buf_info->len = skb_headlen(skb);
	buf_info++;

	frag = skb_shinfo(skb)->frags;
	nfrags = skb_shinfo(skb)->nr_frags;
	for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
		dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
		if (dma_mapping_error(dev, dma_addr)) {
			stats->dma_map_err++;
			goto dma_fail;
		}
		buf_info->dma_addr = dma_addr;
		buf_info->len = skb_frag_size(frag);
		buf_info++;
	}

	desc_info->nbufs = 1 + nfrags;

	return 0;

dma_fail:
	/* unwind the frag mappings, then the head mapping, which is
	 * always in bufs[0] (buf_info no longer points there after
	 * the unwind loop)
	 */
	while (frag_idx > 0) {
		frag_idx--;
		buf_info--;
		dma_unmap_page(dev, buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);
	}
	dma_unmap_single(dev, desc_info->bufs[0].dma_addr,
			 desc_info->bufs[0].len, DMA_TO_DEVICE);
	return -EIO;
}

static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
				     struct ionic_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct device *dev = q->dev;
	unsigned int i;

	if (!desc_info->nbufs)
		return;

	dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
			 buf_info->len, DMA_TO_DEVICE);
	buf_info++;
	for (i = 1; i < desc_info->nbufs; i++, buf_info++)
		dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);

	desc_info->nbufs = 0;
}

static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct sk_buff *skb = cb_arg;
	u16 qi;

	ionic_tx_desc_unmap_bufs(q, desc_info);

	if (!skb)
		return;

	qi = skb_get_queue_mapping(skb);

	if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) {
		if (cq_info) {
			struct skb_shared_hwtstamps hwts = {};
			__le64 *cq_desc_hwstamp;
			u64 hwstamp;

			cq_desc_hwstamp =
				cq_info->cq_desc +
				qcq->cq.desc_size -
				sizeof(struct ionic_txq_comp) -
				IONIC_HWSTAMP_CQ_NEGOFFSET;

			hwstamp = le64_to_cpu(*cq_desc_hwstamp);

			if (hwstamp != IONIC_HWSTAMP_INVALID) {
				hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);

				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
				skb_tstamp_tx(skb, &hwts);

				stats->hwstamp_valid++;
			} else {
				stats->hwstamp_invalid++;
			}
		}

	} else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
		netif_wake_subqueue(q->lif->netdev, qi);
	}

	desc_info->bytes = skb->len;
	stats->clean++;

	dev_consume_skb_any(skb);
}

bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_txq_comp *comp;
	int bytes = 0;
	int pkts = 0;
	u16 index;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		index = q->tail_idx;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (index != le16_to_cpu(comp->comp_index));

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);

	return true;
}

void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_tx_service, NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	int bytes = 0;
	int pkts = 0;

	/* walk the not completed tx entries, if any */
	while (q->head_idx != q->tail_idx) {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
}

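/* For TSO, the device expects the innermost TCP checksum field to be
 * preloaded with the pseudo-header checksum computed with a zero
 * length; the hardware then folds in the per-segment payload length.
 */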
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}

static void ionic_tx_tso_post(struct ionic_queue *q,
			      struct ionic_desc_info *desc_info,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	struct ionic_txq_desc *desc = desc_info->desc;
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);

	if (start) {
		skb_tx_timestamp(skb);
		if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
			netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, false, ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, done, NULL, NULL);
	}
}

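/* Walk the DMA-mapped buffers and carve the TSO payload into mss-sized
 * segments, posting one descriptor (plus sg elements) per segment.
 * The first descriptor also covers the headers and carries the skb for
 * completion; the SOT/EOT flags mark the segment boundaries for the
 * device.
 */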
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int chunk_len;
	unsigned int frag_rem;
	unsigned int tso_rem;
	unsigned int seg_rem;
	dma_addr_t desc_addr;
	dma_addr_t frag_addr;
	unsigned int hdrlen;
	unsigned int len;
	unsigned int mss;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	desc_info = &q->info[q->head_idx];
	buf_info = desc_info->bufs;

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	len = skb->len;
	mss = skb_shinfo(skb)->gso_size;
	outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
						   SKB_GSO_GRE_CSUM |
						   SKB_GSO_IPXIP4 |
						   SKB_GSO_IPXIP6 |
						   SKB_GSO_UDP_TUNNEL |
						   SKB_GSO_UDP_TUNNEL_CSUM));
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err) {
		/* clean up mapping from ionic_tx_map_skb */
		ionic_tx_desc_unmap_bufs(q, desc_info);
		return err;
	}

	if (encap)
		hdrlen = skb_inner_tcp_all_headers(skb);
	else
		hdrlen = skb_tcp_all_headers(skb);

	tso_rem = len;
	seg_rem = min(tso_rem, hdrlen + mss);

	frag_addr = 0;
	frag_rem = 0;

	start = true;

	while (tso_rem > 0) {
		desc = NULL;
		elem = NULL;
		desc_addr = 0;
		desc_len = 0;
		desc_nsge = 0;
		/* use fragments until we have enough to post a single descriptor */
		while (seg_rem > 0) {
			/* if the fragment is exhausted then move to the next one */
			if (frag_rem == 0) {
				/* grab the next fragment */
				frag_addr = buf_info->dma_addr;
				frag_rem = buf_info->len;
				buf_info++;
			}
			chunk_len = min(frag_rem, seg_rem);
			if (!desc) {
				/* fill main descriptor */
				desc = desc_info->txq_desc;
				elem = desc_info->txq_sg_desc->elems;
				desc_addr = frag_addr;
				desc_len = chunk_len;
			} else {
				/* fill sg descriptor */
				elem->addr = cpu_to_le64(frag_addr);
				elem->len = cpu_to_le16(chunk_len);
				elem++;
				desc_nsge++;
			}
			frag_addr += chunk_len;
			frag_rem -= chunk_len;
			tso_rem -= chunk_len;
			seg_rem -= chunk_len;
		}
		seg_rem = min(tso_rem, mss);
		done = (tso_rem == 0);
		/* post descriptor */
		ionic_tx_tso_post(q, desc_info, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss, outer_csum, vlan_tci, has_vlan,
				  start, done);
		start = false;
		/* Buffer information is stored with the first tso descriptor */
		desc_info = &q->info[q->head_idx];
		desc_info->nbufs = 0;
	}

	stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
	stats->bytes += len;
	stats->tso++;
	stats->tso_bytes = len;

	return 0;
}

static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
			       struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);

	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);

	if (skb_csum_is_sctp(skb))
		stats->crc32_csum++;
	else
		stats->csum++;
}

static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
				  struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = 0;
	desc->csum_offset = 0;

	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);

	stats->csum_none++;
}

static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
			       struct ionic_desc_info *desc_info)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
	struct ionic_buf_info *buf_info = &desc_info->bufs[1];
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	unsigned int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
		elem->addr = cpu_to_le64(buf_info->dma_addr);
		elem->len = cpu_to_le16(buf_info->len);
	}

	stats->frags += skb_shinfo(skb)->nr_frags;
}

static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_desc_info *desc_info = &q->info[q->head_idx];
	struct ionic_tx_stats *stats = q_to_tx_stats(q);

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		ionic_tx_calc_csum(q, skb, desc_info);
	else
		ionic_tx_calc_no_csum(q, skb, desc_info);

	/* add frags */
	ionic_tx_skb_frags(q, skb, desc_info);

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}

static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int ndescs;
	int err;

	/* Each desc is mss long max, so a descriptor for each gso_seg */
	if (skb_is_gso(skb))
		ndescs = skb_shinfo(skb)->gso_segs;
	else
		ndescs = 1;

	/* If non-TSO, just need 1 desc and nr_frags sg elems */
	if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
		return ndescs;

	/* Too many frags, so linearize */
	err = skb_linearize(skb);
	if (err)
		return err;

	stats->linearize++;

	return ndescs;
}

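/* Stop the subqueue if there isn't room for ndescs more descriptors,
 * re-checking after the stop to close the race with a concurrent
 * ionic_tx_clean() that may have just freed space.
 */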
static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q = &lif->hwstamp_txq->q;
	int err, ndescs;

	/* Does not stop/start txq, because we post to a separate tx queue
	 * for timestamping, and if a packet can't be posted immediately to
	 * the timestamping queue, it is dropped.
	 */

	ndescs = ionic_tx_descs_needed(q, skb);
	if (unlikely(ndescs < 0))
		goto err_out_drop;

	if (unlikely(!ionic_q_has_space(q, ndescs)))
		goto err_out_drop;

	skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

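/* The ndo_start_xmit entry point: drops packets while the LIF is down,
 * diverts hardware-timestamp requests to the dedicated hwstamp queue
 * when one is configured, then reserves descriptors on the selected tx
 * queue and posts the skb as TSO or a single csum/no-csum descriptor.
 */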
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode)
			return ionic_start_hwstamp_xmit(skb, netdev);

	if (unlikely(queue_index >= lif->nxqs))
		queue_index = 0;
	q = &lif->txqcqs[queue_index]->q;

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the possible
	 * fragmentation, we don't need to have many descriptors available.
	 */
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}