1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Ethernet driver
3 *
4 * Copyright (C) 2020 Marvell.
5 *
6 */
7
8 #include <net/ipv6.h>
9 #include <linux/sort.h>
10
11 #include "otx2_common.h"
12
13 #define OTX2_DEFAULT_ACTION 0x1
14
15 static int otx2_mcam_entry_init(struct otx2_nic *pfvf);
16
17 struct otx2_flow {
18 struct ethtool_rx_flow_spec flow_spec;
19 struct list_head list;
20 u32 location;
21 u16 entry;
22 bool is_vf;
23 u8 rss_ctx_id;
24 int vf;
25 bool dmac_filter;
26 };
27
28 enum dmac_req {
29 DMAC_ADDR_UPDATE,
30 DMAC_ADDR_DEL
31 };
32
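/* Drop the cached ntuple MCAM entry list and reset the max flow count */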
static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf,
					struct otx2_flow_config *flow_cfg)
34 {
35 devm_kfree(pfvf->dev, flow_cfg->flow_ent);
36 flow_cfg->flow_ent = NULL;
37 flow_cfg->max_flows = 0;
38 }
39
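/* Ask the AF, via mailbox, to free each MCAM entry that was allocated for
 * ntuple filters, then forget about them locally.
 */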
static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
41 {
42 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
43 struct npc_mcam_free_entry_req *req;
44 int ent, err;
45
46 if (!flow_cfg->max_flows)
47 return 0;
48
49 mutex_lock(&pfvf->mbox.lock);
50 for (ent = 0; ent < flow_cfg->max_flows; ent++) {
51 req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
52 if (!req)
53 break;
54
55 req->entry = flow_cfg->flow_ent[ent];
56
57 /* Send message to AF to free MCAM entries */
58 err = otx2_sync_mbox_msg(&pfvf->mbox);
59 if (err)
60 break;
61 }
62 mutex_unlock(&pfvf->mbox.lock);
63 otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
64 return 0;
65 }
66
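/* sort() comparator for the u16 MCAM entry indices in flow_ent[] */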
static int mcam_entry_cmp(const void *a, const void *b)
68 {
69 return *(u16 *)a - *(u16 *)b;
70 }
71
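/* Allocate 'count' non-contiguous MCAM entries from the AF for ntuple
 * filters, replacing any entries held previously. Returns the number of
 * entries actually allocated, which may be less than requested.
 */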
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
73 {
74 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
75 struct npc_mcam_alloc_entry_req *req;
76 struct npc_mcam_alloc_entry_rsp *rsp;
77 int ent, allocated = 0;
78
79 /* Free current ones and allocate new ones with requested count */
80 otx2_free_ntuple_mcam_entries(pfvf);
81
82 if (!count)
83 return 0;
84
85 flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
86 sizeof(u16), GFP_KERNEL);
87 if (!flow_cfg->flow_ent) {
88 netdev_err(pfvf->netdev,
89 "%s: Unable to allocate memory for flow entries\n",
90 __func__);
91 return -ENOMEM;
92 }
93
94 mutex_lock(&pfvf->mbox.lock);
95
/* In a single request only a maximum of NPC_MAX_NONCONTIG_ENTRIES
 * MCAM entries can be allocated.
 */
99 while (allocated < count) {
100 req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
101 if (!req)
102 goto exit;
103
104 req->contig = false;
105 req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
106 NPC_MAX_NONCONTIG_ENTRIES : count - allocated;
107
/* Allocate higher priority entries for PFs, so that VFs' entries
 * will be on top of the PF's entries.
 */
111 if (!is_otx2_vf(pfvf->pcifunc)) {
112 req->priority = NPC_MCAM_HIGHER_PRIO;
113 req->ref_entry = flow_cfg->def_ent[0];
114 }
115
116 /* Send message to AF */
117 if (otx2_sync_mbox_msg(&pfvf->mbox))
118 goto exit;
119
120 rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
121 (&pfvf->mbox.mbox, 0, &req->hdr);
122
123 for (ent = 0; ent < rsp->count; ent++)
124 flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
125
126 allocated += rsp->count;
127
128 /* If this request is not fulfilled, no need to send
129 * further requests.
130 */
131 if (rsp->count != req->count)
132 break;
133 }
134
/* Multiple MCAM entry alloc requests could result in non-sequential
 * MCAM entries in the flow_ent[] array. Sort them in ascending order,
 * otherwise the user-installed ntuple filter index and the MCAM entry
 * index will not be in sync.
 */
140 if (allocated)
141 sort(&flow_cfg->flow_ent[0], allocated,
142 sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);
143
144 exit:
145 mutex_unlock(&pfvf->mbox.lock);
146
147 flow_cfg->max_flows = allocated;
148
149 if (allocated) {
150 pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
151 pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
152 }
153
154 if (allocated != count)
155 netdev_info(pfvf->netdev,
156 "Unable to allocate %d MCAM entries, got only %d\n",
157 count, allocated);
158 return allocated;
159 }
160 EXPORT_SYMBOL(otx2_alloc_mcam_entries);
161
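/* Allocate the default MCAM entries used for unicast MAC, Rx VLAN and
 * per-VF VLAN filters, then try to reserve a pool of entries for
 * ntuple/TC filters.
 */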
static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
163 {
164 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
165 struct npc_mcam_alloc_entry_req *req;
166 struct npc_mcam_alloc_entry_rsp *rsp;
167 int vf_vlan_max_flows;
168 int ent, count;
169
170 vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
171 count = OTX2_MAX_UNICAST_FLOWS +
172 OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;
173
174 flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
175 sizeof(u16), GFP_KERNEL);
176 if (!flow_cfg->def_ent)
177 return -ENOMEM;
178
179 mutex_lock(&pfvf->mbox.lock);
180
181 req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
182 if (!req) {
183 mutex_unlock(&pfvf->mbox.lock);
184 return -ENOMEM;
185 }
186
187 req->contig = false;
188 req->count = count;
189
190 /* Send message to AF */
191 if (otx2_sync_mbox_msg(&pfvf->mbox)) {
192 mutex_unlock(&pfvf->mbox.lock);
193 return -EINVAL;
194 }
195
196 rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
197 (&pfvf->mbox.mbox, 0, &req->hdr);
198
199 if (rsp->count != req->count) {
200 netdev_info(pfvf->netdev,
201 "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
202 mutex_unlock(&pfvf->mbox.lock);
203 devm_kfree(pfvf->dev, flow_cfg->def_ent);
204 return 0;
205 }
206
207 for (ent = 0; ent < rsp->count; ent++)
208 flow_cfg->def_ent[ent] = rsp->entry_list[ent];
209
210 flow_cfg->vf_vlan_offset = 0;
211 flow_cfg->unicast_offset = vf_vlan_max_flows;
212 flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
213 OTX2_MAX_UNICAST_FLOWS;
214 pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
215 pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
216 pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
217
218 pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
219 mutex_unlock(&pfvf->mbox.lock);
220
221 /* Allocate entries for Ntuple filters */
222 count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
223 if (count <= 0) {
224 otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
225 return 0;
226 }
227
228 pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
229
230 return 0;
231 }
232
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
234 {
235 struct otx2_flow_config *flow_cfg;
236
237 pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
238 sizeof(struct otx2_flow_config),
239 GFP_KERNEL);
240 if (!pfvf->flow_cfg)
241 return -ENOMEM;
242
243 flow_cfg = pfvf->flow_cfg;
244 INIT_LIST_HEAD(&flow_cfg->flow_list);
245 flow_cfg->max_flows = 0;
246
247 return 0;
248 }
249 EXPORT_SYMBOL(otx2vf_mcam_flow_init);
250
int otx2_mcam_flow_init(struct otx2_nic *pf)
252 {
253 int err;
254
255 pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
256 GFP_KERNEL);
257 if (!pf->flow_cfg)
258 return -ENOMEM;
259
260 INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
261
262 /* Allocate bare minimum number of MCAM entries needed for
263 * unicast and ntuple filters.
264 */
265 err = otx2_mcam_entry_init(pf);
266 if (err)
267 return err;
268
/* Check whether MCAM entries were allocated or not */
270 if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
271 return 0;
272
273 pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
274 * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
275 if (!pf->mac_table)
276 return -ENOMEM;
277
278 otx2_dmacflt_get_max_cnt(pf);
279
280 /* DMAC filters are not allocated */
281 if (!pf->flow_cfg->dmacflt_max_flows)
282 return 0;
283
284 pf->flow_cfg->bmap_to_dmacindex =
285 devm_kzalloc(pf->dev, sizeof(u8) *
286 pf->flow_cfg->dmacflt_max_flows,
287 GFP_KERNEL);
288
289 if (!pf->flow_cfg->bmap_to_dmacindex)
290 return -ENOMEM;
291
292 pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;
293
294 return 0;
295 }
296
void otx2_mcam_flow_del(struct otx2_nic *pf)
298 {
299 otx2_destroy_mcam_flows(pf);
300 }
301 EXPORT_SYMBOL(otx2_mcam_flow_del);
302
/* On success adds an MCAM entry
 * On failure enable promiscuous mode
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
307 {
308 struct otx2_flow_config *flow_cfg = pf->flow_cfg;
309 struct npc_install_flow_req *req;
310 int err, i;
311
312 if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
313 return -ENOMEM;
314
/* don't have free MCAM entries or the UC list is greater than allotted */
316 if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
317 return -ENOMEM;
318
319 mutex_lock(&pf->mbox.lock);
320 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
321 if (!req) {
322 mutex_unlock(&pf->mbox.lock);
323 return -ENOMEM;
324 }
325
/* unicast offset starts at 32; entries 0..31 are for ntuple filters */
327 for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
328 if (pf->mac_table[i].inuse)
329 continue;
330 ether_addr_copy(pf->mac_table[i].addr, mac);
331 pf->mac_table[i].inuse = true;
332 pf->mac_table[i].mcam_entry =
333 flow_cfg->def_ent[i + flow_cfg->unicast_offset];
334 req->entry = pf->mac_table[i].mcam_entry;
335 break;
336 }
337
338 ether_addr_copy(req->packet.dmac, mac);
339 eth_broadcast_addr((u8 *)&req->mask.dmac);
340 req->features = BIT_ULL(NPC_DMAC);
341 req->channel = pf->hw.rx_chan_base;
342 req->intf = NIX_INTF_RX;
343 req->op = NIX_RX_ACTION_DEFAULT;
344 req->set_cntr = 1;
345
346 err = otx2_sync_mbox_msg(&pf->mbox);
347 mutex_unlock(&pf->mbox.lock);
348
349 return err;
350 }
351
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
353 {
354 struct otx2_nic *pf = netdev_priv(netdev);
355
356 if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
357 pf->flow_cfg->dmacflt_max_flows))
358 netdev_warn(netdev,
359 "Add %pM to CGX/RPM DMAC filters list as well\n",
360 mac);
361
362 return otx2_do_add_macfilter(pf, mac);
363 }
364
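/* Look up the MAC table slot matching 'mac', return its MCAM entry and
 * mark the slot as free.
 */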
static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
367 {
368 int i;
369
370 for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
371 if (!pf->mac_table[i].inuse)
372 continue;
373
374 if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
375 *mcam_entry = pf->mac_table[i].mcam_entry;
376 pf->mac_table[i].inuse = false;
377 return true;
378 }
379 }
380 return false;
381 }
382
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
384 {
385 struct otx2_nic *pf = netdev_priv(netdev);
386 struct npc_delete_flow_req *req;
387 int err, mcam_entry;
388
/* check whether an MCAM entry exists for the given MAC */
390 if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
391 return 0;
392
393 mutex_lock(&pf->mbox.lock);
394 req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
395 if (!req) {
396 mutex_unlock(&pf->mbox.lock);
397 return -ENOMEM;
398 }
399 req->entry = mcam_entry;
400 /* Send message to AF */
401 err = otx2_sync_mbox_msg(&pf->mbox);
402 mutex_unlock(&pf->mbox.lock);
403
404 return err;
405 }
406
static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
408 {
409 struct otx2_flow *iter;
410
411 list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
412 if (iter->location == location)
413 return iter;
414 }
415
416 return NULL;
417 }
418
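/* Insert the flow so that the list stays sorted by ascending location */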
static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
420 {
421 struct list_head *head = &pfvf->flow_cfg->flow_list;
422 struct otx2_flow *iter;
423
424 list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
425 if (iter->location > flow->location)
426 break;
427 head = &iter->list;
428 }
429
430 list_add(&flow->list, head);
431 }
432
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
434 {
435 if (!flow_cfg)
436 return 0;
437
438 if (flow_cfg->nr_flows == flow_cfg->max_flows ||
439 bitmap_weight(&flow_cfg->dmacflt_bmap,
440 flow_cfg->dmacflt_max_flows))
441 return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
442 else
443 return flow_cfg->max_flows;
444 }
445 EXPORT_SYMBOL(otx2_get_maxflows);
446
int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
449 {
450 struct otx2_flow *iter;
451
452 if (location >= otx2_get_maxflows(pfvf->flow_cfg))
453 return -EINVAL;
454
455 list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
456 if (iter->location == location) {
457 nfc->fs = iter->flow_spec;
458 nfc->rss_context = iter->rss_ctx_id;
459 return 0;
460 }
461 }
462
463 return -ENOENT;
464 }
465
int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
468 {
469 u32 rule_cnt = nfc->rule_cnt;
470 u32 location = 0;
471 int idx = 0;
472 int err = 0;
473
474 nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
475 while ((!err || err == -ENOENT) && idx < rule_cnt) {
476 err = otx2_get_flow(pfvf, nfc, location);
477 if (!err)
478 rule_locs[idx++] = location;
479 location++;
480 }
481 nfc->rule_cnt = rule_cnt;
482
483 return err;
484 }
485
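/* Translate the IPv4 portion of an ethtool flow spec into NPC match
 * fields; only fields with a non-zero mask are added to the request.
 */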
static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
489 {
490 struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
491 struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
492 struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
493 struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
494 struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
495 struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
496 struct flow_msg *pmask = &req->mask;
497 struct flow_msg *pkt = &req->packet;
498
499 switch (flow_type) {
500 case IP_USER_FLOW:
501 if (ipv4_usr_mask->ip4src) {
502 memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
503 sizeof(pkt->ip4src));
504 memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
505 sizeof(pmask->ip4src));
506 req->features |= BIT_ULL(NPC_SIP_IPV4);
507 }
508 if (ipv4_usr_mask->ip4dst) {
509 memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
510 sizeof(pkt->ip4dst));
511 memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
512 sizeof(pmask->ip4dst));
513 req->features |= BIT_ULL(NPC_DIP_IPV4);
514 }
515 if (ipv4_usr_mask->tos) {
516 pkt->tos = ipv4_usr_hdr->tos;
517 pmask->tos = ipv4_usr_mask->tos;
518 req->features |= BIT_ULL(NPC_TOS);
519 }
520 if (ipv4_usr_mask->proto) {
521 switch (ipv4_usr_hdr->proto) {
522 case IPPROTO_ICMP:
523 req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
524 break;
525 case IPPROTO_TCP:
526 req->features |= BIT_ULL(NPC_IPPROTO_TCP);
527 break;
528 case IPPROTO_UDP:
529 req->features |= BIT_ULL(NPC_IPPROTO_UDP);
530 break;
531 case IPPROTO_SCTP:
532 req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
533 break;
534 case IPPROTO_AH:
535 req->features |= BIT_ULL(NPC_IPPROTO_AH);
536 break;
537 case IPPROTO_ESP:
538 req->features |= BIT_ULL(NPC_IPPROTO_ESP);
539 break;
540 default:
541 return -EOPNOTSUPP;
542 }
543 }
544 pkt->etype = cpu_to_be16(ETH_P_IP);
545 pmask->etype = cpu_to_be16(0xFFFF);
546 req->features |= BIT_ULL(NPC_ETYPE);
547 break;
548 case TCP_V4_FLOW:
549 case UDP_V4_FLOW:
550 case SCTP_V4_FLOW:
551 pkt->etype = cpu_to_be16(ETH_P_IP);
552 pmask->etype = cpu_to_be16(0xFFFF);
553 req->features |= BIT_ULL(NPC_ETYPE);
554 if (ipv4_l4_mask->ip4src) {
555 memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
556 sizeof(pkt->ip4src));
557 memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
558 sizeof(pmask->ip4src));
559 req->features |= BIT_ULL(NPC_SIP_IPV4);
560 }
561 if (ipv4_l4_mask->ip4dst) {
562 memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
563 sizeof(pkt->ip4dst));
564 memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
565 sizeof(pmask->ip4dst));
566 req->features |= BIT_ULL(NPC_DIP_IPV4);
567 }
568 if (ipv4_l4_mask->tos) {
569 pkt->tos = ipv4_l4_hdr->tos;
570 pmask->tos = ipv4_l4_mask->tos;
571 req->features |= BIT_ULL(NPC_TOS);
572 }
573 if (ipv4_l4_mask->psrc) {
574 memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
575 sizeof(pkt->sport));
576 memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
577 sizeof(pmask->sport));
578 if (flow_type == UDP_V4_FLOW)
579 req->features |= BIT_ULL(NPC_SPORT_UDP);
580 else if (flow_type == TCP_V4_FLOW)
581 req->features |= BIT_ULL(NPC_SPORT_TCP);
582 else
583 req->features |= BIT_ULL(NPC_SPORT_SCTP);
584 }
585 if (ipv4_l4_mask->pdst) {
586 memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
587 sizeof(pkt->dport));
588 memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
589 sizeof(pmask->dport));
590 if (flow_type == UDP_V4_FLOW)
591 req->features |= BIT_ULL(NPC_DPORT_UDP);
592 else if (flow_type == TCP_V4_FLOW)
593 req->features |= BIT_ULL(NPC_DPORT_TCP);
594 else
595 req->features |= BIT_ULL(NPC_DPORT_SCTP);
596 }
597 if (flow_type == UDP_V4_FLOW)
598 req->features |= BIT_ULL(NPC_IPPROTO_UDP);
599 else if (flow_type == TCP_V4_FLOW)
600 req->features |= BIT_ULL(NPC_IPPROTO_TCP);
601 else
602 req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
603 break;
604 case AH_V4_FLOW:
605 case ESP_V4_FLOW:
606 pkt->etype = cpu_to_be16(ETH_P_IP);
607 pmask->etype = cpu_to_be16(0xFFFF);
608 req->features |= BIT_ULL(NPC_ETYPE);
609 if (ah_esp_mask->ip4src) {
610 memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
611 sizeof(pkt->ip4src));
612 memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
613 sizeof(pmask->ip4src));
614 req->features |= BIT_ULL(NPC_SIP_IPV4);
615 }
616 if (ah_esp_mask->ip4dst) {
617 memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
618 sizeof(pkt->ip4dst));
619 memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
620 sizeof(pmask->ip4dst));
621 req->features |= BIT_ULL(NPC_DIP_IPV4);
622 }
623 if (ah_esp_mask->tos) {
624 pkt->tos = ah_esp_hdr->tos;
625 pmask->tos = ah_esp_mask->tos;
626 req->features |= BIT_ULL(NPC_TOS);
627 }
628
629 /* NPC profile doesn't extract AH/ESP header fields */
630 if (ah_esp_mask->spi & ah_esp_hdr->spi)
631 return -EOPNOTSUPP;
632
633 if (flow_type == AH_V4_FLOW)
634 req->features |= BIT_ULL(NPC_IPPROTO_AH);
635 else
636 req->features |= BIT_ULL(NPC_IPPROTO_ESP);
637 break;
638 default:
639 break;
640 }
641
642 return 0;
643 }
644
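/* Translate the IPv6 portion of an ethtool flow spec into NPC match
 * fields, mirroring the IPv4 handling above.
 */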
static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
648 {
649 struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
650 struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
651 struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
652 struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
653 struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
654 struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
655 struct flow_msg *pmask = &req->mask;
656 struct flow_msg *pkt = &req->packet;
657
658 switch (flow_type) {
659 case IPV6_USER_FLOW:
660 if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
661 memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
662 sizeof(pkt->ip6src));
663 memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
664 sizeof(pmask->ip6src));
665 req->features |= BIT_ULL(NPC_SIP_IPV6);
666 }
667 if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
668 memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
669 sizeof(pkt->ip6dst));
670 memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
671 sizeof(pmask->ip6dst));
672 req->features |= BIT_ULL(NPC_DIP_IPV6);
673 }
674 pkt->etype = cpu_to_be16(ETH_P_IPV6);
675 pmask->etype = cpu_to_be16(0xFFFF);
676 req->features |= BIT_ULL(NPC_ETYPE);
677 break;
678 case TCP_V6_FLOW:
679 case UDP_V6_FLOW:
680 case SCTP_V6_FLOW:
681 pkt->etype = cpu_to_be16(ETH_P_IPV6);
682 pmask->etype = cpu_to_be16(0xFFFF);
683 req->features |= BIT_ULL(NPC_ETYPE);
684 if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
685 memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
686 sizeof(pkt->ip6src));
687 memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
688 sizeof(pmask->ip6src));
689 req->features |= BIT_ULL(NPC_SIP_IPV6);
690 }
691 if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
692 memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
693 sizeof(pkt->ip6dst));
694 memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
695 sizeof(pmask->ip6dst));
696 req->features |= BIT_ULL(NPC_DIP_IPV6);
697 }
698 if (ipv6_l4_mask->psrc) {
699 memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
700 sizeof(pkt->sport));
701 memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
702 sizeof(pmask->sport));
703 if (flow_type == UDP_V6_FLOW)
704 req->features |= BIT_ULL(NPC_SPORT_UDP);
705 else if (flow_type == TCP_V6_FLOW)
706 req->features |= BIT_ULL(NPC_SPORT_TCP);
707 else
708 req->features |= BIT_ULL(NPC_SPORT_SCTP);
709 }
710 if (ipv6_l4_mask->pdst) {
711 memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
712 sizeof(pkt->dport));
713 memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
714 sizeof(pmask->dport));
715 if (flow_type == UDP_V6_FLOW)
716 req->features |= BIT_ULL(NPC_DPORT_UDP);
717 else if (flow_type == TCP_V6_FLOW)
718 req->features |= BIT_ULL(NPC_DPORT_TCP);
719 else
720 req->features |= BIT_ULL(NPC_DPORT_SCTP);
721 }
722 if (flow_type == UDP_V6_FLOW)
723 req->features |= BIT_ULL(NPC_IPPROTO_UDP);
724 else if (flow_type == TCP_V6_FLOW)
725 req->features |= BIT_ULL(NPC_IPPROTO_TCP);
726 else
727 req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
728 break;
729 case AH_V6_FLOW:
730 case ESP_V6_FLOW:
731 pkt->etype = cpu_to_be16(ETH_P_IPV6);
732 pmask->etype = cpu_to_be16(0xFFFF);
733 req->features |= BIT_ULL(NPC_ETYPE);
734 if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
735 memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
736 sizeof(pkt->ip6src));
737 memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
738 sizeof(pmask->ip6src));
739 req->features |= BIT_ULL(NPC_SIP_IPV6);
740 }
741 if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
742 memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
743 sizeof(pkt->ip6dst));
744 memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
745 sizeof(pmask->ip6dst));
746 req->features |= BIT_ULL(NPC_DIP_IPV6);
747 }
748
749 /* NPC profile doesn't extract AH/ESP header fields */
750 if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
751 (ah_esp_mask->tclass & ah_esp_mask->tclass))
752 return -EOPNOTSUPP;
753
754 if (flow_type == AH_V6_FLOW)
755 req->features |= BIT_ULL(NPC_IPPROTO_AH);
756 else
757 req->features |= BIT_ULL(NPC_IPPROTO_ESP);
758 break;
759 default:
760 break;
761 }
762
763 return 0;
764 }
765
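/* Convert an ethtool flow spec into an NPC install request: dispatch on
 * the basic flow type, then handle the FLOW_EXT (VLAN) and FLOW_MAC_EXT
 * (destination MAC) extensions.
 */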
static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
				     struct npc_install_flow_req *req)
768 {
769 struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
770 struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
771 struct flow_msg *pmask = &req->mask;
772 struct flow_msg *pkt = &req->packet;
773 u32 flow_type;
774 int ret;
775
776 flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
777 switch (flow_type) {
778 /* bits not set in mask are don't care */
779 case ETHER_FLOW:
780 if (!is_zero_ether_addr(eth_mask->h_source)) {
781 ether_addr_copy(pkt->smac, eth_hdr->h_source);
782 ether_addr_copy(pmask->smac, eth_mask->h_source);
783 req->features |= BIT_ULL(NPC_SMAC);
784 }
785 if (!is_zero_ether_addr(eth_mask->h_dest)) {
786 ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
787 ether_addr_copy(pmask->dmac, eth_mask->h_dest);
788 req->features |= BIT_ULL(NPC_DMAC);
789 }
790 if (eth_hdr->h_proto) {
memcpy(&pkt->etype, &eth_hdr->h_proto,
       sizeof(pkt->etype));
memcpy(&pmask->etype, &eth_mask->h_proto,
       sizeof(pmask->etype));
795 req->features |= BIT_ULL(NPC_ETYPE);
796 }
797 break;
798 case IP_USER_FLOW:
799 case TCP_V4_FLOW:
800 case UDP_V4_FLOW:
801 case SCTP_V4_FLOW:
802 case AH_V4_FLOW:
803 case ESP_V4_FLOW:
804 ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
805 if (ret)
806 return ret;
807 break;
808 case IPV6_USER_FLOW:
809 case TCP_V6_FLOW:
810 case UDP_V6_FLOW:
811 case SCTP_V6_FLOW:
812 case AH_V6_FLOW:
813 case ESP_V6_FLOW:
814 ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
815 if (ret)
816 return ret;
817 break;
818 default:
819 return -EOPNOTSUPP;
820 }
821 if (fsp->flow_type & FLOW_EXT) {
822 u16 vlan_etype;
823
824 if (fsp->m_ext.vlan_etype) {
825 /* Partial masks not supported */
826 if (be16_to_cpu(fsp->m_ext.vlan_etype) != 0xFFFF)
827 return -EINVAL;
828
829 vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype);
/* Only ETH_P_8021Q and ETH_P_8021AD types supported */
831 if (vlan_etype != ETH_P_8021Q &&
832 vlan_etype != ETH_P_8021AD)
833 return -EINVAL;
834
835 memcpy(&pkt->vlan_etype, &fsp->h_ext.vlan_etype,
836 sizeof(pkt->vlan_etype));
837 memcpy(&pmask->vlan_etype, &fsp->m_ext.vlan_etype,
838 sizeof(pmask->vlan_etype));
839
840 if (vlan_etype == ETH_P_8021Q)
841 req->features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG);
842 else
843 req->features |= BIT_ULL(NPC_VLAN_ETYPE_STAG);
844 }
845
846 if (fsp->m_ext.vlan_tci) {
847 memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
848 sizeof(pkt->vlan_tci));
849 memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
850 sizeof(pmask->vlan_tci));
851 req->features |= BIT_ULL(NPC_OUTER_VID);
852 }
853
854 /* Not Drop/Direct to queue but use action in default entry */
855 if (fsp->m_ext.data[1] &&
856 fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
857 req->op = NIX_RX_ACTION_DEFAULT;
858 }
859
860 if (fsp->flow_type & FLOW_MAC_EXT &&
861 !is_zero_ether_addr(fsp->m_ext.h_dest)) {
862 ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
863 ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
864 req->features |= BIT_ULL(NPC_DMAC);
865 }
866
867 if (!req->features)
868 return -EOPNOTSUPP;
869
870 return 0;
871 }
872
static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
					struct ethtool_rx_flow_spec *fsp)
875 {
876 struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
877 struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
878 u64 ring_cookie = fsp->ring_cookie;
879 u32 flow_type;
880
881 if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
882 return false;
883
884 flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
885
/* CGX/RPM block DMAC filtering is configured for white listing;
 * check for an action other than DROP
 */
889 if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
890 !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
891 if (is_zero_ether_addr(eth_mask->h_dest) &&
892 is_valid_ether_addr(eth_hdr->h_dest))
893 return true;
894 }
895
896 return false;
897 }
898
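/* Build and send an npc_install_flow request to the AF for this flow,
 * picking the DROP, RSS or unicast-to-queue action from the ring cookie.
 */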
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
900 {
901 u64 ring_cookie = flow->flow_spec.ring_cookie;
902 struct npc_install_flow_req *req;
903 int err, vf = 0;
904
905 mutex_lock(&pfvf->mbox.lock);
906 req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
907 if (!req) {
908 mutex_unlock(&pfvf->mbox.lock);
909 return -ENOMEM;
910 }
911
912 err = otx2_prepare_flow_request(&flow->flow_spec, req);
913 if (err) {
914 /* free the allocated msg above */
915 otx2_mbox_reset(&pfvf->mbox.mbox, 0);
916 mutex_unlock(&pfvf->mbox.lock);
917 return err;
918 }
919
920 req->entry = flow->entry;
921 req->intf = NIX_INTF_RX;
922 req->set_cntr = 1;
923 req->channel = pfvf->hw.rx_chan_base;
924 if (ring_cookie == RX_CLS_FLOW_DISC) {
925 req->op = NIX_RX_ACTIONOP_DROP;
926 } else {
927 /* change to unicast only if action of default entry is not
928 * requested by user
929 */
930 if (flow->flow_spec.flow_type & FLOW_RSS) {
931 req->op = NIX_RX_ACTIONOP_RSS;
932 req->index = flow->rss_ctx_id;
933 req->flow_key_alg = pfvf->hw.flowkey_alg_idx;
934 } else {
935 req->op = NIX_RX_ACTIONOP_UCAST;
936 req->index = ethtool_get_flow_spec_ring(ring_cookie);
937 }
938 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
939 if (vf > pci_num_vf(pfvf->pdev)) {
940 mutex_unlock(&pfvf->mbox.lock);
941 return -EINVAL;
942 }
943 }
944
945 /* ethtool ring_cookie has (VF + 1) for VF */
946 if (vf) {
947 req->vf = vf;
948 flow->is_vf = true;
949 flow->vf = vf;
950 }
951
952 /* Send message to AF */
953 err = otx2_sync_mbox_msg(&pfvf->mbox);
954 mutex_unlock(&pfvf->mbox.lock);
955 return err;
956 }
957
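/* Install a CGX/RPM DMAC filter for the PF's own MAC address and track
 * it as a flow entry at bit 0 of the DMAC filter bitmap.
 */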
static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
				    struct otx2_flow *flow)
960 {
961 struct otx2_flow *pf_mac;
962 struct ethhdr *eth_hdr;
963
964 pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
965 if (!pf_mac)
966 return -ENOMEM;
967
968 pf_mac->entry = 0;
969 pf_mac->dmac_filter = true;
970 pf_mac->location = pfvf->flow_cfg->max_flows;
971 memcpy(&pf_mac->flow_spec, &flow->flow_spec,
972 sizeof(struct ethtool_rx_flow_spec));
973 pf_mac->flow_spec.location = pf_mac->location;
974
975 /* Copy PF mac address */
976 eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
977 ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);
978
979 /* Install DMAC filter with PF mac address */
980 otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);
981
982 otx2_add_flow_to_list(pfvf, pf_mac);
983 pfvf->flow_cfg->nr_flows++;
984 set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
985
986 return 0;
987 }
988
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
990 {
991 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
992 struct ethtool_rx_flow_spec *fsp = &nfc->fs;
993 struct otx2_flow *flow;
994 struct ethhdr *eth_hdr;
995 bool new = false;
996 int err = 0;
997 u32 ring;
998
999 if (!flow_cfg->max_flows) {
1000 netdev_err(pfvf->netdev,
1001 "Ntuple rule count is 0, allocate and retry\n");
1002 return -EINVAL;
1003 }
1004
1005 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
1006 if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
1007 return -ENOMEM;
1008
1009 if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
1010 return -EINVAL;
1011
1012 if (fsp->location >= otx2_get_maxflows(flow_cfg))
1013 return -EINVAL;
1014
1015 flow = otx2_find_flow(pfvf, fsp->location);
1016 if (!flow) {
1017 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
1018 if (!flow)
1019 return -ENOMEM;
1020 flow->location = fsp->location;
1021 flow->entry = flow_cfg->flow_ent[flow->location];
1022 new = true;
1023 }
1024 /* struct copy */
1025 flow->flow_spec = *fsp;
1026
1027 if (fsp->flow_type & FLOW_RSS)
1028 flow->rss_ctx_id = nfc->rss_context;
1029
1030 if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
1031 eth_hdr = &flow->flow_spec.h_u.ether_spec;
1032
1033 /* Sync dmac filter table with updated fields */
1034 if (flow->dmac_filter)
1035 return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
1036 flow->entry);
1037
1038 if (bitmap_full(&flow_cfg->dmacflt_bmap,
1039 flow_cfg->dmacflt_max_flows)) {
1040 netdev_warn(pfvf->netdev,
1041 "Can't insert the rule %d as max allowed dmac filters are %d\n",
1042 flow->location +
1043 flow_cfg->dmacflt_max_flows,
1044 flow_cfg->dmacflt_max_flows);
1045 err = -EINVAL;
1046 if (new)
1047 kfree(flow);
1048 return err;
1049 }
1050
1051 /* Install PF mac address to DMAC filter list */
1052 if (!test_bit(0, &flow_cfg->dmacflt_bmap))
1053 otx2_add_flow_with_pfmac(pfvf, flow);
1054
1055 flow->dmac_filter = true;
1056 flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
1057 flow_cfg->dmacflt_max_flows);
1058 fsp->location = flow_cfg->max_flows + flow->entry;
1059 flow->flow_spec.location = fsp->location;
1060 flow->location = fsp->location;
1061
1062 set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
1063 otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
1064
1065 } else {
1066 if (flow->location >= pfvf->flow_cfg->max_flows) {
1067 netdev_warn(pfvf->netdev,
1068 "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n",
1069 flow->location,
1070 flow_cfg->max_flows - 1);
1071 err = -EINVAL;
1072 } else {
1073 err = otx2_add_flow_msg(pfvf, flow);
1074 }
1075 }
1076
1077 if (err) {
1078 if (err == MBOX_MSG_INVALID)
1079 err = -EINVAL;
1080 if (new)
1081 kfree(flow);
1082 return err;
1083 }
1084
/* add the newly installed flow to the list */
1086 if (new) {
1087 otx2_add_flow_to_list(pfvf, flow);
1088 flow_cfg->nr_flows++;
1089 }
1090
1091 return 0;
1092 }
1093
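/* Ask the AF to delete a single MCAM flow entry, or all of this PF/VF's
 * entries when 'all' is set.
 */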
static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
1095 {
1096 struct npc_delete_flow_req *req;
1097 int err;
1098
1099 mutex_lock(&pfvf->mbox.lock);
1100 req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
1101 if (!req) {
1102 mutex_unlock(&pfvf->mbox.lock);
1103 return -ENOMEM;
1104 }
1105
1106 req->entry = entry;
1107 if (all)
1108 req->all = 1;
1109
1110 /* Send message to AF */
1111 err = otx2_sync_mbox_msg(&pfvf->mbox);
1112 mutex_unlock(&pfvf->mbox.lock);
1113 return err;
1114 }
1115
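/* Update or delete the DMAC filter (bit 0) that carries the PF's own
 * MAC address, depending on the requested operation.
 */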
static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
1117 {
1118 struct otx2_flow *iter;
1119 struct ethhdr *eth_hdr;
1120 bool found = false;
1121
1122 list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
1123 if (iter->dmac_filter && iter->entry == 0) {
1124 eth_hdr = &iter->flow_spec.h_u.ether_spec;
1125 if (req == DMAC_ADDR_DEL) {
1126 otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
1127 0);
1128 clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
1129 found = true;
1130 } else {
1131 ether_addr_copy(eth_hdr->h_dest,
1132 pfvf->netdev->dev_addr);
1133 otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
1134 }
1135 break;
1136 }
1137 }
1138
1139 if (found) {
1140 list_del(&iter->list);
1141 kfree(iter);
1142 pfvf->flow_cfg->nr_flows--;
1143 }
1144 }
1145
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
1147 {
1148 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
1149 struct otx2_flow *flow;
1150 int err;
1151
1152 if (location >= otx2_get_maxflows(flow_cfg))
1153 return -EINVAL;
1154
1155 flow = otx2_find_flow(pfvf, location);
1156 if (!flow)
1157 return -ENOENT;
1158
1159 if (flow->dmac_filter) {
1160 struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
1161
1162 /* user not allowed to remove dmac filter with interface mac */
1163 if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
1164 return -EPERM;
1165
1166 err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
1167 flow->entry);
1168 clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
1169 /* If all dmac filters are removed delete macfilter with
1170 * interface mac address and configure CGX/RPM block in
1171 * promiscuous mode
1172 */
1173 if (bitmap_weight(&flow_cfg->dmacflt_bmap,
1174 flow_cfg->dmacflt_max_flows) == 1)
1175 otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
1176 } else {
1177 err = otx2_remove_flow_msg(pfvf, flow->entry, false);
1178 }
1179
1180 if (err)
1181 return err;
1182
1183 list_del(&flow->list);
1184 kfree(flow);
1185 flow_cfg->nr_flows--;
1186
1187 return 0;
1188 }
1189
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
1191 {
1192 struct otx2_flow *flow, *tmp;
1193 int err;
1194
1195 list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
1196 if (flow->rss_ctx_id != ctx_id)
1197 continue;
1198 err = otx2_remove_flow(pfvf, flow->location);
1199 if (err)
1200 netdev_warn(pfvf->netdev,
1201 "Can't delete the rule %d associated with this rss group err:%d",
1202 flow->location, err);
1203 }
1204 }
1205
int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
1207 {
1208 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
1209 struct npc_delete_flow_req *req;
1210 struct otx2_flow *iter, *tmp;
1211 int err;
1212
1213 if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
1214 return 0;
1215
1216 if (!flow_cfg->max_flows)
1217 return 0;
1218
1219 mutex_lock(&pfvf->mbox.lock);
1220 req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
1221 if (!req) {
1222 mutex_unlock(&pfvf->mbox.lock);
1223 return -ENOMEM;
1224 }
1225
1226 req->start = flow_cfg->flow_ent[0];
1227 req->end = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
1228 err = otx2_sync_mbox_msg(&pfvf->mbox);
1229 mutex_unlock(&pfvf->mbox.lock);
1230
1231 list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
1232 list_del(&iter->list);
1233 kfree(iter);
1234 flow_cfg->nr_flows--;
1235 }
1236 return err;
1237 }
1238
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
1240 {
1241 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
1242 struct npc_mcam_free_entry_req *req;
1243 struct otx2_flow *iter, *tmp;
1244 int err;
1245
1246 if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
1247 return 0;
1248
1249 /* remove all flows */
1250 err = otx2_remove_flow_msg(pfvf, 0, true);
1251 if (err)
1252 return err;
1253
1254 list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
1255 list_del(&iter->list);
1256 kfree(iter);
1257 flow_cfg->nr_flows--;
1258 }
1259
1260 mutex_lock(&pfvf->mbox.lock);
1261 req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
1262 if (!req) {
1263 mutex_unlock(&pfvf->mbox.lock);
1264 return -ENOMEM;
1265 }
1266
1267 req->all = 1;
1268 /* Send message to AF to free MCAM entries */
1269 err = otx2_sync_mbox_msg(&pfvf->mbox);
1270 if (err) {
1271 mutex_unlock(&pfvf->mbox.lock);
1272 return err;
1273 }
1274
1275 pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
1276 mutex_unlock(&pfvf->mbox.lock);
1277
1278 return 0;
1279 }
1280
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
1282 {
1283 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
1284 struct npc_install_flow_req *req;
1285 int err;
1286
1287 mutex_lock(&pfvf->mbox.lock);
1288 req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
1289 if (!req) {
1290 mutex_unlock(&pfvf->mbox.lock);
1291 return -ENOMEM;
1292 }
1293
1294 req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
1295 req->intf = NIX_INTF_RX;
1296 ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
1297 eth_broadcast_addr((u8 *)&req->mask.dmac);
1298 req->channel = pfvf->hw.rx_chan_base;
1299 req->op = NIX_RX_ACTION_DEFAULT;
1300 req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
1301 req->vtag0_valid = true;
1302 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
1303
1304 /* Send message to AF */
1305 err = otx2_sync_mbox_msg(&pfvf->mbox);
1306 mutex_unlock(&pfvf->mbox.lock);
1307 return err;
1308 }
1309
static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
1311 {
1312 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
1313 struct npc_delete_flow_req *req;
1314 int err;
1315
1316 mutex_lock(&pfvf->mbox.lock);
1317 req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
1318 if (!req) {
1319 mutex_unlock(&pfvf->mbox.lock);
1320 return -ENOMEM;
1321 }
1322
1323 req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
1324 /* Send message to AF */
1325 err = otx2_sync_mbox_msg(&pfvf->mbox);
1326 mutex_unlock(&pfvf->mbox.lock);
1327 return err;
1328 }
1329
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
1331 {
1332 struct nix_vtag_config *req;
1333 struct mbox_msghdr *rsp_hdr;
1334 int err;
1335
/* Don't have enough MCAM entries */
1337 if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
1338 return -ENOMEM;
1339
1340 if (enable) {
1341 err = otx2_install_rxvlan_offload_flow(pf);
1342 if (err)
1343 return err;
1344 } else {
1345 err = otx2_delete_rxvlan_offload_flow(pf);
1346 if (err)
1347 return err;
1348 }
1349
1350 mutex_lock(&pf->mbox.lock);
1351 req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
1352 if (!req) {
1353 mutex_unlock(&pf->mbox.lock);
1354 return -ENOMEM;
1355 }
1356
1357 /* config strip, capture and size */
1358 req->vtag_size = VTAGSIZE_T4;
1359 req->cfg_type = 1; /* rx vlan cfg */
1360 req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
1361 req->rx.strip_vtag = enable;
1362 req->rx.capture_vtag = enable;
1363
1364 err = otx2_sync_mbox_msg(&pf->mbox);
1365 if (err) {
1366 mutex_unlock(&pf->mbox.lock);
1367 return err;
1368 }
1369
1370 rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
1371 if (IS_ERR(rsp_hdr)) {
1372 mutex_unlock(&pf->mbox.lock);
1373 return PTR_ERR(rsp_hdr);
1374 }
1375
1376 mutex_unlock(&pf->mbox.lock);
1377 return rsp_hdr->rc;
1378 }
1379
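/* Re-program every DMAC filter tracked in the flow list into the
 * CGX/RPM block.
 */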
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
1381 {
1382 struct otx2_flow *iter;
1383 struct ethhdr *eth_hdr;
1384
1385 list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
1386 if (iter->dmac_filter) {
1387 eth_hdr = &iter->flow_spec.h_u.ether_spec;
1388 otx2_dmacflt_add(pf, eth_hdr->h_dest,
1389 iter->entry);
1390 }
1391 }
1392 }
1393
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
1395 {
1396 otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
1397 }
1398