// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_eswitch.h"
#include "ice_fltr.h"
#include "ice_repr.h"
#include "ice_devlink.h"
#include "ice_tc_lib.h"

/**
 * ice_eswitch_setup_env - configure switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function adds HW filters configuration specific for switchdev
 * mode.
 *
 * Return: 0 on success, -ENODEV on failure. On failure every step taken
 * so far is unwound and the uplink MAC/broadcast filter is restored.
 */
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct net_device *uplink_netdev = uplink_vsi->netdev;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_port_info *pi = pf->hw.port_info;
	/* set only when this function installs the default-Rx-VSI rule,
	 * so the unwind path knows whether to clear it
	 */
	bool rule_added = false;

	ice_vsi_manage_vlan_stripping(ctrl_vsi, false);

	/* drop the uplink's own MAC/VLAN filters; switchdev steers traffic
	 * through the default-VSI rules installed below instead
	 */
	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);

	netif_addr_lock_bh(uplink_netdev);
	__dev_uc_unsync(uplink_netdev, NULL);
	__dev_mc_unsync(uplink_netdev, NULL);
	netif_addr_unlock_bh(uplink_netdev);

	if (ice_vsi_add_vlan(uplink_vsi, 0, ICE_FWD_TO_VSI))
		goto err_def_rx;

	/* make the uplink the default (Rx) VSI unless one is already set */
	if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
		if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi))
			goto err_def_rx;
		rule_added = true;
	}

	/* default Tx rule forwards transmit traffic to the control VSI */
	if (ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, true, ICE_FLTR_TX))
		goto err_def_tx;

	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_uplink;

	if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_control;

	/* enable loopback action on the default Tx rule */
	if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id,
					    ICE_FLTR_TX,
					    ICE_SINGLE_ACT_LB_ENABLE))
		goto err_update_action;

	return 0;

	/* unwind in exact reverse order of the setup steps above */
err_update_action:
	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
err_override_control:
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
	ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
err_def_tx:
	if (rule_added)
		ice_clear_dflt_vsi(uplink_vsi->vsw);
err_def_rx:
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
	return -ENODEV;
}
76
77 /**
78 * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
79 * @pf: pointer to PF struct
80 *
81 * In switchdev number of allocated Tx/Rx rings is equal.
82 *
83 * This function fills q_vectors structures associated with representor and
84 * move each ring pairs to port representor netdevs. Each port representor
85 * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to
86 * number of VFs.
87 */
ice_eswitch_remap_rings_to_vectors(struct ice_pf * pf)88 static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
89 {
90 struct ice_vsi *vsi = pf->switchdev.control_vsi;
91 int q_id;
92
93 ice_for_each_txq(vsi, q_id) {
94 struct ice_repr *repr = pf->vf[q_id].repr;
95 struct ice_q_vector *q_vector = repr->q_vector;
96 struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
97 struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
98
99 q_vector->vsi = vsi;
100 q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
101
102 q_vector->num_ring_tx = 1;
103 q_vector->tx.tx_ring = tx_ring;
104 tx_ring->q_vector = q_vector;
105 tx_ring->next = NULL;
106 tx_ring->netdev = repr->netdev;
107 /* In switchdev mode, from OS stack perspective, there is only
108 * one queue for given netdev, so it needs to be indexed as 0.
109 */
110 tx_ring->q_index = 0;
111
112 q_vector->num_ring_rx = 1;
113 q_vector->rx.rx_ring = rx_ring;
114 rx_ring->q_vector = q_vector;
115 rx_ring->next = NULL;
116 rx_ring->netdev = repr->netdev;
117 }
118 }
119
120 /**
121 * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
122 * @pf: pointer to PF struct
123 */
ice_eswitch_setup_reprs(struct ice_pf * pf)124 static int ice_eswitch_setup_reprs(struct ice_pf *pf)
125 {
126 struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
127 int max_vsi_num = 0;
128 int i;
129
130 ice_for_each_vf(pf, i) {
131 struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
132 struct ice_vf *vf = &pf->vf[i];
133
134 ice_remove_vsi_fltr(&pf->hw, vsi->idx);
135 vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
136 GFP_KERNEL);
137 if (!vf->repr->dst) {
138 ice_fltr_add_mac_and_broadcast(vsi,
139 vf->hw_lan_addr.addr,
140 ICE_FWD_TO_VSI);
141 goto err;
142 }
143
144 if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
145 ice_fltr_add_mac_and_broadcast(vsi,
146 vf->hw_lan_addr.addr,
147 ICE_FWD_TO_VSI);
148 metadata_dst_free(vf->repr->dst);
149 goto err;
150 }
151
152 if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI)) {
153 ice_fltr_add_mac_and_broadcast(vsi,
154 vf->hw_lan_addr.addr,
155 ICE_FWD_TO_VSI);
156 metadata_dst_free(vf->repr->dst);
157 ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
158 goto err;
159 }
160
161 if (max_vsi_num < vsi->vsi_num)
162 max_vsi_num = vsi->vsi_num;
163
164 netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll,
165 NAPI_POLL_WEIGHT);
166
167 netif_keep_dst(vf->repr->netdev);
168 }
169
170 kfree(ctrl_vsi->target_netdevs);
171
172 ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1,
173 sizeof(*ctrl_vsi->target_netdevs),
174 GFP_KERNEL);
175 if (!ctrl_vsi->target_netdevs)
176 goto err;
177
178 ice_for_each_vf(pf, i) {
179 struct ice_repr *repr = pf->vf[i].repr;
180 struct ice_vsi *vsi = repr->src_vsi;
181 struct metadata_dst *dst;
182
183 ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev;
184
185 dst = repr->dst;
186 dst->u.port_info.port_id = vsi->vsi_num;
187 dst->u.port_info.lower_dev = repr->netdev;
188 ice_repr_set_traffic_vsi(repr, ctrl_vsi);
189 }
190
191 return 0;
192
193 err:
194 for (i = i - 1; i >= 0; i--) {
195 struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
196 struct ice_vf *vf = &pf->vf[i];
197
198 ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
199 metadata_dst_free(vf->repr->dst);
200 ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
201 ICE_FWD_TO_VSI);
202 }
203
204 return -ENODEV;
205 }
206
207 /**
208 * ice_eswitch_release_reprs - clear PR VSIs configuration
209 * @pf: poiner to PF struct
210 * @ctrl_vsi: pointer to switchdev control VSI
211 */
212 static void
ice_eswitch_release_reprs(struct ice_pf * pf,struct ice_vsi * ctrl_vsi)213 ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
214 {
215 int i;
216
217 kfree(ctrl_vsi->target_netdevs);
218 ice_for_each_vf(pf, i) {
219 struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
220 struct ice_vf *vf = &pf->vf[i];
221
222 ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
223 metadata_dst_free(vf->repr->dst);
224 ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
225 ICE_FWD_TO_VSI);
226
227 netif_napi_del(&vf->repr->q_vector->napi);
228 }
229 }
230
231 /**
232 * ice_eswitch_update_repr - reconfigure VF port representor
233 * @vsi: VF VSI for which port representor is configured
234 */
ice_eswitch_update_repr(struct ice_vsi * vsi)235 void ice_eswitch_update_repr(struct ice_vsi *vsi)
236 {
237 struct ice_pf *pf = vsi->back;
238 struct ice_repr *repr;
239 struct ice_vf *vf;
240 int ret;
241
242 if (!ice_is_switchdev_running(pf))
243 return;
244
245 vf = &pf->vf[vsi->vf_id];
246 repr = vf->repr;
247 repr->src_vsi = vsi;
248 repr->dst->u.port_info.port_id = vsi->vsi_num;
249
250 ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
251 if (ret) {
252 ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
253 dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id);
254 }
255 }
256
257 /**
258 * ice_eswitch_port_start_xmit - callback for packets transmit
259 * @skb: send buffer
260 * @netdev: network interface device structure
261 *
262 * Returns NETDEV_TX_OK if sent, else an error code
263 */
264 netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff * skb,struct net_device * netdev)265 ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
266 {
267 struct ice_netdev_priv *np;
268 struct ice_repr *repr;
269 struct ice_vsi *vsi;
270
271 np = netdev_priv(netdev);
272 vsi = np->vsi;
273
274 if (ice_is_reset_in_progress(vsi->back->state))
275 return NETDEV_TX_BUSY;
276
277 repr = ice_netdev_to_repr(netdev);
278 skb_dst_drop(skb);
279 dst_hold((struct dst_entry *)repr->dst);
280 skb_dst_set(skb, (struct dst_entry *)repr->dst);
281 skb->queue_mapping = repr->vf->vf_id;
282
283 return ice_start_xmit(skb, netdev);
284 }
285
286 /**
287 * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
288 * @skb: pointer to send buffer
289 * @off: pointer to offload struct
290 */
291 void
ice_eswitch_set_target_vsi(struct sk_buff * skb,struct ice_tx_offload_params * off)292 ice_eswitch_set_target_vsi(struct sk_buff *skb,
293 struct ice_tx_offload_params *off)
294 {
295 struct metadata_dst *dst = skb_metadata_dst(skb);
296 u64 cd_cmd, dst_vsi;
297
298 if (!dst) {
299 cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
300 off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
301 } else {
302 cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
303 dst_vsi = ((u64)dst->u.port_info.port_id <<
304 ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
305 off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
306 }
307 }
308
/**
 * ice_eswitch_release_env - clear switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function removes HW filters configuration specific for switchdev
 * mode and restores default legacy mode settings.
 *
 * Mirrors ice_eswitch_setup_env() in reverse order.
 */
static void ice_eswitch_release_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
	/* remove the default Tx rule pointing at the control VSI */
	ice_cfg_dflt_vsi(&pf->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
	/* drop the default (Rx) VSI assignment */
	ice_clear_dflt_vsi(uplink_vsi->vsw);
	/* restore the uplink MAC filter removed during setup */
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
}
329
330 /**
331 * ice_eswitch_vsi_setup - configure switchdev control VSI
332 * @pf: pointer to PF structure
333 * @pi: pointer to port_info structure
334 */
335 static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf * pf,struct ice_port_info * pi)336 ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
337 {
338 return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL);
339 }
340
341 /**
342 * ice_eswitch_napi_del - remove NAPI handle for all port representors
343 * @pf: pointer to PF structure
344 */
ice_eswitch_napi_del(struct ice_pf * pf)345 static void ice_eswitch_napi_del(struct ice_pf *pf)
346 {
347 int i;
348
349 ice_for_each_vf(pf, i)
350 netif_napi_del(&pf->vf[i].repr->q_vector->napi);
351 }
352
353 /**
354 * ice_eswitch_napi_enable - enable NAPI for all port representors
355 * @pf: pointer to PF structure
356 */
ice_eswitch_napi_enable(struct ice_pf * pf)357 static void ice_eswitch_napi_enable(struct ice_pf *pf)
358 {
359 int i;
360
361 ice_for_each_vf(pf, i)
362 napi_enable(&pf->vf[i].repr->q_vector->napi);
363 }
364
365 /**
366 * ice_eswitch_napi_disable - disable NAPI for all port representors
367 * @pf: pointer to PF structure
368 */
ice_eswitch_napi_disable(struct ice_pf * pf)369 static void ice_eswitch_napi_disable(struct ice_pf *pf)
370 {
371 int i;
372
373 ice_for_each_vf(pf, i)
374 napi_disable(&pf->vf[i].repr->q_vector->napi);
375 }
376
377 /**
378 * ice_eswitch_set_rxdid - configure rxdid on all Rx queues from VSI
379 * @vsi: VSI to setup rxdid on
380 * @rxdid: flex descriptor id
381 */
ice_eswitch_set_rxdid(struct ice_vsi * vsi,u32 rxdid)382 static void ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid)
383 {
384 struct ice_hw *hw = &vsi->back->hw;
385 int i;
386
387 ice_for_each_rxq(vsi, i) {
388 struct ice_rx_ring *ring = vsi->rx_rings[i];
389 u16 pf_q = vsi->rxq_map[ring->q_index];
390
391 ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
392 }
393 }
394
/**
 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
 * @pf: pointer to PF structure
 *
 * Creates the switchdev control VSI, installs switchdev HW filters,
 * creates and configures port representors for all VFs, and opens the
 * control VSI.
 *
 * Return: 0 on success, -ENODEV on failure (partial setup is unwound).
 */
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi;

	pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
	if (!pf->switchdev.control_vsi)
		return -ENODEV;

	ctrl_vsi = pf->switchdev.control_vsi;
	pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
	if (!pf->switchdev.uplink_vsi)
		goto err_vsi;

	if (ice_eswitch_setup_env(pf))
		goto err_vsi;

	if (ice_repr_add_for_all_vfs(pf))
		goto err_repr_add;

	if (ice_eswitch_setup_reprs(pf))
		goto err_setup_reprs;

	ice_eswitch_remap_rings_to_vectors(pf);

	if (ice_vsi_open(ctrl_vsi))
		goto err_setup_reprs;

	ice_eswitch_napi_enable(pf);

	/* control VSI Rx queues use the flex descriptor that carries the
	 * source VSI, consumed by ice_eswitch_get_target_netdev()
	 */
	ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);

	return 0;

err_setup_reprs:
	ice_repr_rem_from_all_vfs(pf);
err_repr_add:
	ice_eswitch_release_env(pf);
err_vsi:
	ice_vsi_release(ctrl_vsi);
	return -ENODEV;
}
440
/**
 * ice_eswitch_disable_switchdev - disable switchdev resources
 * @pf: pointer to PF structure
 *
 * Tears down what ice_eswitch_enable_switchdev() set up: NAPI, switchdev
 * HW filters, port representor configuration, the control VSI, and
 * finally the representors themselves.
 */
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_eswitch_napi_disable(pf);
	ice_eswitch_release_env(pf);
	ice_eswitch_release_reprs(pf, ctrl_vsi);
	ice_vsi_release(ctrl_vsi);
	ice_repr_rem_from_all_vfs(pf);
}
455
456 /**
457 * ice_eswitch_mode_set - set new eswitch mode
458 * @devlink: pointer to devlink structure
459 * @mode: eswitch mode to switch to
460 * @extack: pointer to extack structure
461 */
462 int
ice_eswitch_mode_set(struct devlink * devlink,u16 mode,struct netlink_ext_ack * extack)463 ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
464 struct netlink_ext_ack *extack)
465 {
466 struct ice_pf *pf = devlink_priv(devlink);
467
468 if (pf->eswitch_mode == mode)
469 return 0;
470
471 if (pf->num_alloc_vfs) {
472 dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
473 NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
474 return -EOPNOTSUPP;
475 }
476
477 switch (mode) {
478 case DEVLINK_ESWITCH_MODE_LEGACY:
479 dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
480 pf->hw.pf_id);
481 NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
482 break;
483 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
484 {
485 dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
486 pf->hw.pf_id);
487 NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
488 break;
489 }
490 default:
491 NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
492 return -EINVAL;
493 }
494
495 pf->eswitch_mode = mode;
496 return 0;
497 }
498
499 /**
500 * ice_eswitch_get_target_netdev - return port representor netdev
501 * @rx_ring: pointer to Rx ring
502 * @rx_desc: pointer to Rx descriptor
503 *
504 * When working in switchdev mode context (when control VSI is used), this
505 * function returns netdev of appropriate port representor. For non-switchdev
506 * context, regular netdev associated with Rx ring is returned.
507 */
508 struct net_device *
ice_eswitch_get_target_netdev(struct ice_rx_ring * rx_ring,union ice_32b_rx_flex_desc * rx_desc)509 ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
510 union ice_32b_rx_flex_desc *rx_desc)
511 {
512 struct ice_32b_rx_flex_desc_nic_2 *desc;
513 struct ice_vsi *vsi = rx_ring->vsi;
514 struct ice_vsi *control_vsi;
515 u16 target_vsi_id;
516
517 control_vsi = vsi->back->switchdev.control_vsi;
518 if (vsi != control_vsi)
519 return rx_ring->netdev;
520
521 desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
522 target_vsi_id = le16_to_cpu(desc->src_vsi);
523
524 return vsi->target_netdevs[target_vsi_id];
525 }
526
527 /**
528 * ice_eswitch_mode_get - get current eswitch mode
529 * @devlink: pointer to devlink structure
530 * @mode: output parameter for current eswitch mode
531 */
ice_eswitch_mode_get(struct devlink * devlink,u16 * mode)532 int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
533 {
534 struct ice_pf *pf = devlink_priv(devlink);
535
536 *mode = pf->eswitch_mode;
537 return 0;
538 }
539
/**
 * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
 * false otherwise.
 */
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
{
	return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
551
552 /**
553 * ice_eswitch_release - cleanup eswitch
554 * @pf: pointer to PF structure
555 */
ice_eswitch_release(struct ice_pf * pf)556 void ice_eswitch_release(struct ice_pf *pf)
557 {
558 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
559 return;
560
561 ice_eswitch_disable_switchdev(pf);
562 pf->switchdev.is_running = false;
563 }
564
565 /**
566 * ice_eswitch_configure - configure eswitch
567 * @pf: pointer to PF structure
568 */
ice_eswitch_configure(struct ice_pf * pf)569 int ice_eswitch_configure(struct ice_pf *pf)
570 {
571 int status;
572
573 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
574 return 0;
575
576 status = ice_eswitch_enable_switchdev(pf);
577 if (status)
578 return status;
579
580 pf->switchdev.is_running = true;
581 return 0;
582 }
583
584 /**
585 * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
586 * @pf: pointer to PF structure
587 */
ice_eswitch_start_all_tx_queues(struct ice_pf * pf)588 static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
589 {
590 struct ice_repr *repr;
591 int i;
592
593 if (test_bit(ICE_DOWN, pf->state))
594 return;
595
596 ice_for_each_vf(pf, i) {
597 repr = pf->vf[i].repr;
598 if (repr)
599 ice_repr_start_tx_queues(repr);
600 }
601 }
602
603 /**
604 * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
605 * @pf: pointer to PF structure
606 */
ice_eswitch_stop_all_tx_queues(struct ice_pf * pf)607 void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
608 {
609 struct ice_repr *repr;
610 int i;
611
612 if (test_bit(ICE_DOWN, pf->state))
613 return;
614
615 ice_for_each_vf(pf, i) {
616 repr = pf->vf[i].repr;
617 if (repr)
618 ice_repr_stop_tx_queues(repr);
619 }
620 }
621
/**
 * ice_eswitch_rebuild - rebuild eswitch
 * @pf: pointer to PF structure
 *
 * Re-creates the switchdev configuration after a reset: HW filters,
 * representor setup, ring-to-vector mapping, TC filter replay, then
 * reopens the control VSI and restarts repr Tx queues.
 *
 * Return: 0 on success, a negative error code on failure.
 */
int ice_eswitch_rebuild(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int status;

	/* NAPI instances are re-registered by ice_eswitch_setup_reprs() */
	ice_eswitch_napi_disable(pf);
	ice_eswitch_napi_del(pf);

	status = ice_eswitch_setup_env(pf);
	if (status)
		return status;

	status = ice_eswitch_setup_reprs(pf);
	if (status)
		return status;

	ice_eswitch_remap_rings_to_vectors(pf);

	ice_replay_tc_fltrs(pf);

	status = ice_vsi_open(ctrl_vsi);
	if (status)
		return status;

	ice_eswitch_napi_enable(pf);
	/* restore the flex descriptor profile on control VSI Rx queues */
	ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
	ice_eswitch_start_all_tx_queues(pf);

	return 0;
}