// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_vf_lib_private.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_flow.h"
#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
#include "ice_flex_pipe.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"

/**
 * ice_free_vf_entries - Free all VF entries from the hash table
 * @pf: pointer to the PF structure
 *
 * Iterate over the VF hash table, removing and releasing all VF entries.
 * Called during VF teardown or as cleanup during failed VF initialization.
 */
static void ice_free_vf_entries(struct ice_pf *pf)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct hlist_node *tmp;
	struct ice_vf *vf;
	unsigned int bkt;

	/* Remove all VFs from the hash table and release their main
	 * reference. Once all references to the VF are dropped, ice_put_vf()
	 * will call ice_release_vf which will remove the VF memory.
	 */
	lockdep_assert_held(&vfs->table_lock);

	hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
		hash_del_rcu(&vf->entry);
		ice_put_vf(vf);
	}
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
	ice_vf_fdir_exit(vf);
	/* free VF control VSI */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx != ICE_NO_VSI) {
		ice_vf_vsi_release(vf);
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that VF starts in a known state */
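	/* Writing GLINT_DYN_CTL with only CLEARPBA_M set (INTENA left
	 * clear) acks any pending interrupt on the vector while leaving it
	 * disabled, so the vector starts quiesced.
	 */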
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings - disable a VF's MSIX and queue mappings
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi))
		return;

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->vfs.num_msix_per - 1;
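	/* hand each of the VF's vectors back to the PF by writing the
	 * IS_PF bit and this PF's number into GLINT_VECT2FUNC
	 */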
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * Since no MSIX entries are taken from the pf->irq_tracker then just clear
 * the pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
	struct ice_res_tracker *res;

	if (!pf)
		return -EINVAL;

	res = pf->irq_tracker;
	if (!res)
		return -EINVAL;

	/* give back irq_tracker resources used */
	WARN_ON(pf->sriov_base_vector < res->num_entries);

	pf->sriov_base_vector = 0;

	return 0;
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	if (!ice_has_vfs(pf))
		return;

	while (test_and_set_bit(ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	mutex_lock(&vfs->table_lock);

	ice_eswitch_release(pf);

	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		ice_dis_vf_qs(vf);

		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(vf);
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
			ice_free_vf_res(vf);
		}

		if (!pci_vfs_assigned(pf->pdev)) {
			u32 reg_idx, bit_idx;

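			/* GLGEN_VFLRSTAT packs one bit per absolute VF ID,
			 * 32 VFs per 32-bit register
			 */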
			reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}

		/* clear malicious info since the VF is getting released */
		if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->vfs.malvfs,
					ICE_MAX_SRIOV_VFS, vf->vf_id))
			dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
				vf->vf_id);

		mutex_unlock(&vf->cfg_lock);
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	vfs->num_qps_per = 0;
	ice_free_vf_entries(pf);

	mutex_unlock(&vfs->table_lock);

	clear_bit(ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @vf: VF to setup VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
	struct ice_vsi_cfg_params params = {};
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	params.type = ICE_VSI_VF;
	params.pi = ice_vf_get_port_info(vf);
	params.vf = vf;
	params.flags = ICE_VSI_FLAG_INIT;

	vsi = ice_vsi_setup(pf, &params);

	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
		ice_vf_invalidate_vsi(vf);
		return NULL;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	return vsi;
}

/**
 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 *
 * This returns the first MSIX vector index in PF space that is used by this VF.
 * This index is used when accessing PF relative registers such as
 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 * This will always be the OICR index in the AVF driver so any functionality
 * using vf->first_vector_idx for queue configuration will have to increment by
 * 1 to avoid meddling with the OICR index.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
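	/* example with hypothetical values: sriov_base_vector == 96 and
	 * num_msix_per == 17 puts VF 0 at PF vectors 96..112 and VF 2 at
	 * 130..146
	 */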
	return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per;
}

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some of the registers need to be indexed/configured using hardware global
 * device values and other registers need 0-based values, which represent PF
 * based values.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;

	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + pf->vfs.num_msix_per) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

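	/* VPINT_ALLOC and VPINT_ALLOC_PCI take device-relative vector
	 * indices, hence the msix_vector_first_id offset applied above
	 */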
	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
		VPINT_ALLOC_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
		& VPINT_ALLOC_PCI_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map the interrupts to their functions */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}

/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	if (WARN_ON(!vsi))
		return;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf;

	if (!vf || !q_vector)
		return -EINVAL;

	pf = vf->pf;

	/* always add one to account for the OICR being the first MSIX */
	return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
	       q_vector->v_idx + 1;
}

/**
 * ice_get_max_valid_res_idx - Get the max valid resource index
 * @res: pointer to the resource to find the max valid index for
 *
 * Start from the end of the ice_res_tracker and return right when we find the
 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
 * valid for SR-IOV because it is the only consumer that manipulates the
 * res->end and this is always called when res->end is set to res->num_entries.
 */
static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
{
	int i;

	if (!res)
		return -EINVAL;

	for (i = res->num_entries - 1; i >= 0; i--)
		if (res->list[i] & ICE_RES_VALID_BIT)
			return i;

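	/* no entry is currently valid; report index 0 */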
	return 0;
}

/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * in the PF's space available for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors_used = pf->irq_tracker->num_entries;
	int sriov_base_vector;

	sriov_base_vector = total_vectors - num_msix_needed;
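	/* e.g. with hypothetical numbers: 1024 total vectors and 128 needed
	 * puts the SR-IOV block at vectors 896..1023, clear of the
	 * irq_tracker entries at the front
	 */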

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector < vectors_used)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	return 0;
}

/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 * @num_vfs: the number of SR-IOV VFs being configured
 *
 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
 * get more vectors and can enable more queues per VF. Note that this does not
 * grab any vectors from the SW pool already allocated. Also note, that all
 * vector counts include one for each VF's miscellaneous interrupt vector
 * (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
 * by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	lockdep_assert_held(&pf->vfs.table_lock);

	if (!num_vfs)
		return -EINVAL;

	if (max_valid_res_idx < 0)
		return -ENOSPC;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
		pf->irq_tracker->num_entries;
	msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			num_vfs);
		return -ENOSPC;
	}

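	/* When the shared queue pool is oversubscribed, the per-VF count is
	 * rounded down to a power of two; presumably this keeps the RSS
	 * distribution across a VF's queues even.
	 */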
	num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
	if (!avail_qs)
		num_txq = 0;
	else if (num_txq > avail_qs)
		num_txq = rounddown_pow_of_two(avail_qs);

	num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
	if (!avail_qs)
		num_rxq = 0;
	else if (num_rxq > avail_qs)
		num_rxq = rounddown_pow_of_two(avail_qs);

	if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, num_vfs);
		return -ENOSPC;
	}

	err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
	if (err) {
		dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
			num_vfs, err);
		return err;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
	pf->vfs.num_msix_per = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);

	return 0;
}

/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the
 * VF VSI's broadcast filter and is only used during initial VF creation.
 */
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int err;

	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	vsi = ice_vf_vsi_setup(vf);
	if (!vsi)
		return -ENOMEM;

	err = ice_vf_init_host_cfg(vf, vsi);
	if (err)
		goto release_vsi;

	return 0;

release_vsi:
	ice_vf_vsi_release(vf);
	return err;
}

/**
 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
 * @pf: PF the VFs are associated with
 */
static int ice_start_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int bkt, it_cnt;
	struct ice_vf *vf;
	int retval;

	lockdep_assert_held(&pf->vfs.table_lock);

	it_cnt = 0;
	ice_for_each_vf(pf, bkt, vf) {
		vf->vf_ops->clear_reset_trigger(vf);

		retval = ice_init_vf_vsi_res(vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
				vf->vf_id, retval);
			goto teardown;
		}

		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
		ice_ena_vf_mappings(vf);
		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
		it_cnt++;
	}

	ice_flush(hw);
	return 0;

teardown:
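	/* unwind only the first it_cnt VFs, relying on ice_for_each_vf
	 * walking the hash table in the same order as the setup loop above
	 */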
	ice_for_each_vf(pf, bkt, vf) {
		if (it_cnt == 0)
			break;

		ice_dis_vf_mappings(vf);
		ice_vf_vsi_release(vf);
		it_cnt--;
	}

	return retval;
}

/**
 * ice_sriov_free_vf - Free VF memory after all references are dropped
 * @vf: pointer to VF to free
 *
 * Called by ice_put_vf through ice_release_vf once the last reference to a VF
 * structure has been dropped.
 */
static void ice_sriov_free_vf(struct ice_vf *vf)
{
	mutex_destroy(&vf->cfg_lock);

	kfree_rcu(vf, rcu);
}

/**
 * ice_sriov_clear_reset_state - clears VF Reset status register
 * @vf: the vf to configure
 */
static void ice_sriov_clear_reset_state(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;

	/* Clear the reset status register so that VF immediately sees that
	 * the device is resetting, even if hardware hasn't yet gotten around
	 * to clearing VFGEN_RSTAT for us.
	 */
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_INPROGRESS);
}

/**
 * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers
 * @vf: the vf to configure
 */
static void ice_sriov_clear_mbx_register(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0);
	wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0);
}

/**
 * ice_sriov_trigger_reset_register - trigger VF reset for SRIOV VF
 * @vf: pointer to VF structure
 * @is_vflr: true if reset occurred due to VFLR
 *
 * Trigger and cleanup after a VF reset for a SR-IOV VF.
 */
static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	unsigned int vf_abs_id, i;
	struct device *dev;
	struct ice_hw *hw;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* In the case of a VFLR, HW has already reset the VF and we just need
	 * to clean up. Otherwise we must first trigger the reset using the
	 * VFRTRIG register.
	 */
	if (!is_vflr) {
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}

	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}

/**
 * ice_sriov_poll_reset_status - poll SRIOV VF reset status
 * @vf: pointer to VF structure
 *
 * Returns true when reset is successful, else returns false
 */
static bool ice_sriov_poll_reset_status(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	unsigned int i;
	u32 reg;

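	/* ten polls at 10-20 us each bounds the wait to roughly 100-200 us */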
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M)
			return true;

		/* only sleep if the reset is not done */
		usleep_range(10, 20);
	}
	return false;
}

/**
 * ice_sriov_clear_reset_trigger - enable VF to access hardware
 * @vf: VF to enable hardware access for
 */
static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	ice_flush(hw);
}

/**
 * ice_sriov_create_vsi - Create a new VSI for a VF
 * @vf: VF to create the VSI for
 *
 * This is called by ice_vf_recreate_vsi to create the new VSI after the old
 * VSI has been released.
 */
static int ice_sriov_create_vsi(struct ice_vf *vf)
{
	struct ice_vsi *vsi;

	vsi = ice_vf_vsi_setup(vf);
	if (!vsi)
		return -ENOMEM;

	return 0;
}

/**
 * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
 * @vf: VF to perform tasks on
 */
static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
{
	ice_ena_vf_mappings(vf);
	wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

static const struct ice_vf_ops ice_sriov_vf_ops = {
	.reset_type = ICE_VF_RESET,
	.free = ice_sriov_free_vf,
	.clear_reset_state = ice_sriov_clear_reset_state,
	.clear_mbx_register = ice_sriov_clear_mbx_register,
	.trigger_reset_register = ice_sriov_trigger_reset_register,
	.poll_reset_status = ice_sriov_poll_reset_status,
	.clear_reset_trigger = ice_sriov_clear_reset_trigger,
	.irq_close = NULL,
	.create_vsi = ice_sriov_create_vsi,
	.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
};

/**
 * ice_create_vf_entries - Allocate and insert VF entries
 * @pf: pointer to the PF structure
 * @num_vfs: the number of VFs to allocate
 *
 * Allocate new VF entries and insert them into the hash table. Set some
 * basic default fields for initializing the new VFs.
 *
 * After this function exits, the hash table will have num_vfs entries
 * inserted.
 *
 * Returns 0 on success or an integer error code on failure.
 */
static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_vf *vf;
	u16 vf_id;
	int err;

	lockdep_assert_held(&vfs->table_lock);

	for (vf_id = 0; vf_id < num_vfs; vf_id++) {
		vf = kzalloc(sizeof(*vf), GFP_KERNEL);
		if (!vf) {
			err = -ENOMEM;
			goto err_free_entries;
		}
		kref_init(&vf->refcnt);

		vf->pf = pf;
		vf->vf_id = vf_id;

		/* set sriov vf ops for VFs created during SRIOV flow */
		vf->vf_ops = &ice_sriov_vf_ops;

		ice_initialize_vf_entry(vf);

		vf->vf_sw_id = pf->first_sw;

		hash_add_rcu(vfs->table, &vf->entry, vf_id);
	}

	return 0;

err_free_entries:
	ice_free_vf_entries(pf);
	return err;
}

/**
 * ice_ena_vfs - enable VFs so they are ready to be used
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to enable
 */
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_vfs);
	if (ret)
		goto err_unroll_intr;

	mutex_lock(&pf->vfs.table_lock);

	ret = ice_set_per_vf_res(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer number of VFs\n",
			num_vfs, ret);
		goto err_unroll_sriov;
	}

	ret = ice_create_vf_entries(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Failed to allocate VF entries for %d VFs\n",
			num_vfs);
		goto err_unroll_sriov;
	}

	ret = ice_start_vfs(pf);
	if (ret) {
		dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
		ret = -EAGAIN;
		goto err_unroll_vf_entries;
	}

	clear_bit(ICE_VF_DIS, pf->state);

	ret = ice_eswitch_configure(pf);
	if (ret) {
		dev_err(dev, "Failed to configure eswitch, err %d\n", ret);
		goto err_unroll_sriov;
	}

	/* rearm global interrupts */
	if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
		ice_irq_dynamic_ena(hw, NULL, NULL);

	mutex_unlock(&pf->vfs.table_lock);

	return 0;

err_unroll_vf_entries:
	ice_free_vf_entries(pf);
err_unroll_sriov:
	mutex_unlock(&pf->vfs.table_lock);
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(ICE_OICR_INTR_DIS, pf->state);
	return ret;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 *
 * Returns 0 on success and negative on failure
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	int pre_existing_vfs = pci_num_vf(pf->pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		ice_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		return 0;

	if (num_vfs > pf->vfs.num_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->vfs.num_supported);
		return -EOPNOTSUPP;
	}

	dev_info(dev, "Enabling %d VFs\n", num_vfs);
	err = ice_ena_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return 0;
}

/**
 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
 * @pf: PF to enable SR-IOV on
 */
static int ice_check_sriov_allowed(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
		return -EOPNOTSUPP;
	}

	if (ice_is_safe_mode(pf)) {
		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
		return -EOPNOTSUPP;
	}

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate or 0 to free VFs
 *
 * This function is called when the user updates the number of VFs in sysfs. On
 * success return whatever num_vfs was set to by the caller. Return negative on
 * failure.
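 *
 * e.g. "echo 8 > /sys/bus/pci/devices/<BDF>/sriov_numvfs" creates 8 VFs and
 * writing 0 frees them, via the standard PCI core sysfs attribute.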
 */
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_check_sriov_allowed(pf);
	if (err)
		return err;

	if (!num_vfs) {
		if (!pci_vfs_assigned(pdev)) {
			ice_free_vfs(pf);
			ice_mbx_deinit_snapshot(&pf->hw);
			if (pf->lag)
				ice_enable_lag(pf->lag);
			return 0;
		}

		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	err = ice_mbx_init_snapshot(&pf->hw, num_vfs);
	if (err)
		return err;

	err = ice_pci_sriov_ena(pf, num_vfs);
	if (err) {
		ice_mbx_deinit_snapshot(&pf->hw);
		return err;
	}

	if (pf->lag)
		ice_disable_lag(pf->lag);
	return num_vfs;
}

/**
 * ice_process_vflr_event - Free VF resources via IRQ calls
 * @pf: pointer to the PF structure
 *
 * called from the VFLR IRQ handler to
 * free up VF resources and state variables
 */
void ice_process_vflr_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
	    !ice_has_vfs(pf))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		u32 reg_idx, bit_idx;

		reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
			ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
 * @pf: PF used to index all VFs
 * @pfq: queue index relative to the PF's function space
 *
 * If no VF is found who owns the pfq then return NULL, otherwise return a
 * pointer to the VF who owns the pfq
 *
 * If this function returns non-NULL, it acquires a reference count of the VF
 * structure. The caller is responsible for calling ice_put_vf() to drop this
 * reference.
 */
static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
{
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		struct ice_vsi *vsi;
		u16 rxq_idx;

		vsi = ice_get_vf_vsi(vf);
		if (!vsi)
			continue;

		ice_for_each_rxq(vsi, rxq_idx)
			if (vsi->rxq_map[rxq_idx] == pfq) {
				struct ice_vf *found;

				if (kref_get_unless_zero(&vf->refcnt))
					found = vf;
				else
					found = NULL;
				rcu_read_unlock();
				return found;
			}
	}
	rcu_read_unlock();

	return NULL;
}

/**
 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
 * @pf: PF used for conversion
 * @globalq: global queue index used to convert to PF space queue index
 */
static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
{
	return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
}

/**
 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
 * @pf: PF that the LAN overflow event happened on
 * @event: structure holding the event information for the LAN overflow event
 *
 * Determine if the LAN overflow event was caused by a VF queue. If it was not
 * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
 * reset on the offending VF.
 */
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	u32 gldcb_rtctq, queue;
	struct ice_vf *vf;

	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);

	/* event returns device global Rx queue number */
	queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
		GLDCB_RTCTQ_RXQNUM_S;

	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
	if (!vf)
		return;

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
	ice_put_vf(vf);
}

/**
 * ice_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ena: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 */
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		netdev_err(netdev, "VSI %d for VF %d is null\n",
			   vf->lan_vsi_idx, vf->vf_id);
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf_vsi->type != ICE_VSI_VF) {
		netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
		ret = -ENODEV;
		goto out_put_vf;
	}

	if (ena == vf->spoofchk) {
		dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_vsi_apply_spoofchk(vf_vsi, ena);
	if (ret)
		dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d, error %d\n",
			ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret);
	else
		vf->spoofchk = ena;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_get_vf_cfg
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 */
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	ivi->vf = vf_id;
	ether_addr_copy(ivi->mac, vf->hw_lan_addr);

	/* VF configuration for VLAN and applicable QoS */
	ivi->vlan = ice_vf_get_port_vlan_id(vf);
	ivi->qos = ice_vf_get_port_vlan_prio(vf);
	if (ice_vf_is_port_vlan_ena(vf))
		ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf));

	ivi->trusted = vf->trusted;
	ivi->spoofchk = vf->spoofchk;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address
 *
 * program VF MAC address
 */
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (is_multicast_ether_addr(mac)) {
		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
		return -EINVAL;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	/* nothing left to do, unicast MAC already set */
	if (ether_addr_equal(vf->dev_lan_addr, mac) &&
	    ether_addr_equal(vf->hw_lan_addr, mac)) {
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	mutex_lock(&vf->cfg_lock);

	/* VF is notified of its new MAC via the PF's response to the
	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
	 */
	ether_addr_copy(vf->dev_lan_addr, mac);
	ether_addr_copy(vf->hw_lan_addr, mac);
	if (is_zero_ether_addr(mac)) {
		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
		vf->pf_set_mac = false;
		netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
			    vf->vf_id);
	} else {
		/* PF will add MAC rule for the VF */
		vf->pf_set_mac = true;
		netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
			    mac, vf_id);
	}

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_trust
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: Boolean value to enable/disable trusted VF
 *
 * Enable or disable a given VF as trusted
 */
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	if (ice_is_eswitch_mode_switchdev(pf)) {
		dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
		return -EOPNOTSUPP;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	/* Check if already trusted */
	if (trusted == vf->trusted) {
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->trusted = trusted;
	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
		 vf_id, trusted ? "" : "un");

	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link_state: required link state
 *
 * Set VF's link state, irrespective of physical link state status
 */
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		break;
	default:
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_vc_notify_vf_link_state(vf);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
 * @pf: PF associated with VFs
 */
static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;
	int rate = 0;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		rate += vf->min_tx_rate;
	rcu_read_unlock();

	return rate;
}

/**
 * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
 * @vf: VF trying to configure min_tx_rate
 * @min_tx_rate: min Tx rate in Mbps
 *
 * Check if the min_tx_rate being passed in will cause oversubscription of total
 * min_tx_rate based on the current link speed and all other VFs configured
 * min_tx_rate
 *
 * Return true if the passed min_tx_rate would cause oversubscription, else
 * return false
 */
static bool
ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int all_vfs_min_tx_rate;
	int link_speed_mbps;

	if (WARN_ON(!vsi))
		return false;

	link_speed_mbps = ice_get_link_speed_mbps(vsi);
	all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);

	/* this VF's previous rate is being overwritten */
	all_vfs_min_tx_rate -= vf->min_tx_rate;

	if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
		dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
			min_tx_rate, vf->vf_id,
			all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
			link_speed_mbps);
		return true;
	}

	return false;
}

/**
 * ice_set_vf_bw - set min/max VF bandwidth
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate in Mbps
 * @max_tx_rate: Maximum Tx rate in Mbps
 */
int
ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
	      int max_tx_rate)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (min_tx_rate && ice_is_dcb_active(pf)) {
		dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
		ret = -EOPNOTSUPP;
		goto out_put_vf;
	}

	if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
		ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->min_tx_rate = min_tx_rate;
	}

	if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
		ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->max_tx_rate = max_tx_rate;
	}

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-255)
 * @vf_stats: pointer to the OS memory to be initialized
 */
int ice_get_vf_stats(struct net_device *netdev, int vf_id,
		     struct ifla_vf_stats *vf_stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_eth_stats *stats;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
		stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
		stats->tx_multicast;
	vf_stats->rx_bytes = stats->rx_bytes;
	vf_stats->tx_bytes = stats->tx_bytes;
	vf_stats->broadcast = stats->rx_broadcast;
	vf_stats->multicast = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards;
	vf_stats->tx_dropped = stats->tx_discards;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported
 * @hw: hardware structure used to check the VLAN mode
 * @vlan_proto: VLAN TPID being checked
 *
 * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q
 * and ETH_P_8021AD are supported. If the device is configured in Single VLAN
 * Mode (SVM), then only ETH_P_8021Q is supported.
 */
static bool
ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto)
{
	bool is_supported = false;

	switch (vlan_proto) {
	case ETH_P_8021Q:
		is_supported = true;
		break;
	case ETH_P_8021AD:
		if (ice_is_dvm_ena(hw))
			is_supported = true;
		break;
	}

	return is_supported;
}

/**
 * ice_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID being set
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 *
 * program VF Port VLAN ID and/or QoS
 */
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	u16 local_vlan_proto = ntohs(vlan_proto);
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	if (vlan_id >= VLAN_N_VID || qos > 7) {
		dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
			vf_id, vlan_id, qos);
		return -EINVAL;
	}

	if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
		dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
			local_vlan_proto);
		return -EPROTONOSUPPORT;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	if (ice_vf_get_port_vlan_prio(vf) == qos &&
	    ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
	    ice_vf_get_port_vlan_id(vf) == vlan_id) {
		/* duplicate request, so just return success */
		dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
			vlan_id, qos, local_vlan_proto);
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
	if (ice_vf_is_port_vlan_ena(vf))
		dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
			 vlan_id, qos, local_vlan_proto, vf_id);
	else
		dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
 * @vf: pointer to the VF structure
 */
void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
		 vf->dev_lan_addr,
		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
		 ? "on" : "off");
}

/**
 * ice_print_vfs_mdd_events - print VFs malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
 */
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	/* check that there are pending MDD events to print */
	if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
		return;

	/* VF MDD event logs are rate limited to one second intervals */
	if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
		return;

	pf->vfs.last_printed_mdd_jiffies = jiffies;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		/* only print Rx MDD event message if there are new events */
		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
			vf->mdd_rx_events.last_printed =
							vf->mdd_rx_events.count;
			ice_print_vf_rx_mdd_event(vf);
		}

		/* only print Tx MDD event message if there are new events */
		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
			vf->mdd_tx_events.last_printed =
							vf->mdd_tx_events.count;

			dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
				 vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
				 vf->dev_lan_addr);
		}
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
 * @pdev: pointer to a pci_dev structure
 *
 * Called when recovering from a PF FLR to restore interrupt capability to
 * the VFs.
 */
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
	u16 vf_id;
	int pos;

	if (!pci_num_vf(pdev))
		return;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		struct pci_dev *vfdev;

		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
				     &vf_id);
		vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
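		/* pci_get_device() drops the reference on the device passed
		 * as its "from" argument, so iterating this way does not
		 * leak device references
		 */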
		while (vfdev) {
			if (vfdev->is_virtfn && vfdev->physfn == pdev)
				pci_restore_msi_state(vfdev);
			vfdev = pci_get_device(pdev->vendor, vf_id,
					       vfdev);
		}
	}
}

/**
 * ice_is_malicious_vf - helper function to detect a malicious VF
 * @pf: ptr to struct ice_pf
 * @event: pointer to the AQ event
 * @num_msg_proc: the number of messages processed so far
 * @num_msg_pending: the number of messages pending in the admin queue
 */
bool
ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
		    u16 num_msg_proc, u16 num_msg_pending)
{
	s16 vf_id = le16_to_cpu(event->desc.retval);
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_mbx_data mbxdata;
	bool malvf = false;
	struct ice_vf *vf;
	int status;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return false;

	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
		goto out_put_vf;

	mbxdata.num_msg_proc = num_msg_proc;
	mbxdata.num_pending_arq = num_msg_pending;
	mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
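	/* threshold of pending async messages above which the mailbox
	 * snapshot logic starts treating a VF as potentially malicious
	 */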
#define ICE_MBX_OVERFLOW_WATERMARK 64
	mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;

	/* check to see if we have a malicious VF */
	status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
	if (status)
		goto out_put_vf;

	if (malvf) {
		bool report_vf = false;

		/* if the VF is malicious and we haven't let the user
		 * know about it, then let them know now
		 */
		status = ice_mbx_report_malvf(&pf->hw, pf->vfs.malvfs,
					      ICE_MAX_SRIOV_VFS, vf_id,
					      &report_vf);
		if (status)
			dev_dbg(dev, "Error reporting malicious VF\n");

		if (report_vf) {
			struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);

			if (pf_vsi)
				dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
					 &vf->dev_lan_addr[0],
					 pf_vsi->netdev->dev_addr);
		}
	}

out_put_vf:
	ice_put_vf(vf);
	return malvf;
}