/linux/drivers/net/ethernet/intel/ice/

ice_virtchnl_pf.c
    1591  vf = &pf->vf[v];  in ice_reset_all_vfs()
    1614  vf = &pf->vf[v];  in ice_reset_all_vfs()
    2932  vf = &pf->vf[vf_id];  in ice_set_vf_spoofchk()
    4135  vf = &pf->vf[vf_id];  in ice_set_vf_port_vlan()
    4664  vf = &pf->vf[vf_id];  in ice_vc_process_vf_msg()
    4817  vf = &pf->vf[vf_id];  in ice_get_vf_cfg()
    4897  vf = &pf->vf[vf_id];  in ice_set_vf_mac()
    4959  vf = &pf->vf[vf_id];  in ice_set_vf_trust()
    4997  vf = &pf->vf[vf_id];  in ice_set_vf_link_state()
    5090  vf = &pf->vf[vf_id];  in ice_set_vf_bw()
    [all …]

ice_repr.c
    98   struct ice_vf *vf;  in ice_repr_open() local
    100  vf = repr->vf;  in ice_repr_open()
    126  vf = repr->vf;  in ice_repr_stop()
    254  repr->vf = vf;  in ice_repr_add()
    255  vf->repr = repr;  in ice_repr_add()
    291  vf->repr = NULL;  in ice_repr_add()
    307  kfree(vf->repr);  in ice_repr_rem()
    308  vf->repr = NULL;  in ice_repr_rem()
    321  struct ice_vf *vf = &pf->vf[i];  in ice_repr_add_for_all_vfs() local
    334  struct ice_vf *vf = &pf->vf[i];  in ice_repr_add_for_all_vfs() local
    [all …]

ice_virtchnl_fdir.c
    334   vf->vf_id);  in ice_vf_start_ctrl_vsi()
    341   vf->vf_id);  in ice_vf_start_ctrl_vsi()
    763   pf = vf->pf;  in ice_vc_fdir_write_flow_prof()
    1477  pf = vf->pf;  in ice_vc_fdir_write_fltr()
    1535  pf = vf->pf;  in ice_vf_fdir_timer()
    1573  vf = &pf->vf[ctrl_vsi->vf_id];  in ice_vc_fdir_irq_handler()
    1616  pf = vf->pf;  in ice_vf_fdir_dump_info()
    1625  vf->vf_id,  in ice_vf_fdir_dump_info()
    1856  struct ice_vf *vf = &pf->vf[i];  in ice_flush_fdir_ctx() local
    1986  pf = vf->pf;  in ice_vc_add_fdir_fltr()
    [all …]

ice_virtchnl_pf.h
    76   int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
    78   void (*reset_vf)(struct ice_vf *vf);
    81   int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg);
    82   int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg);
    83   int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg);
    88   int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
    90   int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
    92   int (*ena_vlan_stripping)(struct ice_vf *vf);
    93   int (*dis_vlan_stripping)(struct ice_vf *vf);
    191  bool ice_is_vf_disabled(struct ice_vf *vf);
    [all …]

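A recurring idiom in the hits above (and in the i40e and bnxt blocks further down) is the per-VF lookup: validate the VF index against the allocated count, then take a pointer into the PF's VF array with vf = &pf->vf[vf_id]. Below is a minimal, self-contained sketch of that idiom; the struct and helper names are invented for illustration and are not the drivers' own definitions.

#include <stddef.h>

/* Illustrative only: shows the "validate index, then &pf->vf[id]" shape. */
struct demo_vf {
	int vf_id;
	/* per-VF state would live here */
};

struct demo_pf {
	unsigned int num_alloc_vfs;	/* valid entries in vf[] */
	struct demo_vf *vf;		/* array allocated when SR-IOV is enabled */
};

static struct demo_vf *demo_validate_vf(struct demo_pf *pf, unsigned int vf_id)
{
	if (vf_id >= pf->num_alloc_vfs)
		return NULL;		/* reject out-of-range VF indices */
	return &pf->vf[vf_id];
}
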
/linux/drivers/net/ethernet/sfc/

siena_sriov.c
    1105  vf = nic_data->vf + pos;  in efx_siena_sriov_peer_work()
    1161  vf = nic_data->vf + pos;  in efx_siena_sriov_peer_work()
    1205  vf = nic_data->vf + index;  in efx_siena_sriov_vf_alloc()
    1229  vf = nic_data->vf + pos;  in efx_siena_sriov_vfs_fini()
    1386  vf = nic_data->vf + pos;  in efx_siena_sriov_fini()
    1466  vf = nic_data->vf + vf_i;  in efx_siena_sriov_flr()
    1570  vf = nic_data->vf + vf_i;  in efx_siena_sriov_reset()
    1601  vf = nic_data->vf + vf_i;  in efx_siena_sriov_set_vf_mac()
    1620  vf = nic_data->vf + vf_i;  in efx_siena_sriov_set_vf_vlan()
    1640  vf = nic_data->vf + vf_i;  in efx_siena_sriov_set_vf_spoofchk()
    [all …]

ef10_sriov.c
    122  struct ef10_vf *vf = nic_data->vf + i;  in efx_ef10_sriov_free_vf_vports() local
    125  if (vf->pci_dev && pci_is_dev_assigned(vf->pci_dev))  in efx_ef10_sriov_free_vf_vports()
    160  struct ef10_vf *vf = nic_data->vf + vf_i;  in efx_ef10_sriov_assign_vf_vport() local
    168  vf->vlan, &vf->vport_id);  in efx_ef10_sriov_assign_vf_vport()
    494  vf = nic_data->vf + vf_i;  in efx_ef10_sriov_set_vf_mac()
    501  vf->efx->type->filter_table_remove(vf->efx);  in efx_ef10_sriov_set_vf_mac()
    542  vf->efx->type->filter_table_probe(vf->efx);  in efx_ef10_sriov_set_vf_mac()
    568  vf = nic_data->vf + vf_i;  in efx_ef10_sriov_set_vf_vlan()
    580  vf->efx->type->filter_table_remove(vf->efx);  in efx_ef10_sriov_set_vf_vlan()
    620  vf->vlan, &vf->vport_id);  in efx_ef10_sriov_set_vf_vlan()
    [all …]

/linux/drivers/net/ethernet/marvell/octeontx2/nic/

otx2_vf.c
    41   dev_err(vf->dev,  in otx2vf_process_vfaf_mbox_msg()
    47   dev_err(vf->dev,  in otx2vf_process_vfaf_mbox_msg()
    54   dev_err(vf->dev,  in otx2vf_process_vfaf_mbox_msg()
    82   dev_err(vf->dev,  in otx2vf_process_vfaf_mbox_msg()
    205  queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);  in otx2vf_vfaf_mbox_intr_handler()
    218  queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);  in otx2vf_vfaf_mbox_intr_handler()
    407  queue_work(vf->otx2_wq, &vf->rx_mode_work);  in otx2vf_set_rx_mode()
    579  vf->dev = dev;  in otx2vf_probe()
    583  hw = &vf->hw;  in otx2vf_probe()
    714  qmem_free(vf->dev, vf->dync_lmt);  in otx2vf_probe()
    [all …]

/linux/drivers/net/ethernet/intel/i40e/

i40e_virtchnl_pf.c
    24    struct i40e_vf *vf = pf->vf;  in i40e_vc_vf_broadcast() local
    167   if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)  in i40e_vc_notify_vf_reset()
    1588  vf = &pf->vf[v];  in i40e_reset_all_vfs()
    1900  if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)  in i40e_vc_send_msg_to_vf_ex()
    4105  vf = &pf->vf[vf_id];  in i40e_validate_vf()
    4143  vf = &pf->vf[vf_id];  in i40e_ndo_set_vf_mac()
    4259  vf = &pf->vf[vf_id];  in i40e_ndo_set_vf_port_vlan()
    4408  vf = &pf->vf[vf_id];  in i40e_ndo_set_vf_bw()
    4454  vf = &pf->vf[vf_id];  in i40e_ndo_get_vf_config()
    4517  vf = &pf->vf[vf_id];  in i40e_ndo_set_vf_link_state()
    [all …]

/linux/drivers/vdpa/ifcvf/

ifcvf_main.c
    25   return vf->config_cb.callback(vf->config_cb.private);  in ifcvf_config_changed()
    48   struct ifcvf_hw *vf = &adapter->vf;  in ifcvf_free_irq() local
    53   devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);  in ifcvf_free_irq()
    57   devm_free_irq(&pdev->dev, vf->config_irq, vf);  in ifcvf_free_irq()
    64   struct ifcvf_hw *vf = &adapter->vf;  in ifcvf_request_irq() local
    84   vf->config_msix_name, vf);  in ifcvf_request_irq()
    137  ifcvf_stop_hw(vf);  in ifcvf_stop_datapath()
    157  ifcvf_reset(vf);  in ifcvf_reset_vring()
    169  return &adapter->vf;  in vdpa_to_vf()
    528  vf = &adapter->vf;  in ifcvf_vdpa_dev_add()
    [all …]

/linux/drivers/net/ethernet/intel/ixgbe/

ixgbe_sriov.c
    127   int vf = 0;  in ixgbe_get_vfs() local
    146   ++vf;  in ixgbe_get_vfs()
    213   for (vf = 0; vf < num_vfs; ++vf) {  in ixgbe_disable_sriov()
    644   if (entry->vf == vf) {  in ixgbe_set_vf_macvlan()
    686   entry->vf = vf;  in ixgbe_set_vf_macvlan()
    933   vf);  in ixgbe_set_vf_mac_addr()
    951   vf);  in ixgbe_set_vf_vlan_msg()
    1308  u32 vf;  in ixgbe_msg_task() local
    1310  for (vf = 0; vf < adapter->num_vfs; vf++) {  in ixgbe_msg_task()
    1452  adapter->vfinfo[vf].pf_vlan, vf);  in ixgbe_disable_port_vlan()
    [all …]

/linux/drivers/net/ethernet/broadcom/bnx2x/

bnx2x_sriov.c
    268   bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),  in bnx2x_vf_queue_create()
    319   if (vf) {  in bnx2x_vf_set_igu_info()
    795   dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn);  in bnx2x_vf_is_pcie_pending()
    1607  vf->abs_vfid, vf->bus, vf->devfn,  in bnx2x_iov_nic_init()
    1908  vf->fw_stat_map + j * vf->stats_stride;  in bnx2x_iov_adjust_stats_req()
    2025  vf->abs_vfid, vf->state);  in bnx2x_vf_acquire()
    2047  vf_sb_count(vf), vf_rxq_count(vf),  in bnx2x_vf_acquire()
    2048  vf_txq_count(vf), vf_mac_rules_cnt(vf),  in bnx2x_vf_acquire()
    2089  vf_igu_sb(vf, i), vf_igu_sb(vf, i));  in bnx2x_vf_init()
    2094  vf->abs_vfid, vf->state);  in bnx2x_vf_init()
    [all …]

bnx2x_sriov.h
    166  #define vf_rxq_count(vf) ((vf)->alloc_resc.num_rxqs)  argument
    167  #define vf_txq_count(vf) ((vf)->alloc_resc.num_txqs)  argument
    168  #define vf_sb_count(vf) ((vf)->alloc_resc.num_sbs)  argument
    169  #define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters)  argument
    170  #define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters)  argument
    171  #define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters)  argument
    179  #define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var)  argument
    180  #define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var)  argument
    230  #define is_vf_multi(vf) (vf_rxq_count(vf) > 1)  argument
    343  + (vf))
    [all …]

bnx2x_vfpf.c
    1171  vf->abs_vfid);  in bnx2x_vf_mbx_resp_send_msg()
    1193  vf->abs_vfid);  in bnx2x_vf_mbx_resp_send_msg()
    1296  vfq_qzone_id(vf, vfq_get(vf, i));  in bnx2x_vf_mbx_acquire_resp()
    1313  vf->abs_vfid,  in bnx2x_vf_mbx_acquire_resp()
    1419  vf->abs_vfid);  in bnx2x_vf_mbx_acquire()
    1447  vf->abs_vfid);  in bnx2x_vf_mbx_acquire()
    1455  vf->abs_vfid);  in bnx2x_vf_mbx_acquire()
    1932  vf->abs_vfid,  in bnx2x_vf_mbx_set_q_filters()
    1988  vf->index);  in bnx2x_vf_mbx_update_rss()
    2168  vf->state);  in bnx2x_vf_mbx_request()
    [all …]

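The bnx2x_sriov.h hits above wrap the per-VF resource counters in accessor macros so call sites (for example lines 2047-2048 of bnx2x_sriov.c) can write vf_rxq_count(vf) without spelling out the nested structure. A compilable restatement of the same technique, with an invented minimal layout standing in for the real bnx2x structures:

/* Invented minimal layout; the real bnx2x structures are richer. */
struct demo_alloc_resc {
	int num_rxqs;
	int num_txqs;
};

struct demo_bnx2x_vf {
	struct demo_alloc_resc alloc_resc;
};

/* Same macro shape as the hits above, applied to the demo layout. */
#define vf_rxq_count(vf) ((vf)->alloc_resc.num_rxqs)
#define vf_txq_count(vf) ((vf)->alloc_resc.num_txqs)
#define is_vf_multi(vf)  (vf_rxq_count(vf) > 1)

Callers then only depend on the macro name, not on where the counters actually live.
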
/linux/drivers/crypto/cavium/cpt/

cptpf_mbox.c
    25   cpt_send_msg_to_vf(cpt, vf, mbx);  in cpt_mbox_send_ack()
    103  cpt_mbox_send_ack(cpt, vf, &mbx);  in cpt_handle_mbox_intr()
    107  mbx.data = vf;  in cpt_handle_mbox_intr()
    125  vf, mbx.data);  in cpt_handle_mbox_intr()
    128  vf, mbx.data);  in cpt_handle_mbox_intr()
    141  vf, mbx.msg);  in cpt_handle_mbox_intr()
    149  u8 vf;  in cpt_mbox_intr_handler() local
    153  for (vf = 0; vf < CPT_MAX_VF_NUM; vf++) {  in cpt_mbox_intr_handler()
    154  if (intr & (1ULL << vf)) {  in cpt_mbox_intr_handler()
    156  cpt_handle_mbox_intr(cpt, vf);  in cpt_mbox_intr_handler()
    [all …]

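The cpt_mbox_intr_handler() hits show how PF-side mailbox interrupts are fanned out: a status register carries one bit per VF, and the handler loops over the bit positions, servicing every VF whose bit is set (the same shape appears in otx_cptpf_mbox.c below). A sketch of that dispatch loop under assumed names; the register read and per-VF handler are placeholders, not the driver's functions:

#include <stdint.h>

#define DEMO_MAX_VF_NUM 64	/* illustrative; the driver uses CPT_MAX_VF_NUM */

/* Placeholders for the device-specific register read and per-VF handler. */
uint64_t demo_read_mbox_intr(void *dev);
void demo_handle_vf_mbox(void *dev, unsigned int vf);

static void demo_mbox_intr_dispatch(void *dev)
{
	uint64_t intr = demo_read_mbox_intr(dev);
	unsigned int vf;

	/* One status bit per VF: service every VF whose bit is set. */
	for (vf = 0; vf < DEMO_MAX_VF_NUM; vf++)
		if (intr & (1ULL << vf))
			demo_handle_vf_mbox(dev, vf);
}
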
/linux/drivers/net/ethernet/qlogic/qlcnic/

qlcnic_sriov_pf.c
    743   struct qlcnic_vf_info *vf = trans->vf;  in qlcnic_sriov_pf_channel_cfg_cmd() local
    887   struct qlcnic_vf_info *vf = tran->vf;  in qlcnic_sriov_pf_create_rx_ctx_cmd() local
    915   struct qlcnic_vf_info *vf = trans->vf;  in qlcnic_sriov_pf_mac_address_cmd() local
    947   struct qlcnic_vf_info *vf = trans->vf;  in qlcnic_sriov_pf_create_tx_ctx_cmd() local
    985   struct qlcnic_vf_info *vf = trans->vf;  in qlcnic_sriov_pf_del_rx_ctx_cmd() local
    1020  struct qlcnic_vf_info *vf = trans->vf;  in qlcnic_sriov_pf_del_tx_ctx_cmd() local
    1051  struct qlcnic_vf_info *vf = trans->vf;  in qlcnic_sriov_pf_cfg_lro_cmd() local
    1068  struct qlcnic_vf_info *vf = trans->vf;  in qlcnic_sriov_pf_cfg_ip_cmd() local
    1097  struct qlcnic_vf_info *vf = trans->vf;  in qlcnic_sriov_pf_cfg_intrpt_cmd() local
    1240  struct qlcnic_vf_info *vf = tran->vf;  in qlcnic_sriov_pf_cfg_intrcoal_cmd() local
    [all …]

qlcnic_sriov_common.c
    191   vf->adapter = adapter;  in qlcnic_sriov_init()
    842   vf->send_cmd = NULL;  in qlcnic_sriov_clear_trans()
    892   struct qlcnic_vf_info *vf = trans->vf;  in qlcnic_sriov_wait_for_channel_free() local
    937   struct qlcnic_vf_info *vf = trans->vf;  in qlcnic_sriov_issue_bc_post() local
    1015  trans->vf = vf;  in qlcnic_sriov_send_bc_cmd()
    1027  vf->send_cmd = trans;  in qlcnic_sriov_send_bc_cmd()
    1094  trans = vf->send_cmd;  in qlcnic_sriov_handle_bc_resp()
    1243  trans->vf = vf;  in qlcnic_sriov_handle_bc_cmd()
    2197  vf->num_vlan++;  in qlcnic_sriov_add_vlan_id()
    2211  vf->num_vlan--;  in qlcnic_sriov_del_vlan_id()
    [all …]

/linux/drivers/crypto/marvell/octeontx/

otx_cptpf_mbox.c
    91   otx_cpt_send_msg_to_vf(cpt, vf, mbx);  in otx_cpt_mbox_send_ack()
    100  otx_cpt_send_msg_to_vf(cpt, vf, mbx);  in otx_cptpf_mbox_send_nack()
    190  dump_mbox_msg(&mbx, vf);  in otx_cpt_handle_mbox_intr()
    200  mbx.data = vf;  in otx_cpt_handle_mbox_intr()
    216  vf, mbx.data);  in otx_cpt_handle_mbox_intr()
    235  vf, mbx.msg);  in otx_cpt_handle_mbox_intr()
    243  u8 vf;  in otx_cpt_mbox_intr_handler() local
    247  for (vf = 0; vf < cpt->max_vfs; vf++) {  in otx_cpt_mbox_intr_handler()
    248  if (intr & (1ULL << vf)) {  in otx_cpt_mbox_intr_handler()
    249  otx_cpt_handle_mbox_intr(cpt, vf);  in otx_cpt_mbox_intr_handler()
    [all …]

/linux/drivers/net/ethernet/broadcom/bnxt/

bnxt_sriov.c
    38   if (vf)  in bnxt_hwrm_fwd_async_event_cmpl()
    85   vf = &bp->pf.vf[vf_id];  in bnxt_set_vf_spoofchk()
    169  vf = &bp->pf.vf[vf_id];  in bnxt_set_vf_trust()
    191  vf = &bp->pf.vf[vf_id];  in bnxt_get_vf_config()
    233  vf = &bp->pf.vf[vf_id];  in bnxt_set_vf_mac()
    272  vf = &bp->pf.vf[vf_id];  in bnxt_set_vf_vlan()
    302  vf = &bp->pf.vf[vf_id];  in bnxt_set_vf_bw()
    343  vf = &bp->pf.vf[vf_id];  in bnxt_set_vf_link_state()
    373  vf = &bp->pf.vf[i];  in bnxt_set_vf_attr()
    374  memset(vf, 0, sizeof(*vf));  in bnxt_set_vf_attr()
    [all …]

/linux/drivers/net/ethernet/cisco/enic/

enic_pp.c
    40   if (vf != PORT_SELF_VF) {  in enic_is_valid_pp_vf()
    43   if (vf < 0 || vf >= enic->num_vfs) {  in enic_is_valid_pp_vf()
    81   ENIC_PP_BY_INDEX(enic, vf, pp, &err);  in enic_set_port_profile()
    99   } else if (vf == PORT_SELF_VF) {  in enic_set_port_profile()
    103  "for VF %d\n", vf);  in enic_set_port_profile()
    153  if (vf == PORT_SELF_VF)  in enic_unset_port_profile()
    202  ENIC_PP_BY_INDEX(enic, vf, pp, &err);  in enic_pp_disassociate()
    224  ENIC_PP_BY_INDEX(enic, vf, pp, &err);  in enic_pp_preassociate_rr()
    264  ENIC_PP_BY_INDEX(enic, vf, pp, &err);  in enic_pp_associate()
    273  enic, vf, prev_pp, restore_pp);  in enic_pp_associate()
    [all …]

/linux/drivers/net/ethernet/netronome/nfp/

nfp_net_sriov.c
    31   if (vf < 0 || vf >= app->pf->num_vfs) {  in nfp_net_sriov_check()
    32   nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf);  in nfp_net_sriov_check()
    46   writeb(vf, app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_VF_NUM);  in nfp_net_sriov_update()
    75   mac, vf);  in nfp_app_set_vf_mac()
    80   vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ;  in nfp_app_set_vf_mac()
    89   mac, vf);  in nfp_app_set_vf_mac()
    111  "invalid vlan id or qos for VF id %d\n", vf);  in nfp_app_set_vf_vlan()
    118  vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ;  in nfp_app_set_vf_vlan()
    138  vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ +  in nfp_app_set_vf_spoofchk()
    218  err = nfp_net_sriov_check(app, vf, 0, "");  in nfp_app_get_vf_config()
    [all …]

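The nfp hits compute vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ, i.e. per-VF configuration lives in a flat table: a fixed mailbox header followed by one fixed-size record per VF, so a field of VF n sits at header size + n * record size + field offset. A sketch of that addressing with illustrative sizes; the real values are the NFP_NET_VF_CFG_* constants, not these:

#include <stddef.h>

#define DEMO_VF_CFG_MB_SZ 16u	/* illustrative mailbox header at the start of the table */
#define DEMO_VF_CFG_SZ    32u	/* illustrative fixed-size record per VF */

/* Byte offset of a field inside VF `vf`'s record in the config table. */
static size_t demo_vf_cfg_offset(unsigned int vf, size_t field_off)
{
	return DEMO_VF_CFG_MB_SZ + (size_t)vf * DEMO_VF_CFG_SZ + field_off;
}
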
/linux/drivers/net/ethernet/mellanox/mlx5/core/

sriov.c
    47   if (sriov->vfs_ctx[vf].node_guid ||  in sriov_restore_guids()
    48   sriov->vfs_ctx[vf].port_guid ||  in sriov_restore_guids()
    75   int err, vf, num_msix_count;  in mlx5_device_enable_sriov() local
    89   for (vf = 0; vf < num_vfs; vf++) {  in mlx5_device_enable_sriov()
    100  vf, err);  in mlx5_device_enable_sriov()
    104  sriov->vfs_ctx[vf].enabled = 1;  in mlx5_device_enable_sriov()
    110  vf, err);  in mlx5_device_enable_sriov()
    125  int vf;  in mlx5_device_disable_sriov() local
    127  for (vf = num_vfs - 1; vf >= 0; vf--) {  in mlx5_device_disable_sriov()
    128  if (!sriov->vfs_ctx[vf].enabled)  in mlx5_device_disable_sriov()
    [all …]

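The mlx5 sriov.c hits show the usual enable/teardown symmetry: VFs are brought up in ascending order and each context is flagged enabled, while teardown walks the array in reverse and skips contexts that never came up. A compressed sketch of that shape; the hooks stand in for the firmware commands and are not mlx5 APIs:

struct demo_vf_ctx {
	int enabled;
};

/* Hypothetical per-VF hooks standing in for the firmware commands. */
int demo_setup_one_vf(struct demo_vf_ctx *ctx);
void demo_teardown_one_vf(struct demo_vf_ctx *ctx);

static void demo_enable_vfs(struct demo_vf_ctx *ctx, int num_vfs)
{
	int vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (demo_setup_one_vf(&ctx[vf]))
			continue;	/* leave failed VFs marked disabled */
		ctx[vf].enabled = 1;
	}
}

static void demo_disable_vfs(struct demo_vf_ctx *ctx, int num_vfs)
{
	int vf;

	/* Walk in reverse and only touch VFs that actually came up. */
	for (vf = num_vfs - 1; vf >= 0; vf--) {
		if (!ctx[vf].enabled)
			continue;
		demo_teardown_one_vf(&ctx[vf]);
		ctx[vf].enabled = 0;
	}
}
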
/linux/drivers/net/netdevsim/

netdev.c
    104  nsim_dev->vfconfigs[vf].vlan = vlan;  in nsim_set_vf_vlan()
    105  nsim_dev->vfconfigs[vf].qos = qos;  in nsim_set_vf_vlan()
    121  if (vf >= nsim_dev_get_vfs(nsim_dev))  in nsim_set_vf_rate()
    135  if (vf >= nsim_dev_get_vfs(nsim_dev))  in nsim_set_vf_spoofchk()
    147  if (vf >= nsim_dev_get_vfs(nsim_dev))  in nsim_set_vf_rss_query_en()
    159  if (vf >= nsim_dev_get_vfs(nsim_dev))  in nsim_set_vf_trust()
    161  nsim_dev->vfconfigs[vf].trusted = val;  in nsim_set_vf_trust()
    172  if (vf >= nsim_dev_get_vfs(nsim_dev))  in nsim_get_vf_config()
    175  ivi->vf = vf;  in nsim_get_vf_config()
    181  ivi->qos = nsim_dev->vfconfigs[vf].qos;  in nsim_get_vf_config()
    [all …]

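netdevsim keeps an array of per-VF config records, and every setter/getter in the hits starts with the same range check against nsim_dev_get_vfs() before touching vfconfigs[vf]. A sketch of the store and one setter/getter pair, with invented names and a trimmed-down record (the real netdevsim struct carries more fields):

#include <stdbool.h>

/* Invented per-VF record for illustration. */
struct demo_vf_config {
	int vlan;
	int qos;
	bool trusted;
};

struct demo_sim_dev {
	unsigned int num_vfs;
	struct demo_vf_config *vfconfigs;	/* one entry per VF */
};

static int demo_set_vf_trust(struct demo_sim_dev *dev, unsigned int vf, bool val)
{
	if (vf >= dev->num_vfs)
		return -1;	/* same bounds check as every hit above */
	dev->vfconfigs[vf].trusted = val;
	return 0;
}

static int demo_get_vf_qos(const struct demo_sim_dev *dev, unsigned int vf, int *qos)
{
	if (vf >= dev->num_vfs)
		return -1;
	*qos = dev->vfconfigs[vf].qos;
	return 0;
}
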
/linux/drivers/net/ethernet/qlogic/qed/

qed_sriov.c
    177   return vf;  in qed_iov_get_vf_info()
    687   if (!vf)  in qed_iov_set_vf_to_disable()
    801   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));  in qed_iov_enable_vf_access()
    808   vf->abs_vf_id, vf->num_sbs);  in qed_iov_enable_vf_access()
    1063  vf->relative_vf_id, i, vf->igu_sbs[i],  in qed_iov_init_hw_for_vf()
    1104  memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));  in qed_iov_release_hw_for_vf()
    1116  memset(&vf->acquire, 0, sizeof(vf->acquire));  in qed_iov_release_hw_for_vf()
    1326  if (!vf)  in qed_iov_get_public_vf_info()
    1560  vf->abs_vf_id, vf->state);  in qed_iov_vf_mbx_acquire()
    1597  memcpy(&vf->acquire, req, sizeof(vf->acquire));  in qed_iov_vf_mbx_acquire()
    [all …]

/linux/drivers/net/ethernet/cavium/thunder/

nic_main.c
    165   mbx.nic_cfg.vf_id = vf;  in nic_mbx_send_ready()
    169   if (vf < nic->num_vf_en) {  in nic_mbx_send_ready()
    852   if (vf >= nic->num_vf_en)  in nic_enable_vf()
    867   if (vf >= nic->num_vf_en)  in nic_pause_frame()
    892   if (vf >= nic->num_vf_en)  in nic_config_timestamp()
    1060  nic->pqs_vf[vf] = 0;  in nic_handle_mbx_intr()
    1098  vf < NIC_VF_PER_MBX_REG ? vf :  in nic_handle_mbx_intr()
    1111  vf < NIC_VF_PER_MBX_REG ? vf :  in nic_handle_mbx_intr()
    1141  mbx.msg.msg, vf);  in nic_handle_mbx_intr()
    1151  u8 vf;  in nic_mbx_intr_handler() local
    [all …]

/linux/drivers/infiniband/hw/mlx5/

ib_virt.c
    66   vf, err);  in mlx5_ib_get_vf_config()
    114  vfs_ctx[vf].policy = in->policy;  in mlx5_ib_set_vf_link_state()
    121  int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,  in mlx5_ib_get_vf_stats() argument
    169  vfs_ctx[vf].node_guid = guid;  in set_vf_node_guid()
    170  vfs_ctx[vf].node_guid_valid = 1;  in set_vf_node_guid()
    193  vfs_ctx[vf].port_guid = guid;  in set_vf_port_guid()
    194  vfs_ctx[vf].port_guid_valid = 1;  in set_vf_port_guid()
    204  return set_vf_node_guid(device, vf, port, guid);  in mlx5_ib_set_vf_guid()
    206  return set_vf_port_guid(device, vf, port, guid);  in mlx5_ib_set_vf_guid()
    220  vfs_ctx[vf].node_guid_valid ? vfs_ctx[vf].node_guid : 0;  in mlx5_ib_get_vf_guid()
    [all …]