/* SPDX-License-Identifier: GPL-2.0-only */
/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_

/* Description:
 * This header file describes the VF-PF communication protocol used
 * by the drivers for all devices starting from our 40G product line.
 *
 * Admin queue buffer usage:
 * desc->opcode is always aqc_opc_send_msg_to_pf
 * flags, retval, datalen, and data addr are all used normally.
 * The Firmware copies the cookie fields when sending messages between the
 * PF and VF, but uses all other fields internally. Due to this limitation,
 * we must send all messages as "indirect", i.e. using an external buffer.
 *
 * All the VSI indexes are relative to the VF. Each VF can have a maximum of
 * three VSIs. All the queue indexes are relative to the VSI.  Each VF can
 * have a maximum of sixteen queues for all of its VSIs.
 *
 * The PF is required to return a status code in v_retval for all messages
 * except RESET_VF, which does not require any response. The return value
 * is of virtchnl_status_code type, defined below.
 *
 * In general, VF driver initialization should roughly follow the order of
 * these opcodes. The VF driver must first validate the API version of the
 * PF driver, then request a reset, then get resources, then configure
 * queues and interrupts. After these operations are complete, the VF
 * driver may start its queues, optionally add MAC and VLAN filters, and
 * process traffic.
 */
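
/* A typical VF bring-up sequence, following the order described above. This
 * is an illustrative sketch only; iavf_send_pf_msg() stands in for whatever
 * helper the VF driver uses to post an admin queue message to the PF and is
 * not defined by this header:
 *
 *	iavf_send_pf_msg(ad, VIRTCHNL_OP_VERSION, &ver, sizeof(ver));
 *	iavf_send_pf_msg(ad, VIRTCHNL_OP_RESET_VF, NULL, 0);
 *	// poll VFGEN_RSTAT until reset completes, then reinit the admin queue
 *	iavf_send_pf_msg(ad, VIRTCHNL_OP_GET_VF_RESOURCES, &caps, sizeof(caps));
 *	iavf_send_pf_msg(ad, VIRTCHNL_OP_CONFIG_VSI_QUEUES, vqci, vqci_len);
 *	iavf_send_pf_msg(ad, VIRTCHNL_OP_CONFIG_IRQ_MAP, vimi, vimi_len);
 *	iavf_send_pf_msg(ad, VIRTCHNL_OP_ENABLE_QUEUES, &vqs, sizeof(vqs));
 */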

/* START GENERIC DEFINES
 * Need to ensure the following enums and defines hold the same meaning and
 * value in current and future projects
 */

/* Error Codes */
enum virtchnl_status_code {
	VIRTCHNL_STATUS_SUCCESS				= 0,
	VIRTCHNL_STATUS_ERR_PARAM			= -5,
	VIRTCHNL_STATUS_ERR_NO_MEMORY			= -18,
	VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH		= -38,
	VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR		= -39,
	VIRTCHNL_STATUS_ERR_INVALID_VF_ID		= -40,
	VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR		= -53,
	VIRTCHNL_STATUS_ERR_NOT_SUPPORTED		= -64,
};

/* Backward compatibility */
#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED

#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT		0x0
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT		0x1
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT	0x2
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT		0x3
#define VIRTCHNL_LINK_SPEED_40GB_SHIFT		0x4
#define VIRTCHNL_LINK_SPEED_20GB_SHIFT		0x5
#define VIRTCHNL_LINK_SPEED_25GB_SHIFT		0x6
#define VIRTCHNL_LINK_SPEED_5GB_SHIFT		0x7

enum virtchnl_link_speed {
	VIRTCHNL_LINK_SPEED_UNKNOWN	= 0,
	VIRTCHNL_LINK_SPEED_100MB	= BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
	VIRTCHNL_LINK_SPEED_1GB		= BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
	VIRTCHNL_LINK_SPEED_10GB	= BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
	VIRTCHNL_LINK_SPEED_40GB	= BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
	VIRTCHNL_LINK_SPEED_20GB	= BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
	VIRTCHNL_LINK_SPEED_25GB	= BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
	VIRTCHNL_LINK_SPEED_2_5GB	= BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
	VIRTCHNL_LINK_SPEED_5GB		= BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
};
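
/* Converting the legacy link_speed flag to Mbps is left to the driver; a
 * minimal sketch of such a helper is shown below (the function name is
 * illustrative, not part of this interface):
 *
 *	static inline u32 virtchnl_speed_to_mbps(enum virtchnl_link_speed s)
 *	{
 *		switch (s) {
 *		case VIRTCHNL_LINK_SPEED_100MB:	return 100;
 *		case VIRTCHNL_LINK_SPEED_1GB:	return 1000;
 *		case VIRTCHNL_LINK_SPEED_2_5GB:	return 2500;
 *		case VIRTCHNL_LINK_SPEED_5GB:	return 5000;
 *		case VIRTCHNL_LINK_SPEED_10GB:	return 10000;
 *		case VIRTCHNL_LINK_SPEED_20GB:	return 20000;
 *		case VIRTCHNL_LINK_SPEED_25GB:	return 25000;
 *		case VIRTCHNL_LINK_SPEED_40GB:	return 40000;
 *		default:			return 0;
 *		}
 *	}
 */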

/* for hsplit_0 field of Rx HMC context */
/* deprecated with AVF 1.0 */
enum virtchnl_rx_hsplit {
	VIRTCHNL_RX_HSPLIT_NO_SPLIT      = 0,
	VIRTCHNL_RX_HSPLIT_SPLIT_L2      = 1,
	VIRTCHNL_RX_HSPLIT_SPLIT_IP      = 2,
	VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
	VIRTCHNL_RX_HSPLIT_SPLIT_SCTP    = 8,
};

/* END GENERIC DEFINES */

/* Opcodes for VF-PF communication. These are placed in the v_opcode field
 * of the virtchnl_msg structure.
 */
enum virtchnl_ops {
/* The PF sends status change events to VFs using
 * the VIRTCHNL_OP_EVENT opcode.
 * VFs send requests to the PF using the other ops.
 * Use of "advanced opcode" features must be negotiated as part of the
 * capabilities exchange; they are not considered part of the base mode
 * feature set.
 */
	VIRTCHNL_OP_UNKNOWN = 0,
	VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
	VIRTCHNL_OP_RESET_VF = 2,
	VIRTCHNL_OP_GET_VF_RESOURCES = 3,
	VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
	VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
	VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
	VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
	VIRTCHNL_OP_ENABLE_QUEUES = 8,
	VIRTCHNL_OP_DISABLE_QUEUES = 9,
	VIRTCHNL_OP_ADD_ETH_ADDR = 10,
	VIRTCHNL_OP_DEL_ETH_ADDR = 11,
	VIRTCHNL_OP_ADD_VLAN = 12,
	VIRTCHNL_OP_DEL_VLAN = 13,
	VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
	VIRTCHNL_OP_GET_STATS = 15,
	VIRTCHNL_OP_RSVD = 16,
	VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
	VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
	VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
	VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
	VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
	VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
	VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
	VIRTCHNL_OP_SET_RSS_HENA = 26,
	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
	VIRTCHNL_OP_REQUEST_QUEUES = 29,
	VIRTCHNL_OP_ENABLE_CHANNELS = 30,
	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
	/* opcodes 34 - 44 are reserved */
	VIRTCHNL_OP_ADD_RSS_CFG = 45,
	VIRTCHNL_OP_DEL_RSS_CFG = 46,
	VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
	VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
	VIRTCHNL_OP_MAX,
};

/* These macros are used to generate compilation errors if a structure/union
 * is not exactly the correct length. They trigger a divide-by-zero error if
 * the structure/union is not of the correct size; otherwise they create an
 * enum that is never used.
 */
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
	{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
	{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }

/* Virtual channel message descriptor. This overlays the admin queue
 * descriptor. All other data is passed in external buffers.
 */

struct virtchnl_msg {
	u8 pad[8];			 /* AQ flags/opcode/len/retval fields */
	enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
	enum virtchnl_status_code v_retval;  /* ditto for desc->retval */
	u32 vfid;			 /* used by PF when sending to VF */
};

VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);

/* Message descriptions and data structures. */

/* VIRTCHNL_OP_VERSION
 * VF posts its version number to the PF. PF responds with its version number
 * in the same format, along with a return code.
 * Reply from PF has its major/minor versions also in param0 and param1.
 * If there is a major version mismatch, then the VF cannot operate.
 * If there is a minor version mismatch, then the VF can operate but should
 * add a warning to the system log.
 *
 * This enum element MUST always be specified as == 1, regardless of other
 * changes in the API. The PF must always respond to this message without
 * error regardless of version mismatch.
 */
#define VIRTCHNL_VERSION_MAJOR		1
#define VIRTCHNL_VERSION_MINOR		1
#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0

struct virtchnl_version_info {
	u32 major;
	u32 minor;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);

#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
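
/* A minimal sketch of how a VF might act on the PF's reply, per the rules
 * above (pf_ver is the virtchnl_version_info returned by the PF; the error
 * code and log call are illustrative only):
 *
 *	if (pf_ver.major != VIRTCHNL_VERSION_MAJOR)
 *		return -EOPNOTSUPP;	// major mismatch: cannot operate
 *	if (pf_ver.minor != VIRTCHNL_VERSION_MINOR)
 *		pr_warn("PF/VF minor version mismatch\n");
 */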

/* VIRTCHNL_OP_RESET_VF
 * VF sends this request to PF with no parameters
 * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
 * until reset completion is indicated. The admin queue must be reinitialized
 * after this operation.
 *
 * When reset is complete, PF must ensure that all queues in all VSIs associated
 * with the VF are stopped, all queue configurations in the HMC are set to 0,
 * and all MAC and VLAN filters (except the default MAC address) on all VSIs
 * are cleared.
 */

/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
 * vsi_type should always be 6 for backward compatibility. Add other fields
 * as needed.
 */
enum virtchnl_vsi_type {
	VIRTCHNL_VSI_TYPE_INVALID = 0,
	VIRTCHNL_VSI_SRIOV = 6,
};

/* VIRTCHNL_OP_GET_VF_RESOURCES
 * Version 1.0 VF sends this request to PF with no parameters
 * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
 * PF responds with an indirect message containing
 * virtchnl_vf_resource and one or more
 * virtchnl_vsi_resource structures.
 */

struct virtchnl_vsi_resource {
	u16 vsi_id;
	u16 num_queue_pairs;
	enum virtchnl_vsi_type vsi_type;
	u16 qset_handle;
	u8 default_mac_addr[ETH_ALEN];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);

/* VF capability flags
 * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
 * TX/RX Checksum offloading and TSO for non-tunnelled packets.
 */
#define VIRTCHNL_VF_OFFLOAD_L2			BIT(0)
#define VIRTCHNL_VF_OFFLOAD_IWARP		BIT(1)
#define VIRTCHNL_VF_OFFLOAD_RSS_AQ		BIT(3)
#define VIRTCHNL_VF_OFFLOAD_RSS_REG		BIT(4)
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR		BIT(5)
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
/* used to negotiate communicating link speeds in Mbps */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
#define VIRTCHNL_VF_OFFLOAD_VLAN		BIT(16)
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING		BIT(17)
#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2	BIT(18)
#define VIRTCHNL_VF_OFFLOAD_RSS_PF		BIT(19)
#define VIRTCHNL_VF_OFFLOAD_ENCAP		BIT(20)
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM		BIT(21)
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM	BIT(22)
#define VIRTCHNL_VF_OFFLOAD_ADQ			BIT(23)
#define VIRTCHNL_VF_OFFLOAD_USO			BIT(25)
#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF		BIT(27)
#define VIRTCHNL_VF_OFFLOAD_FDIR_PF		BIT(28)

#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
			       VIRTCHNL_VF_OFFLOAD_VLAN | \
			       VIRTCHNL_VF_OFFLOAD_RSS_PF)

struct virtchnl_vf_resource {
	u16 num_vsis;
	u16 num_queue_pairs;
	u16 max_vectors;
	u16 max_mtu;

	u32 vf_cap_flags;
	u32 rss_key_size;
	u32 rss_lut_size;

	struct virtchnl_vsi_resource vsi_res[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
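
/* The response is variable length: vsi_res[] is a one-element array used as
 * a flexible array carrying num_vsis entries. A sketch of how a VF might
 * size the receive buffer for this indirect message (the length variable is
 * illustrative):
 *
 *	size_t len = sizeof(struct virtchnl_vf_resource) +
 *		     (3 - 1) * sizeof(struct virtchnl_vsi_resource);
 *	// 3 == maximum VSIs per VF, per the limit noted at the top of this file
 */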

/* VIRTCHNL_OP_CONFIG_TX_QUEUE
 * VF sends this message to set up parameters for one TX queue.
 * External data buffer contains one instance of virtchnl_txq_info.
 * PF configures requested queue and returns a status code.
 */

/* Tx queue config info */
struct virtchnl_txq_info {
	u16 vsi_id;
	u16 queue_id;
	u16 ring_len;		/* number of descriptors, multiple of 8 */
	u16 headwb_enabled; /* deprecated with AVF 1.0 */
	u64 dma_ring_addr;
	u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
};

VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);

/* VIRTCHNL_OP_CONFIG_RX_QUEUE
 * VF sends this message to set up parameters for one RX queue.
 * External data buffer contains one instance of virtchnl_rxq_info.
 * PF configures requested queue and returns a status code.
 */

/* Rx queue config info */
struct virtchnl_rxq_info {
	u16 vsi_id;
	u16 queue_id;
	u32 ring_len;		/* number of descriptors, multiple of 32 */
	u16 hdr_size;
	u16 splithdr_enabled; /* deprecated with AVF 1.0 */
	u32 databuffer_size;
	u32 max_pkt_size;
	u32 pad1;
	u64 dma_ring_addr;
	enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
	u32 pad2;
};

VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);

/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
 * VF sends this message to set parameters for all active TX and RX queues
 * associated with the specified VSI.
 * PF configures queues and returns status.
 * If the number of queues specified is greater than the number of queues
 * associated with the VSI, an error is returned and no queues are configured.
 */
struct virtchnl_queue_pair_info {
	/* NOTE: vsi_id and queue_id should be identical for both queues. */
	struct virtchnl_txq_info txq;
	struct virtchnl_rxq_info rxq;
};

VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);

struct virtchnl_vsi_queue_config_info {
	u16 vsi_id;
	u16 num_queue_pairs;
	u32 pad;
	struct virtchnl_queue_pair_info qpair[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
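
/* A sketch of building this variable-length message for num_qp queue pairs
 * (kernel context assumed for kzalloc; variable names are illustrative):
 *
 *	size_t len = sizeof(struct virtchnl_vsi_queue_config_info) +
 *		     (num_qp - 1) * sizeof(struct virtchnl_queue_pair_info);
 *	struct virtchnl_vsi_queue_config_info *vqci = kzalloc(len, GFP_KERNEL);
 *
 *	vqci->vsi_id = vsi_id;
 *	vqci->num_queue_pairs = num_qp;
 *	for (i = 0; i < num_qp; i++) {
 *		vqci->qpair[i].txq.vsi_id = vsi_id;
 *		vqci->qpair[i].txq.queue_id = i;
 *		vqci->qpair[i].rxq.vsi_id = vsi_id;
 *		vqci->qpair[i].rxq.queue_id = i;
 *		// fill ring lengths, DMA addresses, buffer sizes, etc.
 *	}
 */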

/* VIRTCHNL_OP_REQUEST_QUEUES
 * VF sends this message to request the PF to allocate additional queues to
 * this VF.  Each VF gets a guaranteed number of queues on init but asking for
 * additional queues must be negotiated.  This is a best effort request as it
 * is possible the PF does not have enough queues left to support the request.
 * If the PF cannot support the number requested it will respond with the
 * maximum number it is able to support.  If the request is successful, PF will
 * then reset the VF to institute required changes.
 */

/* VF resource request */
struct virtchnl_vf_res_request {
	u16 num_queue_pairs;
};

/* VIRTCHNL_OP_CONFIG_IRQ_MAP
 * VF uses this message to map vectors to queues.
 * The rxq_map and txq_map fields are bitmaps used to indicate which queues
 * are to be associated with the specified vector.
 * The "other" causes are always mapped to vector 0.
 * PF configures interrupt mapping and returns status.
 */
struct virtchnl_vector_map {
	u16 vsi_id;
	u16 vector_id;
	u16 rxq_map;
	u16 txq_map;
	u16 rxitr_idx;
	u16 txitr_idx;
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);

struct virtchnl_irq_map_info {
	u16 num_vectors;
	struct virtchnl_vector_map vecmap[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);

/* VIRTCHNL_OP_ENABLE_QUEUES
 * VIRTCHNL_OP_DISABLE_QUEUES
 * VF sends these messages to enable or disable TX/RX queue pairs.
 * The queues fields are bitmaps indicating which queues to act upon.
 * (Currently, we only support 16 queues per VF, but we make the field
 * u32 to allow for expansion.)
 * PF performs requested action and returns status.
 */
struct virtchnl_queue_select {
	u16 vsi_id;
	u16 pad;
	u32 rx_queues;
	u32 tx_queues;
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
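
/* A sketch of enabling the first num_active_queues queue pairs on a VSI
 * (assumes num_active_queues < 32; variable names are illustrative):
 *
 *	struct virtchnl_queue_select vqs = {};
 *
 *	vqs.vsi_id = vsi_id;
 *	vqs.rx_queues = BIT(num_active_queues) - 1;
 *	vqs.tx_queues = vqs.rx_queues;
 *	// send &vqs with VIRTCHNL_OP_ENABLE_QUEUES
 */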

/* VIRTCHNL_OP_ADD_ETH_ADDR
 * VF sends this message in order to add one or more unicast or multicast
 * address filters for the specified VSI.
 * PF adds the filters and returns status.
 */

/* VIRTCHNL_OP_DEL_ETH_ADDR
 * VF sends this message in order to remove one or more unicast or multicast
 * filters for the specified VSI.
 * PF removes the filters and returns status.
 */

/* VIRTCHNL_ETHER_ADDR_LEGACY
 * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
 * bytes. Moving forward all VF drivers should not set type to
 * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
 * behavior. The control plane function (i.e. PF) can use a best effort method
 * of tracking the primary/device unicast in this case, but there is no
 * guarantee and functionality depends on the implementation of the PF.
 */

/* VIRTCHNL_ETHER_ADDR_PRIMARY
 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
 * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
 * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
 * function (i.e. PF) to accurately track and use this MAC address for
 * displaying on the host and for VM/function reset.
 */

/* VIRTCHNL_ETHER_ADDR_EXTRA
 * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
 * unicast and/or multicast filters that are being added/deleted via
 * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
 */
struct virtchnl_ether_addr {
	u8 addr[ETH_ALEN];
	u8 type;
#define VIRTCHNL_ETHER_ADDR_LEGACY	0
#define VIRTCHNL_ETHER_ADDR_PRIMARY	1
#define VIRTCHNL_ETHER_ADDR_EXTRA	2
#define VIRTCHNL_ETHER_ADDR_TYPE_MASK	3 /* first two bits of type are valid */
	u8 pad;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);

struct virtchnl_ether_addr_list {
	u16 vsi_id;
	u16 num_elements;
	struct virtchnl_ether_addr list[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
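
/* A sketch of adding the primary/device unicast MAC as a single-element
 * list (assumes <linux/etherdevice.h> for ether_addr_copy(); variable names
 * are illustrative):
 *
 *	size_t len = sizeof(struct virtchnl_ether_addr_list);
 *	struct virtchnl_ether_addr_list *veal = kzalloc(len, GFP_KERNEL);
 *
 *	veal->vsi_id = vsi_id;
 *	veal->num_elements = 1;
 *	ether_addr_copy(veal->list[0].addr, netdev->dev_addr);
 *	veal->list[0].type = VIRTCHNL_ETHER_ADDR_PRIMARY;
 *	// send veal with VIRTCHNL_OP_ADD_ETH_ADDR
 */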

/* VIRTCHNL_OP_ADD_VLAN
 * VF sends this message to add one or more VLAN tag filters for receives.
 * PF adds the filters and returns status.
 * If a port VLAN is configured by the PF, this operation will return an
 * error to the VF.
 */

/* VIRTCHNL_OP_DEL_VLAN
 * VF sends this message to remove one or more VLAN tag filters for receives.
 * PF removes the filters and returns status.
 * If a port VLAN is configured by the PF, this operation will return an
 * error to the VF.
 */

struct virtchnl_vlan_filter_list {
	u16 vsi_id;
	u16 num_elements;
	u16 vlan_id[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);

/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
 * VF sends VSI id and flags.
 * PF returns status code in retval.
 * Note: we assume that broadcast accept mode is always enabled.
 */
struct virtchnl_promisc_info {
	u16 vsi_id;
	u16 flags;
};

VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);

#define FLAG_VF_UNICAST_PROMISC	0x00000001
#define FLAG_VF_MULTICAST_PROMISC	0x00000002

/* VIRTCHNL_OP_GET_STATS
 * VF sends this message to request stats for the selected VSI. VF uses
 * the virtchnl_queue_select struct to specify the VSI. The queue_id
 * field is ignored by the PF.
 *
 * PF replies with struct eth_stats in an external buffer.
 */

/* VIRTCHNL_OP_CONFIG_RSS_KEY
 * VIRTCHNL_OP_CONFIG_RSS_LUT
 * VF sends these messages to configure RSS. Only supported if both PF
 * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
 * configuration negotiation. If this is the case, then the RSS fields in
 * the VF resource struct are valid.
 * Both the key and LUT are initialized to 0 by the PF, meaning that
 * RSS is effectively disabled until set up by the VF.
 */
struct virtchnl_rss_key {
	u16 vsi_id;
	u16 key_len;
	u8 key[1];         /* RSS hash key, packed bytes */
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);

struct virtchnl_rss_lut {
	u16 vsi_id;
	u16 lut_entries;
	u8 lut[1];        /* RSS lookup table */
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);

/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
 * VIRTCHNL_OP_SET_RSS_HENA
 * VF sends these messages to get and set the hash filter enable bits for RSS.
 * By default, the PF sets these to all possible traffic types that the
 * hardware supports. The VF can query this value if it wants to change the
 * traffic types that are hashed by the hardware.
 */
struct virtchnl_rss_hena {
	u64 hena;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);

/* VIRTCHNL_OP_ENABLE_CHANNELS
 * VIRTCHNL_OP_DISABLE_CHANNELS
 * VF sends these messages to enable or disable channels based on
 * the user specified queue count and queue offset for each traffic class.
 * This struct encompasses all the information that the PF needs from
 * VF to create a channel.
 */
struct virtchnl_channel_info {
	u16 count; /* number of queues in a channel */
	u16 offset; /* queues in a channel start from 'offset' */
	u32 pad;
	u64 max_tx_rate;
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);

struct virtchnl_tc_info {
	u32	num_tc;
	u32	pad;
	struct	virtchnl_channel_info list[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);

/* VIRTCHNL_OP_ADD_CLOUD_FILTER
 * VIRTCHNL_OP_DEL_CLOUD_FILTER
 * VF sends these messages to add or delete a cloud filter based on the
 * user specified match and action filters. These structures encompass
 * all the information that the PF needs from the VF to add/delete a
 * cloud filter.
 */

struct virtchnl_l4_spec {
	u8	src_mac[ETH_ALEN];
	u8	dst_mac[ETH_ALEN];
	__be16	vlan_id;
	__be16	pad; /* reserved for future use */
	__be32	src_ip[4];
	__be32	dst_ip[4];
	__be16	src_port;
	__be16	dst_port;
};

VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);

union virtchnl_flow_spec {
	struct	virtchnl_l4_spec tcp_spec;
	u8	buffer[128]; /* reserved for future use */
};

VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);

enum virtchnl_action {
	/* action types */
	VIRTCHNL_ACTION_DROP = 0,
	VIRTCHNL_ACTION_TC_REDIRECT,
	VIRTCHNL_ACTION_PASSTHRU,
	VIRTCHNL_ACTION_QUEUE,
	VIRTCHNL_ACTION_Q_REGION,
	VIRTCHNL_ACTION_MARK,
	VIRTCHNL_ACTION_COUNT,
};

enum virtchnl_flow_type {
	/* flow types */
	VIRTCHNL_TCP_V4_FLOW = 0,
	VIRTCHNL_TCP_V6_FLOW,
};

struct virtchnl_filter {
	union	virtchnl_flow_spec data;
	union	virtchnl_flow_spec mask;
	enum	virtchnl_flow_type flow_type;
	enum	virtchnl_action action;
	u32	action_meta;
	u8	field_flags;
	u8	pad[3];
};

VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);

/* VIRTCHNL_OP_EVENT
 * PF sends this message to inform the VF driver of events that may affect it.
 * No direct response is expected from the VF, though it may generate other
 * messages in response to this one.
 */
enum virtchnl_event_codes {
	VIRTCHNL_EVENT_UNKNOWN = 0,
	VIRTCHNL_EVENT_LINK_CHANGE,
	VIRTCHNL_EVENT_RESET_IMPENDING,
	VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
};

#define PF_EVENT_SEVERITY_INFO		0
#define PF_EVENT_SEVERITY_CERTAIN_DOOM	255

struct virtchnl_pf_event {
	enum virtchnl_event_codes event;
	union {
		/* If the PF driver does not support the new speed reporting
		 * capabilities then use link_event else use link_event_adv to
		 * get the speed and link information. The ability to understand
		 * new speeds is indicated by setting the capability flag
		 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
		 * in virtchnl_vf_resource struct and can be used to determine
		 * which link event struct to use below.
		 */
		struct {
			enum virtchnl_link_speed link_speed;
			bool link_status;
		} link_event;
		struct {
			/* link_speed provided in Mbps */
			u32 link_speed;
			u8 link_status;
			u8 pad[3];
		} link_event_adv;
	} event_data;

	int severity;
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
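
/* A sketch of handling a link change event on the VF side; vfres is the
 * virtchnl_vf_resource returned by the PF and vpe points at the received
 * virtchnl_pf_event (variable names are illustrative):
 *
 *	if (vpe->event == VIRTCHNL_EVENT_LINK_CHANGE) {
 *		if (vfres->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
 *			link_up = vpe->event_data.link_event_adv.link_status;
 *			speed_mbps = vpe->event_data.link_event_adv.link_speed;
 *		} else {
 *			link_up = vpe->event_data.link_event.link_status;
 *			speed_enum = vpe->event_data.link_event.link_speed;
 *		}
 *	}
 */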

/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
 * VF uses this message to request PF to map IWARP vectors to IWARP queues.
 * The request for this originates from the VF IWARP driver through
 * a client interface between VF LAN and VF IWARP driver.
 * A vector could have an AEQ and CEQ attached to it although
 * there is a single AEQ per VF IWARP instance in which case
 * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
 * There will never be a case where there will be multiple CEQs attached
 * to a single vector.
 * PF configures interrupt mapping and returns status.
 */

struct virtchnl_iwarp_qv_info {
	u32 v_idx; /* msix_vector */
	u16 ceq_idx;
	u16 aeq_idx;
	u8 itr_idx;
	u8 pad[3];
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);

struct virtchnl_iwarp_qvlist_info {
	u32 num_vectors;
	struct virtchnl_iwarp_qv_info qv_info[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);

/* VF reset states - these are written into the RSTAT register:
 * VFGEN_RSTAT on the VF
 * When the PF initiates a reset, it writes 0
 * When the reset is complete, it writes 1
 * When the PF detects that the VF has recovered, it writes 2
 * VF checks this register periodically to determine if a reset has occurred,
 * then polls it to know when the reset is complete.
 * If either the PF or VF reads the register while the hardware
 * is in a reset state, it will return DEADBEEF, which, when masked,
 * will result in 3.
 */
enum virtchnl_vfr_states {
	VIRTCHNL_VFR_INPROGRESS = 0,
	VIRTCHNL_VFR_COMPLETED,
	VIRTCHNL_VFR_VFACTIVE,
};
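
/* A sketch of the post-reset poll described above. rd32() and the register
 * offset/mask names are illustrative stand-ins for the driver's own register
 * accessors and are not defined by this header:
 *
 *	do {
 *		rstat = rd32(hw, VFGEN_RSTAT) & VFGEN_RSTAT_VFR_STATE_MASK;
 *		if (rstat == VIRTCHNL_VFR_VFACTIVE ||
 *		    rstat == VIRTCHNL_VFR_COMPLETED)
 *			break;
 *		usleep_range(10, 20);
 *	} while (--retries);
 */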

/* Type of RSS algorithm */
enum virtchnl_rss_algorithm {
	VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC	= 0,
	VIRTCHNL_RSS_ALG_R_ASYMMETRIC		= 1,
	VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC	= 2,
	VIRTCHNL_RSS_ALG_XOR_SYMMETRIC		= 3,
};

#define VIRTCHNL_MAX_NUM_PROTO_HDRS	32
#define PROTO_HDR_SHIFT			5
#define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)

/* VF drivers use these macros to configure each protocol header. They select
 * which protocol headers and protocol header fields to use, based on
 * virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
 * @param hdr: a struct of virtchnl_proto_hdr
 * @param hdr_type: ETH/IPV4/TCP, etc
 * @param field: SRC/DST/TEID/SPI, etc
 */
#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
	((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
	((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
	((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr)	((hdr)->field_selector)

#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
	(VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
	(VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))

#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
	((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
	(((hdr)->type) >> PROTO_HDR_SHIFT)
#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
	((hdr)->type == ((val) >> PROTO_HDR_SHIFT))
#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
	(VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \
	 VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val)))

/* Protocol header type within a packet segment. A segment consists of one or
 * more protocol headers that make up a logical group. Each logical group of
 * headers may encapsulate, or be encapsulated by, another group using
 * tunneling or encapsulation protocols for network virtualization.
 */
enum virtchnl_proto_hdr_type {
	VIRTCHNL_PROTO_HDR_NONE,
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_S_VLAN,
	VIRTCHNL_PROTO_HDR_C_VLAN,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_TCP,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_SCTP,
	VIRTCHNL_PROTO_HDR_GTPU_IP,
	VIRTCHNL_PROTO_HDR_GTPU_EH,
	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
	VIRTCHNL_PROTO_HDR_PPPOE,
	VIRTCHNL_PROTO_HDR_L2TPV3,
	VIRTCHNL_PROTO_HDR_ESP,
	VIRTCHNL_PROTO_HDR_AH,
	VIRTCHNL_PROTO_HDR_PFCP,
};

/* Protocol header field within a protocol header. */
enum virtchnl_proto_hdr_field {
	/* ETHER */
	VIRTCHNL_PROTO_HDR_ETH_SRC =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
	VIRTCHNL_PROTO_HDR_ETH_DST,
	VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
	/* S-VLAN */
	VIRTCHNL_PROTO_HDR_S_VLAN_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
	/* C-VLAN */
	VIRTCHNL_PROTO_HDR_C_VLAN_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
	/* IPV4 */
	VIRTCHNL_PROTO_HDR_IPV4_SRC =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
	VIRTCHNL_PROTO_HDR_IPV4_DST,
	VIRTCHNL_PROTO_HDR_IPV4_DSCP,
	VIRTCHNL_PROTO_HDR_IPV4_TTL,
	VIRTCHNL_PROTO_HDR_IPV4_PROT,
	/* IPV6 */
	VIRTCHNL_PROTO_HDR_IPV6_SRC =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
	VIRTCHNL_PROTO_HDR_IPV6_DST,
	VIRTCHNL_PROTO_HDR_IPV6_TC,
	VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
	VIRTCHNL_PROTO_HDR_IPV6_PROT,
	/* TCP */
	VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
	VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
	/* UDP */
	VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
	VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
	/* SCTP */
	VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
	VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
	/* GTPU_IP */
	VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
	/* GTPU_EH */
	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
	VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
	/* PPPOE */
	VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
	/* L2TPV3 */
	VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
	/* ESP */
	VIRTCHNL_PROTO_HDR_ESP_SPI =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
	/* AH */
	VIRTCHNL_PROTO_HDR_AH_SPI =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
	/* PFCP */
	VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
	VIRTCHNL_PROTO_HDR_PFCP_SEID,
};

struct virtchnl_proto_hdr {
	enum virtchnl_proto_hdr_type type;
	u32 field_selector; /* a bit mask to select field for header type */
	u8 buffer[64];
	/**
	 * Binary buffer, in network byte order, for the specific header type.
	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4 header is
	 * expected to be copied into the buffer.
	 */
};

VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);

struct virtchnl_proto_hdrs {
	u8 tunnel_level;
	u8 pad[3];
	/**
	 * Specifies which layer the protocol headers start from:
	 * 0 - from the outer layer
	 * 1 - from the first inner layer
	 * 2 - from the second inner layer
	 * ....
	 **/
	int count; /* must be less than VIRTCHNL_MAX_NUM_PROTO_HDRS */
	struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
};

VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);

struct virtchnl_rss_cfg {
	struct virtchnl_proto_hdrs proto_hdrs;	   /* protocol headers */
	enum virtchnl_rss_algorithm rss_algorithm; /* RSS algorithm type */
	u8 reserved[128];			   /* reserve for future */
};

VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
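
/* A sketch of filling virtchnl_rss_cfg to hash outer IPv4 source/destination
 * addresses using the macros above (rss_cfg is assumed to be zeroed first):
 *
 *	struct virtchnl_proto_hdr *hdr = &rss_cfg->proto_hdrs.proto_hdr[0];
 *
 *	rss_cfg->proto_hdrs.tunnel_level = 0;	// outer layer
 *	rss_cfg->proto_hdrs.count = 1;
 *	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
 *	rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
 *	// send rss_cfg with VIRTCHNL_OP_ADD_RSS_CFG
 */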

/* action configuration for FDIR */
struct virtchnl_filter_action {
	enum virtchnl_action type;
	union {
		/* used for queue and qgroup action */
		struct {
			u16 index;
			u8 region;
		} queue;
		/* used for count action */
		struct {
			/* share counter ID with other flow rules */
			u8 shared;
			u32 id; /* counter ID */
		} count;
		/* used for mark action */
		u32 mark_id;
		u8 reserve[32];
	} act_conf;
};

VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);

#define VIRTCHNL_MAX_NUM_ACTIONS  8

struct virtchnl_filter_action_set {
	/* the number of actions must be less than VIRTCHNL_MAX_NUM_ACTIONS */
	int count;
	struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
};

VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);

/* pattern and action for FDIR rule */
struct virtchnl_fdir_rule {
	struct virtchnl_proto_hdrs proto_hdrs;
	struct virtchnl_filter_action_set action_set;
};

VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);

/* Status returned to VF after VF requests FDIR commands
 * VIRTCHNL_FDIR_SUCCESS
 * The VF FDIR related request was completed successfully by the PF.
 * The request can be OP_ADD/DEL.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
 * The OP_ADD_FDIR_FILTER request failed because no hardware resource is
 * available.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
 * The OP_ADD_FDIR_FILTER request failed because the rule already exists.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
 * The OP_ADD_FDIR_FILTER request failed because it conflicts with an
 * existing rule.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
 * The OP_DEL_FDIR_FILTER request failed because the rule does not exist.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
 * The OP_ADD_FDIR_FILTER request failed because parameter validation failed
 * or the hardware does not support the rule.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
 * The OP_ADD/DEL_FDIR_FILTER request failed because rule programming
 * timed out.
 */
enum virtchnl_fdir_prgm_status {
	VIRTCHNL_FDIR_SUCCESS = 0,
	VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
	VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
	VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
	VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
	VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
	VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
};

/* VIRTCHNL_OP_ADD_FDIR_FILTER
 * VF sends this request to PF by filling out vsi_id,
 * validate_only and rule_cfg. PF will return flow_id
 * if the request is successfully done and return the status to VF.
 */
struct virtchnl_fdir_add {
	u16 vsi_id;  /* INPUT */
	/*
	 * 1 to validate an FDIR rule, 0 to create an FDIR rule.
	 * Validate and create share one opcode: VIRTCHNL_OP_ADD_FDIR_FILTER.
	 */
	u16 validate_only; /* INPUT */
	u32 flow_id;       /* OUTPUT */
	struct virtchnl_fdir_rule rule_cfg; /* INPUT */
	enum virtchnl_fdir_prgm_status status; /* OUTPUT */
};

VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
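
/* A sketch of an OP_ADD_FDIR_FILTER request that steers IPv4/TCP packets
 * with a given destination port to a specific Rx queue (fltr is assumed to
 * be a zeroed struct virtchnl_fdir_add; names are illustrative):
 *
 *	struct virtchnl_proto_hdrs *hdrs = &fltr->rule_cfg.proto_hdrs;
 *	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
 *
 *	fltr->vsi_id = vsi_id;
 *	fltr->validate_only = 0;	// create (1 would only validate)
 *	hdrs->count = 2;
 *	VIRTCHNL_SET_PROTO_HDR_TYPE(&hdrs->proto_hdr[0], IPV4);
 *	VIRTCHNL_SET_PROTO_HDR_TYPE(&hdrs->proto_hdr[1], TCP);
 *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(&hdrs->proto_hdr[1], TCP, DST_PORT);
 *	// copy a TCP header with the desired dst_port into proto_hdr[1].buffer
 *	as->count = 1;
 *	as->actions[0].type = VIRTCHNL_ACTION_QUEUE;
 *	as->actions[0].act_conf.queue.index = rx_queue_idx;
 */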

/* VIRTCHNL_OP_DEL_FDIR_FILTER
 * VF sends this request to PF by filling out vsi_id
 * and flow_id. PF will return the status to VF.
 */
struct virtchnl_fdir_del {
	u16 vsi_id;  /* INPUT */
	u16 pad;
	u32 flow_id; /* INPUT */
	enum virtchnl_fdir_prgm_status status; /* OUTPUT */
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);

/**
 * virtchnl_vc_validate_vf_msg
 * @ver: Virtchnl version info
 * @v_opcode: Opcode for the message
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * validate msg format against struct for each opcode
 */
static inline int
virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
			    u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	int valid_len = 0;

	/* Validate message length. */
	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct virtchnl_version_info);
		break;
	case VIRTCHNL_OP_RESET_VF:
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		if (VF_IS_V11(ver))
			valid_len = sizeof(u32);
		break;
	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct virtchnl_txq_info);
		break;
	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct virtchnl_rxq_info);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct virtchnl_vsi_queue_config_info *vqc =
			    (struct virtchnl_vsi_queue_config_info *)msg;
			valid_len += (vqc->num_queue_pairs *
				      sizeof(struct
					     virtchnl_queue_pair_info));
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct virtchnl_irq_map_info *vimi =
			    (struct virtchnl_irq_map_info *)msg;
			valid_len += (vimi->num_vectors *
				      sizeof(struct virtchnl_vector_map));
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
	case VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct virtchnl_queue_select);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		valid_len = sizeof(struct virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct virtchnl_ether_addr_list *veal =
			    (struct virtchnl_ether_addr_list *)msg;
			valid_len += veal->num_elements *
			    sizeof(struct virtchnl_ether_addr);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_ADD_VLAN:
	case VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct virtchnl_vlan_filter_list *vfl =
			    (struct virtchnl_vlan_filter_list *)msg;
			valid_len += vfl->num_elements * sizeof(u16);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct virtchnl_promisc_info);
		break;
	case VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct virtchnl_queue_select);
		break;
	case VIRTCHNL_OP_IWARP:
		/* These messages are opaque to us and will be validated in
		 * the RDMA client code. We just need to check for nonzero
		 * length. The firmware will enforce max length restrictions.
		 */
		if (msglen)
			valid_len = msglen;
		else
			err_msg_format = true;
		break;
	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
		break;
	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
		valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
		if (msglen >= valid_len) {
			struct virtchnl_iwarp_qvlist_info *qv =
				(struct virtchnl_iwarp_qvlist_info *)msg;
			if (qv->num_vectors == 0) {
				err_msg_format = true;
				break;
			}
			valid_len += ((qv->num_vectors - 1) *
				sizeof(struct virtchnl_iwarp_qv_info));
		}
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		valid_len = sizeof(struct virtchnl_rss_key);
		if (msglen >= valid_len) {
			struct virtchnl_rss_key *vrk =
				(struct virtchnl_rss_key *)msg;
			valid_len += vrk->key_len - 1;
		}
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		valid_len = sizeof(struct virtchnl_rss_lut);
		if (msglen >= valid_len) {
			struct virtchnl_rss_lut *vrl =
				(struct virtchnl_rss_lut *)msg;
			valid_len += vrl->lut_entries - 1;
		}
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		valid_len = sizeof(struct virtchnl_rss_hena);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		valid_len = sizeof(struct virtchnl_vf_res_request);
		break;
	case VIRTCHNL_OP_ENABLE_CHANNELS:
		valid_len = sizeof(struct virtchnl_tc_info);
		if (msglen >= valid_len) {
			struct virtchnl_tc_info *vti =
				(struct virtchnl_tc_info *)msg;
			valid_len += (vti->num_tc - 1) *
				     sizeof(struct virtchnl_channel_info);
			if (vti->num_tc == 0)
				err_msg_format = true;
		}
		break;
	case VIRTCHNL_OP_DISABLE_CHANNELS:
		break;
	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
		valid_len = sizeof(struct virtchnl_filter);
		break;
	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
		valid_len = sizeof(struct virtchnl_filter);
		break;
	case VIRTCHNL_OP_ADD_RSS_CFG:
	case VIRTCHNL_OP_DEL_RSS_CFG:
		valid_len = sizeof(struct virtchnl_rss_cfg);
		break;
	case VIRTCHNL_OP_ADD_FDIR_FILTER:
		valid_len = sizeof(struct virtchnl_fdir_add);
		break;
	case VIRTCHNL_OP_DEL_FDIR_FILTER:
		valid_len = sizeof(struct virtchnl_fdir_del);
		break;
	/* These are always errors coming from the VF. */
	case VIRTCHNL_OP_EVENT:
	case VIRTCHNL_OP_UNKNOWN:
	default:
		return VIRTCHNL_STATUS_ERR_PARAM;
	}
	/* few more checks */
	if (err_msg_format || valid_len != msglen)
		return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;

	return 0;
}
#endif /* _VIRTCHNL_H_ */