// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_register.h"

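/* Polling bounds for the admin queue, in units of GVE_ADMINQ_SLEEP_LEN
 * milliseconds: ~10 s before warning that the device has not released the
 * admin queue, and ~2 s for a batch of commands to complete.
 */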
#define GVE_MAX_ADMINQ_RELEASE_CHECK 500
#define GVE_ADMINQ_SLEEP_LEN 20
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 100

#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \
"Expected: length=%d, feature_mask=%x.\n" \
"Actual: length=%d, feature_mask=%x.\n"

#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver.\n"

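/* Return the option that follows @option within @descriptor, or NULL if it
 * would run past the descriptor's total_length.
 */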
static
struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
					      struct gve_device_option *option)
{
	void *option_end, *descriptor_end;

	option_end = (void *)(option + 1) + be16_to_cpu(option->option_length);
	descriptor_end = (void *)descriptor + be16_to_cpu(descriptor->total_length);

	return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
}

static
void gve_parse_device_option(struct gve_priv *priv,
			     struct gve_device_descriptor *device_descriptor,
			     struct gve_device_option *option,
			     struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			     struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			     struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
{
	u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
	u16 option_length = be16_to_cpu(option->option_length);
	u16 option_id = be16_to_cpu(option->option_id);

	/* If the length or feature mask doesn't match, continue without
	 * enabling the feature.
	 */
	switch (option_id) {
	case GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING:
		if (option_length != GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Raw Addressing",
				 GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING,
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING,
				 option_length, req_feat_mask);
			break;
		}

		dev_info(&priv->pdev->dev,
			 "Gqi raw addressing device option enabled.\n");
		priv->queue_format = GVE_GQI_RDA_FORMAT;
		break;
	case GVE_DEV_OPT_ID_GQI_RDA:
		if (option_length < sizeof(**dev_op_gqi_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "GQI RDA", (int)sizeof(**dev_op_gqi_rda),
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_rda)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI RDA");
		}
		*dev_op_gqi_rda = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_GQI_QPL:
		if (option_length < sizeof(**dev_op_gqi_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_qpl)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI QPL");
		}
		*dev_op_gqi_qpl = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_DQO_RDA:
		if (option_length < sizeof(**dev_op_dqo_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
				 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_rda)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO RDA");
		}
		*dev_op_dqo_rda = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_JUMBO_FRAMES:
		if (option_length < sizeof(**dev_op_jumbo_frames) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Jumbo Frames",
				 (int)sizeof(**dev_op_jumbo_frames),
				 GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_jumbo_frames)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT,
				 "Jumbo Frames");
		}
		*dev_op_jumbo_frames = (void *)(option + 1);
		break;
	default:
		/* If we don't recognize the option just continue
		 * without doing anything.
		 */
		dev_dbg(&priv->pdev->dev, "Unrecognized device option 0x%hx not enabled.\n",
			option_id);
	}
}

/* Process all device options for a given describe device call. */
static int
gve_process_device_options(struct gve_priv *priv,
			   struct gve_device_descriptor *descriptor,
			   struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			   struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			   struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
{
	const int num_options = be16_to_cpu(descriptor->num_device_options);
	struct gve_device_option *dev_opt;
	int i;

	/* The options struct directly follows the device descriptor. */
	dev_opt = (void *)(descriptor + 1);
	for (i = 0; i < num_options; i++) {
		struct gve_device_option *next_opt;

		next_opt = gve_get_next_option(descriptor, dev_opt);
		if (!next_opt) {
			dev_err(&priv->dev->dev,
				"options exceed device_descriptor's total length.\n");
			return -EINVAL;
		}

		gve_parse_device_option(priv, descriptor, dev_opt,
					dev_op_gqi_rda, dev_op_gqi_qpl,
					dev_op_dqo_rda, dev_op_jumbo_frames);
		dev_opt = next_opt;
	}

	return 0;
}

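/* The admin queue is a single DMA-coherent page of commands; the device is
 * told where it lives by writing its page frame number to the adminq_pfn
 * register.
 */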
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
{
	priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
					  &priv->adminq_bus_addr, GFP_KERNEL);
	if (unlikely(!priv->adminq))
		return -ENOMEM;

	priv->adminq_mask = (PAGE_SIZE / sizeof(union gve_adminq_command)) - 1;
	priv->adminq_prod_cnt = 0;
	priv->adminq_cmd_fail = 0;
	priv->adminq_timeouts = 0;
	priv->adminq_describe_device_cnt = 0;
	priv->adminq_cfg_device_resources_cnt = 0;
	priv->adminq_register_page_list_cnt = 0;
	priv->adminq_unregister_page_list_cnt = 0;
	priv->adminq_create_tx_queue_cnt = 0;
	priv->adminq_create_rx_queue_cnt = 0;
	priv->adminq_destroy_tx_queue_cnt = 0;
	priv->adminq_destroy_rx_queue_cnt = 0;
	priv->adminq_dcfg_device_resources_cnt = 0;
	priv->adminq_set_driver_parameter_cnt = 0;
	priv->adminq_report_stats_cnt = 0;
	priv->adminq_report_link_speed_cnt = 0;
	priv->adminq_get_ptype_map_cnt = 0;

	/* Setup Admin queue with the device */
	iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
		    &priv->reg_bar0->adminq_pfn);

	gve_set_admin_queue_ok(priv);
	return 0;
}

void gve_adminq_release(struct gve_priv *priv)
{
	int i = 0;

	/* Tell the device the adminq is leaving */
	iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
	while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
		/* If this is reached the device is unrecoverable and still
		 * holding memory. Continue looping to avoid memory corruption,
		 * but WARN so it is visible what is going on.
		 */
		if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
			WARN(1, "Unrecoverable platform error!");
		i++;
		msleep(GVE_ADMINQ_SLEEP_LEN);
	}
	gve_clear_device_rings_ok(priv);
	gve_clear_device_resources_ok(priv);
	gve_clear_admin_queue_ok(priv);
}

void gve_adminq_free(struct device *dev, struct gve_priv *priv)
{
	if (!gve_get_admin_queue_ok(priv))
		return;
	gve_adminq_release(priv);
	dma_free_coherent(dev, PAGE_SIZE, priv->adminq, priv->adminq_bus_addr);
	gve_clear_admin_queue_ok(priv);
}

static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	iowrite32be(prod_cnt, &priv->reg_bar0->adminq_doorbell);
}

static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	int i;

	for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
		if (ioread32be(&priv->reg_bar0->adminq_event_counter)
		    == prod_cnt)
			return true;
		msleep(GVE_ADMINQ_SLEEP_LEN);
	}

	return false;
}

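/* Map admin queue status codes reported by the device onto kernel error
 * codes; transient, retryable statuses map to -EAGAIN.
 */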
static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
{
	if (status != GVE_ADMINQ_COMMAND_PASSED &&
	    status != GVE_ADMINQ_COMMAND_UNSET) {
		dev_err(&priv->pdev->dev, "AQ command failed with status %d\n", status);
		priv->adminq_cmd_fail++;
	}
	switch (status) {
	case GVE_ADMINQ_COMMAND_PASSED:
		return 0;
	case GVE_ADMINQ_COMMAND_UNSET:
		dev_err(&priv->pdev->dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
	case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
	case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
	case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
	case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
		return -EAGAIN;
	case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
	case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
	case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
	case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
	case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
	case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
		return -ETIME;
	case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
	case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
		return -EACCES;
	case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
		return -ENOMEM;
	case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
		return -EOPNOTSUPP;
	default:
		dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
		return -EINVAL;
	}
}

/* Flushes all AQ commands currently queued and waits for them to complete.
 * If there are failures, it will return the first error.
 */
static int gve_adminq_kick_and_wait(struct gve_priv *priv)
{
	int tail, head;
	int i;

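	/* tail is the last command the device has completed (its event
	 * counter); head is the last command the driver has posted.
	 */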
	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;

	gve_adminq_kick_cmd(priv, head);
	if (!gve_adminq_wait_for_cmd(priv, head)) {
		dev_err(&priv->pdev->dev, "AQ commands timed out, need to reset AQ\n");
		priv->adminq_timeouts++;
		return -ENOTRECOVERABLE;
	}

	for (i = tail; i < head; i++) {
		union gve_adminq_command *cmd;
		u32 status, err;

		cmd = &priv->adminq[i & priv->adminq_mask];
		status = be32_to_cpu(READ_ONCE(cmd->status));
		err = gve_adminq_parse_err(priv, status);
		if (err)
			// Return the first error if we failed.
			return err;
	}

	return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 */
static int gve_adminq_issue_cmd(struct gve_priv *priv,
				union gve_adminq_command *cmd_orig)
{
	union gve_adminq_command *cmd;
	u32 opcode;
	u32 tail;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);

	// Check if next command will overflow the buffer.
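	// (prod_cnt + 1 == tail, modulo the ring size, means only one free
	// slot remains; flushing here keeps a full ring distinguishable from
	// an empty one, where head == tail.)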
	if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
	    (tail & priv->adminq_mask)) {
		int err;

		// Flush existing commands to make room.
		err = gve_adminq_kick_and_wait(priv);
		if (err)
			return err;

		// Retry.
		tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
		if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
		    (tail & priv->adminq_mask)) {
			// This should never happen. We just flushed the
			// command queue so there should be enough space.
			return -ENOMEM;
		}
	}

	cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
	priv->adminq_prod_cnt++;

	memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
	opcode = be32_to_cpu(READ_ONCE(cmd->opcode));

	switch (opcode) {
	case GVE_ADMINQ_DESCRIBE_DEVICE:
		priv->adminq_describe_device_cnt++;
		break;
	case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
		priv->adminq_cfg_device_resources_cnt++;
		break;
	case GVE_ADMINQ_REGISTER_PAGE_LIST:
		priv->adminq_register_page_list_cnt++;
		break;
	case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
		priv->adminq_unregister_page_list_cnt++;
		break;
	case GVE_ADMINQ_CREATE_TX_QUEUE:
		priv->adminq_create_tx_queue_cnt++;
		break;
	case GVE_ADMINQ_CREATE_RX_QUEUE:
		priv->adminq_create_rx_queue_cnt++;
		break;
	case GVE_ADMINQ_DESTROY_TX_QUEUE:
		priv->adminq_destroy_tx_queue_cnt++;
		break;
	case GVE_ADMINQ_DESTROY_RX_QUEUE:
		priv->adminq_destroy_rx_queue_cnt++;
		break;
	case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
		priv->adminq_dcfg_device_resources_cnt++;
		break;
	case GVE_ADMINQ_SET_DRIVER_PARAMETER:
		priv->adminq_set_driver_parameter_cnt++;
		break;
	case GVE_ADMINQ_REPORT_STATS:
		priv->adminq_report_stats_cnt++;
		break;
	case GVE_ADMINQ_REPORT_LINK_SPEED:
		priv->adminq_report_link_speed_cnt++;
		break;
	case GVE_ADMINQ_GET_PTYPE_MAP:
		priv->adminq_get_ptype_map_cnt++;
		break;
	case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
		priv->adminq_verify_driver_compatibility_cnt++;
		break;
	default:
		dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
	}

	return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 * The caller is also responsible for making sure there are no commands
 * waiting to be executed.
 */
static int gve_adminq_execute_cmd(struct gve_priv *priv,
				  union gve_adminq_command *cmd_orig)
{
	u32 tail, head;
	int err;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;
	if (tail != head)
		// This is not a valid path
		return -EINVAL;

	err = gve_adminq_issue_cmd(priv, cmd_orig);
	if (err)
		return err;

	return gve_adminq_kick_and_wait(priv);
}

/* The device specifies that the management vector can either be the first irq
 * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
 * the ntfy blks. If it is 0 then the management vector is last, if it is 1 then
 * the management vector is first.
 *
 * gve arranges the msix vectors so that the management vector is last.
 */
#define GVE_NTFY_BLK_BASE_MSIX_IDX	0
int gve_adminq_configure_device_resources(struct gve_priv *priv,
					  dma_addr_t counter_array_bus_addr,
					  u32 num_counters,
					  dma_addr_t db_array_bus_addr,
					  u32 num_ntfy_blks)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
	cmd.configure_device_resources =
		(struct gve_adminq_configure_device_resources) {
		.counter_array = cpu_to_be64(counter_array_bus_addr),
		.num_counters = cpu_to_be32(num_counters),
		.irq_db_addr = cpu_to_be64(db_array_bus_addr),
		.num_irq_dbs = cpu_to_be32(num_ntfy_blks),
		.irq_db_stride = cpu_to_be32(sizeof(*priv->irq_db_indices)),
		.ntfy_blk_msix_base_idx =
					cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
		.queue_format = priv->queue_format,
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);

	return gve_adminq_execute_cmd(priv, &cmd);
}

static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	struct gve_tx_ring *tx = &priv->tx[queue_index];
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
	cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.queue_resources_addr =
			cpu_to_be64(tx->q_resources_bus),
		.tx_ring_addr = cpu_to_be64(tx->bus),
		.ntfy_id = cpu_to_be32(tx->ntfy_id),
	};

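	/* GQI queues reference a registered queue page list (or use raw
	 * addressing); DQO queues instead describe the ring size and the
	 * Tx completion ring.
	 */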
	if (gve_is_gqi(priv)) {
		u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;

		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
	} else {
		cmd.create_tx_queue.tx_ring_size =
			cpu_to_be16(priv->tx_desc_cnt);
		cmd.create_tx_queue.tx_comp_ring_addr =
			cpu_to_be64(tx->complq_bus_dqo);
		cmd.create_tx_queue.tx_comp_ring_size =
			cpu_to_be16(priv->options_dqo_rda.tx_comp_ring_entries);
	}

	return gve_adminq_issue_cmd(priv, &cmd);
}

int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_create_tx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	struct gve_rx_ring *rx = &priv->rx[queue_index];
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
	cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.ntfy_id = cpu_to_be32(rx->ntfy_id),
		.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
	};

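	/* GQI Rx queues reference a page list (or raw addressing) and one
	 * descriptor/data ring pair; DQO queues have separate completion and
	 * buffer rings, and enable RSC when the netdev has LRO turned on.
	 */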
	if (gve_is_gqi(priv)) {
		u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;

		cmd.create_rx_queue.rx_desc_ring_addr =
			cpu_to_be64(rx->desc.bus);
		cmd.create_rx_queue.rx_data_ring_addr =
			cpu_to_be64(rx->data.data_bus);
		cmd.create_rx_queue.index = cpu_to_be32(queue_index);
		cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
	} else {
		cmd.create_rx_queue.rx_ring_size =
			cpu_to_be16(priv->rx_desc_cnt);
		cmd.create_rx_queue.rx_desc_ring_addr =
			cpu_to_be64(rx->dqo.complq.bus);
		cmd.create_rx_queue.rx_data_ring_addr =
			cpu_to_be64(rx->dqo.bufq.bus);
		cmd.create_rx_queue.packet_buffer_size =
			cpu_to_be16(priv->data_buffer_size_dqo);
		cmd.create_rx_queue.rx_buff_ring_size =
			cpu_to_be16(priv->options_dqo_rda.rx_buff_ring_entries);
		cmd.create_rx_queue.enable_rsc =
			!!(priv->dev->features & NETIF_F_LRO);
	}

	return gve_adminq_issue_cmd(priv, &cmd);
}

int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_create_rx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
	cmd.destroy_tx_queue = (struct gve_adminq_destroy_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};

	err = gve_adminq_issue_cmd(priv, &cmd);
	if (err)
		return err;

	return 0;
}

int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_destroy_tx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
	cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};

	err = gve_adminq_issue_cmd(priv, &cmd);
	if (err)
		return err;

	return 0;
}

int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_destroy_rx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

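/* Reject descriptor counts so small that a descriptor ring would not fill a
 * single page.
 */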
static int gve_set_desc_cnt(struct gve_priv *priv,
			    struct gve_device_descriptor *descriptor)
{
	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
	if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
		dev_err(&priv->pdev->dev, "Tx desc count %d too low\n",
			priv->tx_desc_cnt);
		return -EINVAL;
	}
	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
	if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
	    < PAGE_SIZE) {
		dev_err(&priv->pdev->dev, "Rx desc count %d too low\n",
			priv->rx_desc_cnt);
		return -EINVAL;
	}
	return 0;
}

static int
gve_set_desc_cnt_dqo(struct gve_priv *priv,
		     const struct gve_device_descriptor *descriptor,
		     const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
{
	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
	priv->options_dqo_rda.tx_comp_ring_entries =
		be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
	priv->options_dqo_rda.rx_buff_ring_entries =
		be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);

	return 0;
}

static void gve_enable_supported_features(struct gve_priv *priv,
					  u32 supported_features_mask,
					  const struct gve_device_option_jumbo_frames
					  *dev_op_jumbo_frames)
{
	/* Before control reaches this point, the page-size-capped max MTU from
	 * the gve_device_descriptor field has already been stored in
	 * priv->dev->max_mtu. We overwrite it with the true max MTU below.
	 */
	if (dev_op_jumbo_frames &&
	    (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
		dev_info(&priv->pdev->dev,
			 "JUMBO FRAMES device option enabled.\n");
		priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
	}
}

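/* Ask the device to describe itself: it fills a one-page DMA buffer with a
 * gve_device_descriptor followed by a list of device options. The options
 * decide the queue format; the descriptor supplies the MTU, MAC address and
 * queue limits cached below.
 */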
int gve_adminq_describe_device(struct gve_priv *priv)
{
	struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
	struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
	struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
	struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
	struct gve_device_descriptor *descriptor;
	u32 supported_features_mask = 0;
	union gve_adminq_command cmd;
	dma_addr_t descriptor_bus;
	int err = 0;
	u8 *mac;
	u16 mtu;

	memset(&cmd, 0, sizeof(cmd));
	descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
					&descriptor_bus, GFP_KERNEL);
	if (!descriptor)
		return -ENOMEM;
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
	cmd.describe_device.device_descriptor_addr =
						cpu_to_be64(descriptor_bus);
	cmd.describe_device.device_descriptor_version =
			cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
	cmd.describe_device.available_length = cpu_to_be32(PAGE_SIZE);

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto free_device_descriptor;

	err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
					 &dev_op_gqi_qpl, &dev_op_dqo_rda,
					 &dev_op_jumbo_frames);
	if (err)
		goto free_device_descriptor;

	/* If the GQI_RAW_ADDRESSING option is not enabled and the queue format
	 * is not set to GqiRda, choose the queue format in a priority order:
	 * DqoRda, GqiRda, GqiQpl. Use GqiQpl as default.
	 */
	if (dev_op_dqo_rda) {
		priv->queue_format = GVE_DQO_RDA_FORMAT;
		dev_info(&priv->pdev->dev,
			 "Driver is running with DQO RDA queue format.\n");
		supported_features_mask =
			be32_to_cpu(dev_op_dqo_rda->supported_features_mask);
	} else if (dev_op_gqi_rda) {
		priv->queue_format = GVE_GQI_RDA_FORMAT;
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI RDA queue format.\n");
		supported_features_mask =
			be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
	} else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI RDA queue format.\n");
	} else {
		priv->queue_format = GVE_GQI_QPL_FORMAT;
		if (dev_op_gqi_qpl)
			supported_features_mask =
				be32_to_cpu(dev_op_gqi_qpl->supported_features_mask);
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI QPL queue format.\n");
	}
	if (gve_is_gqi(priv)) {
		err = gve_set_desc_cnt(priv, descriptor);
	} else {
		/* DQO supports LRO. */
		priv->dev->hw_features |= NETIF_F_LRO;
		err = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda);
	}
	if (err)
		goto free_device_descriptor;

	priv->max_registered_pages =
				be64_to_cpu(descriptor->max_registered_pages);
	mtu = be16_to_cpu(descriptor->mtu);
	if (mtu < ETH_MIN_MTU) {
		dev_err(&priv->pdev->dev, "MTU %d below minimum MTU\n", mtu);
		err = -EINVAL;
		goto free_device_descriptor;
	}
	priv->dev->max_mtu = mtu;
	priv->num_event_counters = be16_to_cpu(descriptor->counters);
	eth_hw_addr_set(priv->dev, descriptor->mac);
	mac = descriptor->mac;
	dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
	priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
	priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);

	if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
		dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
			priv->rx_data_slot_cnt);
		priv->rx_desc_cnt = priv->rx_data_slot_cnt;
	}
	priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);

	gve_enable_supported_features(priv, supported_features_mask,
				      dev_op_jumbo_frames);

free_device_descriptor:
	dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
			  descriptor_bus);
	return err;
}

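/* Hand the device a queue page list: the page bus addresses are copied into a
 * temporary DMA array of big-endian u64s, which is only needed for the
 * duration of the command.
 */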
int gve_adminq_register_page_list(struct gve_priv *priv,
				  struct gve_queue_page_list *qpl)
{
	struct device *hdev = &priv->pdev->dev;
	u32 num_entries = qpl->num_entries;
	u32 size = num_entries * sizeof(qpl->page_buses[0]);
	union gve_adminq_command cmd;
	dma_addr_t page_list_bus;
	__be64 *page_list;
	int err;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	page_list = dma_alloc_coherent(hdev, size, &page_list_bus, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		page_list[i] = cpu_to_be64(qpl->page_buses[i]);

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REGISTER_PAGE_LIST);
	cmd.reg_page_list = (struct gve_adminq_register_page_list) {
		.page_list_id = cpu_to_be32(qpl->id),
		.num_pages = cpu_to_be32(num_entries),
		.page_address_list_addr = cpu_to_be64(page_list_bus),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	dma_free_coherent(hdev, size, page_list, page_list_bus);
	return err;
}

int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
	cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
		.page_list_id = cpu_to_be32(page_list_id),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
	cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
		.parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
		.parameter_value = cpu_to_be64(mtu),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
			    dma_addr_t stats_report_addr, u64 interval)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_STATS);
	cmd.report_stats = (struct gve_adminq_report_stats) {
		.stats_report_len = cpu_to_be64(stats_report_len),
		.stats_report_addr = cpu_to_be64(stats_report_addr),
		.interval = cpu_to_be64(interval),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
					   u64 driver_info_len,
					   dma_addr_t driver_info_addr)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY);
	cmd.verify_driver_compatibility = (struct gve_adminq_verify_driver_compatibility) {
		.driver_info_len = cpu_to_be64(driver_info_len),
		.driver_info_addr = cpu_to_be64(driver_info_addr),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

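/* The device reports link speed by writing a single __be64 into the DMA
 * buffer supplied with the command.
 */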
int gve_adminq_report_link_speed(struct gve_priv *priv)
{
	union gve_adminq_command gvnic_cmd;
	dma_addr_t link_speed_region_bus;
	__be64 *link_speed_region;
	int err;

	link_speed_region =
		dma_alloc_coherent(&priv->pdev->dev, sizeof(*link_speed_region),
				   &link_speed_region_bus, GFP_KERNEL);

	if (!link_speed_region)
		return -ENOMEM;

	memset(&gvnic_cmd, 0, sizeof(gvnic_cmd));
	gvnic_cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_LINK_SPEED);
	gvnic_cmd.report_link_speed.link_speed_address =
		cpu_to_be64(link_speed_region_bus);

	err = gve_adminq_execute_cmd(priv, &gvnic_cmd);

	priv->link_speed = be64_to_cpu(*link_speed_region);
	dma_free_coherent(&priv->pdev->dev, sizeof(*link_speed_region), link_speed_region,
			  link_speed_region_bus);
	return err;
}

int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
				 struct gve_ptype_lut *ptype_lut)
{
	struct gve_ptype_map *ptype_map;
	union gve_adminq_command cmd;
	dma_addr_t ptype_map_bus;
	int err = 0;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	ptype_map = dma_alloc_coherent(&priv->pdev->dev, sizeof(*ptype_map),
				       &ptype_map_bus, GFP_KERNEL);
	if (!ptype_map)
		return -ENOMEM;

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_GET_PTYPE_MAP);
	cmd.get_ptype_map = (struct gve_adminq_get_ptype_map) {
		.ptype_map_len = cpu_to_be64(sizeof(*ptype_map)),
		.ptype_map_addr = cpu_to_be64(ptype_map_bus),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto err;

	/* Populate ptype_lut. */
	for (i = 0; i < GVE_NUM_PTYPES; i++) {
		ptype_lut->ptypes[i].l3_type =
			ptype_map->ptypes[i].l3_type;
		ptype_lut->ptypes[i].l4_type =
			ptype_map->ptypes[i].l4_type;
	}
err:
	dma_free_coherent(&priv->pdev->dev, sizeof(*ptype_map), ptype_map,
			  ptype_map_bus);
	return err;
}