// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define DRIVER_AUTHOR "Intel Corporation"
#define IFCVF_DRIVER_NAME "ifcvf"

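/*
 * Config-change interrupt handler: forward the event to the config
 * callback registered by the vDPA upper layer (e.g. a link status
 * change for a net device), if any.
 */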
static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
        struct ifcvf_hw *vf = arg;

        if (vf->config_cb.callback)
                return vf->config_cb.callback(vf->config_cb.private);

        return IRQ_HANDLED;
}

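/*
 * Per-vq interrupt handler: used when each virtqueue owns a dedicated
 * MSI-X vector, so @arg is the vring_info of exactly one queue.
 */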
static irqreturn_t ifcvf_vq_intr_handler(int irq, void *arg)
{
        struct vring_info *vring = arg;

        if (vring->cb.callback)
                return vring->cb.callback(vring->cb.private);

        return IRQ_HANDLED;
}

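/*
 * Shared-vq interrupt handler: all virtqueues share one vector, so the
 * source queue is unknown and every registered callback is invoked.
 */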
static irqreturn_t ifcvf_vqs_reused_intr_handler(int irq, void *arg)
{
        struct ifcvf_hw *vf = arg;
        struct vring_info *vring;
        int i;

        for (i = 0; i < vf->nr_vring; i++) {
                vring = &vf->vring[i];
                if (vring->cb.callback)
                        vring->cb.callback(vring->cb.private);
        }

        return IRQ_HANDLED;
}

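/*
 * Device-shared interrupt handler: one vector serves both the vqs and
 * the config interrupt; the ISR status byte tells a config change
 * apart from queue activity.
 */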
static irqreturn_t ifcvf_dev_intr_handler(int irq, void *arg)
{
        struct ifcvf_hw *vf = arg;
        u8 isr;

        isr = vp_ioread8(vf->isr);
        if (isr & VIRTIO_PCI_ISR_CONFIG)
                ifcvf_config_changed(irq, arg);

        return ifcvf_vqs_reused_intr_handler(irq, arg);
}

static void ifcvf_free_irq_vectors(void *data)
{
        pci_free_irq_vectors(data);
}

static void ifcvf_free_per_vq_irq(struct ifcvf_hw *vf)
{
        struct pci_dev *pdev = vf->pdev;
        int i;

        for (i = 0; i < vf->nr_vring; i++) {
                if (vf->vring[i].irq != -EINVAL) {
                        devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
                        vf->vring[i].irq = -EINVAL;
                }
        }
}

static void ifcvf_free_vqs_reused_irq(struct ifcvf_hw *vf)
{
        struct pci_dev *pdev = vf->pdev;

        if (vf->vqs_reused_irq != -EINVAL) {
                devm_free_irq(&pdev->dev, vf->vqs_reused_irq, vf);
                vf->vqs_reused_irq = -EINVAL;
        }
}

static void ifcvf_free_vq_irq(struct ifcvf_hw *vf)
{
        if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
                ifcvf_free_per_vq_irq(vf);
        else
                ifcvf_free_vqs_reused_irq(vf);
}

static void ifcvf_free_config_irq(struct ifcvf_hw *vf)
{
        struct pci_dev *pdev = vf->pdev;

        if (vf->config_irq == -EINVAL)
                return;

        /* If the irq is shared by all vqs and the config interrupt,
         * it was already freed in ifcvf_free_vq_irq, so the config irq
         * only needs to be freed here when msix_vector_status !=
         * MSIX_VECTOR_DEV_SHARED.
         */
        if (vf->msix_vector_status != MSIX_VECTOR_DEV_SHARED) {
                devm_free_irq(&pdev->dev, vf->config_irq, vf);
                vf->config_irq = -EINVAL;
        }
}

static void ifcvf_free_irq(struct ifcvf_hw *vf)
{
        struct pci_dev *pdev = vf->pdev;

        ifcvf_free_vq_irq(vf);
        ifcvf_free_config_irq(vf);
        ifcvf_free_irq_vectors(pdev);
}

/* ifcvf MSI-X vector allocator: this helper tries to allocate
 * vectors for all virtqueues and the config interrupt.
 * It returns the number of allocated vectors, or a negative
 * value on failure.
 */
static int ifcvf_alloc_vectors(struct ifcvf_hw *vf)
{
        struct pci_dev *pdev = vf->pdev;
        int max_intr, ret;

        /* all queues and config interrupt */
        max_intr = vf->nr_vring + 1;
        ret = pci_alloc_irq_vectors(pdev, 1, max_intr, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);

        if (ret < 0) {
                IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
                return ret;
        }

        if (ret < max_intr)
                IFCVF_INFO(pdev,
                           "Requested %u vectors, however only %u allocated, lower performance\n",
                           max_intr, ret);

        return ret;
}

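/*
 * Per-vq irq setup: vector i is requested for vq i and programmed into
 * the vq's MSI-X vector register; the config vector is set up
 * separately in ifcvf_request_config_irq().
 */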
static int ifcvf_request_per_vq_irq(struct ifcvf_hw *vf)
{
        struct pci_dev *pdev = vf->pdev;
        int i, vector, ret, irq;

        vf->vqs_reused_irq = -EINVAL;
        for (i = 0; i < vf->nr_vring; i++) {
                snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", pci_name(pdev), i);
                vector = i;
                irq = pci_irq_vector(pdev, vector);
                ret = devm_request_irq(&pdev->dev, irq,
                                       ifcvf_vq_intr_handler, 0,
                                       vf->vring[i].msix_name,
                                       &vf->vring[i]);
                if (ret) {
                        IFCVF_ERR(pdev, "Failed to request irq for vq %d\n", i);
                        goto err;
                }

                vf->vring[i].irq = irq;
                ret = ifcvf_set_vq_vector(vf, i, vector);
                if (ret == VIRTIO_MSI_NO_VECTOR) {
                        IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
                        goto err;
                }
        }

        return 0;
err:
        ifcvf_free_irq(vf);

        return -EFAULT;
}

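/*
 * Shared-vq irq setup: vector 0 is requested once with the whole
 * ifcvf_hw as cookie, and every vq's MSI-X vector register is pointed
 * at vector 0.
 */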
static int ifcvf_request_vqs_reused_irq(struct ifcvf_hw *vf)
{
        struct pci_dev *pdev = vf->pdev;
        int i, vector, ret, irq;

        vector = 0;
        snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-vqs-reused-irq\n", pci_name(pdev));
        irq = pci_irq_vector(pdev, vector);
        ret = devm_request_irq(&pdev->dev, irq,
                               ifcvf_vqs_reused_intr_handler, 0,
                               vf->vring[0].msix_name, vf);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request reused irq for the device\n");
                goto err;
        }

        vf->vqs_reused_irq = irq;
        for (i = 0; i < vf->nr_vring; i++) {
                vf->vring[i].irq = -EINVAL;
                ret = ifcvf_set_vq_vector(vf, i, vector);
                if (ret == VIRTIO_MSI_NO_VECTOR) {
                        IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
                        goto err;
                }
        }

        return 0;
err:
        ifcvf_free_irq(vf);

        return -EFAULT;
}

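/*
 * Fully shared setup: with a single vector available, vector 0 serves
 * all vqs and the config interrupt, dispatched at interrupt time by
 * ifcvf_dev_intr_handler().
 */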
static int ifcvf_request_dev_irq(struct ifcvf_hw *vf)
{
        struct pci_dev *pdev = vf->pdev;
        int i, vector, ret, irq;

        vector = 0;
        snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-dev-irq\n", pci_name(pdev));
        irq = pci_irq_vector(pdev, vector);
        ret = devm_request_irq(&pdev->dev, irq,
                               ifcvf_dev_intr_handler, 0,
                               vf->vring[0].msix_name, vf);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request irq for the device\n");
                goto err;
        }

        vf->vqs_reused_irq = irq;
        for (i = 0; i < vf->nr_vring; i++) {
                vf->vring[i].irq = -EINVAL;
                ret = ifcvf_set_vq_vector(vf, i, vector);
                if (ret == VIRTIO_MSI_NO_VECTOR) {
                        IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
                        goto err;
                }
        }

        vf->config_irq = irq;
        ret = ifcvf_set_config_vector(vf, vector);
        if (ret == VIRTIO_MSI_NO_VECTOR) {
                IFCVF_ERR(pdev, "No msix vector for device config\n");
                goto err;
        }

        return 0;
err:
        ifcvf_free_irq(vf);

        return -EFAULT;
}

static int ifcvf_request_vq_irq(struct ifcvf_hw *vf)
{
        int ret;

        if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
                ret = ifcvf_request_per_vq_irq(vf);
        else
                ret = ifcvf_request_vqs_reused_irq(vf);

        return ret;
}

static int ifcvf_request_config_irq(struct ifcvf_hw *vf)
{
        struct pci_dev *pdev = vf->pdev;
        int config_vector, ret;

        if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
                config_vector = vf->nr_vring;
        else if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
                /* vector 0 for vqs and 1 for config interrupt */
                config_vector = 1;
        else if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
                /* re-use the vqs vector */
                return 0;
        else
                return -EINVAL;

        snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
                 pci_name(pdev));
        vf->config_irq = pci_irq_vector(pdev, config_vector);
        ret = devm_request_irq(&pdev->dev, vf->config_irq,
                               ifcvf_config_changed, 0,
                               vf->config_msix_name, vf);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request config irq\n");
                goto err;
        }

        ret = ifcvf_set_config_vector(vf, config_vector);
        if (ret == VIRTIO_MSI_NO_VECTOR) {
                IFCVF_ERR(pdev, "No msix vector for device config\n");
                goto err;
        }

        return 0;
err:
        ifcvf_free_irq(vf);

        return -EFAULT;
}

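/*
 * Choose an interrupt layout based on how many MSI-X vectors the
 * device actually provides, with nr_vring queues:
 *   nr_vring + 1 vectors: MSIX_VECTOR_PER_VQ_AND_CONFIG, one vector
 *       per vq plus a dedicated config vector;
 *   2..nr_vring vectors:  MSIX_VECTOR_SHARED_VQ_AND_CONFIG, vector 0
 *       shared by all vqs, vector 1 for the config interrupt;
 *   1 vector:             MSIX_VECTOR_DEV_SHARED, vector 0 shared by
 *       all vqs and the config interrupt.
 */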
static int ifcvf_request_irq(struct ifcvf_hw *vf)
{
        int nvectors, ret, max_intr;

        nvectors = ifcvf_alloc_vectors(vf);
        if (nvectors <= 0)
                return -EFAULT;

        vf->msix_vector_status = MSIX_VECTOR_PER_VQ_AND_CONFIG;
        max_intr = vf->nr_vring + 1;
        if (nvectors < max_intr)
                vf->msix_vector_status = MSIX_VECTOR_SHARED_VQ_AND_CONFIG;

        if (nvectors == 1) {
                vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;
                ret = ifcvf_request_dev_irq(vf);

                return ret;
        }

        ret = ifcvf_request_vq_irq(vf);
        if (ret)
                return ret;

        ret = ifcvf_request_config_irq(vf);
        if (ret)
                return ret;

        return 0;
}

static int ifcvf_start_datapath(struct ifcvf_adapter *adapter)
{
        struct ifcvf_hw *vf = adapter->vf;
        u8 status;
        int ret;

        ret = ifcvf_start_hw(vf);
        if (ret < 0) {
                status = ifcvf_get_status(vf);
                status |= VIRTIO_CONFIG_S_FAILED;
                ifcvf_set_status(vf, status);
        }

        return ret;
}

static int ifcvf_stop_datapath(struct ifcvf_adapter *adapter)
{
        struct ifcvf_hw *vf = adapter->vf;
        int i;

        for (i = 0; i < vf->nr_vring; i++)
                vf->vring[i].cb.callback = NULL;

        ifcvf_stop_hw(vf);

        return 0;
}

static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
{
        struct ifcvf_hw *vf = adapter->vf;
        int i;

        for (i = 0; i < vf->nr_vring; i++) {
                vf->vring[i].last_avail_idx = 0;
                vf->vring[i].desc = 0;
                vf->vring[i].avail = 0;
                vf->vring[i].used = 0;
                vf->vring[i].ready = 0;
                vf->vring[i].cb.callback = NULL;
                vf->vring[i].cb.private = NULL;
        }

        ifcvf_reset(vf);
}

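/*
 * The vdpa_device is embedded in struct ifcvf_adapter, so both the
 * adapter and its ifcvf_hw can be recovered from a vdpa_device pointer
 * with container_of().
 */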
static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
        return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

        return adapter->vf;
}

static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        struct pci_dev *pdev = adapter->pdev;
        u32 type = vf->dev_type;
        u64 features;

        if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
                features = ifcvf_get_features(vf);
        else {
                features = 0;
                IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
        }

        return features;
}

static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        int ret;

        ret = ifcvf_verify_min_features(vf, features);
        if (ret)
                return ret;

        vf->req_features = features;

        return 0;
}

static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->req_features;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_get_status(vf);
}

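/*
 * On the first transition to DRIVER_OK, request the MSI-X interrupts
 * and start the hardware datapath; if interrupt setup fails, latch
 * VIRTIO_CONFIG_S_FAILED instead of applying the requested status.
 */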
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;
        u8 status_old;
        int ret;

        vf = vdpa_to_vf(vdpa_dev);
        adapter = vdpa_to_adapter(vdpa_dev);
        status_old = ifcvf_get_status(vf);

        if (status_old == status)
                return;

        if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
                ret = ifcvf_request_irq(vf);
                if (ret) {
                        status = ifcvf_get_status(vf);
                        status |= VIRTIO_CONFIG_S_FAILED;
                        ifcvf_set_status(vf, status);
                        return;
                }

                if (ifcvf_start_datapath(adapter) < 0)
                        IFCVF_ERR(adapter->pdev,
                                  "Failed to set ifcvf vdpa status %u\n",
                                  status);
        }

        ifcvf_set_status(vf, status);
}

static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;
        u8 status_old;

        vf = vdpa_to_vf(vdpa_dev);
        adapter = vdpa_to_adapter(vdpa_dev);
        status_old = ifcvf_get_status(vf);

        if (status_old == 0)
                return 0;

        if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
                ifcvf_stop_datapath(adapter);
                ifcvf_free_irq(vf);
        }

        ifcvf_reset_vring(adapter);

        return 0;
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_MAX;
}

static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                   struct vdpa_vq_state *state)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        state->split.avail_index = ifcvf_get_vq_state(vf, qid);
        return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                   const struct vdpa_vq_state *state)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
                                 struct vdpa_callback *cb)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
                                    u16 qid, bool ready)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].ready = ready;
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->vring[qid].ready;
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
                                  u32 num)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].size = num;
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
                                     u64 desc_area, u64 driver_area,
                                     u64 device_area)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].desc = desc_area;
        vf->vring[qid].avail = driver_area;
        vf->vring[qid].used = device_area;

        return 0;
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vp_ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->dev_type;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
        struct pci_dev *pdev = adapter->pdev;

        return pdev->subsystem_vendor;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_ALIGNMENT;
}

static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->config_size;
}

static u32 ifcvf_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
        return 0;
}

static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset,
                                  void *buf, unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        ifcvf_read_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset, const void *buf,
                                  unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        ifcvf_write_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
                                     struct vdpa_callback *cb)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->config_cb.callback = cb->callback;
        vf->config_cb.private = cb->private;
}

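/*
 * A per-vq irq is only exposed to the caller (e.g. for interrupt
 * bypass) when every vq owns a dedicated vector; with a shared irq,
 * return -EINVAL so the upper layer falls back to the callback path.
 */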
static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
                                 u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        if (vf->vqs_reused_irq < 0)
                return vf->vring[qid].irq;
        else
                return -EINVAL;
}

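/*
 * Report the vq's doorbell (notify) region so it can be mapped
 * directly by the upper layer; a notify_off_multiplier of 0 means all
 * queues share one notify address, so expose a whole page in that
 * case.
 */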
static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
                                                               u16 idx)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        struct vdpa_notification_area area;

        area.addr = vf->vring[idx].notify_pa;
        if (!vf->notify_off_multiplier)
                area.size = PAGE_SIZE;
        else
                area.size = vf->notify_off_multiplier;

        return area;
}

/*
 * IFCVF currently doesn't have an on-chip IOMMU, so
 * set_map()/dma_map()/dma_unmap() are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
        .get_device_features = ifcvf_vdpa_get_device_features,
        .set_driver_features = ifcvf_vdpa_set_driver_features,
        .get_driver_features = ifcvf_vdpa_get_driver_features,
        .get_status = ifcvf_vdpa_get_status,
        .set_status = ifcvf_vdpa_set_status,
        .reset = ifcvf_vdpa_reset,
        .get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
        .get_vq_state = ifcvf_vdpa_get_vq_state,
        .set_vq_state = ifcvf_vdpa_set_vq_state,
        .set_vq_cb = ifcvf_vdpa_set_vq_cb,
        .set_vq_ready = ifcvf_vdpa_set_vq_ready,
        .get_vq_ready = ifcvf_vdpa_get_vq_ready,
        .set_vq_num = ifcvf_vdpa_set_vq_num,
        .set_vq_address = ifcvf_vdpa_set_vq_address,
        .get_vq_irq = ifcvf_vdpa_get_vq_irq,
        .kick_vq = ifcvf_vdpa_kick_vq,
        .get_generation = ifcvf_vdpa_get_generation,
        .get_device_id = ifcvf_vdpa_get_device_id,
        .get_vendor_id = ifcvf_vdpa_get_vendor_id,
        .get_vq_align = ifcvf_vdpa_get_vq_align,
        .get_vq_group = ifcvf_vdpa_get_vq_group,
        .get_config_size = ifcvf_vdpa_get_config_size,
        .get_config = ifcvf_vdpa_get_config,
        .set_config = ifcvf_vdpa_set_config,
        .set_config_cb = ifcvf_vdpa_set_config_cb,
        .get_vq_notification = ifcvf_get_vq_notification,
};

static struct virtio_device_id id_table_net[] = {
        {VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
        {0},
};

static struct virtio_device_id id_table_blk[] = {
        {VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
        {0},
};

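/*
 * Map PCI IDs to a virtio device type. Per the virtio spec, modern
 * devices use PCI device ID 0x1040 + virtio ID (e.g. 0x1041 for
 * VIRTIO_ID_NET), while transitional devices use IDs below 0x1040 and
 * carry the virtio ID in the PCI subsystem device ID.
 */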
static u32 get_dev_type(struct pci_dev *pdev)
{
        u32 dev_type;

        /* This driver drives both modern virtio devices and transitional
         * devices in modern mode.
         * vDPA requires the feature bit VIRTIO_F_ACCESS_PLATFORM, so
         * legacy devices and transitional devices in legacy mode will
         * not work for vDPA; this driver does not drive devices with a
         * legacy interface.
         */

        if (pdev->device < 0x1040)
                dev_type = pdev->subsystem_device;
        else
                dev_type = pdev->device - 0x1040;

        return dev_type;
}

static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
                              const struct vdpa_dev_set_config *config)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
        struct ifcvf_adapter *adapter;
        struct vdpa_device *vdpa_dev;
        struct pci_dev *pdev;
        struct ifcvf_hw *vf;
        u64 device_features;
        int ret;

        ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
        vf = &ifcvf_mgmt_dev->vf;
        pdev = vf->pdev;
        adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
                                    &pdev->dev, &ifc_vdpa_ops, 1, 1, NULL, false);
        if (IS_ERR(adapter)) {
                IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
                return PTR_ERR(adapter);
        }

        ifcvf_mgmt_dev->adapter = adapter;
        adapter->pdev = pdev;
        adapter->vdpa.dma_dev = &pdev->dev;
        adapter->vdpa.mdev = mdev;
        adapter->vf = vf;
        vdpa_dev = &adapter->vdpa;

        device_features = vf->hw_features;
        if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
                if (config->device_features & ~device_features) {
                        IFCVF_ERR(pdev, "The provisioned features 0x%llx are not supported by this device with features 0x%llx\n",
                                  config->device_features, device_features);
                        put_device(&adapter->vdpa.dev);
                        return -EINVAL;
                }
                device_features &= config->device_features;
        }
        vf->dev_features = device_features;

        if (name)
                ret = dev_set_name(&vdpa_dev->dev, "%s", name);
        else
                ret = dev_set_name(&vdpa_dev->dev, "vdpa%u", vdpa_dev->index);

        ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
        if (ret) {
                put_device(&adapter->vdpa.dev);
                IFCVF_ERR(pdev, "Failed to register to vDPA bus");
                return ret;
        }

        return 0;
}

static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

        ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
        _vdpa_unregister_device(dev);
        ifcvf_mgmt_dev->adapter = NULL;
}

static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
        .dev_add = ifcvf_vdpa_dev_add,
        .dev_del = ifcvf_vdpa_dev_del
};

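/*
 * PCI probe: map the device's MMIO BARs (0, 2 and 4), set up 64-bit
 * DMA and bus mastering, then register a vDPA management device; the
 * actual vdpa device is created later via ifcvf_vdpa_dev_add().
 */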
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
        struct device *dev = &pdev->dev;
        struct ifcvf_hw *vf;
        u32 dev_type;
        int ret, i;

        ret = pcim_enable_device(pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to enable device\n");
                return ret;
        }
        ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
                                 IFCVF_DRIVER_NAME);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request MMIO region\n");
                return ret;
        }

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
                IFCVF_ERR(pdev, "No usable DMA configuration\n");
                return ret;
        }

        ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
        if (ret) {
                IFCVF_ERR(pdev,
                          "Failed to add devres for freeing irq vectors\n");
                return ret;
        }

        pci_set_master(pdev);
        ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
        if (!ifcvf_mgmt_dev) {
                IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
                return -ENOMEM;
        }

        vf = &ifcvf_mgmt_dev->vf;
        vf->dev_type = get_dev_type(pdev);
        vf->base = pcim_iomap_table(pdev);
        vf->pdev = pdev;

        ret = ifcvf_init_hw(vf, pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
                goto err;
        }

        for (i = 0; i < vf->nr_vring; i++)
                vf->vring[i].irq = -EINVAL;

        vf->hw_features = ifcvf_get_hw_features(vf);
        vf->config_size = ifcvf_get_config_size(vf);

        dev_type = get_dev_type(pdev);
        switch (dev_type) {
        case VIRTIO_ID_NET:
                ifcvf_mgmt_dev->mdev.id_table = id_table_net;
                break;
        case VIRTIO_ID_BLOCK:
                ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
                break;
        default:
                IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
                ret = -EOPNOTSUPP;
                goto err;
        }

        ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
        ifcvf_mgmt_dev->mdev.device = dev;
        ifcvf_mgmt_dev->mdev.max_supported_vqs = vf->nr_vring;
        ifcvf_mgmt_dev->mdev.supported_features = vf->hw_features;
        ifcvf_mgmt_dev->mdev.config_attr_mask = (1 << VDPA_ATTR_DEV_FEATURES);

        ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
        if (ret) {
                IFCVF_ERR(pdev,
                          "Failed to initialize the management interfaces\n");
                goto err;
        }

        pci_set_drvdata(pdev, ifcvf_mgmt_dev);

        return 0;

err:
        kfree(ifcvf_mgmt_dev);
        return ret;
}

static void ifcvf_remove(struct pci_dev *pdev)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

        ifcvf_mgmt_dev = pci_get_drvdata(pdev);
        vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
        kfree(ifcvf_mgmt_dev);
}

static struct pci_device_id ifcvf_pci_ids[] = {
        /* N3000 network device */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
                         N3000_DEVICE_ID,
                         PCI_VENDOR_ID_INTEL,
                         N3000_SUBSYS_DEVICE_ID) },
        /* C5000X-PL network device */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
                         VIRTIO_TRANS_ID_NET,
                         PCI_VENDOR_ID_INTEL,
                         VIRTIO_ID_NET) },
        /* C5000X-PL block device */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
                         VIRTIO_TRANS_ID_BLOCK,
                         PCI_VENDOR_ID_INTEL,
                         VIRTIO_ID_BLOCK) },

        { 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
        .name     = IFCVF_DRIVER_NAME,
        .id_table = ifcvf_pci_ids,
        .probe    = ifcvf_probe,
        .remove   = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");