// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define DRIVER_AUTHOR "Intel Corporation"
#define IFCVF_DRIVER_NAME "ifcvf"

static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
        struct ifcvf_hw *vf = arg;

        if (vf->config_cb.callback)
                return vf->config_cb.callback(vf->config_cb.private);

        return IRQ_HANDLED;
}

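/*
 * Per-virtqueue MSI-X handler: forward the interrupt to the callback
 * the vDPA bus driver registered for this vring.
 */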
static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
{
        struct vring_info *vring = arg;

        if (vring->cb.callback)
                return vring->cb.callback(vring->cb.private);

        return IRQ_HANDLED;
}

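/* devres action callback: release the MSI-X vectors on teardown */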
static void ifcvf_free_irq_vectors(void *data)
{
        pci_free_irq_vectors(data);
}

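/*
 * Free the first @queues vq interrupts and the config interrupt,
 * then release the underlying MSI-X vectors.
 */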
static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int i;

        for (i = 0; i < queues; i++) {
                devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
                vf->vring[i].irq = -EINVAL;
        }

        devm_free_irq(&pdev->dev, vf->config_irq, vf);
        ifcvf_free_irq_vectors(pdev);
}

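/*
 * MSI-X layout: vector 0 carries the config-change interrupt, vector
 * i + IFCVF_MSI_QUEUE_OFF serves virtqueue i. On a partial failure,
 * ifcvf_free_irq() is called with the number of vq irqs requested so
 * far, so only those (plus the config irq) are released.
 */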
static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int vector, i, ret, irq;
        u16 max_intr;

        /* all queues and config interrupt */
        max_intr = vf->nr_vring + 1;

        ret = pci_alloc_irq_vectors(pdev, max_intr,
                                    max_intr, PCI_IRQ_MSIX);
        if (ret < 0) {
                IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
                return ret;
        }

        snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config",
                 pci_name(pdev));
        vector = 0;
        vf->config_irq = pci_irq_vector(pdev, vector);
        ret = devm_request_irq(&pdev->dev, vf->config_irq,
                               ifcvf_config_changed, 0,
                               vf->config_msix_name, vf);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request config irq\n");
                return ret;
        }

        for (i = 0; i < vf->nr_vring; i++) {
                snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d",
                         pci_name(pdev), i);
                vector = i + IFCVF_MSI_QUEUE_OFF;
                irq = pci_irq_vector(pdev, vector);
                ret = devm_request_irq(&pdev->dev, irq,
                                       ifcvf_intr_handler, 0,
                                       vf->vring[i].msix_name,
                                       &vf->vring[i]);
                if (ret) {
                        IFCVF_ERR(pdev,
                                  "Failed to request irq for vq %d\n", i);
                        ifcvf_free_irq(adapter, i);

                        return ret;
                }

                vf->vring[i].irq = irq;
        }

        return 0;
}

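/*
 * Start the hardware datapath via ifcvf_start_hw(); on failure, set
 * VIRTIO_CONFIG_S_FAILED in the device status so the driver side can
 * see the device is unusable.
 */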
static int ifcvf_start_datapath(void *private)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
        u8 status;
        int ret;

        ret = ifcvf_start_hw(vf);
        if (ret < 0) {
                status = ifcvf_get_status(vf);
                status |= VIRTIO_CONFIG_S_FAILED;
                ifcvf_set_status(vf, status);
        }

        return ret;
}

static int ifcvf_stop_datapath(void *private)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
        int i;

        for (i = 0; i < vf->nr_vring; i++)
                vf->vring[i].cb.callback = NULL;

        ifcvf_stop_hw(vf);

        return 0;
}

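/*
 * Clear all software vring state (addresses, avail index, callbacks)
 * and reset the VF itself.
 */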
static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
        int i;

        for (i = 0; i < vf->nr_vring; i++) {
                vf->vring[i].last_avail_idx = 0;
                vf->vring[i].desc = 0;
                vf->vring[i].avail = 0;
                vf->vring[i].used = 0;
                vf->vring[i].ready = 0;
                vf->vring[i].cb.callback = NULL;
                vf->vring[i].cb.private = NULL;
        }

        ifcvf_reset(vf);
}

static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
        return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

        return &adapter->vf;
}

static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        struct pci_dev *pdev = adapter->pdev;
        u32 type = vf->dev_type;
        u64 features;

        if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK) {
                features = ifcvf_get_features(vf);
        } else {
                features = 0;
                IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
        }

        return features;
}

static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        int ret;

        ret = ifcvf_verify_min_features(vf, features);
        if (ret)
                return ret;

        vf->req_features = features;

        return 0;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_get_status(vf);
}

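/*
 * Setting DRIVER_OK when it was not set before means the vDPA core
 * has finished configuring the device: request the MSI-X interrupts
 * and start the datapath before committing the new status to hardware.
 */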
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;
        u8 status_old;
        int ret;

        vf = vdpa_to_vf(vdpa_dev);
        adapter = vdpa_to_adapter(vdpa_dev);
        status_old = ifcvf_get_status(vf);

        if (status_old == status)
                return;

        if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
                ret = ifcvf_request_irq(adapter);
                if (ret) {
                        status = ifcvf_get_status(vf);
                        status |= VIRTIO_CONFIG_S_FAILED;
                        ifcvf_set_status(vf, status);
                        return;
                }

                if (ifcvf_start_datapath(adapter) < 0)
                        IFCVF_ERR(adapter->pdev,
                                  "Failed to set ifcvf vdpa status %u\n",
                                  status);
        }

        ifcvf_set_status(vf, status);
}

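/*
 * vDPA reset: if the device was live (DRIVER_OK), stop the datapath
 * and free the interrupts first, then reset the vring state and the VF.
 */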
static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;
        u8 status_old;

        vf = vdpa_to_vf(vdpa_dev);
        adapter = vdpa_to_adapter(vdpa_dev);
        status_old = ifcvf_get_status(vf);

        if (status_old == 0)
                return 0;

        if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
                ifcvf_stop_datapath(adapter);
                ifcvf_free_irq(adapter, vf->nr_vring);
        }

        ifcvf_reset_vring(adapter);

        return 0;
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_MAX;
}

static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                   struct vdpa_vq_state *state)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        state->split.avail_index = ifcvf_get_vq_state(vf, qid);
        return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                   const struct vdpa_vq_state *state)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
                                 struct vdpa_callback *cb)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
                                    u16 qid, bool ready)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].ready = ready;
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->vring[qid].ready;
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
                                  u32 num)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].size = num;
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
                                     u64 desc_area, u64 driver_area,
                                     u64 device_area)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].desc = desc_area;
        vf->vring[qid].avail = driver_area;
        vf->vring[qid].used = device_area;

        return 0;
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->dev_type;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
        struct pci_dev *pdev = adapter->pdev;

        return pdev->subsystem_vendor;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_ALIGNMENT;
}

static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        struct pci_dev *pdev = adapter->pdev;
        size_t size;

        switch (vf->dev_type) {
        case VIRTIO_ID_NET:
                size = sizeof(struct virtio_net_config);
                break;
        case VIRTIO_ID_BLOCK:
                size = sizeof(struct virtio_blk_config);
                break;
        default:
                size = 0;
                IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
                break;
        }

        return size;
}

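/*
 * Note: both config accessors bound-check against virtio_net_config;
 * a block VF has a larger config space, so accesses beyond the net
 * config size would trigger the WARN_ON() here.
 */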
static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset,
                                  void *buf, unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        WARN_ON(offset + len > sizeof(struct virtio_net_config));
        ifcvf_read_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset, const void *buf,
                                  unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        WARN_ON(offset + len > sizeof(struct virtio_net_config));
        ifcvf_write_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
                                     struct vdpa_callback *cb)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->config_cb.callback = cb->callback;
        vf->config_cb.private = cb->private;
}

static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
                                 u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->vring[qid].irq;
}

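/*
 * Expose the vq notify (doorbell) area so a vDPA bus driver can map it
 * directly into userspace; when the device does not advertise a
 * notify_off_multiplier, fall back to a PAGE_SIZE-sized area.
 */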
static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
                                                               u16 idx)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        struct vdpa_notification_area area;

        area.addr = vf->vring[idx].notify_pa;
        if (!vf->notify_off_multiplier)
                area.size = PAGE_SIZE;
        else
                area.size = vf->notify_off_multiplier;

        return area;
}

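/*
 * Config ops the vDPA core invokes on behalf of whichever bus driver
 * (virtio_vdpa or vhost_vdpa) the device is bound to.
 */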
/*
 * IFCVF currently doesn't have an on-chip IOMMU, so the
 * set_map()/dma_map()/dma_unmap() ops are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
        .get_features = ifcvf_vdpa_get_features,
        .set_features = ifcvf_vdpa_set_features,
        .get_status = ifcvf_vdpa_get_status,
        .set_status = ifcvf_vdpa_set_status,
        .reset = ifcvf_vdpa_reset,
        .get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
        .get_vq_state = ifcvf_vdpa_get_vq_state,
        .set_vq_state = ifcvf_vdpa_set_vq_state,
        .set_vq_cb = ifcvf_vdpa_set_vq_cb,
        .set_vq_ready = ifcvf_vdpa_set_vq_ready,
        .get_vq_ready = ifcvf_vdpa_get_vq_ready,
        .set_vq_num = ifcvf_vdpa_set_vq_num,
        .set_vq_address = ifcvf_vdpa_set_vq_address,
        .get_vq_irq = ifcvf_vdpa_get_vq_irq,
        .kick_vq = ifcvf_vdpa_kick_vq,
        .get_generation = ifcvf_vdpa_get_generation,
        .get_device_id = ifcvf_vdpa_get_device_id,
        .get_vendor_id = ifcvf_vdpa_get_vendor_id,
        .get_vq_align = ifcvf_vdpa_get_vq_align,
        .get_config_size = ifcvf_vdpa_get_config_size,
        .get_config = ifcvf_vdpa_get_config,
        .set_config = ifcvf_vdpa_set_config,
        .set_config_cb = ifcvf_vdpa_set_config_cb,
        .get_vq_notification = ifcvf_get_vq_notification,
};

static struct virtio_device_id id_table_net[] = {
        {VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
        {0},
};

static struct virtio_device_id id_table_blk[] = {
        {VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
        {0},
};

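/*
 * Per the virtio 1.x PCI spec, modern device IDs are 0x1040 plus the
 * virtio device ID, while transitional devices (IDs below 0x1040)
 * carry the virtio device ID in the PCI subsystem device ID.
 */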
static u32 get_dev_type(struct pci_dev *pdev)
{
        u32 dev_type;

        /* This driver drives both modern virtio devices and transitional
         * devices in modern mode.
         * vDPA requires the feature bit VIRTIO_F_ACCESS_PLATFORM, so
         * legacy devices and transitional devices in legacy mode will
         * not work for vDPA; this driver does not drive devices with a
         * legacy interface.
         */

        if (pdev->device < 0x1040)
                dev_type = pdev->subsystem_device;
        else
                dev_type = pdev->device - 0x1040;

        return dev_type;
}

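/*
 * Management-device dev_add(): instantiate the single vDPA device this
 * VF can back. With the iproute2 vdpa tool, this path is reached by
 * something like the following (the bus address is only an example):
 *
 *	vdpa dev add name vdpa0 mgmtdev pci/0000:01:00.1
 */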
static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
                              const struct vdpa_dev_set_config *config)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
        struct ifcvf_adapter *adapter;
        struct pci_dev *pdev;
        struct ifcvf_hw *vf;
        struct device *dev;
        int ret, i;

        ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
        if (ifcvf_mgmt_dev->adapter)
                return -EOPNOTSUPP;

        pdev = ifcvf_mgmt_dev->pdev;
        dev = &pdev->dev;
        adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
                                    dev, &ifc_vdpa_ops, name, false);
        if (IS_ERR(adapter)) {
                IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
                return PTR_ERR(adapter);
        }

        ifcvf_mgmt_dev->adapter = adapter;
        pci_set_drvdata(pdev, ifcvf_mgmt_dev);

        vf = &adapter->vf;
        vf->dev_type = get_dev_type(pdev);
        vf->base = pcim_iomap_table(pdev);

        adapter->pdev = pdev;
        adapter->vdpa.dma_dev = &pdev->dev;

        ret = ifcvf_init_hw(vf, pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
                goto err;
        }

        for (i = 0; i < vf->nr_vring; i++)
                vf->vring[i].irq = -EINVAL;

        vf->hw_features = ifcvf_get_hw_features(vf);

        adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
        ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to register to vDPA bus");
                goto err;
        }

        return 0;

err:
        put_device(&adapter->vdpa.dev);
        return ret;
}

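/*
 * Management-device dev_del(): unregister the vDPA device and mark the
 * management device free for a subsequent dev_add().
 */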
static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

        ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
        _vdpa_unregister_device(dev);
        ifcvf_mgmt_dev->adapter = NULL;
}

static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
        .dev_add = ifcvf_vdpa_dev_add,
        .dev_del = ifcvf_vdpa_dev_del
};

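/*
 * PCI probe: map the BARs, set up DMA and MSI-X cleanup, and register
 * a vDPA management device; the vDPA device itself is only created
 * later through dev_add(). Managed (pcim_*/devm_*) APIs tie the
 * corresponding cleanup to driver unbind.
 */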
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
        struct device *dev = &pdev->dev;
        u32 dev_type;
        int ret;

        ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
        if (!ifcvf_mgmt_dev) {
                IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
                return -ENOMEM;
        }

        dev_type = get_dev_type(pdev);
        switch (dev_type) {
        case VIRTIO_ID_NET:
                ifcvf_mgmt_dev->mdev.id_table = id_table_net;
                break;
        case VIRTIO_ID_BLOCK:
                ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
                break;
        default:
                IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
                ret = -EOPNOTSUPP;
                goto err;
        }

        ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
        ifcvf_mgmt_dev->mdev.device = dev;
        ifcvf_mgmt_dev->pdev = pdev;

        ret = pcim_enable_device(pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to enable device\n");
                goto err;
        }

        ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
                                 IFCVF_DRIVER_NAME);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request MMIO region\n");
                goto err;
        }

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
                IFCVF_ERR(pdev, "No usable DMA configuration\n");
                goto err;
        }

        ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
        if (ret) {
                IFCVF_ERR(pdev,
                          "Failed to add devres for freeing irq vectors\n");
                goto err;
        }

        pci_set_master(pdev);

        ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
        if (ret) {
                IFCVF_ERR(pdev,
                          "Failed to initialize the management interfaces\n");
                goto err;
        }

        return 0;

err:
        kfree(ifcvf_mgmt_dev);
        return ret;
}

static void ifcvf_remove(struct pci_dev *pdev)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

        ifcvf_mgmt_dev = pci_get_drvdata(pdev);
        vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
        kfree(ifcvf_mgmt_dev);
}

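/*
 * The VFs show up with the virtio PCI vendor ID (Red Hat/Qumranet);
 * the Intel subsystem IDs identify the actual platform.
 */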
static struct pci_device_id ifcvf_pci_ids[] = {
        /* N3000 network device */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
                         N3000_DEVICE_ID,
                         PCI_VENDOR_ID_INTEL,
                         N3000_SUBSYS_DEVICE_ID) },
        /* C5000X-PL network device */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
                         VIRTIO_TRANS_ID_NET,
                         PCI_VENDOR_ID_INTEL,
                         VIRTIO_ID_NET) },
        /* C5000X-PL block device */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
                         VIRTIO_TRANS_ID_BLOCK,
                         PCI_VENDOR_ID_INTEL,
                         VIRTIO_ID_BLOCK) },

        { 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
        .name = IFCVF_DRIVER_NAME,
        .id_table = ifcvf_pci_ids,
        .probe = ifcvf_probe,
        .remove = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");