// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME			"pci-endpoint-test"

#define IRQ_TYPE_UNDEFINED		-1
#define IRQ_TYPE_LEGACY			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

#define PCI_ENDPOINT_TEST_MAGIC		0x0

#define PCI_ENDPOINT_TEST_COMMAND	0x4
#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)

#define PCI_ENDPOINT_TEST_STATUS	0x8
#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18

#define PCI_ENDPOINT_TEST_SIZE		0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM	0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE	0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER	0x28

#define PCI_ENDPOINT_TEST_FLAGS		0x2c
#define FLAG_USE_DMA			BIT(0)
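
/*
 * The offsets above describe the test register block that the endpoint
 * exposes in its test BAR; this host driver only consumes that layout.
 * The producing side is the pci-epf-test endpoint function driver
 * (drivers/pci/endpoint/functions/pci-epf-test.c), which has to stay in
 * sync with these definitions.
 */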

#define PCI_DEVICE_ID_TI_AM654		0xb00c
#define PCI_DEVICE_ID_TI_J7200		0xb00f
#define PCI_DEVICE_ID_TI_AM64		0xb010
#define PCI_DEVICE_ID_LS1088A		0x80c0
#define PCI_DEVICE_ID_IMX8		0x0808

#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
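
/*
 * Illustrative load-time usage (values are examples, not requirements):
 *
 *	modprobe pci_endpoint_test irq_type=2	# run the tests using MSI-X
 *	modprobe pci_endpoint_test no_msi=1	# request legacy INTx instead
 *
 * Note that per-device match data (see struct pci_endpoint_test_data below)
 * may still override irq_type, and PCITEST_SET_IRQTYPE can change it at
 * runtime.
 */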

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;
	void __iomem	*bar[PCI_STD_NUM_BARS];
	struct completion irq_raised;
	int		last_irq;
	int		num_irqs;
	int		irq_type;
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno	test_reg_bar;
	size_t		alignment;
	const char	*name;
};

struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};

static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}

static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
					      int bar, int offset)
{
	return readl(test->bar[bar] + offset);
}

static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
						int bar, u32 offset, u32 value)
{
	writel(value, test->bar[bar] + offset);
}

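/*
 * Interrupt handshake: for every test the endpoint sets STATUS_IRQ_RAISED
 * in the status register and then raises the requested interrupt.  The
 * handler records which vector actually fired in test->last_irq,
 * acknowledges the bit and completes &test->irq_raised, which the test
 * paths below wait on.
 */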
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
		reg &= ~STATUS_IRQ_RAISED;
	}
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
				 reg);

	return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = IRQ_TYPE_UNDEFINED;
}

static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
						int type)
{
	int irq = -1;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	bool res = true;

	switch (type) {
	case IRQ_TYPE_LEGACY:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
		if (irq < 0)
			dev_err(dev, "Failed to get Legacy interrupt\n");
		break;
	case IRQ_TYPE_MSI:
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI interrupts\n");
		break;
	case IRQ_TYPE_MSIX:
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI-X interrupts\n");
		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
	}

	if (irq < 0) {
		irq = 0;
		res = false;
	}

	test->irq_type = type;
	test->num_irqs = irq;

	return res;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}

static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int err;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       pci_endpoint_test_irqhandler,
				       IRQF_SHARED, test->name, test);
		if (err)
			goto fail;
	}

	return true;

fail:
	switch (irq_type) {
	case IRQ_TYPE_LEGACY:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	return false;
}

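/*
 * Write a fixed pattern across the BAR and read it back.  For the BAR that
 * holds the live test registers only the first dword (the MAGIC register at
 * offset 0) is exercised, presumably so the pattern writes do not clobber
 * the command/status registers the other tests depend on.
 */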
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	int j;
	u32 val;
	int size;
	struct pci_dev *pdev = test->pdev;

	if (!test->bar[barno])
		return false;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);

	for (j = 0; j < size; j += 4) {
		val = pci_endpoint_test_bar_readl(test, barno, j);
		if (val != 0xA0A0A0A0)
			return false;
	}

	return true;
}

static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 IRQ_TYPE_LEGACY);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_LEGACY_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return true;
}

static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				      u16 msi_num, bool msix)
{
	u32 val;
	struct pci_dev *pdev = test->pdev;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix == false ? IRQ_TYPE_MSI :
				 IRQ_TYPE_MSIX);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix == false ? COMMAND_RAISE_MSI_IRQ :
				 COMMAND_RAISE_MSIX_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
		return true;

	return false;
}

static int pci_endpoint_test_validate_xfer_params(struct device *dev,
		struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
	if (!param->size) {
		dev_dbg(dev, "Data size is zero\n");
		return -EINVAL;
	}

	if (param->size > SIZE_MAX - alignment) {
		dev_dbg(dev, "Maximum transfer data size exceeded\n");
		return -EINVAL;
	}

	return 0;
}

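/*
 * PCITEST_COPY: fill a source buffer with random data, hand the endpoint
 * the bus addresses of source and destination plus the transfer size, issue
 * COMMAND_COPY and wait for the completion interrupt.  The test passes if
 * the CRC32 of the destination buffer matches that of the source.
 */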
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}

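/*
 * PCITEST_WRITE is named from the host's point of view: the host "writes" a
 * random buffer to the endpoint by programming it as the source address and
 * issuing COMMAND_READ, i.e. asking the endpoint to read from host memory.
 * The endpoint is expected to compare its own CRC32 of the data with the
 * value stored in the CHECKSUM register and report STATUS_READ_SUCCESS.
 */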
static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err != 0) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}

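/*
 * PCITEST_READ mirrors PCITEST_WRITE: the host buffer is programmed as the
 * destination and COMMAND_WRITE asks the endpoint to write into host memory.
 * On completion the host recomputes the CRC32 locally and compares it with
 * the CHECKSUM value reported by the endpoint.
 */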
static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);
err:
	return ret;
}

static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);
	return true;
}

static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return false;
	}

	if (test->irq_type == req_irq_type)
		return true;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
		goto err;

	if (!pci_endpoint_test_request_irq(test))
		goto err;

	return true;

err:
	pci_endpoint_test_free_irq_vectors(test);
	return false;
}

static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
	struct pci_dev *pdev = test->pdev;

	mutex_lock(&test->mutex);
	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar > BAR_5)
			goto ret;
		if (is_am654_pci_dev(pdev) && bar == BAR_0)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_LEGACY_IRQ:
		ret = pci_endpoint_test_legacy_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = irq_type;
		break;
	case PCITEST_CLEAR_IRQ:
		ret = pci_endpoint_test_clear_irq(test);
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};
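
/*
 * Minimal userspace sketch (illustrative only; the in-tree tools/pci/pcitest.c
 * utility is the reference client).  Each bound device shows up as
 * /dev/pci-endpoint-test.<id> and is driven purely through the ioctls above:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/pcitest.h>
 *
 *	int fd = open("/dev/pci-endpoint-test.0", O_RDWR);
 *	struct pci_endpoint_test_xfer_param param = {
 *		.size = 1024,		// example size
 *		.flags = 0,		// or PCITEST_FLAGS_USE_DMA
 *	};
 *	int ok = ioctl(fd, PCITEST_WRITE, &param);	// 1 = pass, 0 = fail
 *
 * The device name and transfer size here are examples, not requirements.
 */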

static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	if (no_msi)
		irq_type = IRQ_TYPE_LEGACY;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(dev, "Cannot set DMA mask\n");
		return -EINVAL;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
		err = -EINVAL;
		goto err_disable_irq;
	}

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}

	if (!pci_endpoint_test_request_irq(test)) {
		err = -EINVAL;
		goto err_kfree_test_name;
	}

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_simple_remove(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}

static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_simple_remove(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");