// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/spi/spi-loopback-test.c
 *
 * (c) Martin Sperl <kernel@martin.sperl.org>
 *
 * Loopback test driver to test several typical spi_message conditions
 * that a spi_master driver may encounter; this can also get used for
 * regression testing.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/printk.h>
#include <linux/vmalloc.h>
#include <linux/spi/spi.h>

#include "spi-test.h"

/* flag to only simulate transfers */
static int simulate_only;
module_param(simulate_only, int, 0);
MODULE_PARM_DESC(simulate_only, "if not 0 do not execute the spi message");

/* dump spi messages */
static int dump_messages;
module_param(dump_messages, int, 0);
MODULE_PARM_DESC(dump_messages,
		 "=1 dump the basic spi_message structure, " \
		 "=2 dump the spi_message structure including data, " \
		 "=3 dump the spi_message structure before and after execution");
/* the device is jumpered for loopback - enabling some rx_buf tests */
static int loopback;
module_param(loopback, int, 0);
MODULE_PARM_DESC(loopback,
		 "if set enable loopback mode, where the rx_buf " \
		 "is checked to match tx_buf after the spi_message " \
		 "is executed");

static int loop_req;
module_param(loop_req, int, 0);
MODULE_PARM_DESC(loop_req,
		 "if set the controller will be asked to enable test loop mode. " \
		 "If the controller supports it, MISO and MOSI will be connected");

static int no_cs;
module_param(no_cs, int, 0);
MODULE_PARM_DESC(no_cs,
		 "if set Chip Select (CS) will not be used");

/* run only a specific test */
static int run_only_test = -1;
module_param(run_only_test, int, 0);
MODULE_PARM_DESC(run_only_test,
		 "only run the test with this number (0-based !)");

/* use vmalloc'ed buffers */
static int use_vmalloc;
module_param(use_vmalloc, int, 0644);
MODULE_PARM_DESC(use_vmalloc,
		 "use vmalloc'ed buffers instead of kmalloc'ed");

/* check rx ranges */
static int check_ranges = 1;
module_param(check_ranges, int, 0644);
MODULE_PARM_DESC(check_ranges,
		 "check that the rx_buffer patterns are valid");

static unsigned int delay_ms = 100;
module_param(delay_ms, uint, 0644);
MODULE_PARM_DESC(delay_ms,
		 "delay between tests, in milliseconds (default: 100)");

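/*
 * Note: the TX(offset) and RX(offset) values used as tx_buf/rx_buf in the
 * tests below are marker pointers rather than real addresses; they are
 * mapped into the actually allocated tx/rx buffers by spi_test_translate()
 * before each message is executed.
 */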
/* the actual tests to execute */
static struct spi_test spi_tests[] = {
	{
		.description = "tx/rx-transfer - start of page",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_rx_align = ITERATE_ALIGN,
		.transfer_count = 1,
		.transfers = {
			{
				.tx_buf = TX(0),
				.rx_buf = RX(0),
			},
		},
	},
	{
		.description = "tx/rx-transfer - crossing PAGE_SIZE",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_rx_align = ITERATE_ALIGN,
		.transfer_count = 1,
		.transfers = {
			{
				.tx_buf = TX(PAGE_SIZE - 4),
				.rx_buf = RX(PAGE_SIZE - 4),
			},
		},
	},
	{
		.description = "tx-transfer - only",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.transfer_count = 1,
		.transfers = {
			{
				.tx_buf = TX(0),
			},
		},
	},
	{
		.description = "rx-transfer - only",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_rx_align = ITERATE_ALIGN,
		.transfer_count = 1,
		.transfers = {
			{
				.rx_buf = RX(0),
			},
		},
	},
	{
		.description = "two tx-transfers - alter both",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_transfer_mask = BIT(0) | BIT(1),
		.transfer_count = 2,
		.transfers = {
			{
				.tx_buf = TX(0),
			},
			{
				/* this is why we can't use ITERATE_MAX_LEN */
				.tx_buf = TX(SPI_TEST_MAX_SIZE_HALF),
			},
		},
	},
	{
		.description = "two tx-transfers - alter first",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_transfer_mask = BIT(0),
		.transfer_count = 2,
		.transfers = {
			{
				.tx_buf = TX(64),
			},
			{
				.len = 1,
				.tx_buf = TX(0),
			},
		},
	},
	{
		.description = "two tx-transfers - alter second",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_transfer_mask = BIT(1),
		.transfer_count = 2,
		.transfers = {
			{
				.len = 16,
				.tx_buf = TX(0),
			},
			{
				.tx_buf = TX(64),
			},
		},
	},
	{
		.description = "two transfers tx then rx - alter both",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_transfer_mask = BIT(0) | BIT(1),
		.transfer_count = 2,
		.transfers = {
			{
				.tx_buf = TX(0),
			},
			{
				.rx_buf = RX(0),
			},
		},
	},
	{
		.description = "two transfers tx then rx - alter tx",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_transfer_mask = BIT(0),
		.transfer_count = 2,
		.transfers = {
			{
				.tx_buf = TX(0),
			},
			{
				.len = 1,
				.rx_buf = RX(0),
			},
		},
	},
	{
		.description = "two transfers tx then rx - alter rx",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_transfer_mask = BIT(1),
		.transfer_count = 2,
		.transfers = {
			{
				.len = 1,
				.tx_buf = TX(0),
			},
			{
				.rx_buf = RX(0),
			},
		},
	},
	{
		.description = "two tx+rx transfers - alter both",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_transfer_mask = BIT(0) | BIT(1),
		.transfer_count = 2,
		.transfers = {
			{
				.tx_buf = TX(0),
				.rx_buf = RX(0),
			},
			{
				/* making sure we align without overwriting -
				 * this is the reason we can not use ITERATE_MAX_LEN
				 */
				.tx_buf = TX(SPI_TEST_MAX_SIZE_HALF),
				.rx_buf = RX(SPI_TEST_MAX_SIZE_HALF),
			},
		},
	},
	{
		.description = "two tx+rx transfers - alter first",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_transfer_mask = BIT(0),
		.transfer_count = 2,
		.transfers = {
			{
				/* making sure we align without overwrite */
				.tx_buf = TX(1024),
				.rx_buf = RX(1024),
			},
			{
				.len = 1,
				/* making sure we align without overwrite */
				.tx_buf = TX(0),
				.rx_buf = RX(0),
			},
		},
	},
	{
		.description = "two tx+rx transfers - alter second",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_tx_align = ITERATE_ALIGN,
		.iterate_transfer_mask = BIT(1),
		.transfer_count = 2,
		.transfers = {
			{
				.len = 1,
				.tx_buf = TX(0),
				.rx_buf = RX(0),
			},
			{
				/* making sure we align without overwrite */
				.tx_buf = TX(1024),
				.rx_buf = RX(1024),
			},
		},
	},
	{
		.description = "two tx+rx transfers - delay after transfer",
		.fill_option = FILL_COUNT_8,
		.iterate_len = { ITERATE_MAX_LEN },
		.iterate_transfer_mask = BIT(0) | BIT(1),
		.transfer_count = 2,
		.transfers = {
			{
				.tx_buf = TX(0),
				.rx_buf = RX(0),
				.delay = {
					.value = 1000,
					.unit = SPI_DELAY_UNIT_USECS,
				},
			},
			{
				.tx_buf = TX(0),
				.rx_buf = RX(0),
				.delay = {
					.value = 1000,
					.unit = SPI_DELAY_UNIT_USECS,
				},
			},
		},
	},
	{
		.description = "three tx+rx transfers with overlapping cache lines",
		.fill_option = FILL_COUNT_8,
		/*
		 * This should be large enough for the controller driver to
		 * choose to transfer it with DMA.
		 */
		.iterate_len = { 512, -1 },
		.iterate_transfer_mask = BIT(1),
		.transfer_count = 3,
		.transfers = {
			{
				.len = 1,
				.tx_buf = TX(0),
				.rx_buf = RX(0),
			},
			{
				.tx_buf = TX(1),
				.rx_buf = RX(1),
			},
			{
				.len = 1,
				.tx_buf = TX(513),
				.rx_buf = RX(513),
			},
		},
	},

	{ /* end of tests sequence */ }
};

static int spi_loopback_test_probe(struct spi_device *spi)
{
	int ret;

	if (loop_req || no_cs) {
		spi->mode |= loop_req ? SPI_LOOP : 0;
		spi->mode |= no_cs ? SPI_NO_CS : 0;
		ret = spi_setup(spi);
		if (ret) {
			dev_err(&spi->dev, "SPI setup with SPI_LOOP or SPI_NO_CS failed (%d)\n",
				ret);
			return ret;
		}
	}

	dev_info(&spi->dev, "Executing spi-loopback-tests\n");

	ret = spi_test_run_tests(spi, spi_tests);

	dev_info(&spi->dev, "Finished spi-loopback-tests with return: %i\n",
		 ret);

	return ret;
}

/* non-const match table to permit changes via a module parameter */
static struct of_device_id spi_loopback_test_of_match[] = {
	{ .compatible = "linux,spi-loopback-test", },
	{ }
};

/* allow overriding the compatible string via a module parameter */
module_param_string(compatible, spi_loopback_test_of_match[0].compatible,
		    sizeof(spi_loopback_test_of_match[0].compatible),
		    0000);

MODULE_DEVICE_TABLE(of, spi_loopback_test_of_match);

static struct spi_driver spi_loopback_test_driver = {
	.driver = {
		.name = "spi-loopback-test",
		.owner = THIS_MODULE,
		.of_match_table = spi_loopback_test_of_match,
	},
	.probe = spi_loopback_test_probe,
};

module_spi_driver(spi_loopback_test_driver);

MODULE_AUTHOR("Martin Sperl <kernel@martin.sperl.org>");
MODULE_DESCRIPTION("test spi_driver to check core functionality");
MODULE_LICENSE("GPL");

/*-------------------------------------------------------------------------*/

/* spi_test implementation */

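/*
 * RANGE_CHECK(ptr, plen, start, slen) is true when the buffer
 * [ptr, ptr + plen) lies completely within [start, start + slen).
 */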
#define RANGE_CHECK(ptr, plen, start, slen) \
	((ptr >= start) && (ptr + plen <= start + slen))

/* we allocate one page more, to allow for offsets */
#define SPI_TEST_MAX_SIZE_PLUS (SPI_TEST_MAX_SIZE + PAGE_SIZE)

static void spi_test_print_hex_dump(char *pre, const void *ptr, size_t len)
{
	/* limit the hex_dump */
	if (len < 1024) {
		print_hex_dump(KERN_INFO, pre,
			       DUMP_PREFIX_OFFSET, 16, 1,
			       ptr, len, 0);
		return;
	}
	/* print head */
	print_hex_dump(KERN_INFO, pre,
		       DUMP_PREFIX_OFFSET, 16, 1,
		       ptr, 512, 0);
	/* print tail */
	pr_info("%s truncated - continuing at offset %04zx\n",
		pre, len - 512);
	print_hex_dump(KERN_INFO, pre,
		       DUMP_PREFIX_OFFSET, 16, 1,
		       ptr + (len - 512), 512, 0);
}

static void spi_test_dump_message(struct spi_device *spi,
				  struct spi_message *msg,
				  bool dump_data)
{
	struct spi_transfer *xfer;
	int i;
	u8 b;

	dev_info(&spi->dev, " spi_msg@%pK\n", msg);
	if (msg->status)
		dev_info(&spi->dev, " status: %i\n",
			 msg->status);
	dev_info(&spi->dev, " frame_length: %i\n",
		 msg->frame_length);
	dev_info(&spi->dev, " actual_length: %i\n",
		 msg->actual_length);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		dev_info(&spi->dev, " spi_transfer@%pK\n", xfer);
		dev_info(&spi->dev, " len: %i\n", xfer->len);
		dev_info(&spi->dev, " tx_buf: %pK\n", xfer->tx_buf);
		if (dump_data && xfer->tx_buf)
			spi_test_print_hex_dump(" TX: ",
						xfer->tx_buf,
						xfer->len);

		dev_info(&spi->dev, " rx_buf: %pK\n", xfer->rx_buf);
		if (dump_data && xfer->rx_buf)
			spi_test_print_hex_dump(" RX: ",
						xfer->rx_buf,
						xfer->len);
		/* check for unwritten test pattern on rx_buf */
		if (xfer->rx_buf) {
			for (i = 0 ; i < xfer->len ; i++) {
				b = ((u8 *)xfer->rx_buf)[xfer->len - 1 - i];
				if (b != SPI_TEST_PATTERN_UNWRITTEN)
					break;
			}
			if (i)
				dev_info(&spi->dev,
					 " rx_buf filled with %02x starts at offset: %i\n",
					 SPI_TEST_PATTERN_UNWRITTEN,
					 xfer->len - i);
		}
	}
}

struct rx_ranges {
	struct list_head list;
	u8 *start;
	u8 *end;
};

static int rx_ranges_cmp(void *priv, const struct list_head *a,
			 const struct list_head *b)
{
	struct rx_ranges *rx_a = list_entry(a, struct rx_ranges, list);
	struct rx_ranges *rx_b = list_entry(b, struct rx_ranges, list);

	if (rx_a->start > rx_b->start)
		return 1;
	if (rx_a->start < rx_b->start)
		return -1;
	return 0;
}

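/*
 * Check that only the rx ranges belonging to the message's transfers have
 * been written to: every byte of the rx buffer outside those ranges must
 * still hold SPI_TEST_PATTERN_DO_NOT_WRITE.
 */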
static int spi_check_rx_ranges(struct spi_device *spi,
			       struct spi_message *msg,
			       void *rx)
{
	struct spi_transfer *xfer;
	struct rx_ranges ranges[SPI_TEST_MAX_TRANSFERS], *r;
	int i = 0;
	LIST_HEAD(ranges_list);
	u8 *addr;
	int ret = 0;

	/* loop over all transfers to fill in the rx_ranges */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* if there is no rx, then no check is needed */
		if (!xfer->rx_buf)
			continue;
		/* fill in the rx_range */
		if (RANGE_CHECK(xfer->rx_buf, xfer->len,
				rx, SPI_TEST_MAX_SIZE_PLUS)) {
			ranges[i].start = xfer->rx_buf;
			ranges[i].end = xfer->rx_buf + xfer->len;
			list_add(&ranges[i].list, &ranges_list);
			i++;
		}
	}

	/* if no ranges, then we can return and avoid the checks...*/
	if (!i)
		return 0;

	/* sort the list */
	list_sort(NULL, &ranges_list, rx_ranges_cmp);

	/* and iterate over all the rx addresses */
	for (addr = rx; addr < (u8 *)rx + SPI_TEST_MAX_SIZE_PLUS; addr++) {
		/* if this is the DO_NOT_WRITE pattern,
		 * then continue with the loop...
		 */
		if (*addr == SPI_TEST_PATTERN_DO_NOT_WRITE)
			continue;

		/* check if we are inside a range */
		list_for_each_entry(r, &ranges_list, list) {
			/* if so then set to end... */
			if ((addr >= r->start) && (addr < r->end))
				addr = r->end;
		}
		/* second test after a (hopefully successful) translation */
		if (*addr == SPI_TEST_PATTERN_DO_NOT_WRITE)
			continue;

		/* if still not found then something has modified too much */
		/* we could list the "closest" transfer here... */
		dev_err(&spi->dev,
			"loopback strangeness - rx changed outside of allowed range at: %pK\n",
			addr);
		/* do not return, only set ret,
		 * so that we list all addresses
		 */
		ret = -ERANGE;
	}

	return ret;
}

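/*
 * Check that the measured elapsed time of the message is at least as long
 * as the minimum estimated from the transfer lengths, the configured clock
 * speeds and the per-transfer delays.
 */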
static int spi_test_check_elapsed_time(struct spi_device *spi,
				       struct spi_test *test)
{
	int i;
	unsigned long long estimated_time = 0;
	unsigned long long delay_usecs = 0;

	for (i = 0; i < test->transfer_count; i++) {
		struct spi_transfer *xfer = test->transfers + i;
		unsigned long long nbits = (unsigned long long)BITS_PER_BYTE *
					   xfer->len;

		delay_usecs += xfer->delay.value;
		if (!xfer->speed_hz)
			continue;
		estimated_time += div_u64(nbits * NSEC_PER_SEC, xfer->speed_hz);
	}

	estimated_time += delay_usecs * NSEC_PER_USEC;
	if (test->elapsed_time < estimated_time) {
		dev_err(&spi->dev,
			"elapsed time %lld ns is shorter than minimum estimated time %lld ns\n",
			test->elapsed_time, estimated_time);

		return -EINVAL;
	}

	return 0;
}

static int spi_test_check_loopback_result(struct spi_device *spi,
					   struct spi_message *msg,
					   void *tx, void *rx)
{
	struct spi_transfer *xfer;
	u8 rxb, txb;
	size_t i;
	int ret;

	/* check that the rx_buffer patterns are valid, with or without loopback */
	if (check_ranges) {
		ret = spi_check_rx_ranges(spi, msg, rx);
		if (ret)
			return ret;
	}

	/* if we run without loopback, then return now */
	if (!loopback)
		return 0;

	/* if applicable to transfer check that rx_buf is equal to tx_buf */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* if there is no rx, then no check is needed */
		if (!xfer->len || !xfer->rx_buf)
			continue;
		/* so depending on tx_buf we need to handle things */
		if (xfer->tx_buf) {
			for (i = 0; i < xfer->len; i++) {
				txb = ((u8 *)xfer->tx_buf)[i];
				rxb = ((u8 *)xfer->rx_buf)[i];
				if (txb != rxb)
					goto mismatch_error;
			}
		} else {
			/* first byte received */
			txb = ((u8 *)xfer->rx_buf)[0];
			/* first byte may be 0x00 or 0xff */
			if (!((txb == 0) || (txb == 0xff))) {
				dev_err(&spi->dev,
					"loopback strangeness - we expect 0x00 or 0xff, but not 0x%02x\n",
					txb);
				return -EINVAL;
			}
			/* check that all bytes are identical */
			for (i = 1; i < xfer->len; i++) {
				rxb = ((u8 *)xfer->rx_buf)[i];
				if (rxb != txb)
					goto mismatch_error;
			}
		}
	}

	return 0;

mismatch_error:
	dev_err(&spi->dev,
		"loopback strangeness - transfer mismatch on byte %04zx - expected 0x%02x, but got 0x%02x\n",
		i, txb, rxb);

	return -EINVAL;
}

static int spi_test_translate(struct spi_device *spi,
			      void **ptr, size_t len,
			      void *tx, void *rx)
{
	size_t off;

	/* return on null */
	if (!*ptr)
		return 0;

	/* in the MAX_SIZE_HALF case modify the pointer */
	if (((size_t)*ptr) & SPI_TEST_MAX_SIZE_HALF)
		/* move the pointer to the correct range */
		*ptr += (SPI_TEST_MAX_SIZE_PLUS / 2) -
			SPI_TEST_MAX_SIZE_HALF;

	/* RX range
	 * - we check against MAX_SIZE_PLUS to allow for automated alignment
	 */
	if (RANGE_CHECK(*ptr, len, RX(0), SPI_TEST_MAX_SIZE_PLUS)) {
		off = *ptr - RX(0);
		*ptr = rx + off;

		return 0;
	}

	/* TX range */
	if (RANGE_CHECK(*ptr, len, TX(0), SPI_TEST_MAX_SIZE_PLUS)) {
		off = *ptr - TX(0);
		*ptr = tx + off;

		return 0;
	}

	dev_err(&spi->dev,
		"PointerRange [%pK:%pK[ not in range [%pK:%pK[ or [%pK:%pK[\n",
		*ptr, *ptr + len,
		RX(0), RX(SPI_TEST_MAX_SIZE),
		TX(0), TX(SPI_TEST_MAX_SIZE));

	return -EINVAL;
}

static int spi_test_fill_pattern(struct spi_device *spi,
				 struct spi_test *test)
{
	struct spi_transfer *xfers = test->transfers;
	u8 *tx_buf;
	size_t count = 0;
	int i, j;

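/*
 * GET_VALUE_BYTE() selects a single byte out of a multi-byte fill value,
 * based on the running byte counter and the CPU endianness.
 */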
#ifdef __BIG_ENDIAN
#define GET_VALUE_BYTE(value, index, bytes) \
	(value >> (8 * (bytes - 1 - count % bytes)))
#else
#define GET_VALUE_BYTE(value, index, bytes) \
	(value >> (8 * (count % bytes)))
#endif

	/* fill all transfers with the pattern requested */
	for (i = 0; i < test->transfer_count; i++) {
		/* fill rx_buf with SPI_TEST_PATTERN_UNWRITTEN */
		if (xfers[i].rx_buf)
			memset(xfers[i].rx_buf, SPI_TEST_PATTERN_UNWRITTEN,
			       xfers[i].len);
		/* if tx_buf is NULL then skip */
		tx_buf = (u8 *)xfers[i].tx_buf;
		if (!tx_buf)
			continue;
		/* modify all the transfers */
		for (j = 0; j < xfers[i].len; j++, tx_buf++, count++) {
			/* fill tx */
			switch (test->fill_option) {
			case FILL_MEMSET_8:
				*tx_buf = test->fill_pattern;
				break;
			case FILL_MEMSET_16:
				*tx_buf = GET_VALUE_BYTE(test->fill_pattern,
							 count, 2);
				break;
			case FILL_MEMSET_24:
				*tx_buf = GET_VALUE_BYTE(test->fill_pattern,
							 count, 3);
				break;
			case FILL_MEMSET_32:
				*tx_buf = GET_VALUE_BYTE(test->fill_pattern,
							 count, 4);
				break;
			case FILL_COUNT_8:
				*tx_buf = count;
				break;
			case FILL_COUNT_16:
				*tx_buf = GET_VALUE_BYTE(count, count, 2);
				break;
			case FILL_COUNT_24:
				*tx_buf = GET_VALUE_BYTE(count, count, 3);
				break;
			case FILL_COUNT_32:
				*tx_buf = GET_VALUE_BYTE(count, count, 4);
				break;
			case FILL_TRANSFER_BYTE_8:
				*tx_buf = j;
				break;
			case FILL_TRANSFER_BYTE_16:
				*tx_buf = GET_VALUE_BYTE(j, j, 2);
				break;
			case FILL_TRANSFER_BYTE_24:
				*tx_buf = GET_VALUE_BYTE(j, j, 3);
				break;
			case FILL_TRANSFER_BYTE_32:
				*tx_buf = GET_VALUE_BYTE(j, j, 4);
				break;
			case FILL_TRANSFER_NUM:
				*tx_buf = i;
				break;
			default:
				dev_err(&spi->dev,
					"unsupported fill_option: %i\n",
					test->fill_option);
				return -EINVAL;
			}
		}
	}

	return 0;
}

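/*
 * Run a single, fully parametrized iteration of a test: translate the
 * TX()/RX() markers of every transfer into addresses within the real tx/rx
 * buffers, fill the buffers with the requested pattern, execute the message
 * and compare the result against the expected return code.
 */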
static int _spi_test_run_iter(struct spi_device *spi,
			      struct spi_test *test,
			      void *tx, void *rx)
{
	struct spi_message *msg = &test->msg;
	struct spi_transfer *x;
	int i, ret;

	/* initialize message - zero-filled via static initialization */
	spi_message_init_no_memset(msg);

	/* fill rx with the DO_NOT_WRITE pattern */
	memset(rx, SPI_TEST_PATTERN_DO_NOT_WRITE, SPI_TEST_MAX_SIZE_PLUS);

	/* add the individual transfers */
	for (i = 0; i < test->transfer_count; i++) {
		x = &test->transfers[i];

		/* patch the values of tx_buf */
		ret = spi_test_translate(spi, (void **)&x->tx_buf, x->len,
					 (void *)tx, rx);
		if (ret)
			return ret;

		/* patch the values of rx_buf */
		ret = spi_test_translate(spi, &x->rx_buf, x->len,
					 (void *)tx, rx);
		if (ret)
			return ret;

		/* and add it to the list */
		spi_message_add_tail(x, msg);
	}

	/* fill in the transfer buffers with pattern */
	ret = spi_test_fill_pattern(spi, test);
	if (ret)
		return ret;

	/* and execute */
	if (test->execute_msg)
		ret = test->execute_msg(spi, test, tx, rx);
	else
		ret = spi_test_execute_msg(spi, test, tx, rx);

	/* handle result */
	if (ret == test->expected_return)
		return 0;

	dev_err(&spi->dev,
		"test failed - test returned %i, but we expect %i\n",
		ret, test->expected_return);

	if (ret)
		return ret;

	/* if ret is 0 but we expected something else,
	 * then return something special
	 */
	return -EFAULT;
}

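/*
 * Prepare one iteration of a test: copy the test template, apply the
 * iteration length and the tx/rx alignment offsets to all transfers
 * selected by iterate_transfer_mask, then run it via _spi_test_run_iter().
 */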
static int spi_test_run_iter(struct spi_device *spi,
			     const struct spi_test *testtemplate,
			     void *tx, void *rx,
			     size_t len,
			     size_t tx_off,
			     size_t rx_off
			     )
{
	struct spi_test test;
	int i, tx_count, rx_count;

	/* copy the test template to test */
	memcpy(&test, testtemplate, sizeof(test));

	/* if iterate_transfer_mask is not set,
	 * then set it to first transfer only
	 */
	if (!(test.iterate_transfer_mask & (BIT(test.transfer_count) - 1)))
		test.iterate_transfer_mask = 1;

	/* count number of transfers with tx/rx_buf != NULL */
	rx_count = tx_count = 0;
	for (i = 0; i < test.transfer_count; i++) {
		if (test.transfers[i].tx_buf)
			tx_count++;
		if (test.transfers[i].rx_buf)
			rx_count++;
	}

	/* in some iteration cases warn and exit early,
	 * as there is nothing to do that has not been tested already...
	 */
	if (tx_off && (!tx_count)) {
		dev_warn_once(&spi->dev,
			      "%s: iterate_tx_off configured with tx_buf==NULL - ignoring\n",
			      test.description);
		return 0;
	}
	if (rx_off && (!rx_count)) {
		dev_warn_once(&spi->dev,
			      "%s: iterate_rx_off configured with rx_buf==NULL - ignoring\n",
			      test.description);
		return 0;
	}

	/* write out info */
	if (!(len || tx_off || rx_off)) {
		dev_info(&spi->dev, "Running test %s\n", test.description);
	} else {
		dev_info(&spi->dev,
			 " with iteration values: len = %zu, tx_off = %zu, rx_off = %zu\n",
			 len, tx_off, rx_off);
	}

	/* update the transfer values from the iteration values */
	for (i = 0; i < test.transfer_count; i++) {
		/* only when bit in transfer mask is set */
		if (!(test.iterate_transfer_mask & BIT(i)))
			continue;
		test.transfers[i].len = len;
		if (test.transfers[i].tx_buf)
			test.transfers[i].tx_buf += tx_off;
		if (test.transfers[i].rx_buf)
			test.transfers[i].rx_buf += rx_off;
	}

	/* and execute */
	return _spi_test_run_iter(spi, &test, tx, rx);
}

/**
 * spi_test_execute_msg - default implementation to run a test
 *
 * @spi: @spi_device on which to run the @spi_message
 * @test: the test to execute, which already contains @msg
 * @tx: the tx buffer allocated for the test sequence
 * @rx: the rx buffer allocated for the test sequence
 *
 * Returns: error code of spi_sync as well as basic error checking
 */
int spi_test_execute_msg(struct spi_device *spi, struct spi_test *test,
			 void *tx, void *rx)
{
	struct spi_message *msg = &test->msg;
	int ret = 0;
	int i;

	/* only if we do not simulate */
	if (!simulate_only) {
		ktime_t start;

		/* dump the complete message before and after the transfer */
		if (dump_messages == 3)
			spi_test_dump_message(spi, msg, true);

		start = ktime_get();
		/* run spi message */
		ret = spi_sync(spi, msg);
		test->elapsed_time = ktime_to_ns(ktime_sub(ktime_get(), start));
		if (ret == -ETIMEDOUT) {
			dev_info(&spi->dev,
				 "spi-message timed out - rerunning...\n");
			/* rerun after a few explicit schedules */
			for (i = 0; i < 16; i++)
				schedule();
			ret = spi_sync(spi, msg);
		}
		if (ret) {
			dev_err(&spi->dev,
				"Failed to execute spi_message: %i\n",
				ret);
			goto exit;
		}

		/* do some extra error checks */
		if (msg->frame_length != msg->actual_length) {
			dev_err(&spi->dev,
				"actual length differs from expected\n");
			ret = -EIO;
			goto exit;
		}

		/* run rx-buffer tests */
		ret = spi_test_check_loopback_result(spi, msg, tx, rx);
		if (ret)
			goto exit;

		ret = spi_test_check_elapsed_time(spi, test);
	}

	/* if requested or on error dump message (including data) */
exit:
	if (dump_messages || ret)
		spi_test_dump_message(spi, msg,
				      (dump_messages >= 2) || (ret));

	return ret;
}
EXPORT_SYMBOL_GPL(spi_test_execute_msg);

/**
 * spi_test_run_test - run an individual spi_test
 *	including all the relevant iterations on:
 *	length and buffer alignment
 *
 * @spi: the spi_device to send the messages to
 * @test: the test which we need to execute
 * @tx: the tx buffer allocated for the test sequence
 * @rx: the rx buffer allocated for the test sequence
 *
 * Returns: status code of spi_sync or other failures
 */

int spi_test_run_test(struct spi_device *spi, const struct spi_test *test,
		      void *tx, void *rx)
{
	int idx_len;
	size_t len;
	size_t tx_align, rx_align;
	int ret;

	/* test for transfer limits */
	if (test->transfer_count >= SPI_TEST_MAX_TRANSFERS) {
		dev_err(&spi->dev,
			"%s: Exceeded max number of transfers with %i\n",
			test->description, test->transfer_count);
		return -E2BIG;
	}

	/* setting up some values in spi_message
	 * based on some settings in spi_master
	 * some of this can also get done in the run() method
	 */

	/* iterate over all the iterable values using macros
	 * (to make it a bit more readable...)
	 */
#define FOR_EACH_ALIGNMENT(var)						\
	for (var = 0;							\
	     var < (test->iterate_##var ?				\
			(spi->master->dma_alignment ?			\
			 spi->master->dma_alignment :			\
			 test->iterate_##var) :				\
			1);						\
	     var++)

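	/*
	 * Walk every requested transfer length and, for each length, every
	 * tx/rx alignment offset (up to the controller dma_alignment if it
	 * reports one, otherwise up to the iterate_*_align value of the test).
	 */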
	for (idx_len = 0; idx_len < SPI_TEST_MAX_ITERATE &&
	     (len = test->iterate_len[idx_len]) != -1; idx_len++) {
		FOR_EACH_ALIGNMENT(tx_align) {
			FOR_EACH_ALIGNMENT(rx_align) {
				/* and run the iteration */
				ret = spi_test_run_iter(spi, test,
							tx, rx,
							len,
							tx_align,
							rx_align);
				if (ret)
					return ret;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_test_run_test);

/**
 * spi_test_run_tests - run an array of spi_message tests
 * @spi: the spi device on which to run the tests
 * @tests: NULL-terminated array of @spi_test
 *
 * Returns: status errors as per @spi_test_run_test()
 */

int spi_test_run_tests(struct spi_device *spi,
		       struct spi_test *tests)
{
	char *rx = NULL, *tx = NULL;
	int ret = 0, count = 0;
	struct spi_test *test;

	/* allocate rx/tx buffers of 128kB size without devm
	 * in the hope that it is on a page boundary
	 */
	if (use_vmalloc)
		rx = vmalloc(SPI_TEST_MAX_SIZE_PLUS);
	else
		rx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL);
	if (!rx)
		return -ENOMEM;

	if (use_vmalloc)
		tx = vmalloc(SPI_TEST_MAX_SIZE_PLUS);
	else
		tx = kzalloc(SPI_TEST_MAX_SIZE_PLUS, GFP_KERNEL);
	if (!tx) {
		ret = -ENOMEM;
		goto err_tx;
	}

	/* now run the individual tests in the table */
	for (test = tests, count = 0; test->description[0];
	     test++, count++) {
		/* only run test if requested */
		if ((run_only_test > -1) && (count != run_only_test))
			continue;
		/* run custom implementation */
		if (test->run_test)
			ret = test->run_test(spi, test, tx, rx);
		else
			ret = spi_test_run_test(spi, test, tx, rx);
		if (ret)
			goto out;
		/* add some delays so that we can easily
		 * detect the individual tests when using a logic analyzer;
		 * we also add scheduling to avoid potential spi_timeouts...
		 */
		if (delay_ms)
			mdelay(delay_ms);
		schedule();
	}

out:
	kvfree(tx);
err_tx:
	kvfree(rx);
	return ret;
}
EXPORT_SYMBOL_GPL(spi_test_run_tests);
