Lines Matching refs:rdx
138 mov LOCALE_T___LOCALES+LC_CTYPE*LP_SIZE(%rdx), %RAX_LP
140 mov (%rdx), %RAX_LP
314 mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
317 movdqa (%rdi,%rdx), %xmm0
319 pcmpistri $0x1a,(%rsi,%rdx), %xmm0
321 movdqa (%rsi,%rdx), %xmm1
325 lea 16(%rdx), %rdx
332 movdqa (%rdi,%rdx), %xmm0
334 pcmpistri $0x1a,(%rsi,%rdx), %xmm0
336 movdqa (%rsi,%rdx), %xmm1
340 lea 16(%rdx), %rdx
356 lea -16(%rdx, %rcx), %rcx
362 movl (%rcx,%rdx,4), %edx
398 mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
406 movdqa (%rdi, %rdx), %xmm0
407 palignr $1, -16(%rdi, %rdx), D(%xmm0)
409 pcmpistri $0x1a,(%rsi,%rdx), %xmm0
411 movdqa (%rsi,%rdx), %xmm1
421 add $16, %rdx
425 movdqa (%rdi, %rdx), %xmm0
426 palignr $1, -16(%rdi, %rdx), D(%xmm0)
428 pcmpistri $0x1a,(%rsi,%rdx), %xmm0
430 movdqa (%rsi,%rdx), %xmm1
439 add $16, %rdx
445 movdqa -16(%rdi, %rdx), %xmm0
486 mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
494 movdqa (%rdi, %rdx), %xmm0
495 palignr $2, -16(%rdi, %rdx), D(%xmm0)
497 pcmpistri $0x1a,(%rsi,%rdx), %xmm0
499 movdqa (%rsi,%rdx), %xmm1
509 add $16, %rdx
513 movdqa (%rdi, %rdx), %xmm0
514 palignr $2, -16(%rdi, %rdx), D(%xmm0)
516 pcmpistri $0x1a,(%rsi,%rdx), %xmm0
518 movdqa (%rsi,%rdx), %xmm1
527 add $16, %rdx
533 movdqa -16(%rdi, %rdx), %xmm0
575 mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
582 movdqa (%rdi, %rdx), %xmm0
583 palignr $3, -16(%rdi, %rdx), D(%xmm0)
585 pcmpistri $0x1a,(%rsi,%rdx), %xmm0
587 movdqa (%rsi,%rdx), %xmm1
597 add $16, %rdx
601 movdqa (%rdi, %rdx), %xmm0
602 palignr $3, -16(%rdi, %rdx), D(%xmm0)
604 pcmpistri $0x1a,(%rsi,%rdx), %xmm0
606 movdqa (%rsi,%rdx), %xmm1
615 add $16, %rdx
621 movdqa -16(%rdi, %rdx), %xmm0
663 mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
671 movdqa (%rdi, %rdx), %xmm0
672 palignr $4, -16(%rdi, %rdx), D(%xmm0)
674 pcmpistri $0x1a,(%rsi,%rdx), %xmm0
676 movdqa (%rsi,%rdx), %xmm1
686 add $16, %rdx
690 movdqa (%rdi, %rdx), %xmm0
691 palignr $4, -16(%rdi, %rdx), D(%xmm0)
693 pcmpistri $0x1a,(%rsi,%rdx), %xmm0
695 movdqa (%rsi,%rdx), %xmm1
704 add $16, %rdx
710 movdqa -16(%rdi, %rdx), %xmm0
752 mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
760 movdqa (%rdi, %rdx), %xmm0
761 palignr $5, -16(%rdi, %rdx), D(%xmm0)
763 pcmpistri $0x1a,(%rsi,%rdx), %xmm0
765 movdqa (%rsi,%rdx), %xmm1
775 add $16, %rdx
779 movdqa (%rdi, %rdx), %xmm0
781 palignr $5, -16(%rdi, %rdx), D(%xmm0)
783 pcmpistri $0x1a,(%rsi,%rdx), %xmm0
785 movdqa (%rsi,%rdx), %xmm1
794 add $16, %rdx
800 movdqa -16(%rdi, %rdx), %xmm0
842 mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
850 movdqa (%rdi, %rdx), %xmm0
851 palignr $6, -16(%rdi, %rdx), D(%xmm0)
853 pcmpistri $0x1a,(%rsi,%rdx), %xmm0
855 movdqa (%rsi,%rdx), %xmm1
865 add $16, %rdx
869 movdqa (%rdi, %rdx), %xmm0
870 palignr $6, -16(%rdi, %rdx), D(%xmm0)
872 pcmpistri $0x1a,(%rsi,%rdx), %xmm0
874 movdqa (%rsi,%rdx), %xmm1
883 add $16, %rdx
889 movdqa -16(%rdi, %rdx), %xmm0
931 mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
939 movdqa (%rdi, %rdx), %xmm0
940 palignr $7, -16(%rdi, %rdx), D(%xmm0)
942 pcmpistri $0x1a,(%rsi,%rdx), %xmm0
944 movdqa (%rsi,%rdx), %xmm1
954 add $16, %rdx
958 movdqa (%rdi, %rdx), %xmm0
959 palignr $7, -16(%rdi, %rdx), D(%xmm0)
961 pcmpistri $0x1a,(%rsi,%rdx), %xmm0
963 movdqa (%rsi,%rdx), %xmm1
972 add $16, %rdx
978 movdqa -16(%rdi, %rdx), %xmm0
1020 mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
1028 movdqa (%rdi, %rdx), %xmm0
1029 palignr $8, -16(%rdi, %rdx), D(%xmm0)
1031 pcmpistri $0x1a, (%rsi,%rdx), %xmm0
1033 movdqa (%rsi,%rdx), %xmm1
1043 add $16, %rdx
1047 movdqa (%rdi, %rdx), %xmm0
1048 palignr $8, -16(%rdi, %rdx), D(%xmm0)
1050 pcmpistri $0x1a, (%rsi,%rdx), %xmm0
1052 movdqa (%rsi,%rdx), %xmm1
1061 add $16, %rdx
1067 movdqa -16(%rdi, %rdx), %xmm0
1109 mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
1117 movdqa (%rdi, %rdx), %xmm0
1119 palignr $9, -16(%rdi, %rdx), D(%xmm0)
1121 pcmpistri $0x1a, (%rsi,%rdx), %xmm0
1123 movdqa (%rsi,%rdx), %xmm1
1133 add $16, %rdx
1137 movdqa (%rdi, %rdx), %xmm0
1138 palignr $9, -16(%rdi, %rdx), D(%xmm0)
1140 pcmpistri $0x1a, (%rsi,%rdx), %xmm0
1142 movdqa (%rsi,%rdx), %xmm1
1151 add $16, %rdx
1157 movdqa -16(%rdi, %rdx), %xmm0
1199 mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
1207 movdqa (%rdi, %rdx), %xmm0
1208 palignr $10, -16(%rdi, %rdx), D(%xmm0)
1210 pcmpistri $0x1a, (%rsi,%rdx), %xmm0
1212 movdqa (%rsi,%rdx), %xmm1
1222 add $16, %rdx
1226 movdqa (%rdi, %rdx), %xmm0
1227 palignr $10, -16(%rdi, %rdx), D(%xmm0)
1229 pcmpistri $0x1a, (%rsi,%rdx), %xmm0
1231 movdqa (%rsi,%rdx), %xmm1
1240 add $16, %rdx
1246 movdqa -16(%rdi, %rdx), %xmm0
1288 mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
1296 movdqa (%rdi, %rdx), %xmm0
1297 palignr $11, -16(%rdi, %rdx), D(%xmm0)
1299 pcmpistri $0x1a, (%rsi,%rdx), %xmm0
1301 movdqa (%rsi,%rdx), %xmm1
1311 add $16, %rdx
1315 movdqa (%rdi, %rdx), %xmm0
1316 palignr $11, -16(%rdi, %rdx), D(%xmm0)
1318 pcmpistri $0x1a, (%rsi,%rdx), %xmm0
1320 movdqa (%rsi,%rdx), %xmm1
1329 add $16, %rdx
1335 movdqa -16(%rdi, %rdx), %xmm0
1377 mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
1385 movdqa (%rdi, %rdx), %xmm0
1386 palignr $12, -16(%rdi, %rdx), D(%xmm0)
1388 pcmpistri $0x1a, (%rsi,%rdx), %xmm0
1390 movdqa (%rsi,%rdx), %xmm1
1400 add $16, %rdx
1404 movdqa (%rdi, %rdx), %xmm0
1405 palignr $12, -16(%rdi, %rdx), D(%xmm0)
1407 pcmpistri $0x1a, (%rsi,%rdx), %xmm0
1409 movdqa (%rsi,%rdx), %xmm1
1418 add $16, %rdx
1424 movdqa -16(%rdi, %rdx), %xmm0
1467 mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
1475 movdqa (%rdi, %rdx), %xmm0
1476 palignr $13, -16(%rdi, %rdx), D(%xmm0)
1478 pcmpistri $0x1a, (%rsi,%rdx), %xmm0
1480 movdqa (%rsi,%rdx), %xmm1
1490 add $16, %rdx
1494 movdqa (%rdi, %rdx), %xmm0
1495 palignr $13, -16(%rdi, %rdx), D(%xmm0)
1497 pcmpistri $0x1a, (%rsi,%rdx), %xmm0
1499 movdqa (%rsi,%rdx), %xmm1
1508 add $16, %rdx
1514 movdqa -16(%rdi, %rdx), %xmm0
1557 mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
1565 movdqa (%rdi, %rdx), %xmm0
1566 palignr $14, -16(%rdi, %rdx), D(%xmm0)
1568 pcmpistri $0x1a, (%rsi,%rdx), %xmm0
1570 movdqa (%rsi,%rdx), %xmm1
1580 add $16, %rdx
1584 movdqa (%rdi, %rdx), %xmm0
1585 palignr $14, -16(%rdi, %rdx), D(%xmm0)
1587 pcmpistri $0x1a, (%rsi,%rdx), %xmm0
1589 movdqa (%rsi,%rdx), %xmm1
1598 add $16, %rdx
1604 movdqa -16(%rdi, %rdx), %xmm0
1649 mov %rcx, %rdx /* only for offset of sse4 instruction loop*/
1657 movdqa (%rdi, %rdx), %xmm0
1658 palignr $15, -16(%rdi, %rdx), D(%xmm0)
1660 pcmpistri $0x1a, (%rsi,%rdx), %xmm0
1662 movdqa (%rsi,%rdx), %xmm1
1672 add $16, %rdx
1676 movdqa (%rdi, %rdx), %xmm0
1677 palignr $15, -16(%rdi, %rdx), D(%xmm0)
1679 pcmpistri $0x1a, (%rsi,%rdx), %xmm0
1681 movdqa (%rsi,%rdx), %xmm1
1690 add $16, %rdx
1696 movdqa -16(%rdi, %rdx), %xmm0
1708 pcmpistri $0x1a,(%rsi,%rdx), %xmm0
1710 movdqa (%rsi,%rdx), %xmm1
1721 add %rcx, %rdx
1723 movzbl (%rdi, %rdx), %eax
1724 movzbl (%rsi, %rdx), %edx
1731 movl (%rcx,%rdx,4), %edx
1748 bsf %rdx, %rdx /* find and store bit index in %rdx */
1751 sub %rdx, %r11
1754 movzbl (%rsi, %rdx), %ecx
1755 movzbl (%rdi, %rdx), %eax
1758 leaq _nl_C_LC_CTYPE_tolower+128*4(%rip), %rdx
1759 movl (%rdx,%rcx,4), %ecx
1760 movl (%rdx,%rax,4), %eax
1777 leaq _nl_C_LC_CTYPE_tolower+128*4(%rip), %rdx
1778 movl (%rdx,%rcx,4), %ecx
1779 movl (%rdx,%rax,4), %eax
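
The pattern that repeats above, once per 16-byte alignment offset, is: load 16 bytes from each string (movdqa), reconstruct the %rdi-side chunk from two adjacent aligned loads with palignr when the pointers differ modulo 16, compare the chunks with pcmpistri using immediate 0x1a, and either advance %rdx by 16 or fall through to byte-level mismatch handling, where the _nl_C_LC_CTYPE_tolower table (biased by 128 four-byte entries, hence the +128*4 offset) folds the differing bytes before the final subtraction. The sketch below is a minimal C illustration of that inner loop, not the glibc code: the function name is made up, both pointers are assumed 16-byte aligned (which the real assembly arranges before entering these loops), and the case folding on mismatch is shown with plain tolower() instead of the locale table.

/* Illustrative sketch only -- not the glibc implementation.
   Compile with -msse4.2.  Assumes s1 and s2 are both 16-byte
   aligned, so each aligned 16-byte load stays on one page even
   when it reads past the terminator (mirrors the assembly's
   over-read trick; strictly this over-read is not portable C). */
#include <nmmintrin.h>   /* SSE4.2 string intrinsics */
#include <stddef.h>
#include <ctype.h>

/* imm8 0x1a: signed bytes, "equal each" aggregation, negative
   polarity (result bits set where bytes DIFFER), least-significant
   index returned in the index result (%ecx in the assembly). */
#define CMP_MODE (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | \
                  _SIDD_NEGATIVE_POLARITY | _SIDD_LEAST_SIGNIFICANT)

static int casecmp_aligned_sketch(const char *s1, const char *s2)
{
    size_t off = 0;                                   /* role of %rdx */
    for (;;) {
        __m128i a = _mm_load_si128((const __m128i *)(s1 + off));
        __m128i b = _mm_load_si128((const __m128i *)(s2 + off));
        if (_mm_cmpistrc(a, b, CMP_MODE)) {           /* CF: some byte differs */
            int idx = _mm_cmpistri(a, b, CMP_MODE);   /* index of first difference */
            int c1 = tolower((unsigned char)s1[off + idx]);
            int c2 = tolower((unsigned char)s2[off + idx]);
            return c1 - c2;
        }
        if (_mm_cmpistrz(a, b, CMP_MODE))             /* ZF: NUL reached, no difference */
            return 0;
        off += 16;                                    /* add $16, %rdx */
    }
}

When the two strings are not equally aligned, the listing shows the %rdi-side bytes being rebuilt with palignr $1 through $15 from the aligned chunks at (%rdi,%rdx) and -16(%rdi,%rdx), while the %rsi side is loaded directly; that is why the listing contains fifteen near-identical loop bodies differing only in the palignr immediate.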