/* Copyright (C) 1992-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _LINUX_I386_SYSDEP_H
#define _LINUX_I386_SYSDEP_H 1

/* There is some commonality.  */
#include <sysdeps/unix/sysv/linux/sysdep.h>
#include <sysdeps/unix/i386/sysdep.h>
/* Defines RTLD_PRIVATE_ERRNO and USE_DL_SYSINFO.  */
#include <dl-sysdep.h>
#include <tls.h>


/* For Linux we can use the system call table in the header file
	/usr/include/asm/unistd.h
   of the kernel.  But these symbols do not follow the SYS_* syntax
   so we have to redefine the `SYS_ify' macro here.  */
#undef SYS_ify
#define SYS_ify(syscall_name)	__NR_##syscall_name
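
/* For illustration: SYS_ify (write) expands to __NR_write, so DO_CALL
   and the INTERNAL_SYSCALL macros below can be written in terms of the
   symbolic syscall names used throughout glibc.  */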

#ifndef I386_USE_SYSENTER
# if defined USE_DL_SYSINFO \
     && (IS_IN (libc) || IS_IN (libpthread))
#  define I386_USE_SYSENTER	1
# else
#  define I386_USE_SYSENTER	0
# endif
#endif

/* Since GCC 5 and above can properly spill %ebx with PIC when needed,
   we can inline syscalls with 6 arguments if GCC 5 or above is used
   to compile glibc.  Disable this optimization when compiling for
   profiling or when -fno-omit-frame-pointer is used, since asm ("ebp")
   can't be used to put the 6th argument in %ebp for the syscall.  */
#if !defined PROF && CAN_USE_REGISTER_ASM_EBP
# define OPTIMIZE_FOR_GCC_5
#endif

#ifdef __ASSEMBLER__

/* Linux uses a negative return value to indicate syscall errors,
   unlike most Unices, which use the condition codes' carry flag.

   Since version 2.1 the return value of a system call might be
   negative even if the call succeeded.  E.g., the `lseek' system call
   might return a large offset.  Therefore we can no longer test
   for < 0; instead we must test for a real error by making sure the
   value in %eax is a real error number.  Linus said he will make sure
   that no syscall returns a value in -1 .. -4095 as a valid result, so
   we can safely test against -4095.  */
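
/* Concretely (a sketch, not part of any definition here): a raw result
   R in %eax denotes failure exactly when (unsigned int) R >= -4095U,
   i.e. R is in the range -4095 .. -1, and then -R is the errno value.
   For example, a raw -2 from `open' means ENOENT, while a large
   positive offset from `lseek' is a successful result.  This is what
   the `cmpl $-4095, %eax; jae SYSCALL_ERROR_LABEL' test in PSEUDO
   below implements.  */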

/* We don't want the label for the error handler to be global when we
   define it here.  */
#undef SYSCALL_ERROR_LABEL
#define SYSCALL_ERROR_LABEL __syscall_error

#undef	PSEUDO
#define	PSEUDO(name, syscall_name, args)				      \
  .text;								      \
  ENTRY (name)								      \
    DO_CALL (syscall_name, args);					      \
    cmpl $-4095, %eax;							      \
    jae SYSCALL_ERROR_LABEL

#undef	PSEUDO_END
#define	PSEUDO_END(name)						      \
  SYSCALL_ERROR_HANDLER							      \
  END (name)

#undef	PSEUDO_NOERRNO
#define	PSEUDO_NOERRNO(name, syscall_name, args)			      \
  .text;								      \
  ENTRY (name)								      \
    DO_CALL (syscall_name, args)

#undef	PSEUDO_END_NOERRNO
#define	PSEUDO_END_NOERRNO(name)					      \
  END (name)

#define ret_NOERRNO ret

/* The function has to return the error code.  */
#undef	PSEUDO_ERRVAL
#define	PSEUDO_ERRVAL(name, syscall_name, args) \
  .text;								      \
  ENTRY (name)								      \
    DO_CALL (syscall_name, args);					      \
    negl %eax

#undef	PSEUDO_END_ERRVAL
#define	PSEUDO_END_ERRVAL(name) \
  END (name)

#define ret_ERRVAL ret

#define SYSCALL_ERROR_HANDLER	/* Nothing here; code in sysdep.c is used.  */

/* The original calling convention for system calls on Linux/i386 is
   to use int $0x80.  */
#if I386_USE_SYSENTER
# ifdef PIC
#  define ENTER_KERNEL call *%gs:SYSINFO_OFFSET
# else
#  define ENTER_KERNEL call *_dl_sysinfo
# endif
#else
# define ENTER_KERNEL int $0x80
#endif
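
/* In other words (background, not a definition made here): when the
   vDSO entry point is available, the syscall is entered through the
   pointer the dynamic linker stored in _dl_sysinfo and cached in the
   TCB's sysinfo field (reachable via %gs), which lets the kernel use
   sysenter on CPUs that support it; otherwise the traditional
   int $0x80 trap instruction is used.  */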

/* Linux takes system call arguments in registers:

	syscall number	%eax	     call-clobbered
	arg 1		%ebx	     call-saved
	arg 2		%ecx	     call-clobbered
	arg 3		%edx	     call-clobbered
	arg 4		%esi	     call-saved
	arg 5		%edi	     call-saved
	arg 6		%ebp	     call-saved

   The stack layout upon entering the function is:

	24(%esp)	Arg# 6
	20(%esp)	Arg# 5
	16(%esp)	Arg# 4
	12(%esp)	Arg# 3
	 8(%esp)	Arg# 2
	 4(%esp)	Arg# 1
	  (%esp)	Return address

   (Of course a function with say 3 arguments does not have entries for
   arguments 4, 5, and 6.)

   The following code tries hard to be optimal.  A general assumption
   (which is true according to the data books I have) is that

	2 * xchg	is more expensive than	pushl + movl + popl

   Besides this, a neat trick is used.  The Linux calling conventions
   specify that among the registers used for parameters, %ecx and %edx
   need not be saved.  Moreover, we may clobber these registers even
   when they are not used for parameter passing.

   As a result one can see below that we save the content of the %ebx
   register in the %edx register when we have fewer than 3 arguments
   (2 * movl is less expensive than pushl + popl).

   Second, unlike for the other registers, we don't save the contents
   of %ecx and %edx when we have more than 1 and 2 arguments
   respectively.

   The code below might look a bit long, but we have to take care of
   pipelined processors (i586).  There the `pushl' and `popl'
   instructions are marked as NP (not pairable), with the exception of
   two consecutive instances of these instructions.  This causes no
   penalty on other processors.  */

#undef	DO_CALL
#define DO_CALL(syscall_name, args)			      		      \
    PUSHARGS_##args							      \
    DOARGS_##args							      \
    movl $SYS_ify (syscall_name), %eax;					      \
    ENTER_KERNEL							      \
    POPARGS_##args

#define PUSHARGS_0	/* No arguments to push.  */
#define	DOARGS_0	/* No arguments to frob.  */
#define	POPARGS_0	/* No arguments to pop.  */
#define	_PUSHARGS_0	/* No arguments to push.  */
#define _DOARGS_0(n)	/* No arguments to frob.  */
#define	_POPARGS_0	/* No arguments to pop.  */

#define PUSHARGS_1	movl %ebx, %edx; L(SAVEBX1): PUSHARGS_0
#define	DOARGS_1	_DOARGS_1 (4)
#define	POPARGS_1	POPARGS_0; movl %edx, %ebx; L(RESTBX1):
#define	_PUSHARGS_1	pushl %ebx; cfi_adjust_cfa_offset (4); \
			cfi_rel_offset (ebx, 0); L(PUSHBX1): _PUSHARGS_0
#define _DOARGS_1(n)	movl n(%esp), %ebx; _DOARGS_0(n-4)
#define	_POPARGS_1	_POPARGS_0; popl %ebx; cfi_adjust_cfa_offset (-4); \
			cfi_restore (ebx); L(POPBX1):

#define PUSHARGS_2	PUSHARGS_1
#define	DOARGS_2	_DOARGS_2 (8)
#define	POPARGS_2	POPARGS_1
#define _PUSHARGS_2	_PUSHARGS_1
#define	_DOARGS_2(n)	movl n(%esp), %ecx; _DOARGS_1 (n-4)
#define	_POPARGS_2	_POPARGS_1

#define PUSHARGS_3	_PUSHARGS_2
#define DOARGS_3	_DOARGS_3 (16)
#define POPARGS_3	_POPARGS_3
#define _PUSHARGS_3	_PUSHARGS_2
#define _DOARGS_3(n)	movl n(%esp), %edx; _DOARGS_2 (n-4)
#define _POPARGS_3	_POPARGS_2

#define PUSHARGS_4	_PUSHARGS_4
#define DOARGS_4	_DOARGS_4 (24)
#define POPARGS_4	_POPARGS_4
#define _PUSHARGS_4	pushl %esi; cfi_adjust_cfa_offset (4); \
			cfi_rel_offset (esi, 0); L(PUSHSI1): _PUSHARGS_3
#define _DOARGS_4(n)	movl n(%esp), %esi; _DOARGS_3 (n-4)
#define _POPARGS_4	_POPARGS_3; popl %esi; cfi_adjust_cfa_offset (-4); \
			cfi_restore (esi); L(POPSI1):

#define PUSHARGS_5	_PUSHARGS_5
#define DOARGS_5	_DOARGS_5 (32)
#define POPARGS_5	_POPARGS_5
#define _PUSHARGS_5	pushl %edi; cfi_adjust_cfa_offset (4); \
			cfi_rel_offset (edi, 0); L(PUSHDI1): _PUSHARGS_4
#define _DOARGS_5(n)	movl n(%esp), %edi; _DOARGS_4 (n-4)
#define _POPARGS_5	_POPARGS_4; popl %edi; cfi_adjust_cfa_offset (-4); \
			cfi_restore (edi); L(POPDI1):

#define PUSHARGS_6	_PUSHARGS_6
#define DOARGS_6	_DOARGS_6 (40)
#define POPARGS_6	_POPARGS_6
#define _PUSHARGS_6	pushl %ebp; cfi_adjust_cfa_offset (4); \
			cfi_rel_offset (ebp, 0); L(PUSHBP1): _PUSHARGS_5
#define _DOARGS_6(n)	movl n(%esp), %ebp; _DOARGS_5 (n-4)
#define _POPARGS_6	_POPARGS_5; popl %ebp; cfi_adjust_cfa_offset (-4); \
			cfi_restore (ebp); L(POPBP1):
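
/* As an illustration (hand-expanded, local labels omitted), for a
   2-argument syscall DO_CALL (name, 2) expands to roughly:

	movl %ebx, %edx		# save %ebx in a call-clobbered register
	movl 8(%esp), %ecx	# arg 2
	movl 4(%esp), %ebx	# arg 1
	movl $SYS_ify (name), %eax
	ENTER_KERNEL
	movl %edx, %ebx		# restore %ebx

   This is the `2 * movl instead of pushl + popl' trick described
   above; with 3 or more arguments %ebx is pushed and popped instead
   (via _PUSHARGS_1/_POPARGS_1), since %edx is then needed for
   argument 3.  */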

#else	/* !__ASSEMBLER__ */

extern int __syscall_error (int)
  attribute_hidden __attribute__ ((__regparm__ (1)));

#ifndef OPTIMIZE_FOR_GCC_5
/* We need some help from the assembler to generate optimal code.  We
   define some macros here which will be used later.  */
asm (".L__X'%ebx = 1\n\t"
     ".L__X'%ecx = 2\n\t"
     ".L__X'%edx = 2\n\t"
     ".L__X'%eax = 3\n\t"
     ".L__X'%esi = 3\n\t"
     ".L__X'%edi = 3\n\t"
     ".L__X'%ebp = 3\n\t"
     ".L__X'%esp = 3\n\t"
     ".macro bpushl name reg\n\t"
     ".if 1 - \\name\n\t"
     ".if 2 - \\name\n\t"
     "error\n\t"
     ".else\n\t"
     "xchgl \\reg, %ebx\n\t"
     ".endif\n\t"
     ".endif\n\t"
     ".endm\n\t"
     ".macro bpopl name reg\n\t"
     ".if 1 - \\name\n\t"
     ".if 2 - \\name\n\t"
     "error\n\t"
     ".else\n\t"
     "xchgl \\reg, %ebx\n\t"
     ".endif\n\t"
     ".endif\n\t"
     ".endm\n\t");

/* Six-argument syscalls use an out-of-line helper, because an inline
   asm using all registers apart from %esp cannot work reliably and
   the assembler does not support describing an asm that saves and
   restores %ebp itself as a separate stack frame.  This structure
   stores the arguments not passed in registers; a pointer to it is
   passed in %edi.  */
struct libc_do_syscall_args
{
  int ebx, edi, ebp;
};
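
/* For example (a sketch of the resulting register assignment), a
   6-argument call such as
   INTERNAL_SYSCALL (mmap2, 6, addr, len, prot, flags, fd, off) built
   without OPTIMIZE_FOR_GCC_5 passes arg2..arg4 in %ecx, %edx and %esi
   as usual, stores arg1, arg5 and arg6 in a libc_do_syscall_args
   object on the stack, and passes that object's address in %edi;
   __libc_do_syscall then moves the three stored values into %ebx,
   %edi and %ebp around the actual kernel entry.  */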
#endif

# define VDSO_NAME  "LINUX_2.6"
# define VDSO_HASH  61765110

/* List of system calls which are supported as vsyscalls.  */
# define HAVE_CLOCK_GETTIME_VSYSCALL    "__vdso_clock_gettime"
# define HAVE_CLOCK_GETTIME64_VSYSCALL  "__vdso_clock_gettime64"
# define HAVE_GETTIMEOFDAY_VSYSCALL     "__vdso_gettimeofday"
# define HAVE_TIME_VSYSCALL             "__vdso_time"
# define HAVE_CLOCK_GETRES_VSYSCALL     "__vdso_clock_getres"

# define HAVE_CLONE3_WRAPPER		1

# undef HAVE_INTERNAL_BRK_ADDR_SYMBOL
# define HAVE_INTERNAL_BRK_ADDR_SYMBOL 1

/* Define a macro which expands inline into the wrapper code for a system
   call.  This use is for internal calls that do not need to handle errors
   normally.  It will never touch errno.  This returns just what the kernel
   gave back.

   The _NCS variant allows non-constant syscall numbers but it is not
   possible to use more than four parameters.  */
#undef INTERNAL_SYSCALL
#define INTERNAL_SYSCALL_MAIN_0(name, args...) \
    INTERNAL_SYSCALL_MAIN_INLINE(name, 0, args)
#define INTERNAL_SYSCALL_MAIN_1(name, args...) \
    INTERNAL_SYSCALL_MAIN_INLINE(name, 1, args)
#define INTERNAL_SYSCALL_MAIN_2(name, args...) \
    INTERNAL_SYSCALL_MAIN_INLINE(name, 2, args)
#define INTERNAL_SYSCALL_MAIN_3(name, args...) \
    INTERNAL_SYSCALL_MAIN_INLINE(name, 3, args)
#define INTERNAL_SYSCALL_MAIN_4(name, args...) \
    INTERNAL_SYSCALL_MAIN_INLINE(name, 4, args)
#define INTERNAL_SYSCALL_MAIN_5(name, args...) \
    INTERNAL_SYSCALL_MAIN_INLINE(name, 5, args)

#define INTERNAL_SYSCALL_MAIN_NCS_0(name, args...) \
    INTERNAL_SYSCALL_MAIN_NCS(name, 0, args)
#define INTERNAL_SYSCALL_MAIN_NCS_1(name, args...) \
    INTERNAL_SYSCALL_MAIN_NCS(name, 1, args)
#define INTERNAL_SYSCALL_MAIN_NCS_2(name, args...) \
    INTERNAL_SYSCALL_MAIN_NCS(name, 2, args)
#define INTERNAL_SYSCALL_MAIN_NCS_3(name, args...) \
    INTERNAL_SYSCALL_MAIN_NCS(name, 3, args)
#define INTERNAL_SYSCALL_MAIN_NCS_4(name, args...) \
    INTERNAL_SYSCALL_MAIN_NCS(name, 4, args)
#define INTERNAL_SYSCALL_MAIN_NCS_5(name, args...) \
    INTERNAL_SYSCALL_MAIN_NCS(name, 5, args)

/* Each object using 6-argument inline syscalls must include a
   definition of __libc_do_syscall.  */
#ifdef OPTIMIZE_FOR_GCC_5
# define INTERNAL_SYSCALL_MAIN_6(name, args...) \
    INTERNAL_SYSCALL_MAIN_INLINE(name, 6, args)
# define INTERNAL_SYSCALL_MAIN_NCS_6(name, args...) \
    INTERNAL_SYSCALL_MAIN_NCS(name, 6, args)
#else /* GCC 5  */
# define INTERNAL_SYSCALL_MAIN_6(name, arg1, arg2, arg3,		\
				 arg4, arg5, arg6)			\
  struct libc_do_syscall_args _xv =					\
    {									\
      (int) (arg1),							\
      (int) (arg5),							\
      (int) (arg6)							\
    };									\
    asm volatile (							\
    "movl %1, %%eax\n\t"						\
    "call __libc_do_syscall"						\
    : "=a" (resultvar)							\
    : "i" (__NR_##name), "c" (arg2), "d" (arg3), "S" (arg4), "D" (&_xv) \
    : "memory", "cc")
# define INTERNAL_SYSCALL_MAIN_NCS_6(name, arg1, arg2, arg3,		\
				     arg4, arg5, arg6)			\
  struct libc_do_syscall_args _xv =					\
    {									\
      (int) (arg1),							\
      (int) (arg5),							\
      (int) (arg6)							\
    };									\
    asm volatile (							\
    "movl %1, %%eax\n\t"						\
    "call __libc_do_syscall"						\
    : "=a" (resultvar)							\
    : "a" (name), "c" (arg2), "d" (arg3), "S" (arg4), "D" (&_xv)	\
    : "memory", "cc")
#endif /* GCC 5  */

#define INTERNAL_SYSCALL(name, nr, args...) \
  ({									      \
    register unsigned int resultvar;					      \
    INTERNAL_SYSCALL_MAIN_##nr (name, args);				      \
    (int) resultvar; })
#define INTERNAL_SYSCALL_NCS(name, nr, args...) \
  ({									      \
    register unsigned int resultvar;					      \
    INTERNAL_SYSCALL_MAIN_NCS_##nr (name, args);			      \
    (int) resultvar; })
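
/* A typical use inside glibc looks like this (a sketch; the error-test
   macros come from the generic Linux sysdep.h included above, not from
   this file):

     int r = INTERNAL_SYSCALL (kill, 2, pid, sig);
     if (INTERNAL_SYSCALL_ERROR_P (r))
       return INTERNAL_SYSCALL_ERRNO (r);

   The raw kernel result is returned unchanged and errno is never
   touched by these macros.  */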

#if I386_USE_SYSENTER
# ifdef OPTIMIZE_FOR_GCC_5
#  ifdef PIC
#   define INTERNAL_SYSCALL_MAIN_INLINE(name, nr, args...) \
    LOADREGS_##nr(args)							\
    asm volatile (							\
    "call *%%gs:%P2"							\
    : "=a" (resultvar)							\
    : "a" (__NR_##name), "i" (offsetof (tcbhead_t, sysinfo))		\
      ASMARGS_##nr(args) : "memory", "cc")
#   define INTERNAL_SYSCALL_MAIN_NCS(name, nr, args...) \
    LOADREGS_##nr(args)							\
    asm volatile (							\
    "call *%%gs:%P2"							\
    : "=a" (resultvar)							\
    : "a" (name), "i" (offsetof (tcbhead_t, sysinfo))			\
      ASMARGS_##nr(args) : "memory", "cc")
#  else
#   define INTERNAL_SYSCALL_MAIN_INLINE(name, nr, args...) \
    LOADREGS_##nr(args)							\
    asm volatile (							\
    "call *_dl_sysinfo"							\
    : "=a" (resultvar)							\
    : "a" (__NR_##name) ASMARGS_##nr(args) : "memory", "cc")
#   define INTERNAL_SYSCALL_MAIN_NCS(name, nr, args...) \
    LOADREGS_##nr(args)							\
    asm volatile (							\
    "call *_dl_sysinfo"							\
    : "=a" (resultvar)							\
    : "a" (name) ASMARGS_##nr(args) : "memory", "cc")
#  endif
# else /* GCC 5  */
#  ifdef PIC
#   define INTERNAL_SYSCALL_MAIN_INLINE(name, nr, args...) \
    EXTRAVAR_##nr							      \
    asm volatile (							      \
    LOADARGS_##nr							      \
    "movl %1, %%eax\n\t"						      \
    "call *%%gs:%P2\n\t"						      \
    RESTOREARGS_##nr							      \
    : "=a" (resultvar)							      \
    : "i" (__NR_##name), "i" (offsetof (tcbhead_t, sysinfo))		      \
      ASMFMT_##nr(args) : "memory", "cc")
#   define INTERNAL_SYSCALL_MAIN_NCS(name, nr, args...) \
    EXTRAVAR_##nr							      \
    asm volatile (							      \
    LOADARGS_##nr							      \
    "call *%%gs:%P2\n\t"						      \
    RESTOREARGS_##nr							      \
    : "=a" (resultvar)							      \
    : "0" (name), "i" (offsetof (tcbhead_t, sysinfo))			      \
      ASMFMT_##nr(args) : "memory", "cc")
#  else
#   define INTERNAL_SYSCALL_MAIN_INLINE(name, nr, args...) \
    EXTRAVAR_##nr							      \
    asm volatile (							      \
    LOADARGS_##nr							      \
    "movl %1, %%eax\n\t"						      \
    "call *_dl_sysinfo\n\t"						      \
    RESTOREARGS_##nr							      \
    : "=a" (resultvar)							      \
    : "i" (__NR_##name) ASMFMT_##nr(args) : "memory", "cc")
#   define INTERNAL_SYSCALL_MAIN_NCS(name, nr, args...) \
    EXTRAVAR_##nr							      \
    asm volatile (							      \
    LOADARGS_##nr							      \
    "call *_dl_sysinfo\n\t"						      \
    RESTOREARGS_##nr							      \
    : "=a" (resultvar)							      \
    : "0" (name) ASMFMT_##nr(args) : "memory", "cc")
#  endif
# endif /* GCC 5  */
#else
# ifdef OPTIMIZE_FOR_GCC_5
#  define INTERNAL_SYSCALL_MAIN_INLINE(name, nr, args...) \
    LOADREGS_##nr(args)							\
    asm volatile (							\
    "int $0x80"								\
    : "=a" (resultvar)							\
    : "a" (__NR_##name) ASMARGS_##nr(args) : "memory", "cc")
#  define INTERNAL_SYSCALL_MAIN_NCS(name, nr, args...) \
    LOADREGS_##nr(args)							\
    asm volatile (							\
    "int $0x80"								\
    : "=a" (resultvar)							\
    : "a" (name) ASMARGS_##nr(args) : "memory", "cc")
# else /* GCC 5  */
#  define INTERNAL_SYSCALL_MAIN_INLINE(name, nr, args...) \
    EXTRAVAR_##nr							      \
    asm volatile (							      \
    LOADARGS_##nr							      \
    "movl %1, %%eax\n\t"						      \
    "int $0x80\n\t"							      \
    RESTOREARGS_##nr							      \
    : "=a" (resultvar)							      \
    : "i" (__NR_##name) ASMFMT_##nr(args) : "memory", "cc")
#  define INTERNAL_SYSCALL_MAIN_NCS(name, nr, args...) \
    EXTRAVAR_##nr							      \
    asm volatile (							      \
    LOADARGS_##nr							      \
    "int $0x80\n\t"							      \
    RESTOREARGS_##nr							      \
    : "=a" (resultvar)							      \
    : "0" (name) ASMFMT_##nr(args) : "memory", "cc")
# endif /* GCC 5  */
#endif

#define LOADARGS_0
#ifdef __PIC__
# if I386_USE_SYSENTER && defined PIC
#  define LOADARGS_1 \
    "bpushl .L__X'%k3, %k3\n\t"
#  define LOADARGS_5 \
    "movl %%ebx, %4\n\t"						      \
    "movl %3, %%ebx\n\t"
# else
#  define LOADARGS_1 \
    "bpushl .L__X'%k2, %k2\n\t"
#  define LOADARGS_5 \
    "movl %%ebx, %3\n\t"						      \
    "movl %2, %%ebx\n\t"
# endif
# define LOADARGS_2	LOADARGS_1
# define LOADARGS_3 \
    "xchgl %%ebx, %%edi\n\t"
# define LOADARGS_4	LOADARGS_3
#else
# define LOADARGS_1
# define LOADARGS_2
# define LOADARGS_3
# define LOADARGS_4
# define LOADARGS_5
#endif

#define RESTOREARGS_0
#ifdef __PIC__
# if I386_USE_SYSENTER && defined PIC
#  define RESTOREARGS_1 \
    "bpopl .L__X'%k3, %k3\n\t"
#  define RESTOREARGS_5 \
    "movl %4, %%ebx"
# else
#  define RESTOREARGS_1 \
    "bpopl .L__X'%k2, %k2\n\t"
#  define RESTOREARGS_5 \
    "movl %3, %%ebx"
# endif
# define RESTOREARGS_2	RESTOREARGS_1
# define RESTOREARGS_3 \
    "xchgl %%edi, %%ebx\n\t"
# define RESTOREARGS_4	RESTOREARGS_3
#else
# define RESTOREARGS_1
# define RESTOREARGS_2
# define RESTOREARGS_3
# define RESTOREARGS_4
# define RESTOREARGS_5
#endif

#ifdef OPTIMIZE_FOR_GCC_5
# define LOADREGS_0()
# define ASMARGS_0()
# define LOADREGS_1(arg1) \
	LOADREGS_0 ()
# define ASMARGS_1(arg1) \
	ASMARGS_0 (), "b" ((unsigned int) (arg1))
# define LOADREGS_2(arg1, arg2) \
	LOADREGS_1 (arg1)
# define ASMARGS_2(arg1, arg2) \
	ASMARGS_1 (arg1), "c" ((unsigned int) (arg2))
# define LOADREGS_3(arg1, arg2, arg3) \
	LOADREGS_2 (arg1, arg2)
# define ASMARGS_3(arg1, arg2, arg3) \
	ASMARGS_2 (arg1, arg2), "d" ((unsigned int) (arg3))
# define LOADREGS_4(arg1, arg2, arg3, arg4) \
	LOADREGS_3 (arg1, arg2, arg3)
# define ASMARGS_4(arg1, arg2, arg3, arg4) \
	ASMARGS_3 (arg1, arg2, arg3), "S" ((unsigned int) (arg4))
# define LOADREGS_5(arg1, arg2, arg3, arg4, arg5) \
	LOADREGS_4 (arg1, arg2, arg3, arg4)
# define ASMARGS_5(arg1, arg2, arg3, arg4, arg5) \
	ASMARGS_4 (arg1, arg2, arg3, arg4), "D" ((unsigned int) (arg5))
# define LOADREGS_6(arg1, arg2, arg3, arg4, arg5, arg6) \
	register unsigned int _a6 asm ("ebp") = (unsigned int) (arg6); \
	LOADREGS_5 (arg1, arg2, arg3, arg4, arg5)
# define ASMARGS_6(arg1, arg2, arg3, arg4, arg5, arg6) \
	ASMARGS_5 (arg1, arg2, arg3, arg4, arg5), "r" (_a6)
#endif /* GCC 5  */
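
/* Sketch of the OPTIMIZE_FOR_GCC_5 path (hand-expanded, illustration
   only; the "int $0x80" may instead be the sysenter call shown above):
   INTERNAL_SYSCALL (name, 3, a, b, c) turns into roughly

     asm volatile ("int $0x80"
		   : "=a" (resultvar)
		   : "a" (__NR_name), "b" (a), "c" (b), "d" (c)
		   : "memory", "cc");

   The compiler itself allocates (and spills, if necessary) %ebx, which
   is why this path requires GCC 5 or later when building with PIC.  */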

#define ASMFMT_0()
#ifdef __PIC__
# define ASMFMT_1(arg1) \
	, "cd" (arg1)
# define ASMFMT_2(arg1, arg2) \
	, "d" (arg1), "c" (arg2)
# define ASMFMT_3(arg1, arg2, arg3) \
	, "D" (arg1), "c" (arg2), "d" (arg3)
# define ASMFMT_4(arg1, arg2, arg3, arg4) \
	, "D" (arg1), "c" (arg2), "d" (arg3), "S" (arg4)
# define ASMFMT_5(arg1, arg2, arg3, arg4, arg5) \
	, "0" (arg1), "m" (_xv), "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
#else
# define ASMFMT_1(arg1) \
	, "b" (arg1)
# define ASMFMT_2(arg1, arg2) \
	, "b" (arg1), "c" (arg2)
# define ASMFMT_3(arg1, arg2, arg3) \
	, "b" (arg1), "c" (arg2), "d" (arg3)
# define ASMFMT_4(arg1, arg2, arg3, arg4) \
	, "b" (arg1), "c" (arg2), "d" (arg3), "S" (arg4)
# define ASMFMT_5(arg1, arg2, arg3, arg4, arg5) \
	, "b" (arg1), "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
#endif

#define EXTRAVAR_0
#define EXTRAVAR_1
#define EXTRAVAR_2
#define EXTRAVAR_3
#define EXTRAVAR_4
#ifdef __PIC__
# define EXTRAVAR_5 int _xv;
#else
# define EXTRAVAR_5
#endif

#endif	/* __ASSEMBLER__ */


/* Pointer mangling support.  */
#if IS_IN (rtld)
/* We cannot use the thread descriptor because in ld.so setjmp is used
   before the descriptor is initialized.  Using a global variable is
   too complicated here since we have no PC-relative addressing mode.  */
#else
# ifdef __ASSEMBLER__
#  define PTR_MANGLE(reg)	xorl %gs:POINTER_GUARD, reg;		      \
				roll $9, reg
#  define PTR_DEMANGLE(reg)	rorl $9, reg;				      \
				xorl %gs:POINTER_GUARD, reg
# else
#  define PTR_MANGLE(var)	asm ("xorl %%gs:%c2, %0\n"		      \
				     "roll $9, %0"			      \
				     : "=r" (var)			      \
				     : "0" (var),			      \
				       "i" (offsetof (tcbhead_t,	      \
						      pointer_guard)))
#  define PTR_DEMANGLE(var)	asm ("rorl $9, %0\n"			      \
				     "xorl %%gs:%c2, %0"		      \
				     : "=r" (var)			      \
				     : "0" (var),			      \
				       "i" (offsetof (tcbhead_t,	      \
						      pointer_guard)))
# endif
#endif
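
/* Note that PTR_DEMANGLE is the exact inverse of PTR_MANGLE: mangling
   XORs the pointer with the per-thread pointer_guard value in the TCB
   (reached via %gs) and rotates left by 9 bits, so demangling rotates
   right by 9 bits and XORs with the same guard value, e.g. for a jump
   buffer saved by setjmp and consumed by longjmp.  */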

/* Each shadow stack slot takes 4 bytes.  Assuming that each stack
   frame takes 128 bytes, this is used to compute shadow stack size
   from stack size.  */
#define STACK_SIZE_TO_SHADOW_STACK_SIZE_SHIFT 5
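
/* For example, under these assumptions an 8 MiB thread stack maps to
   an 8 MiB >> 5 = 256 KiB shadow stack.  */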

#endif /* linux/i386/sysdep.h */