/* Copyright (C) 1995-2021 Free Software Foundation, Inc.

   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

19 #ifndef dl_machine_h
20 #define dl_machine_h
21 
22 #define ELF_MACHINE_NAME "aarch64"
23 
24 #include <sysdep.h>
25 #include <tls.h>
26 #include <dl-tlsdesc.h>
27 #include <dl-static-tls.h>
28 #include <dl-irel.h>
29 #include <dl-machine-rel.h>
30 #include <cpu-features.c>
31 
32 /* Translate a processor specific dynamic tag to the index in l_info array.  */
33 #define DT_AARCH64(x) (DT_AARCH64_##x - DT_LOPROC + DT_NUM)
34 
/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int __attribute__ ((unused))
elf_machine_matches_host (const ElfW(Ehdr) *ehdr)
{
  /* Only the machine field needs checking here; class and endianness
     are validated generically by the loader.  */
  return ehdr->e_machine == EM_AARCH64;
}
41 
42 /* Return the run-time load address of the shared object.  */
43 
44 static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_load_address(void)45 elf_machine_load_address (void)
46 {
47   extern const ElfW(Ehdr) __ehdr_start attribute_hidden;
48   return (ElfW(Addr)) &__ehdr_start;
49 }
50 
51 /* Return the link-time address of _DYNAMIC.  */
52 
53 static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_dynamic(void)54 elf_machine_dynamic (void)
55 {
56   extern ElfW(Dyn) _DYNAMIC[] attribute_hidden;
57   return (ElfW(Addr)) _DYNAMIC - elf_machine_load_address ();
58 }
59 
60 /* Set up the loaded object described by L so its unrelocated PLT
61    entries will jump to the on-demand fixup code in dl-runtime.c.  */
62 
63 static inline int __attribute__ ((unused))
elf_machine_runtime_setup(struct link_map * l,struct r_scope_elem * scope[],int lazy,int profile)64 elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
65 			   int lazy, int profile)
66 {
67   if (l->l_info[DT_JMPREL] && lazy)
68     {
69       ElfW(Addr) *got;
70       extern void _dl_runtime_resolve (ElfW(Word));
71       extern void _dl_runtime_profile (ElfW(Word));
72 
73       got = (ElfW(Addr) *) D_PTR (l, l_info[DT_PLTGOT]);
74       if (got[1])
75 	{
76 	  l->l_mach.plt = got[1] + l->l_addr;
77 	}
78       got[1] = (ElfW(Addr)) l;
79 
80       /* The got[2] entry contains the address of a function which gets
81 	 called to get the address of a so far unresolved function and
82 	 jump to it.  The profiling extension of the dynamic linker allows
83 	 to intercept the calls to collect information.  In this case we
84 	 don't store the address in the GOT so that all future calls also
85 	 end in this function.  */
86       if ( profile)
87 	{
88 	   got[2] = (ElfW(Addr)) &_dl_runtime_profile;
89 
90 	  if (GLRO(dl_profile) != NULL
91 	      && _dl_name_match_p (GLRO(dl_profile), l))
92 	    /* Say that we really want profiling and the timers are
93 	       started.  */
94 	    GL(dl_profile_map) = l;
95 	}
96       else
97 	{
98 	  /* This function will get called to fix up the GOT entry
99 	     indicated by the offset on the stack, and then jump to
100 	     the resolved address.  */
101 	  got[2] = (ElfW(Addr)) &_dl_runtime_resolve;
102 	}
103     }
104 
105   return lazy;
106 }
107 
/* Initial entry point for the dynamic linker.  The C function
   _dl_start is the real entry point, its return value is the user
   program's entry point.  Pick register width, pointer-size log2 and
   stack-pointer spelling for LP64 vs ILP32.  */
#ifdef __LP64__
# define RTLD_START RTLD_START_1 ("x", "3", "sp")
#else
# define RTLD_START RTLD_START_1 ("w", "2", "wsp")
#endif
116 
117 
/* Startup code.  PTR is the register-width prefix ("x" or "w"),
   PTR_SIZE_LOG the log2 of the pointer size, PTR_SP the stack
   pointer name.  _dl_start returns the user entry point in x0;
   _dl_start_user then optionally strips skipped arguments, calls
   _dl_init and finally jumps to the user entry with _dl_fini in x0
   as the finalizer for the startup code to register.  */
#define RTLD_START_1(PTR, PTR_SIZE_LOG, PTR_SP) asm ("\
.text									\n\
.globl _start								\n\
.type _start, %function							\n\
.globl _dl_start_user							\n\
.type _dl_start_user, %function						\n\
_start:									\n\
	// bti c							\n\
	hint	34							\n\
	mov	" PTR "0, " PTR_SP "					\n\
	bl	_dl_start						\n\
	// returns user entry point in x0				\n\
	mov	x21, x0							\n\
_dl_start_user:								\n\
	// get the original arg count					\n\
	ldr	" PTR "1, [sp]						\n\
	// get the argv address						\n\
	add	" PTR "2, " PTR_SP ", #(1<<"  PTR_SIZE_LOG ")		\n\
	// get _dl_skip_args to see if we were				\n\
	// invoked as an executable					\n\
	adrp	x4, _dl_skip_args					\n\
	ldr	w4, [x4, #:lo12:_dl_skip_args]				\n\
	// do we need to adjust argc/argv				\n\
	cmp	w4, 0							\n\
	beq	.L_done_stack_adjust					\n\
	// subtract _dl_skip_args from original arg count		\n\
	sub	" PTR "1, " PTR "1, " PTR "4				\n\
	// store adjusted argc back to stack				\n\
	str	" PTR "1, [sp]						\n\
	// find the first unskipped argument				\n\
	mov	" PTR "3, " PTR "2					\n\
	add	" PTR "4, " PTR "2, " PTR "4, lsl #" PTR_SIZE_LOG "	\n\
	// shuffle argv down						\n\
1:	ldr	" PTR "5, [x4], #(1<<"  PTR_SIZE_LOG ")			\n\
	str	" PTR "5, [x3], #(1<<"  PTR_SIZE_LOG ")			\n\
	cmp	" PTR "5, #0						\n\
	bne	1b							\n\
	// shuffle envp down						\n\
1:	ldr	" PTR "5, [x4], #(1<<"  PTR_SIZE_LOG ")			\n\
	str	" PTR "5, [x3], #(1<<"  PTR_SIZE_LOG ")			\n\
	cmp	" PTR "5, #0						\n\
	bne	1b							\n\
	// shuffle auxv down						\n\
1:	ldp	" PTR "0, " PTR "5, [x4, #(2<<"  PTR_SIZE_LOG ")]!	\n\
	stp	" PTR "0, " PTR "5, [x3], #(2<<"  PTR_SIZE_LOG ")	\n\
	cmp	" PTR "0, #0						\n\
	bne	1b							\n\
	// Update _dl_argv						\n\
	adrp	x3, __GI__dl_argv					\n\
	str	" PTR "2, [x3, #:lo12:__GI__dl_argv]			\n\
.L_done_stack_adjust:							\n\
	// compute envp							\n\
	add	" PTR "3, " PTR "2, " PTR "1, lsl #" PTR_SIZE_LOG "	\n\
	add	" PTR "3, " PTR "3, #(1<<"  PTR_SIZE_LOG ")		\n\
	adrp	x16, _rtld_local					\n\
	add	" PTR "16, " PTR "16, #:lo12:_rtld_local		\n\
	ldr	" PTR "0, [x16]						\n\
	bl	_dl_init						\n\
	// load the finalizer function					\n\
	adrp	x0, _dl_fini						\n\
	add	" PTR "0, " PTR "0, #:lo12:_dl_fini			\n\
	// jump to the user_s entry point				\n\
	mov	x16, x21						\n\
	br	x16							\n\
");
183 
/* Classify relocation TYPE: PLT-class relocations (including the TLS
   ones, which must not be resolved lazily), COPY relocations, and
   relocations that may not bind to protected data in an executable.  */
#define elf_machine_type_class(type)					\
  ((((type) == AARCH64_R(JUMP_SLOT)					\
     || (type) == AARCH64_R(TLS_DTPMOD)					\
     || (type) == AARCH64_R(TLS_DTPREL)					\
     || (type) == AARCH64_R(TLS_TPREL)					\
     || (type) == AARCH64_R(TLSDESC)) * ELF_RTYPE_CLASS_PLT)		\
   | (((type) == AARCH64_R(COPY)) * ELF_RTYPE_CLASS_COPY)		\
   | (((type) == AARCH64_R(GLOB_DAT)) * ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA))
192 
193 #define ELF_MACHINE_JMP_SLOT	AARCH64_R(JUMP_SLOT)
194 
195 #define DL_PLATFORM_INIT dl_platform_init ()
196 
197 static inline void __attribute__ ((unused))
dl_platform_init(void)198 dl_platform_init (void)
199 {
200   if (GLRO(dl_platform) != NULL && *GLRO(dl_platform) == '\0')
201     /* Avoid an empty string which would disturb us.  */
202     GLRO(dl_platform) = NULL;
203 
204 #ifdef SHARED
205   /* init_cpu_features has been called early from __libc_start_main in
206      static executable.  */
207   init_cpu_features (&GLRO(dl_aarch64_cpu_features));
208 #endif
209 }
210 
211 
212 static inline ElfW(Addr)
elf_machine_fixup_plt(struct link_map * map,lookup_t t,const ElfW (Sym)* refsym,const ElfW (Sym)* sym,const ElfW (Rela)* reloc,ElfW (Addr)* reloc_addr,ElfW (Addr)value)213 elf_machine_fixup_plt (struct link_map *map, lookup_t t,
214 		       const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
215 		       const ElfW(Rela) *reloc,
216 		       ElfW(Addr) *reloc_addr,
217 		       ElfW(Addr) value)
218 {
219   return *reloc_addr = value;
220 }
221 
222 /* Return the final value of a plt relocation.  */
223 static inline ElfW(Addr)
elf_machine_plt_value(struct link_map * map,const ElfW (Rela)* reloc,ElfW (Addr)value)224 elf_machine_plt_value (struct link_map *map,
225 		       const ElfW(Rela) *reloc,
226 		       ElfW(Addr) value)
227 {
228   return value;
229 }
230 
231 #endif
232 
233 /* Names of the architecture-specific auditing callback functions.  */
234 #define ARCH_LA_PLTENTER aarch64_gnu_pltenter
235 #define ARCH_LA_PLTEXIT  aarch64_gnu_pltexit
236 
237 #ifdef RESOLVE_MAP
238 
239 static inline void
240 __attribute__ ((always_inline))
elf_machine_rela(struct link_map * map,struct r_scope_elem * scope[],const ElfW (Rela)* reloc,const ElfW (Sym)* sym,const struct r_found_version * version,void * const reloc_addr_arg,int skip_ifunc)241 elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
242 		  const ElfW(Rela) *reloc, const ElfW(Sym) *sym,
243 		  const struct r_found_version *version,
244 		  void *const reloc_addr_arg, int skip_ifunc)
245 {
246   ElfW(Addr) *const reloc_addr = reloc_addr_arg;
247   const unsigned int r_type = ELFW (R_TYPE) (reloc->r_info);
248 
249   if (__builtin_expect (r_type == AARCH64_R(RELATIVE), 0))
250       *reloc_addr = map->l_addr + reloc->r_addend;
251   else if (__builtin_expect (r_type == R_AARCH64_NONE, 0))
252       return;
253   else
254     {
255       const ElfW(Sym) *const refsym = sym;
256       struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version,
257 					      r_type);
258       ElfW(Addr) value = SYMBOL_ADDRESS (sym_map, sym, true);
259 
260       if (sym != NULL
261 	  && __glibc_unlikely (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC)
262 	  && __glibc_likely (sym->st_shndx != SHN_UNDEF)
263 	  && __glibc_likely (!skip_ifunc))
264 	value = elf_ifunc_invoke (value);
265 
266       switch (r_type)
267 	{
268 	case AARCH64_R(COPY):
269 	  if (sym == NULL)
270 	      break;
271 
272 	  if (sym->st_size > refsym->st_size
273 	      || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
274 	    {
275 	      const char *strtab;
276 
277 	      strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
278 	      _dl_error_printf ("\
279 %s: Symbol `%s' has different size in shared object, consider re-linking\n",
280 				RTLD_PROGNAME, strtab + refsym->st_name);
281 	    }
282 	  memcpy (reloc_addr_arg, (void *) value,
283 		  sym->st_size < refsym->st_size
284 		  ? sym->st_size : refsym->st_size);
285 	  break;
286 
287 	case AARCH64_R(RELATIVE):
288 	case AARCH64_R(GLOB_DAT):
289 	case AARCH64_R(JUMP_SLOT):
290 	case AARCH64_R(ABS32):
291 #ifdef __LP64__
292 	case AARCH64_R(ABS64):
293 #endif
294 	  *reloc_addr = value + reloc->r_addend;
295 	  break;
296 
297 	case AARCH64_R(TLSDESC):
298 	  {
299 	    struct tlsdesc volatile *td =
300 	      (struct tlsdesc volatile *)reloc_addr;
301 #ifndef RTLD_BOOTSTRAP
302 	    if (! sym)
303 	      {
304 		td->arg = (void*)reloc->r_addend;
305 		td->entry = _dl_tlsdesc_undefweak;
306 	      }
307 	    else
308 #endif
309 	      {
310 #ifndef RTLD_BOOTSTRAP
311 # ifndef SHARED
312 		CHECK_STATIC_TLS (map, sym_map);
313 # else
314 		if (!TRY_STATIC_TLS (map, sym_map))
315 		  {
316 		    td->arg = _dl_make_tlsdesc_dynamic
317 		      (sym_map, sym->st_value + reloc->r_addend);
318 		    td->entry = _dl_tlsdesc_dynamic;
319 		  }
320 		else
321 # endif
322 #endif
323 		  {
324 		    td->arg = (void*)(sym->st_value + sym_map->l_tls_offset
325 				      + reloc->r_addend);
326 		    td->entry = _dl_tlsdesc_return;
327 		  }
328 	      }
329 	    break;
330 	  }
331 
332 	case AARCH64_R(TLS_DTPMOD):
333 #ifdef RTLD_BOOTSTRAP
334 	  *reloc_addr = 1;
335 #else
336 	  if (sym_map != NULL)
337 	    {
338 	      *reloc_addr = sym_map->l_tls_modid;
339 	    }
340 #endif
341 	  break;
342 
343 	case AARCH64_R(TLS_DTPREL):
344 	  if (sym)
345 	    *reloc_addr = sym->st_value + reloc->r_addend;
346 	  break;
347 
348 	case AARCH64_R(TLS_TPREL):
349 	  if (sym)
350 	    {
351 	      CHECK_STATIC_TLS (map, sym_map);
352 	      *reloc_addr =
353 		sym->st_value + reloc->r_addend + sym_map->l_tls_offset;
354 	    }
355 	  break;
356 
357 	case AARCH64_R(IRELATIVE):
358 	  value = map->l_addr + reloc->r_addend;
359 	  if (__glibc_likely (!skip_ifunc))
360 	    value = elf_ifunc_invoke (value);
361 	  *reloc_addr = value;
362 	  break;
363 
364 	default:
365 	  _dl_reloc_bad_type (map, r_type, 0);
366 	  break;
367 	}
368     }
369 }
370 
/* Apply a RELATIVE relocation: the target word becomes the load
   address L_ADDR plus the relocation addend.  */
static inline void
__attribute__ ((always_inline))
elf_machine_rela_relative (ElfW(Addr) l_addr,
			   const ElfW(Rela) *reloc,
			   void *const reloc_addr_arg)
{
  ElfW(Addr) *const reloc_addr = reloc_addr_arg;
  *reloc_addr = l_addr + reloc->r_addend;
}
380 
381 static inline void
382 __attribute__ ((always_inline))
elf_machine_lazy_rel(struct link_map * map,struct r_scope_elem * scope[],ElfW (Addr)l_addr,const ElfW (Rela)* reloc,int skip_ifunc)383 elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
384 		      ElfW(Addr) l_addr,
385 		      const ElfW(Rela) *reloc,
386 		      int skip_ifunc)
387 {
388   ElfW(Addr) *const reloc_addr = (void *) (l_addr + reloc->r_offset);
389   const unsigned int r_type = ELFW (R_TYPE) (reloc->r_info);
390   /* Check for unexpected PLT reloc type.  */
391   if (__builtin_expect (r_type == AARCH64_R(JUMP_SLOT), 1))
392     {
393       if (__glibc_unlikely (map->l_info[DT_AARCH64 (VARIANT_PCS)] != NULL))
394 	{
395 	  /* Check the symbol table for variant PCS symbols.  */
396 	  const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info);
397 	  const ElfW (Sym) *symtab =
398 	    (const void *)D_PTR (map, l_info[DT_SYMTAB]);
399 	  const ElfW (Sym) *sym = &symtab[symndx];
400 	  if (__glibc_unlikely (sym->st_other & STO_AARCH64_VARIANT_PCS))
401 	    {
402 	      /* Avoid lazy resolution of variant PCS symbols.  */
403 	      const struct r_found_version *version = NULL;
404 	      if (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
405 		{
406 		  const ElfW (Half) *vernum =
407 		    (const void *)D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
408 		  version = &map->l_versions[vernum[symndx] & 0x7fff];
409 		}
410 	      elf_machine_rela (map, scope, reloc, sym, version, reloc_addr,
411 				skip_ifunc);
412 	      return;
413 	    }
414 	}
415 
416       if (map->l_mach.plt == 0)
417 	*reloc_addr += l_addr;
418       else
419 	*reloc_addr = map->l_mach.plt;
420     }
421   else if (__builtin_expect (r_type == AARCH64_R(TLSDESC), 1))
422     {
423       const Elf_Symndx symndx = ELFW (R_SYM) (reloc->r_info);
424       const ElfW (Sym) *symtab = (const void *)D_PTR (map, l_info[DT_SYMTAB]);
425       const ElfW (Sym) *sym = &symtab[symndx];
426       const struct r_found_version *version = NULL;
427 
428       if (map->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
429 	{
430 	  const ElfW (Half) *vernum =
431 	    (const void *)D_PTR (map, l_info[VERSYMIDX (DT_VERSYM)]);
432 	  version = &map->l_versions[vernum[symndx] & 0x7fff];
433 	}
434 
435       /* Always initialize TLS descriptors completely, because lazy
436 	 initialization requires synchronization at every TLS access.  */
437       elf_machine_rela (map, scope, reloc, sym, version, reloc_addr,
438 			skip_ifunc);
439     }
440   else if (__glibc_unlikely (r_type == AARCH64_R(IRELATIVE)))
441     {
442       ElfW(Addr) value = map->l_addr + reloc->r_addend;
443       if (__glibc_likely (!skip_ifunc))
444 	value = elf_ifunc_invoke (value);
445       *reloc_addr = value;
446     }
447   else
448     _dl_reloc_bad_type (map, r_type, 1);
449 }
450 
451 #endif
452