/* Machine-dependent ELF dynamic relocation inline functions.  Sparc64 version.
   Copyright (C) 1997-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "sparc64"

#include <string.h>
#include <sys/param.h>
#include <ldsodefs.h>
#include <sysdep.h>
#include <dl-plt.h>
#include <dl-static-tls.h>
#include <dl-machine-rel.h>

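/* The SPARC64 ABI packs an extra, type-dependent addend (used by
   R_SPARC_OLO10) into the upper bits of the 32-bit relocation type word;
   the helpers below separate the 8-bit type id from that data field.  */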
#define ELF64_R_TYPE_ID(info)   ((info) & 0xff)
#define ELF64_R_TYPE_DATA(info) ((info) >> 8)

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int
elf_machine_matches_host (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_machine == EM_SPARCV9;
}

/* We have to do this because elf_machine_{dynamic,load_address} can be
   invoked from functions that have no GOT references, and thus the compiler
   has no obligation to load the PIC register.  */
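/* The %hi(_GLOBAL_OFFSET_TABLE_-4) / %lo(_GLOBAL_OFFSET_TABLE_+4) pair
   produces pc-relative GOT relocations whose -4/+4 addends compensate for
   the two instructions' different addresses, so TMP ends up holding
   _GLOBAL_OFFSET_TABLE_ minus the value read by "rd %pc".  */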
#define LOAD_PIC_REG(PIC_REG) \
do {    Elf64_Addr tmp;                                         \
        __asm("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t"     \
              "rd %%pc, %0\n\t"                                 \
              "add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n\t"   \
              "add %0, %1, %0"                                  \
              : "=r" (PIC_REG), "=r" (tmp));                    \
} while (0)

/* Return the link-time address of _DYNAMIC.  Conveniently, this is the
   first element of the GOT.  This must be inlined in a function which
   uses global data.  */
static inline Elf64_Addr
elf_machine_dynamic (void)
{
  register Elf64_Addr *elf_pic_register __asm__("%l7");

  LOAD_PIC_REG (elf_pic_register);

  return *elf_pic_register;
}

/* Return the run-time load address of the shared object.  */
static inline Elf64_Addr
elf_machine_load_address (void)
{
  register Elf32_Addr *pc __asm ("%o7");
  register Elf64_Addr *got __asm ("%l7");

  __asm ("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t"
         "call 1f\n\t"
         " add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n\t"
         "call _DYNAMIC\n\t"
         "call _GLOBAL_OFFSET_TABLE_\n"
         "1:\tadd %1, %0, %1\n\t" : "=r" (pc), "=r" (got));

  /* got is now l_addr + _GLOBAL_OFFSET_TABLE_
     *got is _DYNAMIC
     pc[2]*4 is l_addr + _DYNAMIC - (long)pc - 8
     pc[3]*4 is l_addr + _GLOBAL_OFFSET_TABLE_ - (long)pc - 12  */
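  /* Hence (Elf64_Addr) got - *got equals l_addr + _GLOBAL_OFFSET_TABLE_
     - _DYNAMIC (link-time values), while (pc[2] - pc[3]) * 4 - 4 equals
     _DYNAMIC - _GLOBAL_OFFSET_TABLE_ (the call opcode bits cancel in the
     subtraction), so the expression below reduces to just l_addr.  */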
  return (Elf64_Addr) got - *got + (Elf32_Sword) ((pc[2] - pc[3]) * 4) - 4;
}

static inline Elf64_Addr __attribute__ ((always_inline))
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
                       const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
                       const Elf64_Rela *reloc,
                       Elf64_Addr *reloc_addr, Elf64_Addr value)
{
  sparc64_fixup_plt (map, reloc, reloc_addr, value + reloc->r_addend,
                     reloc->r_addend, 1);
  return value;
}

/* Return the final value of a plt relocation.  */
static inline Elf64_Addr
elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
                       Elf64_Addr value)
{
  /* Don't add addend here, but in elf_machine_fixup_plt instead.
     value + reloc->r_addend is the value which should actually be
     stored into .plt data slot.  */
  return value;
}

/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so
   PLT entries should not be allowed to define the value.
   ELF_RTYPE_CLASS_COPY iff TYPE should not be allowed to resolve to one
   of the main executable's symbols, as for a COPY reloc.  */
#define elf_machine_type_class(type) \
  ((((type) == R_SPARC_JMP_SLOT                                             \
     || ((type) >= R_SPARC_TLS_GD_HI22 && (type) <= R_SPARC_TLS_TPOFF64))   \
    * ELF_RTYPE_CLASS_PLT)                                                  \
   | (((type) == R_SPARC_COPY) * ELF_RTYPE_CLASS_COPY))

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT R_SPARC_JMP_SLOT

/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int
elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
                           int lazy, int profile)
{
  if (l->l_info[DT_JMPREL] && lazy)
    {
      extern void _dl_runtime_resolve_0 (void);
      extern void _dl_runtime_resolve_1 (void);
      extern void _dl_runtime_profile_0 (void);
      extern void _dl_runtime_profile_1 (void);
      Elf64_Addr res0_addr, res1_addr;
      unsigned int *plt = (void *) D_PTR (l, l_info[DT_PLTGOT]);

      if (__builtin_expect(profile, 0))
        {
          res0_addr = (Elf64_Addr) &_dl_runtime_profile_0;
          res1_addr = (Elf64_Addr) &_dl_runtime_profile_1;

          if (GLRO(dl_profile) != NULL
              && _dl_name_match_p (GLRO(dl_profile), l))
            GL(dl_profile_map) = l;
        }
      else
        {
          res0_addr = (Elf64_Addr) &_dl_runtime_resolve_0;
          res1_addr = (Elf64_Addr) &_dl_runtime_resolve_1;
        }

      /* PLT0 looks like:

         sethi  %uhi(_dl_runtime_{resolve,profile}_0), %g4
         sethi  %hi(_dl_runtime_{resolve,profile}_0), %g5
         or     %g4, %ulo(_dl_runtime_{resolve,profile}_0), %g4
         or     %g5, %lo(_dl_runtime_{resolve,profile}_0), %g5
         sllx   %g4, 32, %g4
         add    %g4, %g5, %g5
         jmpl   %g5, %g4
          nop
       */

      plt[0] = 0x09000000 | (res0_addr >> (64 - 22));
      plt[1] = 0x0b000000 | ((res0_addr >> 10) & 0x003fffff);
      plt[2] = 0x88112000 | ((res0_addr >> 32) & 0x3ff);
      plt[3] = 0x8a116000 | (res0_addr & 0x3ff);
      plt[4] = 0x89293020;
      plt[5] = 0x8a010005;
      plt[6] = 0x89c14000;
      plt[7] = 0x01000000;

      /* PLT1 looks like:

         sethi  %uhi(_dl_runtime_{resolve,profile}_1), %g4
         sethi  %hi(_dl_runtime_{resolve,profile}_1), %g5
         or     %g4, %ulo(_dl_runtime_{resolve,profile}_1), %g4
         or     %g5, %lo(_dl_runtime_{resolve,profile}_1), %g5
         sllx   %g4, 32, %g4
         add    %g4, %g5, %g5
         jmpl   %g5, %g4
          nop
       */

      plt[8] = 0x09000000 | (res1_addr >> (64 - 22));
      plt[9] = 0x0b000000 | ((res1_addr >> 10) & 0x003fffff);
      plt[10] = 0x88112000 | ((res1_addr >> 32) & 0x3ff);
      plt[11] = 0x8a116000 | (res1_addr & 0x3ff);
      plt[12] = 0x89293020;
      plt[13] = 0x8a010005;
      plt[14] = 0x89c14000;
      plt[15] = 0x01000000;

      /* Now put the magic cookie at the beginning of .PLT2
         Entry .PLT3 is unused by this implementation.  */
      *((struct link_map **)(&plt[16])) = l;

      if (__builtin_expect (l->l_info[VALIDX(DT_GNU_PRELINKED)] != NULL, 0)
          || __builtin_expect (l->l_info [VALIDX (DT_GNU_LIBLISTSZ)] != NULL, 0))
        {
          /* Need to reinitialize .plt to undo prelinking.  */
          Elf64_Rela *rela = (Elf64_Rela *) D_PTR (l, l_info[DT_JMPREL]);
          Elf64_Rela *relaend
            = (Elf64_Rela *) ((char *) rela
                              + l->l_info[DT_PLTRELSZ]->d_un.d_val);

          /* prelink must ensure there are no R_SPARC_NONE relocs left
             in .rela.plt.  */
          while (rela < relaend)
            {
              if (__builtin_expect (rela->r_addend, 0) != 0)
                {
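                  /* A non-zero addend marks a far PLT slot (.plt[32768+]);
                     its 64-bit offset word is reset so the entry reaches
                     .PLT0 (and thus _dl_runtime_resolve_0) again, instead
                     of patching instructions.  */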
                  Elf64_Addr slot = ((rela->r_offset + l->l_addr + 0x400
                                      - (Elf64_Addr) plt)
                                     / 0x1400) * 0x1400
                                    + (Elf64_Addr) plt - 0x400;
                  /* ldx [%o7 + X], %g1  */
                  unsigned int first_ldx = *(unsigned int *)(slot + 12);
                  Elf64_Addr ptr = slot + (first_ldx & 0xfff) + 4;

                  *(Elf64_Addr *) (rela->r_offset + l->l_addr)
                    = (Elf64_Addr) plt
                      - (slot + ((rela->r_offset + l->l_addr - ptr) / 8) * 24
                         + 4);
                  ++rela;
                  continue;
                }

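              /* Near PLT entries are reset to their original unresolved
                 form: a sethi that puts the entry's offset from .PLT0 into
                 %g1, followed by "ba,a,pt %xcc, .PLT1", and are then
                 flushed from the instruction cache.  */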
              *(unsigned int *) (rela->r_offset + l->l_addr)
                = 0x03000000 | (rela->r_offset + l->l_addr - (Elf64_Addr) plt);
              *(unsigned int *) (rela->r_offset + l->l_addr + 4)
                = 0x30680000 | ((((Elf64_Addr) plt + 32 - rela->r_offset
                                  - l->l_addr - 4) >> 2) & 0x7ffff);
              __asm __volatile ("flush %0" : : "r" (rela->r_offset
                                                    + l->l_addr));
              __asm __volatile ("flush %0+4" : : "r" (rela->r_offset
                                                      + l->l_addr));
              ++rela;
            }
        }
    }

  return lazy;
}

/* The PLT uses Elf64_Rela relocs.  */
#define elf_machine_relplt elf_machine_rela

/* Undo the sub %sp, 6*8, %sp; add %sp, STACK_BIAS + 22*8, %o0 below
   (but w/o STACK_BIAS) to get at the value we want in __libc_stack_end.  */
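/* The 22*8 bytes are the minimal SPARC V9 stack frame (a 16-slot register
   window save area plus 6 outgoing-argument slots of 8 bytes each), and
   6*8 bytes is the scratch area _start allocates below it, so subtracting
   (22 - 6) * 8 from the cookie recovers the initial (biased) stack
   pointer.  */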
#define DL_STACK_END(cookie) \
  ((void *) (((long) (cookie)) - (22 - 6) * 8))

/* Initial entry point code for the dynamic linker.
   The C function `_dl_start' is the real entry point;
   its return value is the user program's entry point.  */

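/* Load SYMBOL's GOT slot into REG through PIC_REG.  The %gdop_hix22,
   %gdop_lox10 and %gdop operators emit GOT-data relocations
   (R_SPARC_GOTDATA_OP_*), which the linker is permitted to optimize.  */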
#define RTLD_GOT_ADDRESS(pic_reg, reg, symbol) \
  "sethi %gdop_hix22(" #symbol "), " #reg "\n\t" \
  "xor " #reg ", %gdop_lox10(" #symbol "), " #reg "\n\t" \
  "ldx [" #pic_reg " + " #reg "], " #reg ", %gdop(" #symbol ")\n"

#define __S1(x) #x
#define __S(x) __S1(x)

#define RTLD_START __asm__ ( "\n" \
"       .text\n" \
"       .global _start\n" \
"       .type _start, @function\n" \
"       .align 32\n" \
"_start:\n" \
"   /* Make room for functions to drop their arguments on the stack.  */\n" \
"       sub     %sp, 6*8, %sp\n" \
"   /* Pass pointer to argument block to _dl_start.  */\n" \
"       call    _dl_start\n" \
"        add    %sp," __S(STACK_BIAS) "+22*8,%o0\n" \
"       /* FALLTHRU */\n" \
"       .size _start, .-_start\n" \
"\n" \
"       .global _dl_start_user\n" \
"       .type _dl_start_user, @function\n" \
"_dl_start_user:\n" \
"   /* Load the GOT register.  */\n" \
"1:     call    11f\n" \
"        sethi  %hi(_GLOBAL_OFFSET_TABLE_-(1b-.)), %l7\n" \
"11:    or      %l7, %lo(_GLOBAL_OFFSET_TABLE_-(1b-.)), %l7\n" \
"       add     %l7, %o7, %l7\n" \
"   /* Save the user entry point address in %l0.  */\n" \
"       mov     %o0, %l0\n" \
"   /* See if we were run as a command with the executable file name as an\n" \
"      extra leading argument.  If so, we must shift things around since we\n" \
"      must keep the stack doubleword aligned.  */\n" \
RTLD_GOT_ADDRESS(%l7, %g5, _dl_skip_args) \
"       ld      [%g5], %i0\n" \
"       brz,pt  %i0, 2f\n" \
"        ldx    [%sp + " __S(STACK_BIAS) " + 22*8], %i5\n" \
"   /* Find out how far to shift.  */\n" \
"       sub     %i5, %i0, %i5\n" \
"       sllx    %i0, 3, %l6\n" \
RTLD_GOT_ADDRESS(%l7, %l4, _dl_argv) \
"       stx     %i5, [%sp + " __S(STACK_BIAS) " + 22*8]\n" \
"       add     %sp, " __S(STACK_BIAS) " + 23*8, %i1\n" \
"       add     %i1, %l6, %i2\n" \
"       ldx     [%l4], %l5\n" \
"   /* Copy down argv.  */\n" \
"12:    ldx     [%i2], %i3\n" \
"       add     %i2, 8, %i2\n" \
"       stx     %i3, [%i1]\n" \
311 " brnz,pt %i3, 12b\n" \
312 " add %i1, 8, %i1\n" \
313 " sub %l5, %l6, %l5\n" \
314 " /* Copy down envp. */\n" \
315 "13: ldx [%i2], %i3\n" \
316 " add %i2, 8, %i2\n" \
317 " stx %i3, [%i1]\n" \
318 " brnz,pt %i3, 13b\n" \
319 " add %i1, 8, %i1\n" \
320 " /* Copy down auxiliary table. */\n" \
321 "14: ldx [%i2], %i3\n" \
322 " ldx [%i2 + 8], %i4\n" \
323 " add %i2, 16, %i2\n" \
324 " stx %i3, [%i1]\n" \
325 " stx %i4, [%i1 + 8]\n" \
326 " brnz,pt %i3, 14b\n" \
327 " add %i1, 16, %i1\n" \
328 " stx %l5, [%l4]\n" \
329 " /* %o0 = _dl_loaded, %o1 = argc, %o2 = argv, %o3 = envp. */\n" \
330 "2:\t" RTLD_GOT_ADDRESS(%l7, %o0, _rtld_local) \
331 " sllx %i5, 3, %o3\n" \
332 " add %sp, " __S(STACK_BIAS) " + 23*8, %o2\n" \
333 " add %o3, 8, %o3\n" \
334 " mov %i5, %o1\n" \
335 " add %o2, %o3, %o3\n" \
336 " call _dl_init\n" \
337 " ldx [%o0], %o0\n" \
338 " /* Pass our finalizer function to the user in %g1. */\n" \
339 RTLD_GOT_ADDRESS(%l7, %g1, _dl_fini) \
340 " /* Jump to the user's entry point and deallocate the extra stack we got. */\n" \
341 " jmp %l0\n" \
342 " add %sp, 6*8, %sp\n" \
343 " .size _dl_start_user, . - _dl_start_user\n" \
344 " .previous\n");

#endif /* dl_machine_h */

#define ARCH_LA_PLTENTER sparc64_gnu_pltenter
#define ARCH_LA_PLTEXIT sparc64_gnu_pltexit

#ifdef RESOLVE_MAP

/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
   MAP is the object containing the reloc.  */

static inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
                  const Elf64_Rela *reloc, const Elf64_Sym *sym,
                  const struct r_found_version *version,
                  void *const reloc_addr_arg, int skip_ifunc)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP
  const Elf64_Sym *const refsym = sym;
#endif
  Elf64_Addr value;
  const unsigned long int r_type = ELF64_R_TYPE_ID (reloc->r_info);
#if !defined RESOLVE_CONFLICT_FIND_MAP
  struct link_map *sym_map = NULL;
#endif

#if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
  /* This is defined in rtld.c, but nowhere in the static libc.a; make the
     reference weak so static programs can still link.  This declaration
     cannot be done when compiling rtld.c (i.e. #ifdef RTLD_BOOTSTRAP)
     because rtld.c contains the common defn for _dl_rtld_map, which is
     incompatible with a weak decl in the same file.  */
  weak_extern (_dl_rtld_map);
#endif

  if (__glibc_unlikely (r_type == R_SPARC_NONE))
    return;

  if (__glibc_unlikely (r_type == R_SPARC_SIZE64))
    {
      *reloc_addr = sym->st_size + reloc->r_addend;
      return;
    }

#if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
  if (__glibc_unlikely (r_type == R_SPARC_RELATIVE))
    {
# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
      if (map != &_dl_rtld_map) /* Already done in rtld itself.  */
# endif
        *reloc_addr += map->l_addr + reloc->r_addend;
      return;
    }
#endif

#ifndef RESOLVE_CONFLICT_FIND_MAP
  if (__builtin_expect (ELF64_ST_BIND (sym->st_info) == STB_LOCAL, 0)
      && sym->st_shndx != SHN_UNDEF)
    {
      sym_map = map;
      value = map->l_addr;
    }
  else
    {
      sym_map = RESOLVE_MAP (map, scope, &sym, version, r_type);
      value = SYMBOL_ADDRESS (sym_map, sym, true);
    }
#else
  value = 0;
#endif

  value += reloc->r_addend;     /* Assume copy relocs have zero addend.  */

  if (sym != NULL
      && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0)
      && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1)
      && __builtin_expect (!skip_ifunc, 1))
    value = ((Elf64_Addr (*) (int)) value) (GLRO(dl_hwcap));

  switch (r_type)
    {
#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP
    case R_SPARC_COPY:
      if (sym == NULL)
        /* This can happen in trace mode if an object could not be
           found.  */
        break;
      if (sym->st_size > refsym->st_size
          || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
        {
          const char *strtab;

          strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
          _dl_error_printf ("\
%s: Symbol `%s' has different size in shared object, consider re-linking\n",
                            RTLD_PROGNAME, strtab + refsym->st_name);
        }
      memcpy (reloc_addr_arg, (void *) value,
              MIN (sym->st_size, refsym->st_size));
      break;
#endif
    case R_SPARC_64:
    case R_SPARC_GLOB_DAT:
      *reloc_addr = value;
      break;
    case R_SPARC_IRELATIVE:
      if (__glibc_likely (!skip_ifunc))
        value = ((Elf64_Addr (*) (int)) value) (GLRO(dl_hwcap));
      *reloc_addr = value;
      break;
    case R_SPARC_JMP_IREL:
      if (__glibc_likely (!skip_ifunc))
        value = ((Elf64_Addr (*) (int)) value) (GLRO(dl_hwcap));
      /* 'high' is always zero, for large PLT entries the linker
         emits an R_SPARC_IRELATIVE.  */
#ifdef RESOLVE_CONFLICT_FIND_MAP
      sparc64_fixup_plt (NULL, reloc, reloc_addr, value, 0, 0);
#else
      sparc64_fixup_plt (map, reloc, reloc_addr, value, 0, 0);
#endif
      break;
    case R_SPARC_JMP_SLOT:
#ifdef RESOLVE_CONFLICT_FIND_MAP
      /* R_SPARC_JMP_SLOT conflicts against .plt[32768+]
         relocs should be turned into R_SPARC_64 relocs
         in .gnu.conflict section.
         r_addend non-zero does not mean it is a .plt[32768+]
         reloc, instead it is the actual address of the function
         to call.  */
      sparc64_fixup_plt (NULL, reloc, reloc_addr, value, 0, 0);
#else
      sparc64_fixup_plt (map, reloc, reloc_addr, value, reloc->r_addend, 0);
#endif
      break;
#ifndef RESOLVE_CONFLICT_FIND_MAP
    case R_SPARC_TLS_DTPMOD64:
      /* Get the information from the link map returned by the
         resolv function.  */
      if (sym_map != NULL)
        *reloc_addr = sym_map->l_tls_modid;
      break;
    case R_SPARC_TLS_DTPOFF64:
      /* During relocation all TLS symbols are defined and used.
         Therefore the offset is already correct.  */
      *reloc_addr = (sym == NULL ? 0 : sym->st_value) + reloc->r_addend;
      break;
    case R_SPARC_TLS_TPOFF64:
      /* The offset is negative, forward from the thread pointer.  */
      /* We know the offset of object the symbol is contained in.
         It is a negative value which will be added to the
         thread pointer.  */
      if (sym != NULL)
        {
          CHECK_STATIC_TLS (map, sym_map);
          *reloc_addr = sym->st_value - sym_map->l_tls_offset
            + reloc->r_addend;
        }
      break;
# ifndef RTLD_BOOTSTRAP
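    /* Local Executable TLS relocs patch instructions in place:
       R_SPARC_TLS_LE_HIX22 stores the complemented high 22 bits of the
       (negative) TP offset into a sethi, and R_SPARC_TLS_LE_LOX10 stores
       the low 10 bits plus 0x1c00 into the following xor, whose
       sign-extended immediate restores the remaining set bits.  */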
    case R_SPARC_TLS_LE_HIX22:
    case R_SPARC_TLS_LE_LOX10:
      if (sym != NULL)
        {
          CHECK_STATIC_TLS (map, sym_map);
          value = sym->st_value - sym_map->l_tls_offset
            + reloc->r_addend;
          if (r_type == R_SPARC_TLS_LE_HIX22)
            *(unsigned int *)reloc_addr =
              ((*(unsigned int *)reloc_addr & 0xffc00000)
               | (((~value) >> 10) & 0x3fffff));
          else
            *(unsigned int *)reloc_addr =
              ((*(unsigned int *)reloc_addr & 0xffffe000) | (value & 0x3ff)
               | 0x1c00);
        }
      break;
# endif
#endif
#ifndef RTLD_BOOTSTRAP
    case R_SPARC_8:
      *(char *) reloc_addr = value;
      break;
    case R_SPARC_16:
      *(short *) reloc_addr = value;
      break;
    case R_SPARC_32:
      *(unsigned int *) reloc_addr = value;
      break;
    case R_SPARC_DISP8:
      *(char *) reloc_addr = (value - (Elf64_Addr) reloc_addr);
      break;
    case R_SPARC_DISP16:
      *(short *) reloc_addr = (value - (Elf64_Addr) reloc_addr);
      break;
    case R_SPARC_DISP32:
      *(unsigned int *) reloc_addr = (value - (Elf64_Addr) reloc_addr);
      break;
    case R_SPARC_DISP64:
      *reloc_addr = (value - (Elf64_Addr) reloc_addr);
      break;
    case R_SPARC_REGISTER:
      *reloc_addr = value;
      break;
    case R_SPARC_WDISP30:
      *(unsigned int *) reloc_addr =
        ((*(unsigned int *)reloc_addr & 0xc0000000)
         | (((value - (Elf64_Addr) reloc_addr) >> 2) & 0x3fffffff));
      break;

      /* MEDLOW code model relocs */
    case R_SPARC_LO10:
      *(unsigned int *) reloc_addr =
        ((*(unsigned int *)reloc_addr & ~0x3ff)
         | (value & 0x3ff));
      break;
    case R_SPARC_HI22:
      *(unsigned int *) reloc_addr =
        ((*(unsigned int *)reloc_addr & 0xffc00000)
         | ((value >> 10) & 0x3fffff));
      break;
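    /* R_SPARC_OLO10 carries a second addend packed above the 8-bit type id
       in r_info (see ELF64_R_TYPE_DATA above); it is added along with the
       low 10 bits of the value into the 13-bit immediate field.  */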
    case R_SPARC_OLO10:
      *(unsigned int *) reloc_addr =
        ((*(unsigned int *)reloc_addr & ~0x1fff)
         | (((value & 0x3ff) + ELF64_R_TYPE_DATA (reloc->r_info)) & 0x1fff));
      break;

      /* ABS34 code model reloc */
    case R_SPARC_H34:
      *(unsigned int *) reloc_addr =
        ((*(unsigned int *)reloc_addr & 0xffc00000)
         | ((value >> 12) & 0x3fffff));
      break;

      /* MEDMID code model relocs */
    case R_SPARC_H44:
      *(unsigned int *) reloc_addr =
        ((*(unsigned int *)reloc_addr & 0xffc00000)
         | ((value >> 22) & 0x3fffff));
      break;
    case R_SPARC_M44:
      *(unsigned int *) reloc_addr =
        ((*(unsigned int *)reloc_addr & ~0x3ff)
         | ((value >> 12) & 0x3ff));
      break;
    case R_SPARC_L44:
      *(unsigned int *) reloc_addr =
        ((*(unsigned int *)reloc_addr & ~0xfff)
         | (value & 0xfff));
      break;

      /* MEDANY code model relocs */
    case R_SPARC_HH22:
      *(unsigned int *) reloc_addr =
        ((*(unsigned int *)reloc_addr & 0xffc00000)
         | (value >> 42));
      break;
    case R_SPARC_HM10:
      *(unsigned int *) reloc_addr =
        ((*(unsigned int *)reloc_addr & ~0x3ff)
         | ((value >> 32) & 0x3ff));
      break;
    case R_SPARC_LM22:
      *(unsigned int *) reloc_addr =
        ((*(unsigned int *)reloc_addr & 0xffc00000)
         | ((value >> 10) & 0x003fffff));
      break;
    case R_SPARC_UA16:
      ((unsigned char *) reloc_addr_arg) [0] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [1] = value;
      break;
    case R_SPARC_UA32:
      ((unsigned char *) reloc_addr_arg) [0] = value >> 24;
      ((unsigned char *) reloc_addr_arg) [1] = value >> 16;
      ((unsigned char *) reloc_addr_arg) [2] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [3] = value;
      break;
    case R_SPARC_UA64:
      if (! ((long) reloc_addr_arg & 3))
        {
          /* Common in .eh_frame  */
          ((unsigned int *) reloc_addr_arg) [0] = value >> 32;
          ((unsigned int *) reloc_addr_arg) [1] = value;
          break;
        }
      ((unsigned char *) reloc_addr_arg) [0] = value >> 56;
      ((unsigned char *) reloc_addr_arg) [1] = value >> 48;
      ((unsigned char *) reloc_addr_arg) [2] = value >> 40;
      ((unsigned char *) reloc_addr_arg) [3] = value >> 32;
      ((unsigned char *) reloc_addr_arg) [4] = value >> 24;
      ((unsigned char *) reloc_addr_arg) [5] = value >> 16;
      ((unsigned char *) reloc_addr_arg) [6] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [7] = value;
      break;
#endif
#if !defined RTLD_BOOTSTRAP || defined _NDEBUG
    default:
      _dl_reloc_bad_type (map, r_type, 0);
      break;
#endif
    }
}

static inline void
__attribute__ ((always_inline))
elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
                           void *const reloc_addr_arg)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  *reloc_addr = l_addr + reloc->r_addend;
}

static inline void
__attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
                      Elf64_Addr l_addr, const Elf64_Rela *reloc,
                      int skip_ifunc)
{
  Elf64_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
  const unsigned int r_type = ELF64_R_TYPE (reloc->r_info);

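  /* A JMP_SLOT reloc needs no lazy work here: elf_machine_runtime_setup
     already routed the unresolved PLT entries to the lazy resolver via
     .PLT0/.PLT1.  */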
  if (__glibc_likely (r_type == R_SPARC_JMP_SLOT))
    ;
  else if (r_type == R_SPARC_JMP_IREL
           || r_type == R_SPARC_IRELATIVE)
    {
      Elf64_Addr value = map->l_addr + reloc->r_addend;
      if (__glibc_likely (!skip_ifunc))
        value = ((Elf64_Addr (*) (int)) value) (GLRO(dl_hwcap));
      if (r_type == R_SPARC_JMP_IREL)
        {
          /* 'high' is always zero, for large PLT entries the linker
             emits an R_SPARC_IRELATIVE.  */
          sparc64_fixup_plt (map, reloc, reloc_addr, value, 0, 1);
        }
      else
        *reloc_addr = value;
    }
  else if (r_type == R_SPARC_NONE)
    ;
  else
    _dl_reloc_bad_type (map, r_type, 1);
}

#endif /* RESOLVE_MAP */