/* Relocate a shared object and resolve its references to other loaded objects.
   Copyright (C) 1995-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <errno.h>
#include <libintl.h>
#include <stdlib.h>
#include <unistd.h>
#include <ldsodefs.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/types.h>
#include <_itoa.h>
#include <libc-pointer-arith.h>
#include "dynamic-link.h"

/* Statistics function.  */
#ifdef SHARED
# define bump_num_cache_relocations() ++GL(dl_num_cache_relocations)
#else
# define bump_num_cache_relocations() ((void) 0)
#endif


/* We are trying to perform a static TLS relocation in MAP, but it was
   dynamically loaded.  This can only work if there is enough surplus in
   the static TLS area already allocated for each running thread.  If this
   object's TLS segment is too big to fit, we fail with -1.  If it fits,
   we set MAP->l_tls_offset and return 0.
   A portion of the surplus static TLS can be optionally used to optimize
   dynamic TLS access (with TLSDESC or powerpc TLS optimizations).
   If OPTIONAL is true then TLS is allocated for such optimization and
   the caller must have a fallback in case the optional portion of surplus
   TLS runs out.  If OPTIONAL is false then the entire surplus TLS area is
   considered and the allocation only fails if that runs out.  */
int
_dl_try_allocate_static_tls (struct link_map *map, bool optional)
{
  /* If we've already used the variable with dynamic access, or if the
     alignment requirements are too high, fail.  */
  if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
      || map->l_tls_align > GLRO (dl_tls_static_align))
    {
    fail:
      return -1;
    }

#if TLS_TCB_AT_TP
  size_t freebytes = GLRO (dl_tls_static_size) - GL(dl_tls_static_used);
  if (freebytes < TLS_TCB_SIZE)
    goto fail;
  freebytes -= TLS_TCB_SIZE;

  size_t blsize = map->l_tls_blocksize + map->l_tls_firstbyte_offset;
  if (freebytes < blsize)
    goto fail;

  size_t n = (freebytes - blsize) / map->l_tls_align;

  /* Account optional static TLS surplus usage.  */
  size_t use = freebytes - n * map->l_tls_align - map->l_tls_firstbyte_offset;
  if (optional && use > GL(dl_tls_static_optional))
    goto fail;
  else if (optional)
    GL(dl_tls_static_optional) -= use;

  size_t offset = GL(dl_tls_static_used) + use;

  map->l_tls_offset = GL(dl_tls_static_used) = offset;
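
  /* Worked example of the arithmetic above, with illustrative numbers
     only (not taken from any particular target): say freebytes is 2880
     after the TCB is subtracted, l_tls_blocksize is 100,
     l_tls_firstbyte_offset is 0 and l_tls_align is 64.  Then blsize is
     100, n = (2880 - 100) / 64 = 43, and use = 2880 - 43 * 64 = 128:
     the smallest slice off the free area that still leaves room for the
     block at an aligned offset below the thread pointer (l_tls_offset
     is measured downward from the TP under TLS_TCB_AT_TP).  */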
#elif TLS_DTV_AT_TP
  /* dl_tls_static_used includes the TCB at the beginning.  */
  size_t offset = (ALIGN_UP(GL(dl_tls_static_used)
			    - map->l_tls_firstbyte_offset,
			    map->l_tls_align)
		   + map->l_tls_firstbyte_offset);
  size_t used = offset + map->l_tls_blocksize;

  if (used > GLRO (dl_tls_static_size))
    goto fail;

  /* Account optional static TLS surplus usage.  */
  size_t use = used - GL(dl_tls_static_used);
  if (optional && use > GL(dl_tls_static_optional))
    goto fail;
  else if (optional)
    GL(dl_tls_static_optional) -= use;

  map->l_tls_offset = offset;
  map->l_tls_firstbyte_offset = GL(dl_tls_static_used);
  GL(dl_tls_static_used) = used;
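
  /* Worked example with illustrative numbers: with dl_tls_static_used
     at 100, l_tls_firstbyte_offset 8 and l_tls_align 16, offset is
     ALIGN_UP (100 - 8, 16) + 8 = 96 + 8 = 104, the first position past
     the used area with the required misalignment; with l_tls_blocksize
     40, used becomes 144 and must still fit in dl_tls_static_size.  */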
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

  /* If the object is not yet relocated we cannot initialize the
     static TLS region.  Delay it.  */
  if (map->l_real->l_relocated)
    {
#ifdef SHARED
      if (__builtin_expect (THREAD_DTV()[0].counter != GL(dl_tls_generation),
			    0))
	/* Update the slot information data for at least the generation of
	   the DSO we are allocating data for.  */
	(void) _dl_update_slotinfo (map->l_tls_modid);
#endif

      dl_init_static_tls (map);
    }
  else
    map->l_need_tls_init = 1;

  return 0;
}
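
/* Note: callers that pass OPTIONAL as true (the TLSDESC and powerpc TLS
   optimization paths mentioned above) must be prepared for a -1 return
   and fall back to dynamic TLS access.  _dl_allocate_static_tls below
   has no such fallback: it passes false and treats failure as fatal.  */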

/* This function intentionally does not return a value but signals errors
   directly: static TLS failures should be rare, and the code handling
   them should stay out of line as much as possible.  */
void
__attribute_noinline__
_dl_allocate_static_tls (struct link_map *map)
{
  if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
      || _dl_try_allocate_static_tls (map, false))
    {
      _dl_signal_error (0, map->l_name, NULL, N_("\
cannot allocate memory in static TLS block"));
    }
}

#if !PTHREAD_IN_LIBC
/* Initialize static TLS area and DTV for current (only) thread.
   libpthread implementations should provide their own hook
   to handle all threads.  */
void
_dl_nothread_init_static_tls (struct link_map *map)
{
#if TLS_TCB_AT_TP
  void *dest = (char *) THREAD_SELF - map->l_tls_offset;
#elif TLS_DTV_AT_TP
  void *dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE;
#else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
#endif

  /* Initialize the memory.  */
  memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
	  '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
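  /* __mempcpy returns DEST plus l_tls_initimage_size, so the memset
     zero-fills exactly the uninitialized (tbss) tail of the block.  */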
}
#endif /* !PTHREAD_IN_LIBC */

/* This macro is used as a callback from the ELF_DYNAMIC_RELOCATE code.  */
#define RESOLVE_MAP(l, scope, ref, version, r_type)			      \
    ((ELFW(ST_BIND) ((*ref)->st_info) != STB_LOCAL			      \
      && __glibc_likely (!dl_symbol_visibility_binds_local_p (*ref)))	      \
     ? ((__glibc_unlikely ((*ref) == l->l_lookup_cache.sym)		      \
	 && elf_machine_type_class (r_type) == l->l_lookup_cache.type_class)  \
	? (bump_num_cache_relocations (),				      \
	   (*ref) = l->l_lookup_cache.ret,				      \
	   l->l_lookup_cache.value)					      \
	: ({ lookup_t _lr;						      \
	     int _tc = elf_machine_type_class (r_type);			      \
	     l->l_lookup_cache.type_class = _tc;			      \
	     l->l_lookup_cache.sym = (*ref);				      \
	     const struct r_found_version *v = NULL;			      \
	     if ((version) != NULL && (version)->hash != 0)		      \
	       v = (version);						      \
	     _lr = _dl_lookup_symbol_x ((const char *) D_PTR (l, l_info[DT_STRTAB]) + (*ref)->st_name, \
					l, (ref), scope, v, _tc,	      \
					DL_LOOKUP_ADD_DEPENDENCY	      \
					| DL_LOOKUP_FOR_RELOCATE, NULL);      \
	     l->l_lookup_cache.ret = (*ref);				      \
	     l->l_lookup_cache.value = _lr; }))				      \
     : l)
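
/* A note on RESOLVE_MAP: local symbols and symbols whose visibility
   binds locally resolve to L itself without a lookup.  Everything else
   goes through the per-map, single-entry lookup cache: a hit requires
   both the same symbol and the same relocation type class as the
   previous lookup, since the type class influences how
   _dl_lookup_symbol_x resolves a symbol; on a miss the full scope
   search runs and its result is cached for the next relocation against
   the same symbol.  */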

#include "dynamic-link.h"

void
_dl_relocate_object (struct link_map *l, struct r_scope_elem *scope[],
		     int reloc_mode, int consider_profiling)
{
  struct textrels
  {
    caddr_t start;
    size_t len;
    int prot;
    struct textrels *next;
  } *textrels = NULL;
  /* Initialize it to make the compiler happy.  */
  const char *errstring = NULL;
  int lazy = reloc_mode & RTLD_LAZY;
  int skip_ifunc = reloc_mode & __RTLD_NOIFUNC;

#ifdef SHARED
  bool consider_symbind = false;
  /* If we are auditing, install the same handlers we need for profiling.  */
  if ((reloc_mode & __RTLD_AUDIT) == 0)
    {
      struct audit_ifaces *afct = GLRO(dl_audit);
      for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
	{
	  /* Profiling is needed only if PLT hooks are provided.  */
	  if (afct->ARCH_LA_PLTENTER != NULL
	      || afct->ARCH_LA_PLTEXIT != NULL)
	    consider_profiling = 1;
	  if (afct->symbind != NULL)
	    consider_symbind = true;

	  afct = afct->next;
	}
    }
#elif defined PROF
  /* Never use dynamic linker profiling for gprof profiling code.  */
# define consider_profiling 0
#else
# define consider_symbind 0
#endif

  if (l->l_relocated)
    return;

  /* If DT_BIND_NOW is set relocate all references in this object.  We
     do not do this if we are profiling, of course.  */
  // XXX Correct for auditing?
  if (!consider_profiling
      && __builtin_expect (l->l_info[DT_BIND_NOW] != NULL, 0))
    lazy = 0;

  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_RELOC))
    _dl_debug_printf ("\nrelocation processing: %s%s\n",
		      DSO_FILENAME (l->l_name), lazy ? " (lazy)" : "");

  /* DT_TEXTREL is now in level 2 and may be phased out eventually.  But
     we rewrite a DF_TEXTREL flag in DT_FLAGS into a DT_TEXTREL entry to
     make testing easier, so the entry is available at all times.  */
  if (__glibc_unlikely (l->l_info[DT_TEXTREL] != NULL))
    {
      /* Bletch.  We must make read-only segments writable
	 long enough to relocate them.  */
      const ElfW(Phdr) *ph;
      for (ph = l->l_phdr; ph < &l->l_phdr[l->l_phnum]; ++ph)
	if (ph->p_type == PT_LOAD && (ph->p_flags & PF_W) == 0)
	  {
	    struct textrels *newp;

	    newp = (struct textrels *) alloca (sizeof (*newp));
	    newp->len = ALIGN_UP (ph->p_vaddr + ph->p_memsz, GLRO(dl_pagesize))
			- ALIGN_DOWN (ph->p_vaddr, GLRO(dl_pagesize));
	    newp->start = PTR_ALIGN_DOWN (ph->p_vaddr, GLRO(dl_pagesize))
			  + (caddr_t) l->l_addr;
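	    /* For example (illustrative numbers): with a 4096-byte page
	       size, a read-only PT_LOAD with p_vaddr 0x1234 and p_memsz
	       0x2000 spans the pages [0x1000, 0x4000), so len is 0x3000
	       and start is l_addr + 0x1000.  */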

	    newp->prot = 0;
	    if (ph->p_flags & PF_R)
	      newp->prot |= PROT_READ;
	    if (ph->p_flags & PF_W)
	      newp->prot |= PROT_WRITE;
	    if (ph->p_flags & PF_X)
	      newp->prot |= PROT_EXEC;

	    if (__mprotect (newp->start, newp->len, newp->prot|PROT_WRITE) < 0)
	      {
		errstring = N_("cannot make segment writable for relocation");
	      call_error:
		_dl_signal_error (errno, l->l_name, NULL, errstring);
	      }

	    newp->next = textrels;
	    textrels = newp;
	  }
    }

  {
    /* Do the actual relocation of the object's GOT and other data.  */

    ELF_DYNAMIC_RELOCATE (l, scope, lazy, consider_profiling, skip_ifunc);

#ifndef PROF
    if ((consider_profiling || consider_symbind)
	&& l->l_info[DT_PLTRELSZ] != NULL)
      {
	/* Allocate the array which will hold the already-resolved
	   relocation results.  If the shared object lacks a PLT (for
	   example, if it contains only leaf functions),
	   l_info[DT_PLTRELSZ] will be NULL.  */
	size_t sizeofrel = l->l_info[DT_PLTREL]->d_un.d_val == DT_RELA
			   ? sizeof (ElfW(Rela))
			   : sizeof (ElfW(Rel));
	size_t relcount = l->l_info[DT_PLTRELSZ]->d_un.d_val / sizeofrel;
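	/* E.g. with 24-byte ElfW(Rela) entries (the 64-bit layout) and
	   a DT_PLTRELSZ of 480 bytes, relcount is 20 result slots.  */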
	l->l_reloc_result = calloc (sizeof (l->l_reloc_result[0]), relcount);

	if (l->l_reloc_result == NULL)
	  {
	    errstring = N_("\
%s: out of memory to store relocation results for %s\n");
	    _dl_fatal_printf (errstring, RTLD_PROGNAME, l->l_name);
	  }
      }
#endif
  }

  /* Mark the object so we know this work has been done.  */
  l->l_relocated = 1;

  /* Undo the segment protection changes.  */
  while (__builtin_expect (textrels != NULL, 0))
    {
      if (__mprotect (textrels->start, textrels->len, textrels->prot) < 0)
	{
	  errstring = N_("cannot restore segment prot after reloc");
	  goto call_error;
	}

#ifdef CLEAR_CACHE
      CLEAR_CACHE (textrels->start, textrels->start + textrels->len);
#endif

      textrels = textrels->next;
    }

  /* In case we can protect the data now that the relocations are
     done, do it.  */
  if (l->l_relro_size != 0)
    _dl_protect_relro (l);
}


void
_dl_protect_relro (struct link_map *l)
{
  ElfW(Addr) start = ALIGN_DOWN((l->l_addr
				 + l->l_relro_addr),
				GLRO(dl_pagesize));
  ElfW(Addr) end = ALIGN_DOWN((l->l_addr
			       + l->l_relro_addr
			       + l->l_relro_size),
			      GLRO(dl_pagesize));
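
  /* Example with a 4096-byte page size: a relro region covering
     [l_addr + 0x1100, l_addr + 0x3300) yields start l_addr + 0x1000 and
     end l_addr + 0x3000.  Rounding END down (rather than up) keeps any
     writable data sharing the final partial page usable; the bytes
     preceding the region on its first page typically belong to a
     read-only segment anyway.  */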
  if (start != end
      && __mprotect ((void *) start, end - start, PROT_READ) < 0)
    {
      static const char errstring[] = N_("\
cannot apply additional memory protection after relocation");
      _dl_signal_error (errno, l->l_name, NULL, errstring);
    }
}

void
__attribute_noinline__
_dl_reloc_bad_type (struct link_map *map, unsigned int type, int plt)
{
#define DIGIT(b)	_itoa_lower_digits[(b) & 0xf]

  /* XXX We cannot translate these messages.  */
  static const char msg[2][32
#if __ELF_NATIVE_CLASS == 64
			   + 6
#endif
  ] = { "unexpected reloc type 0x",
	"unexpected PLT reloc type 0x" };
  char msgbuf[sizeof (msg[0])];
  char *cp;

  cp = __stpcpy (msgbuf, msg[plt]);
#if __ELF_NATIVE_CLASS == 64
  if (__builtin_expect(type > 0xff, 0))
    {
      *cp++ = DIGIT (type >> 28);
      *cp++ = DIGIT (type >> 24);
      *cp++ = DIGIT (type >> 20);
      *cp++ = DIGIT (type >> 16);
      *cp++ = DIGIT (type >> 12);
      *cp++ = DIGIT (type >> 8);
    }
#endif
  *cp++ = DIGIT (type >> 4);
  *cp++ = DIGIT (type);
  *cp = '\0';
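
  /* At this point msgbuf holds e.g. "unexpected reloc type 0x25" for
     TYPE 0x25 and PLT 0; for 64-bit relocation types above 0xff, six
     more hex digits are emitted before the final two.  */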

  _dl_signal_error (0, map->l_name, NULL, msgbuf);
}