/* glibc.malloc.check implementation.
   Copyright (C) 2001-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */

#define __mremap mremap
#include "malloc.c"
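
/* Note: including malloc.c textually gives this translation unit its
   own copy of the allocator; the *_check wrappers below act on that
   copy's main_arena and, as the comment in initialize_malloc_check
   below states, do not affect the regular libc malloc structures.  */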

/* When memory is tagged, the checking data is stored in the user part
   of the chunk.  We can't rely on the user not having modified the
   tags, so fetch the tag at each location before dereferencing
   it.  */
#define SAFE_CHAR_OFFSET(p,offset) \
  ((unsigned char *) tag_at (((unsigned char *) p) + offset))

/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

static unsigned char
magicbyte (const void *p)
{
  unsigned char magic;

  magic = (((uintptr_t) p >> 3) ^ ((uintptr_t) p >> 11)) & 0xFF;
  /* Do not return 1.  See the comment in mem2mem_check().  */
  if (magic == 1)
    ++magic;
  return magic;
}
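
/* For illustration (address assumed): for a chunk at p == 0x55500a10,
   magic == ((0x55500a10 >> 3) ^ (0x55500a10 >> 11)) & 0xFF
         == (0x0aaa0142 ^ 0x000aaa01) & 0xFF == 0x43.
   Because the value depends on the chunk's own address, a stale or
   forged pointer is unlikely to carry a matching magic byte.  */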

/* Visualize the chunk as being partitioned into blocks of 255 bytes from the
   highest address of the chunk, downwards.  The end of each block tells
   us the size of that block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption.  */
static size_t
malloc_check_get_size (void *mem)
{
  size_t size;
  unsigned char c;
  mchunkptr p = mem2chunk (mem);
  unsigned char magic = magicbyte (p);

  for (size = CHUNK_HDR_SZ + memsize (p) - 1;
       (c = *SAFE_CHAR_OFFSET (p, size)) != magic;
       size -= c)
    {
      if (c <= 0 || size < (c + CHUNK_HDR_SZ))
	malloc_printerr ("malloc_check_get_size: memory corruption");
    }

  /* chunk2mem size.  */
  return size - CHUNK_HDR_SZ;
}
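
/* Worked example (sizes assumed, and assuming the magic byte collides
   with neither length value): with memsize (p) == 600 and a requested
   size of 80, mem2mem_check below stores, at offsets from the user
   pointer:
       offset 599: 0xff,  offset 344: 0xff,  offset 89: 0x09,
       offset 80: the magic byte.
   The loop above walks that chain from the top of the chunk downwards
   (599 - 255 = 344, 344 - 255 = 89, 89 - 9 = 80) and stops at the magic
   byte, recovering the requested size 80; a zero length byte or a walk
   past the chunk header means the trailer was overwritten.  */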

/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size req_sz. */

static void *
mem2mem_check (void *ptr, size_t req_sz)
{
  mchunkptr p;
  unsigned char *m_ptr = ptr;
  size_t max_sz, block_sz, i;
  unsigned char magic;

  if (!ptr)
    return ptr;

  p = mem2chunk (ptr);
  magic = magicbyte (p);
  max_sz = memsize (p);

  for (i = max_sz - 1; i > req_sz; i -= block_sz)
    {
      block_sz = MIN (i - req_sz, 0xff);
      /* Don't allow the magic byte to appear in the chain of length bytes.
         For the following to work, magicbyte cannot return 0x01.  */
      if (block_sz == magic)
        --block_sz;

      *SAFE_CHAR_OFFSET (m_ptr, i) = block_sz;
    }
  *SAFE_CHAR_OFFSET (m_ptr, req_sz) = magic;
  return (void *) m_ptr;
}

/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
mem2chunk_check (void *mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if (!aligned_OK (mem))
    return NULL;

  p = mem2chunk (mem);
  sz = chunksize (p);
  magic = magicbyte (p);
  if (!chunk_is_mmapped (p))
    {
      /* Must be a chunk in conventional heap memory. */
      int contig = contiguous (&main_arena);
      if ((contig &&
           ((char *) p < mp_.sbrk_base ||
            ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
          sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
          (!prev_inuse (p) && ((prev_size (p) & MALLOC_ALIGN_MASK) != 0 ||
                               (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
                               next_chunk (prev_chunk (p)) != p)))
        return NULL;

      for (sz = CHUNK_HDR_SZ + memsize (p) - 1;
	   (c = *SAFE_CHAR_OFFSET (p, sz)) != magic;
	   sz -= c)
        {
          if (c == 0 || sz < (c + CHUNK_HDR_SZ))
            return NULL;
        }
    }
  else
    {
      unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;

      /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
         alignment relative to the beginning of a page.  Check this
         first. */
      offset = (unsigned long) mem & page_mask;
      if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
           offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
           offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
           offset < 0x2000) ||
          !chunk_is_mmapped (p) || prev_inuse (p) ||
          ((((unsigned long) p - prev_size (p)) & page_mask) != 0) ||
          ((prev_size (p) + sz) & page_mask) != 0)
        return NULL;

      for (sz = CHUNK_HDR_SZ + memsize (p) - 1;
	   (c = *SAFE_CHAR_OFFSET (p, sz)) != magic;
	   sz -= c)
        {
          if (c == 0 || sz < (c + CHUNK_HDR_SZ))
            return NULL;
        }
    }

  unsigned char* safe_p = SAFE_CHAR_OFFSET (p, sz);
  *safe_p ^= 0xFF;
  if (magic_p)
    *magic_p = safe_p;
  return p;
}
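
/* Flipping the magic byte (*safe_p ^= 0xFF) "consumes" the chunk: a
   second free () or realloc () of the same pointer re-runs the walk
   above, no longer finds the magic byte and returns NULL, which the
   callers report as an invalid pointer.  realloc_check restores the
   byte through *magic_p when the old chunk stays live.  */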

/* Check for corruption of the top chunk.  */
static void
top_check (void)
{
  mchunkptr t = top (&main_arena);

  if (t == initial_top (&main_arena) ||
      (!chunk_is_mmapped (t) &&
       chunksize (t) >= MINSIZE &&
       prev_inuse (t) &&
       (!contiguous (&main_arena) ||
        (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
    return;

  malloc_printerr ("malloc: top chunk is corrupt");
}

static void *
malloc_check (size_t sz)
{
  void *victim;
  size_t nb;

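  /* One extra byte is requested so that mem2mem_check can always place
     the trailing magic byte at user offset sz, even when the request
     would otherwise fill the chunk exactly.  */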
  if (__builtin_add_overflow (sz, 1, &nb))
    {
      __set_errno (ENOMEM);
      return NULL;
    }

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  victim = _int_malloc (&main_arena, nb);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (tag_new_usable (victim), sz);
}

static void
free_check (void *mem)
{
  mchunkptr p;

  if (!mem)
    return;

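  /* Preserve the caller's errno: the checking and unmapping paths below
     may set it internally, and free () should leave it untouched.  */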
  int err = errno;

  /* Quickly check that the freed pointer matches the tag for the memory.
     This gives a useful double-free detection.  */
  if (__glibc_unlikely (mtag_enabled))
    *(volatile char *)mem;
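  /* The volatile load forces a real, tag-checked memory access: under
     MTE a load through a pointer whose tag no longer matches the
     allocation granule is reported as a fault (immediately or
     asynchronously, depending on the MTE mode).  */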

  __libc_lock_lock (main_arena.mutex);
  p = mem2chunk_check (mem, NULL);
  if (!p)
    malloc_printerr ("free(): invalid pointer");
  if (chunk_is_mmapped (p))
    {
      __libc_lock_unlock (main_arena.mutex);
      munmap_chunk (p);
    }
  else
    {
      /* Mark the chunk as belonging to the library again.  */
      (void)tag_region (chunk2mem (p), memsize (p));
      _int_free (&main_arena, p, 1);
      __libc_lock_unlock (main_arena.mutex);
    }
  __set_errno (err);
}

static void *
realloc_check (void *oldmem, size_t bytes)
{
  INTERNAL_SIZE_T chnb;
  void *newmem = 0;
  unsigned char *magic_p;
  size_t rb;

  if (__builtin_add_overflow (bytes, 1, &rb))
    {
      __set_errno (ENOMEM);
      return NULL;
    }
  if (oldmem == 0)
    return malloc_check (bytes);

  if (bytes == 0)
    {
      free_check (oldmem);
      return NULL;
    }

  /* Quickly check that the freed pointer matches the tag for the memory.
     This gives a useful double-free detection.  */
  if (__glibc_unlikely (mtag_enabled))
    *(volatile char *)oldmem;

  __libc_lock_lock (main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
  __libc_lock_unlock (main_arena.mutex);
  if (!oldp)
    malloc_printerr ("realloc(): invalid pointer");
  const INTERNAL_SIZE_T oldsize = chunksize (oldp);

  if (!checked_request2size (rb, &chnb))
    {
      __set_errno (ENOMEM);
      goto invert;
    }

  __libc_lock_lock (main_arena.mutex);

  if (chunk_is_mmapped (oldp))
    {
#if HAVE_MREMAP
      mchunkptr newp = mremap_chunk (oldp, chnb);
      if (newp)
        newmem = chunk2mem_tag (newp);
      else
#endif
      {
	/* Note the extra SIZE_SZ overhead. */
        if (oldsize - SIZE_SZ >= chnb)
          newmem = oldmem; /* do nothing */
        else
          {
            /* Must alloc, copy, free. */
	    top_check ();
	    newmem = _int_malloc (&main_arena, rb);
            if (newmem)
              {
                memcpy (newmem, oldmem, oldsize - CHUNK_HDR_SZ);
                munmap_chunk (oldp);
              }
          }
      }
    }
  else
    {
      top_check ();
      newmem = _int_realloc (&main_arena, oldp, oldsize, chnb);
    }

  DIAG_PUSH_NEEDS_COMMENT;
#if __GNUC_PREREQ (7, 0)
  /* GCC 7 warns that magic_p may be used uninitialized.  But we can
     only reach this point with magic_p initialized: the goto above runs
     after mem2chunk_check has set it.  */
  DIAG_IGNORE_NEEDS_COMMENT (7, "-Wmaybe-uninitialized");
#endif
  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
invert:
  if (newmem == NULL)
    *magic_p ^= 0xFF;
  DIAG_POP_NEEDS_COMMENT;

  __libc_lock_unlock (main_arena.mutex);

  return mem2mem_check (tag_new_usable (newmem), bytes);
}

static void *
memalign_check (size_t alignment, size_t bytes)
{
  void *mem;

  if (alignment <= MALLOC_ALIGNMENT)
    return malloc_check (bytes);

  if (alignment < MINSIZE)
    alignment = MINSIZE;

  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
     power of 2 and will cause overflow in the check below.  */
  if (alignment > SIZE_MAX / 2 + 1)
    {
      __set_errno (EINVAL);
      return NULL;
    }

  /* Check for overflow.  */
  if (bytes > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return NULL;
    }

  /* Make sure alignment is power of 2.  */
  if (!powerof2 (alignment))
    {
      size_t a = MALLOC_ALIGNMENT * 2;
      while (a < alignment)
        a <<= 1;
      alignment = a;
    }
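
  /* E.g. (values assumed): with MALLOC_ALIGNMENT == 16, a requested
     alignment of 48 is not a power of two and is rounded up here to 64
     (32 -> 64); a power-of-two request such as 128 passes through
     unchanged.  */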

  __libc_lock_lock (main_arena.mutex);
  top_check ();
  mem = _int_memalign (&main_arena, alignment, bytes + 1);
  __libc_lock_unlock (main_arena.mutex);
  return mem2mem_check (tag_new_usable (mem), bytes);
}

#if HAVE_TUNABLES
static void
TUNABLE_CALLBACK (set_mallopt_check) (tunable_val_t *valp)
{
  int32_t value = (int32_t) valp->numval;
  if (value != 0)
    __malloc_debug_enable (MALLOC_CHECK_HOOK);
}
#endif

static bool
initialize_malloc_check (void)
{
  /* This is the copy of the malloc initializer that we pulled in along with
     malloc-check.  This does not affect any of the libc malloc structures.  */
  ptmalloc_init ();
#if HAVE_TUNABLES
  TUNABLE_GET (check, int32_t, TUNABLE_CALLBACK (set_mallopt_check));
#else
  const char *s = secure_getenv ("MALLOC_CHECK_");
  if (s && s[0] != '\0' && s[0] != '0')
    __malloc_debug_enable (MALLOC_CHECK_HOOK);
#endif
  return __is_malloc_debug_enabled (MALLOC_CHECK_HOOK);
}
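
/* Typical ways to enable these checks at run time, assuming this
   file's hooks are linked in or preloaded for the process:
     GLIBC_TUNABLES=glibc.malloc.check=1 ./app    (tunables build)
     MALLOC_CHECK_=1 ./app                        (environment fallback)
   Any non-zero value turns on MALLOC_CHECK_HOOK here; secure_getenv
   means the environment variable is ignored for set-uid programs.  */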