Lines matching refs:bytes in glibc's malloc/malloc.c (each line is prefixed with its source line number in that file)
52 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
54 * For small (<= 64 bytes by default) requests, it is a caching
92 Supported pointer representation: 4 or 8 bytes
93 Supported size_t representation: 4 or 8 bytes
94 Note that size_t is allowed to be 4 bytes even if pointers are 8.
102 Minimum overhead per allocated chunk: 4 or 8 bytes
106 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
107 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
110 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
111 needed; 4 (8) for a trailing size field and 8 (16) bytes for
113 16/24/32 bytes.
115 Even a request for zero bytes (i.e., malloc(0)) returns a
118 The maximum overhead wastage (i.e., number of extra bytes
122 sizeof(size_t) bytes plus the remainder from a system page (the
123 minimal mmap unit); typically 4096 or 8192 bytes.
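
A minimal sketch of the padding arithmetic behind the sizes quoted above, assuming a 64-bit build (SIZE_SZ = 8, MALLOC_ALIGNMENT = 16, MINSIZE = 32); pad_request here is only an illustrative stand-in for malloc.c's request2size:

#include <stddef.h>
#include <stdio.h>

#define SIZE_SZ           8UL   /* sizeof (INTERNAL_SIZE_T), assumed 64-bit */
#define MALLOC_ALIGNMENT  16UL  /* assumed (x86-64) */
#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
#define MINSIZE           32UL  /* smallest possible chunk under these assumptions */

/* Pad the request by the size field, round up to the alignment,
   and enforce the minimum chunk size.  */
static size_t pad_request (size_t req)
{
  size_t sz = (req + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK;
  return sz < MINSIZE ? MINSIZE : sz;
}

int main (void)
{
  /* Even malloc(0) consumes a MINSIZE chunk: 32 bytes here, 24 usable.  */
  printf ("request   0 -> chunk %zu\n", pad_request (0));
  printf ("request  24 -> chunk %zu\n", pad_request (24));
  printf ("request  25 -> chunk %zu\n", pad_request (25));
  return 0;
}
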
326 idx 0 bytes 0..24 (64-bit) or 0..12 (32-bit)
327 idx 1 bytes 25..40 or 13..20
328 idx 2 bytes 41..56 or 21..28
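
The request ranges in the table above follow from the tcache index arithmetic. A sketch assuming 64-bit values (MINSIZE = 32, MALLOC_ALIGNMENT = 16), with request_to_chunk and chunk_to_tidx as simplified stand-ins for request2size and csize2tidx:

#include <stddef.h>
#include <stdio.h>

#define MALLOC_ALIGNMENT 16UL
#define MINSIZE          32UL

static size_t request_to_chunk (size_t req)   /* simplified request2size */
{
  size_t sz = (req + 8 + MALLOC_ALIGNMENT - 1) & ~(MALLOC_ALIGNMENT - 1);
  return sz < MINSIZE ? MINSIZE : sz;
}

static size_t chunk_to_tidx (size_t csz)      /* simplified csize2tidx */
{
  return (csz - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT;
}

int main (void)
{
  /* Requests 0..24 land in idx 0, 25..40 in idx 1, 41..56 in idx 2.  */
  size_t samples[] = { 0, 24, 25, 40, 41, 56 };
  for (size_t i = 0; i < sizeof samples / sizeof samples[0]; i++)
    printf ("request %2zu -> tcache idx %zu\n",
            samples[i], chunk_to_tidx (request_to_chunk (samples[i])));
  return 0;
}
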
346 It assumes a minimum page size of 4096 bytes (12 bits). Systems with
599 Returns a pointer to a newly allocated chunk of at least n bytes, or null
604 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
630 Returns a pointer to n_elements * element_size bytes, with all locations
638 as does chunk p up to the minimum of (n, p's size) bytes, or null
650 if n is for fewer bytes than already held by p, the newly unused
667 Returns a pointer to a newly allocated chunk of n bytes, aligned
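
A short usage sketch of the guarantees documented above: realloc preserves the old contents up to the smaller of the two sizes, and memalign returns storage aligned to the requested boundary (the sizes and alignment below are illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <malloc.h>

int main (void)
{
  char *p = malloc (16);
  if (p == NULL)
    return 1;
  strcpy (p, "hello");

  /* Growing the block keeps the first min(16, 64) bytes intact.  */
  char *q = realloc (p, 64);
  if (q == NULL) { free (p); return 1; }
  printf ("after realloc: %s\n", q);
  free (q);

  /* memalign: the result address is a multiple of the alignment.  */
  void *a = memalign (64, 100);
  if (a != NULL)
    printf ("aligned to 64? %s\n", ((uintptr_t) a % 64 == 0) ? "yes" : "no");
  free (a);
  return 0;
}
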
693 arena: current total non-mmapped bytes allocated from system
698 hblkhd: total bytes held in mmapped regions
700 fsmblks: total bytes held in fastbin blocks
703 keepcost: the maximum number of bytes that could ideally be released
753 Returns the number of bytes you can actually use in
756 You can use this many bytes without worrying about
772 number of bytes allocated via malloc (or realloc, etc) but not yet
773 freed. Note that this is the number of bytes allocated, not the
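
Usage sketch for the statistics interfaces referenced above: malloc_usable_size() reports the usable bytes in a block (often more than was requested), and mallinfo2() (available in glibc 2.33 and later) exposes the arena/hblkhd/fsmblks/keepcost counters as size_t fields:

#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>

int main (void)
{
  void *p = malloc (100);
  if (p == NULL)
    return 1;

  /* Usable size can exceed 100 because of padding and alignment.  */
  printf ("usable size: %zu\n", malloc_usable_size (p));

  struct mallinfo2 mi = mallinfo2 ();
  printf ("arena bytes: %zu, mmapped bytes: %zu, keepcost: %zu\n",
          mi.arena, mi.hblkhd, mi.keepcost);

  free (p);
  return 0;
}
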
1156 INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */
1187 | Size of chunk, in bytes |A|M|P|
1191 . (malloc_usable_size() bytes) .
1196 | Size of next chunk, in bytes |A|0|1|
1212 `head:' | Size of chunk, in bytes |A|0|P|
1218 | Unused space (may be 0 bytes long) .
1222 `foot:' | Size of chunk, in bytes |
1224 | Size of next chunk, in bytes |A|0|0|
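
For reference, a sketch of the chunk header that the boundary-tag diagrams above describe, mirroring malloc.c's malloc_chunk with size_t standing in for INTERNAL_SIZE_T. The A/M/P markers are the three low bits of mchunk_size, which are free to carry flags because chunk sizes are always multiples of the alignment:

#include <stddef.h>

typedef size_t INTERNAL_SIZE_T;

struct malloc_chunk
{
  INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk, if free.   */
  INTERNAL_SIZE_T mchunk_size;      /* Size in bytes, including overhead. */
  struct malloc_chunk *fd;          /* Links -- used only while free.     */
  struct malloc_chunk *bk;
  struct malloc_chunk *fd_nextsize; /* Large-bin links -- only while free. */
  struct malloc_chunk *bk_nextsize;
};

#define PREV_INUSE     0x1  /* P: previous chunk is in use            */
#define IS_MMAPPED     0x2  /* M: chunk was obtained via mmap         */
#define NON_MAIN_ARENA 0x4  /* A: chunk belongs to a non-main arena   */

/* Strip the flag bits to recover the actual chunk size.  */
#define chunksize(p) ((p)->mchunk_size & ~(size_t) 0x7)
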
1258 MINSIZE bytes long, it is replenished.
1328 /* pad request bytes into a usable size -- internal version */
1498 requirements. The result is a little over 1K bytes (for 4byte
1555 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1556 8 bytes apart. Larger bins are approximately logarithmically spaced:
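
A sketch of that spacing, assuming a 64-bit build (where the "8 bytes apart" above becomes 16 bytes); the two helpers below are hypothetical illustrations of the spacing, not malloc.c's smallbin/largebin index macros:

#include <stddef.h>
#include <stdio.h>

static size_t small_bin_chunk_size (unsigned idx)
{
  return (size_t) idx * 16;       /* bin 2 -> 32, bin 3 -> 48, ..., bin 31 -> 496 */
}

static size_t large_bin_width (unsigned group)
{
  return 64UL << (3 * group);     /* 64, 512, 4096, ...: roughly logarithmic */
}

int main (void)
{
  printf ("smallest small bin holds %zu-byte chunks\n", small_bin_chunk_size (2));
  printf ("largest  small bin holds %zu-byte chunks\n", small_bin_chunk_size (31));
  printf ("first two large-bin widths: %zu and %zu bytes\n",
          large_bin_width (0), large_bin_width (1));
  return 0;
}
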
2415 space to service request for nb bytes, thus requiring that av->top
2459 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2559 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2653 /* The fencepost takes at least MINSIZE bytes, because it might
2818 Skip over some bytes to arrive at an aligned position.
2819 We don't need to specially mark these wasted front bytes.
2874 Skip over some bytes to arrive at an aligned position.
2875 We don't need to specially mark these wasted front bytes.
3239 const size_t bytes = sizeof (tcache_perthread_struct);
3244 arena_get (ar_ptr, bytes);
3245 victim = _int_malloc (ar_ptr, bytes);
3248 ar_ptr = arena_get_retry (ar_ptr, bytes);
3249 victim = _int_malloc (ar_ptr, bytes);
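
A sketch of the per-thread cache object whose size is being allocated above, mirroring recent glibc releases (older ones used a char counts[] array instead of uint16_t); TCACHE_MAX_BINS defaults to 64:

#include <stdint.h>

#define TCACHE_MAX_BINS 64   /* default bin count, assumed */

typedef struct tcache_entry
{
  struct tcache_entry *next;             /* singly linked list of cached chunks     */
  struct tcache_perthread_struct *key;   /* marks tcache membership (double-free check) */
} tcache_entry;

typedef struct tcache_perthread_struct
{
  uint16_t counts[TCACHE_MAX_BINS];      /* chunks currently cached in each bin */
  tcache_entry *entries[TCACHE_MAX_BINS];
} tcache_perthread_struct;
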
3286 __libc_malloc (size_t bytes)
3299 if (!checked_request2size (bytes, &tbytes))
3321 victim = tag_new_usable (_int_malloc (&main_arena, bytes));
3327 arena_get (ar_ptr, bytes);
3329 victim = _int_malloc (ar_ptr, bytes);
3334 LIBC_PROBE (memory_malloc_retry, 1, bytes);
3335 ar_ptr = arena_get_retry (ar_ptr, bytes);
3336 victim = _int_malloc (ar_ptr, bytes);
3399 __libc_realloc (void *oldmem, size_t bytes)
3410 if (bytes == 0 && oldmem != NULL)
3418 return __libc_malloc (bytes);
3446 if (!checked_request2size (bytes, &nb))
3474 newmem = __libc_malloc (bytes);
3503 LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
3504 newp = __libc_malloc (bytes);
3519 __libc_memalign (size_t alignment, size_t bytes)
3525 return _mid_memalign (alignment, bytes, address);
3529 _mid_memalign (size_t alignment, size_t bytes, void *address)
3536 return __libc_malloc (bytes);
3562 p = _int_memalign (&main_arena, alignment, bytes);
3568 arena_get (ar_ptr, bytes + alignment + MINSIZE);
3570 p = _int_memalign (ar_ptr, alignment, bytes);
3573 LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
3574 ar_ptr = arena_get_retry (ar_ptr, bytes);
3575 p = _int_memalign (ar_ptr, alignment, bytes);
3590 __libc_valloc (size_t bytes)
3597 return _mid_memalign (pagesize, bytes, address);
3601 __libc_pvalloc (size_t bytes)
3610 if (__glibc_unlikely (__builtin_add_overflow (bytes,
3632 ptrdiff_t bytes;
3634 if (__glibc_unlikely (__builtin_mul_overflow (n, elem_size, &bytes)))
3640 sz = bytes;
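
The pvalloc and calloc paths above reject size computations that would overflow by using GCC's checked-arithmetic builtins. A minimal sketch of the same pattern, applied to a hypothetical alloc_array() helper:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Refuse to allocate if n * elem_size does not fit in size_t.  */
static void *alloc_array (size_t n, size_t elem_size)
{
  size_t bytes;
  if (__builtin_mul_overflow (n, elem_size, &bytes))
    return NULL;
  return malloc (bytes);
}

int main (void)
{
  void *ok  = alloc_array (10, 8);              /* 80 bytes */
  void *bad = alloc_array ((size_t) -1, 16);    /* overflows -> NULL */
  printf ("ok=%p bad=%p\n", ok, bad);
  free (ok);
  return 0;
}
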
3723 /* clear only the bytes from non-freshly-sbrked memory */
3728 /* Unroll clear of <= 36 bytes (72 if 8byte sizes). We know that
3770 _int_malloc (mstate av, size_t bytes)
3795 Convert request size to internal form by adding SIZE_SZ bytes
3803 if (!checked_request2size (bytes, &nb))
3815 alloc_perturb (p, bytes);
3887 alloc_perturb (p, bytes);
3945 alloc_perturb (p, bytes);
4046 alloc_perturb (p, bytes);
4078 alloc_perturb (p, bytes);
4240 alloc_perturb (p, bytes);
4348 alloc_perturb (p, bytes);
4386 alloc_perturb (p, bytes);
4409 alloc_perturb (p, bytes);
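
The alloc_perturb() calls above fill freshly allocated memory with a perturb byte when the feature is enabled, which helps expose reads of uninitialized heap memory. A minimal sketch of turning it on via mallopt(); setting the MALLOC_PERTURB_ environment variable has the same effect:

#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>

int main (void)
{
  /* Equivalent to running with MALLOC_PERTURB_=170 in the environment.  */
  mallopt (M_PERTURB, 0xAA);

  unsigned char *p = malloc (8);
  if (p == NULL)
    return 1;
  /* Fresh (non-zeroed) allocations are filled with the complement of the
     perturb byte, so this typically prints 0x55.  */
  printf ("first byte of new allocation: 0x%02x\n", p[0]);
  free (p);
  return 0;
}
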
4440 /* We know that each chunk is at least MINSIZE bytes in size or a
4940 _int_memalign (mstate av, size_t alignment, size_t bytes)
4955 if (!checked_request2size (bytes, &nb))
5283 fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
5284 fprintf (stderr, "in use bytes = %10u\n", (unsigned int) mi.uordblks);
5297 fprintf (stderr, "system bytes = %10u\n", system_b);
5298 fprintf (stderr, "in use bytes = %10u\n", in_use_b);
5300 fprintf (stderr, "max mmap bytes = %10lu\n",
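
The fprintf calls above format the report produced by malloc_stats(), which prints the per-arena and total "system bytes" / "in use bytes" counters to stderr. Minimal usage sketch:

#include <stdlib.h>
#include <malloc.h>

int main (void)
{
  void *p = malloc (1 << 20);   /* make the counters non-trivial */
  malloc_stats ();              /* writes "system bytes = ..." etc. to stderr */
  free (p);
  return 0;
}
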