Lines Matching defs:size
83 independent_calloc(size_t n_elements, size_t size, void* chunks[]);
103 Each malloced chunk has a hidden word of overhead holding size
106 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 bytes of overhead)
110 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
111 needed; 4 (8) for a trailing size field and 8 (16) bytes for
112 free list pointers. Thus, the minimum allocatable size is
116 pointer to something of the minimum allocatable size.
120 to the minimum size, except for requests >= mmap_threshold that
125 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
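As a user-level illustration of the minimum-size rule quoted above, the sketch below (assuming glibc, where malloc_usable_size() is declared in <malloc.h>) shows that even a one-byte request is padded up to the minimum allocatable size; the exact number printed depends on pointer width and MALLOC_ALIGNMENT.

    #include <malloc.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main (void)
    {
      void *p = malloc (1);                 /* tiny request */
      if (p == NULL)
        return 1;
      /* The usable size reflects the padded chunk, e.g. 24 bytes on a
         typical 64-bit glibc system, not the 1 byte that was asked for.  */
      printf ("requested 1 byte, usable %zu bytes\n", malloc_usable_size (p));
      free (p);
      return 0;
    }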
322 /* When "x" is a user-provided size. */
346 It assumes a minimum page size of 4096 bytes (12 bits). Systems with
424 void *tag_new_zero_region (void *ptr, size_t size)
430 void *tag_region (void *ptr, size_t size)
432 Color the region of memory pointed to by PTR and size SIZE with
459 tag_region (void *ptr, size_t size)
462 return __libc_mtag_tag_region (ptr, size);
467 tag_new_zero_region (void *ptr, size_t size)
470 return __libc_mtag_tag_zero_region (__libc_mtag_new_tag (ptr), size);
471 return memset (ptr, 0, size);
553 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
555 multiple of page size. This backup strategy generally applies only
561 limited, the size should be large, to avoid too many mmap calls and
604 size is 16 bytes on most 32-bit systems, and 24 or 32 bytes on 64-bit
637 Returns a pointer to a chunk of size n that contains the same data
638 as does chunk p up to the minimum of (n, p's size) bytes, or null
652 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
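The realloc contract referenced above (data preserved up to the minimum of the old and new sizes) can be demonstrated with plain standard C; this is a usage sketch, not part of malloc.c.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main (void)
    {
      char *buf = malloc (16);
      if (buf == NULL)
        return 1;
      strcpy (buf, "hello");
      char *bigger = realloc (buf, 64);   /* contents survive up to min (16, 64) bytes */
      if (bigger == NULL)
        {
          free (buf);
          return 1;
        }
      printf ("%s\n", bigger);            /* still prints "hello" */
      free (bigger);
      return 0;
    }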
683 size of the system. If the pagesize is unknown, 4096 is used.
755 often not) due to alignment and minimum size constraints.
790 posix_memalign(void **memptr, size_t alignment, size_t size);
792 POSIX wrapper like memalign(), checking for validity of size.
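A short usage sketch of the posix_memalign() wrapper mentioned above; the alignment value 64 is an arbitrary example, and the validity check rejects alignments that are not a power of two or not a multiple of sizeof (void *).

    #include <stdio.h>
    #include <stdlib.h>

    int main (void)
    {
      void *mem = NULL;
      int rc = posix_memalign (&mem, 64, 1024);   /* 64 is a power of two       */
      if (rc != 0)                                /* EINVAL or ENOMEM on failure */
        return 1;
      printf ("64-byte aligned block at %p\n", mem);
      free (mem);
      return 0;
    }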
826 M_MXFAST is the maximum request size used for "fastbins", special bins
828 enables future requests for chunks of the same size to be handled
842 M_MXFAST is set in REQUEST size units. It is internally used in
884 might set to a value close to the average size of a process
902 The trim value must be greater than the page size to have any useful
908 freeing a chunk with size less than or equal to MXFAST. Trimming is
947 Automatic rounding-up to page-size units is normally sufficient
972 maximum heap size and its alignment. Going above 512k (i.e., 1M
982 M_MMAP_THRESHOLD is the request size threshold for using mmap()
983 to service a request. Requests of at least this size that cannot
1028 artificial limits on brk size imposed by the kernel. What is more,
1039 In 2001, the kernel had a maximum size for brk() which was about 800
1060 started doing dynamic allocations of the same size (which will
1067 starts freeing memory of a certain size, it's highly probable that this is
1068 a size the application uses for transient allocations. This estimator
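The tunables discussed above (M_MXFAST, M_TRIM_THRESHOLD, M_MMAP_THRESHOLD) can be adjusted at run time through mallopt(); the values below are arbitrary examples, not recommendations. Note that explicitly setting M_MMAP_THRESHOLD switches off the dynamic threshold adjustment described above.

    #include <malloc.h>

    static void
    tune_allocator (void)
    {
      mallopt (M_MXFAST, 64);                 /* cap the fastbin request size       */
      mallopt (M_TRIM_THRESHOLD, 256 * 1024); /* trim top when this much is free    */
      mallopt (M_MMAP_THRESHOLD, 512 * 1024); /* mmap() requests of 512 KiB and up  */
    }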
1138 #define MMAP(addr, size, prot, flags) \
1139 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
1161 /* Only used for large blocks: pointer to next larger size. */
1178 size fields also hold bits representing whether chunks are free or
1194 | (size of chunk, but used for application data) |
1228 chunk size (which is always a multiple of two words), is an in-use
1230 word before the current chunk size contains the previous chunk
1231 size, and can be used to find the front of the previous chunk.
1235 the size of the previous chunk, and might even get a memory
1255 trailing size field since there is no next contiguous chunk
1261 bit M (IS_MMAPPED) set in their size fields. Because they are
1262 allocated one-by-one, each must contain its own trailing size
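The boundary-tag layout sketched by the fragments above corresponds to the chunk header below; field names follow current glibc malloc.c and may differ slightly between versions, and INTERNAL_SIZE_T defaults to size_t.

    #include <stddef.h>

    typedef size_t INTERNAL_SIZE_T;     /* glibc's default definition */

    struct malloc_chunk
    {
      INTERNAL_SIZE_T mchunk_prev_size; /* size of previous chunk, if it is free   */
      INTERNAL_SIZE_T mchunk_size;      /* chunk size with low bits used as flags  */

      struct malloc_chunk *fd;          /* double links -- used only when free     */
      struct malloc_chunk *bk;

      /* Only used for large blocks: pointers to next/previous larger size.  */
      struct malloc_chunk *fd_nextsize;
      struct malloc_chunk *bk_nextsize;
    };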
1315 /* The smallest size we can malloc is an aligned minimal chunk */
1328 /* pad request bytes into a usable size -- internal version */
1337 is less than PTRDIFF_MAX. Returns TRUE and the requested size or MINSIZE in
1348 allocate blocks that are rounded up to the granule size. Take
1371 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1378 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1385 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1398 Bits to mask off when extracting size
1400 Note: IS_MMAPPED is intentionally not masked off from size field in
1407 /* Get size, ignoring use bits */
1419 /* Set the size of the chunk below P. Only valid if !prev_inuse (P). */
1451 /* Set size at head, without disturbing its use bit */
1454 /* Set size/use field */
1457 /* Set size at footer (only when chunk is not in use) */
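The three flag bits and the size-extraction idiom referenced above can be summarized as follows; the bit values match glibc's PREV_INUSE, IS_MMAPPED and NON_MAIN_ARENA macros, with the extraction macro renamed to mark this as a sketch.

    #include <stddef.h>

    #define PREV_INUSE      0x1   /* previous adjacent chunk is in use       */
    #define IS_MMAPPED      0x2   /* chunk was obtained with mmap()          */
    #define NON_MAIN_ARENA  0x4   /* chunk belongs to a non-main arena       */
    #define SIZE_BITS       (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)

    /* Get the chunk size with the flag bits masked off.  */
    #define chunksize_sketch(raw_size)  ((raw_size) & ~(size_t) SIZE_BITS)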
1463 /* This is the size of the real usable data in the chunk. Not valid for
1471 size, this is wasteful for small allocations so not done by default.
1516 Chunks in bins are kept in size order, with ties going to the
1524 Chunks of the same size are linked with the most
1555 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1558 64 bins of size 8
1559 32 bins of size 64
1560 16 bins of size 512
1561 8 bins of size 4096
1562 4 bins of size 32768
1563 2 bins of size 262144
1564 1 bin of size what's left
1573 a valid chunk size the small bins are bumped up one.
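The spacing table above corresponds, for the 32-bit layout (SIZE_SZ == 4), to glibc's smallbin_index/largebin_index_32 macros. The function below is a readability sketch of that table; it ignores the SMALLBIN_CORRECTION bump mentioned above and assumes SZ is already a valid chunk size (a multiple of 8, at least MINSIZE).

    static unsigned int
    bin_index_sketch (unsigned long sz)
    {
      if (sz < 512)              /* 64 small bins, 8 bytes apart            */
        return sz >> 3;
      if ((sz >> 6) <= 38)       /* 32 bins of size 64                      */
        return 56 + (sz >> 6);
      if ((sz >> 9) <= 20)       /* 16 bins of size 512                     */
        return 91 + (sz >> 9);
      if ((sz >> 12) <= 10)      /* 8 bins of size 4096                     */
        return 110 + (sz >> 12);
      if ((sz >> 15) <= 4)       /* 4 bins of size 32768                    */
        return 119 + (sz >> 15);
      if ((sz >> 18) <= 2)       /* 2 bins of size 262144                   */
        return 124 + (sz >> 18);
      return 126;                /* 1 bin for whatever is left              */
    }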
1629 malloc_printerr ("corrupted size vs. prev_size");
1676 does not have to be taken into account in size comparisons.
1690 points to its own bin with initial zero size, thus forcing
1751 /* The maximum fastbin request size we support */
1757 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
1785 /* Maximum size of memory handled in fastbins. */
1806 _int_malloc after constant propagation of the size parameter.
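Fastbins hold exactly one chunk size per bin, so the index is just a shift and an offset; this sketch assumes the glibc spacing of 8 bytes for 4-byte size_t and 16 bytes for 8-byte size_t, mirroring the fastbin_index macro.

    #include <stddef.h>

    static unsigned int
    fastbin_index_sketch (size_t chunk_size)
    {
      unsigned int shift = (sizeof (size_t) == 8) ? 4 : 3;  /* 16- vs 8-byte spacing      */
      return (unsigned int) (chunk_size >> shift) - 2;      /* smallest size maps to bin 0 */
    }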
2023 madvise_thp (void *p, INTERNAL_SIZE_T size)
2028 if (mp_.thp_pagesize == 0 || size < mp_.thp_pagesize)
2036 size += PTR_DIFF (p, q);
2040 __madvise (p, size, MADV_HUGEPAGE);
2100 /* top size is always at least MINSIZE */
2151 else /* markers are always of size SIZE_SZ */
2215 /* Legal size ... */
2268 INTERNAL_SIZE_T size;
2353 size = chunksize (p);
2354 total += size;
2358 idx = bin_index (size);
2364 if (!in_smallbin_range (size))
2387 else if (!in_smallbin_range (size))
2422 long int size;
2425 Round up size to nearest page. For mmapped chunks, the overhead is one
2433 size = ALIGN_UP (nb + SIZE_SZ, pagesize);
2435 size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
2437 /* Don't try if size wraps around 0. */
2438 if ((unsigned long) (size) <= (unsigned long) (nb))
2441 char *mm = (char *) MMAP (0, size,
2449 madvise_thp (mm, size);
2479 set_head (p, (size - correction) | IS_MMAPPED);
2485 set_head (p, size | IS_MMAPPED);
2493 sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
2502 Allocate memory using mmap() based on S and NB requested size, aligning to
2504 succeeds, S is updated with the allocated size. This is used as a fallback
2512 long int size = *s;
2514 /* Cannot merge with old top, so add its size back in */
2516 size = ALIGN_UP (size + old_size, pagesize);
2519 if ((unsigned long) (size) < minsize)
2520 size = minsize;
2522 /* Don't try if size wraps around 0 */
2523 if ((unsigned long) (size) <= (unsigned long) (nb))
2526 char *mbrk = (char *) (MMAP (0, size,
2534 madvise_thp (mbrk, size);
2542 *s = size;
2550 INTERNAL_SIZE_T old_size; /* its size */
2553 long size; /* arg to first MORECORE or mmap call */
2565 unsigned long remainder_size; /* its size */
2573 If have mmap, and the request size meets the mmap threshold, and
2633 old_heap_size = old_heap->size;
2637 av->system_mem += old_heap->size - old_heap_size;
2638 set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
2646 av->system_mem += heap->size;
2649 set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
2652 MALLOC_ALIGNMENT in size. */
2687 size = nb + mp_.top_pad + MINSIZE;
2696 size -= old_size;
2699 Round to a multiple of page size or huge page size.
2711 uintptr_t top = ALIGN_UP ((uintptr_t) __curbrk + size,
2713 size = top - (uintptr_t) __curbrk;
2717 size = ALIGN_UP (size, GLRO(dl_pagesize));
2725 if (size > 0)
2727 brk = (char *) (MORECORE (size));
2729 madvise_thp (brk, size);
2730 LIBC_PROBE (memory_sbrk_more, 2, brk, size);
2747 mbrk = sysmalloc_mmap_fallback (&size, nb, old_size,
2752 mbrk = sysmalloc_mmap_fallback (&size, nb, old_size, pagesize,
2758 snd_brk = brk + size;
2766 av->system_mem += size;
2769 If MORECORE extends previous space, we can likewise extend top size.
2773 set_head (old_top, (size + old_size) | PREV_INUSE);
2789 request size to account for the fact that we will not be able to
2837 end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
2911 Shrink old_top to insert fenceposts, keeping size a
2947 size = chunksize (p);
2950 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
2952 remainder_size = size - nb;
3046 INTERNAL_SIZE_T size = chunksize (p);
3052 size_t total_size = prev_size (p) + size;
3055 page size. But gcc does not recognize the optimization possibility
3078 INTERNAL_SIZE_T size = chunksize (p);
3085 size_t total_size = offset + size;
3113 new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
3114 + new_size - size - offset;
3135 overall size low is mildly important. Note that COUNTS and ENTRIES
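The COUNTS and ENTRIES arrays referenced above belong to the per-thread cache structure; this sketch follows recent glibc (TCACHE_MAX_BINS defaulting to 64, 16-bit counts, and a key field used for double-free detection), though the exact field types have varied across releases.

    #include <stdint.h>

    #define TCACHE_MAX_BINS 64                   /* glibc default */

    typedef struct tcache_entry
    {
      struct tcache_entry *next;                 /* singly linked list of cached chunks */
      uintptr_t key;                             /* double-free detection tag           */
    } tcache_entry;

    typedef struct tcache_perthread_struct
    {
      uint16_t counts[TCACHE_MAX_BINS];          /* chunks currently cached per bin     */
      tcache_entry *entries[TCACHE_MAX_BINS];    /* heads of the per-bin lists          */
    } tcache_perthread_struct;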
3402 INTERNAL_SIZE_T nb; /* padded request size */
3427 /* its size */
3440 we can exclude some size values which might appear here by
3538 /* Otherwise, ensure that it is at least a minimum chunk size */
3772 INTERNAL_SIZE_T nb; /* normalized request size */
3777 INTERNAL_SIZE_T size; /* its size */
3781 unsigned long remainder_size; /* its size */
3795 Convert request size to internal form by adding SIZE_SZ bytes
3797 to obtain a size of at least MINSIZE, the smallest allocatable
3798 size. Also, checked_request2size returns false for request sizes
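A hedged sketch of the conversion described above: pad the request by SIZE_SZ, round up to the alignment, enforce MINSIZE, and fail for requests that do not fit in ptrdiff_t. The constants are simplified stand-ins for glibc's real SIZE_SZ/MALLOC_ALIGN_MASK/MINSIZE definitions.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define SIZE_SZ            (sizeof (size_t))
    #define MALLOC_ALIGN_MASK  (2 * SIZE_SZ - 1)   /* assumes 2 * SIZE_SZ alignment */
    #define MINSIZE            (4 * SIZE_SZ)       /* simplified minimum chunk size */

    static bool
    checked_request2size_sketch (size_t req, size_t *sz)
    {
      if (req > PTRDIFF_MAX)                       /* reject over-large requests   */
        return false;
      size_t padded = (req + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK;
      *sz = padded < MINSIZE ? MINSIZE : padded;   /* never below the minimum      */
      return true;
    }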
3820 If the size qualifies as a fastbin, first check corresponding bin.
3861 /* While we're here, if we see other chunks of the same size,
3895 hold one size each, no searching within bins is necessary.
3919 /* While we're here, if we see other chunks of the same size,
3997 size = chunksize (victim);
3998 mchunkptr next = chunk_at_offset (victim, size);
4000 if (__glibc_unlikely (size <= CHUNK_HDR_SZ)
4001 || __glibc_unlikely (size > av->system_mem))
4002 malloc_printerr ("malloc(): invalid size (unsorted)");
4005 malloc_printerr ("malloc(): invalid next size (unsorted)");
4006 if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
4025 (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
4028 remainder_size = size - nb;
4058 if (size == nb)
4060 set_inuse_bit_at_offset (victim, size);
4087 if (in_smallbin_range (size))
4089 victim_index = smallbin_index (size);
4095 victim_index = largebin_index (size);
4103 size |= PREV_INUSE;
4106 if ((unsigned long) (size)
4119 while ((unsigned long) size < chunksize_nomask (fwd))
4125 if ((unsigned long) size
4193 while (((unsigned long) (size = chunksize (victim)) <
4197 /* Avoid removing the first entry for a size so that the skip
4204 remainder_size = size - nb;
4210 set_inuse_bit_at_offset (victim, size);
4299 size = chunksize (victim);
4302 assert ((unsigned long) (size) >= (unsigned long) (nb));
4304 remainder_size = size - nb;
4312 set_inuse_bit_at_offset (victim, size);
4362 We require that av->top always exists (i.e., has size >=
4370 size = chunksize (victim);
4372 if (__glibc_unlikely (size > av->system_mem))
4373 malloc_printerr ("malloc(): corrupted top size");
4375 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
4377 remainder_size = size - nb;
4422 INTERNAL_SIZE_T size; /* its size */
4425 INTERNAL_SIZE_T nextsize; /* its size */
4427 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4431 size = chunksize (p);
4435 Therefore we can exclude some size values which might appear
4437 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
4440 /* We know that each chunk is at least MINSIZE bytes in size or a
4442 if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
4443 malloc_printerr ("free(): invalid size");
4449 size_t tc_idx = csize2tidx (size);
4493 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
4500 && (chunk_at_offset(p, size) != av->top)
4504 if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
4506 || __builtin_expect (chunksize (chunk_at_offset (p, size))
4516 fail = (chunksize_nomask (chunk_at_offset (p, size)) <= CHUNK_HDR_SZ
4517 || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
4522 malloc_printerr ("free(): invalid next size (fast)");
4525 free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
4528 unsigned int idx = fastbin_index(size);
4556 /* Check that the size of the fastbin chunk at the top is the same as the
4557 size of the chunk that we are adding. We can dereference OLD
4578 nextchunk = chunk_at_offset(p, size);
4596 malloc_printerr ("free(): invalid next size (normal)");
4598 free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
4603 size += prevsize;
4606 malloc_printerr ("corrupted size vs. prev_size while consolidating");
4617 size += nextsize;
4633 if (!in_smallbin_range(size))
4641 set_head(p, size | PREV_INUSE);
4642 set_foot(p, size);
4653 size += nextsize;
4654 set_head(p, size | PREV_INUSE);
4672 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4725 INTERNAL_SIZE_T size;
4755 malloc_printerr ("malloc_consolidate(): invalid chunk size");
4762 size = chunksize (p);
4763 nextchunk = chunk_at_offset(p, size);
4768 size += prevsize;
4771 malloc_printerr ("corrupted size vs. prev_size in fastbins");
4779 size += nextsize;
4788 if (!in_smallbin_range (size)) {
4793 set_head(p, size | PREV_INUSE);
4796 set_foot(p, size);
4800 size += nextsize;
4801 set_head(p, size | PREV_INUSE);
4820 INTERNAL_SIZE_T newsize; /* its size */
4826 unsigned long remainder_size; /* its size */
4828 /* oldmem size */
4831 malloc_printerr ("realloc(): invalid old size");
4842 malloc_printerr ("realloc(): invalid next size");
4942 INTERNAL_SIZE_T nb; /* padded request size */
4947 INTERNAL_SIZE_T newsize; /* its size */
4950 unsigned long remainder_size; /* its size */
4951 INTERNAL_SIZE_T size;
5016 size = chunksize (p);
5017 if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
5019 remainder_size = size - nb;
5055 INTERNAL_SIZE_T size = chunksize (p);
5057 if (size > psm1 + sizeof (struct malloc_chunk))
5066 assert ((char *) p + size > paligned_mem);
5068 /* This is the size we could potentially free. */
5069 size -= paligned_mem - (char *) p;
5071 if (size > psm1)
5076 memset (paligned_mem, 0x89, size & ~psm1);
5078 __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
5605 void *osMoreCore(int size)
5610 if (size > 0)
5612 if (size < MINIMUM_MORECORE_SIZE)
5613 size = MINIMUM_MORECORE_SIZE;
5615 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5624 sbrk_top = (char *) ptr + size;
5627 else if (size < 0)
5674 __posix_memalign (void **memptr, size_t alignment, size_t size)
5690 mem = _mid_memalign (alignment, size, address);
5825 heap_size += heap->size;
5844 <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5857 "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5858 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5859 "<system type=\"current\" size=\"%zu\"/>\n"
5860 "<system type=\"max\" size=\"%zu\"/>\n",
5867 "<aspace type=\"total\" size=\"%zu\"/>\n"
5868 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
5869 "<aspace type=\"subheaps\" size=\"%zu\"/>\n",
5877 "<aspace type=\"total\" size=\"%zu\"/>\n"
5878 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5890 "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5891 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5892 "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
5893 "<system type=\"current\" size=\"%zu\"/>\n"
5894 "<system type=\"max\" size=\"%zu\"/>\n"
5895 "<aspace type=\"total\" size=\"%zu\"/>\n"
5896 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"