diff --git a/h_malloc.c b/h_malloc.c
index 7057639..fecc539 100644
--- a/h_malloc.c
+++ b/h_malloc.c
@@ -108,10 +108,10 @@ static const size_t min_align = 16;
 #define MIN_SLAB_SIZE_CLASS_SHIFT 4
 
 #if !CONFIG_EXTENDED_SIZE_CLASSES
-static const size_t max_slab_size_class = 16384;
+#define MAX_SLAB_SIZE_CLASS 16384
 #define MAX_SLAB_SIZE_CLASS_SHIFT 14
 #else
-static const size_t max_slab_size_class = 131072;
+#define MAX_SLAB_SIZE_CLASS 131072
 #define MAX_SLAB_SIZE_CLASS_SHIFT 17
 #endif
 
@@ -210,7 +210,7 @@ static size_t get_slab_size(size_t slots, size_t size) {
 }
 
 // limit on the number of cached empty slabs before attempting purging instead
-static const size_t max_empty_slabs_total = 128 * 1024;
+static const size_t max_empty_slabs_total = MAX_SLAB_SIZE_CLASS;
 
 struct __attribute__((aligned(CACHELINE_SIZE))) size_class {
     struct mutex lock;
@@ -1216,7 +1216,7 @@ static void *allocate_large(size_t size) {
 }
 
 static inline void *allocate(unsigned arena, size_t size) {
-    return size <= max_slab_size_class ? allocate_small(arena, size) : allocate_large(size);
+    return size <= MAX_SLAB_SIZE_CLASS ? allocate_small(arena, size) : allocate_large(size);
 }
 
 static void deallocate_large(void *p, const size_t *expected_size) {
@@ -1248,7 +1248,7 @@ static int alloc_aligned(unsigned arena, void **memptr, size_t alignment, size_t
     }
 
     if (alignment <= PAGE_SIZE) {
-        if (size <= max_slab_size_class && alignment > min_align) {
+        if (size <= MAX_SLAB_SIZE_CLASS && alignment > min_align) {
             size = get_size_info_align(size, alignment).size;
         }
 
@@ -1299,7 +1299,7 @@ static void *alloc_aligned_simple(unsigned arena, size_t alignment, size_t size)
 }
 
 static size_t adjust_size_for_canaries(size_t size) {
-    if (size > 0 && size <= max_slab_size_class) {
+    if (size > 0 && size <= MAX_SLAB_SIZE_CLASS) {
         return size + canary_size;
     }
     return size;
@@ -1329,7 +1329,7 @@ EXPORT void *h_calloc(size_t nmemb, size_t size) {
     total_size = adjust_size_for_canaries(total_size);
     void *p = allocate(arena, total_size);
     thread_seal_metadata();
-    if (!ZERO_ON_FREE && likely(p != NULL) && total_size && total_size <= max_slab_size_class) {
+    if (!ZERO_ON_FREE && likely(p != NULL) && total_size && total_size <= MAX_SLAB_SIZE_CLASS) {
         memset(p, 0, total_size - canary_size);
     }
     return p;
@@ -1342,7 +1342,7 @@ EXPORT void *h_realloc(void *old, size_t size) {
 
     size = adjust_size_for_canaries(size);
 
-    if (size > max_slab_size_class) {
+    if (size > MAX_SLAB_SIZE_CLASS) {
         size = get_large_size_class(size);
         if (unlikely(!size)) {
             errno = ENOMEM;
@@ -1353,7 +1353,7 @@
     size_t old_size;
     if (old >= get_slab_region_start() && old < ro.slab_region_end) {
         old_size = slab_usable_size(old);
-        if (size <= max_slab_size_class && get_size_info(size).size == old_size) {
+        if (size <= MAX_SLAB_SIZE_CLASS && get_size_info(size).size == old_size) {
             return old;
         }
         thread_unseal_metadata();
@@ -1377,7 +1377,7 @@
         }
         mutex_unlock(&ra->lock);
 
-        if (size > max_slab_size_class) {
+        if (size > MAX_SLAB_SIZE_CLASS) {
             // in-place shrink
             if (size < old_size) {
                 void *new_end = (char *)old + size;
@@ -1461,11 +1461,11 @@
         return NULL;
     }
     size_t copy_size = min(size, old_size);
-    if (copy_size > 0 && copy_size <= max_slab_size_class) {
+    if (copy_size > 0 && copy_size <= MAX_SLAB_SIZE_CLASS) {
         copy_size -= canary_size;
     }
     memcpy(new, old, copy_size);
-    if (old_size <= max_slab_size_class) {
+    if (old_size <= MAX_SLAB_SIZE_CLASS) {
         deallocate_small(old, NULL);
     } else {
         deallocate_large(old, NULL);
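
Why the definition becomes a macro rather than a renamed `static const`: the new max_empty_slabs_total initializer must be an integer constant expression, and in C a const-qualified object does not qualify. A minimal standalone sketch of that constraint follows; it is not part of the patch, and the file name and `broken_limit` are made-up names for illustration:

/* sketch.c - illustration only, not part of the patch above */
#include <stddef.h>

#define MAX_SLAB_SIZE_CLASS 16384 /* non-extended config, 1 << 14 */

/* fine: the macro expands to an integer constant expression */
static const size_t max_empty_slabs_total = MAX_SLAB_SIZE_CLASS;

#if 0
/* rejected by C compilers ("initializer element is not constant"):
   a const object is not a constant expression in C, unlike in C++ */
static const size_t max_slab_size_class = 16384;
static const size_t broken_limit = max_slab_size_class;
#endif

int main(void) {
    return max_empty_slabs_total == MAX_SLAB_SIZE_CLASS ? 0 : 1;
}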