From 99d68238d2f0375162a96deda8a16848fdb0e158 Mon Sep 17 00:00:00 2001
From: Daniel Micay
Date: Fri, 7 Sep 2018 00:00:32 -0400
Subject: [PATCH] implement slab allocation write-after-free check

---
 malloc.c | 37 +++++++++++++++++++++++++++++--------
 1 file changed, 29 insertions(+), 8 deletions(-)

diff --git a/malloc.c b/malloc.c
index 5d58692..b59f04d 100644
--- a/malloc.c
+++ b/malloc.c
@@ -20,6 +20,7 @@
 static_assert(sizeof(void *) == 8, "64-bit only");
 
 static const bool guard_slabs = true;
+static const bool enable_write_after_free_check = true;
 
 // either sizeof(uint64_t) or 0
 static const size_t canary_size = sizeof(uint64_t);
@@ -243,10 +244,20 @@ static void *slot_pointer(size_t size, void *slab, size_t slot) {
     return (char *)slab + slot * size;
 }
 
-static void set_canary(struct slab_metadata *metadata, void *p, size_t size, size_t requested_size) {
-    if (requested_size != 0) {
-        memcpy((char *)p + size - canary_size, &metadata->canary_value, canary_size);
+static void write_after_free_check(char *p, size_t size) {
+    if (!enable_write_after_free_check) {
+        return;
     }
+
+    for (size_t i = 0; i < size; i += sizeof(uint64_t)) {
+        if (*(uint64_t *)(p + i)) {
+            fatal_error("write after free");
+        }
+    }
+}
+
+static void set_canary(struct slab_metadata *metadata, void *p, size_t size) {
+    memcpy((char *)p + size - canary_size, &metadata->canary_value, canary_size);
 }
 
 static inline void *slab_allocate(size_t requested_size) {
@@ -273,7 +284,10 @@
         size_t slot = get_free_slot(&c->rng, slots, metadata);
         set_slot(metadata, slot);
         void *p = slot_pointer(size, slab, slot);
-        set_canary(metadata, p, size, requested_size);
+        if (requested_size) {
+            write_after_free_check(p, size - canary_size);
+            set_canary(metadata, p, size);
+        }
 
         pthread_mutex_unlock(&c->mutex);
         return p;
@@ -282,7 +296,7 @@
         metadata->canary_value = get_random_u64(&c->rng);
 
         void *slab = get_slab(c, slab_size, metadata);
-        if (requested_size != 0 && memory_protect_rw(slab, slab_size)) {
+        if (requested_size && memory_protect_rw(slab, slab_size)) {
             pthread_mutex_unlock(&c->mutex);
             return NULL;
         }
@@ -300,7 +314,9 @@ static inline void *slab_allocate(size_t requested_size) {
         size_t slot = get_free_slot(&c->rng, slots, metadata);
         set_slot(metadata, slot);
         void *p = slot_pointer(size, slab, slot);
-        set_canary(metadata, p, size, requested_size);
+        if (requested_size) {
+            set_canary(metadata, p, size);
+        }
 
         pthread_mutex_unlock(&c->mutex);
         return p;
@@ -318,7 +334,9 @@ static inline void *slab_allocate(size_t requested_size) {
         size_t slot = get_free_slot(&c->rng, slots, metadata);
         set_slot(metadata, slot);
         void *p = slot_pointer(size, slab, slot);
-        set_canary(metadata, p, size, requested_size);
+        if (requested_size) {
+            set_canary(metadata, p, size);
+        }
 
         pthread_mutex_unlock(&c->mutex);
         return p;
@@ -337,7 +355,10 @@ static inline void *slab_allocate(size_t requested_size) {
 
         void *slab = get_slab(c, slab_size, metadata);
         void *p = slot_pointer(size, slab, slot);
-        set_canary(metadata, p, size, requested_size);
+        if (requested_size) {
+            write_after_free_check(p, size - canary_size);
+            set_canary(metadata, p, size);
+        }
 
         pthread_mutex_unlock(&c->mutex);
         return p;