Compare commits

...

295 Commits

Author SHA1 Message Date
Dmitry Muhomor
8d5c631224 android: implement fatal_error() via async_safe_fatal()
async_safe_fatal() performs the following steps:
- logs the error message to stderr and logcat
- passes error message to debuggerd via android_set_abort_message(). debuggerd then saves the error
message in the crash report file ("tombstone")
- calls abort()
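
A minimal sketch of the described approach (the message prefix and the noreturn attribute are illustrative; this is not the verbatim diff):

    #include <async_safe/log.h>

    // async_safe_fatal() logs to stderr and logcat, hands the message to
    // debuggerd via android_set_abort_message(), then calls abort()
    __attribute__((noreturn)) void fatal_error(const char *s) {
        async_safe_fatal("hardened_malloc: fatal allocator error: %s", s);
    }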
2023-09-28 13:47:11 -04:00
Christian Göttsche
903cba5a84 test: add regression test for missing init() in realloc() 2023-09-27 19:19:19 -04:00
Christian Göttsche
9cb4e6daf6 do not skip init() in realloc()
If N_ARENA is greater than 1, `thread_arena` is initially set to N_ARENA,
which is an invalid index into `ro.size_class_metadata[]`.

The actual used arena is computed in init().

Ensure init() is called if a new thread is only using realloc() to avoid
UB, e.g. pthread_mutex_lock() might crash due to the memory not holding an
initialized mutex (a minimal reproduction sketch follows the backtrace below).

Affects mesa 23.2.0~rc4.

Example backtrace using glmark2 (note `arena=4` with the default
N_ARENA being 4):

    Program terminated with signal SIGSEGV, Segmentation fault.
    #0  ___pthread_mutex_lock (mutex=0x7edff8d3f200) at ./nptl/pthread_mutex_lock.c:80
            type = <optimized out>
            __PRETTY_FUNCTION__ = "___pthread_mutex_lock"
            id = <optimized out>
    #1  0x00007f0ab62091a6 in mutex_lock (m=0x7edff8d3f200) at ./mutex.h:21
    No locals.
    #2  0x00007f0ab620c9b5 in allocate_small (arena=4, requested_size=24) at h_malloc.c:517
            info = {size = 32, class = 2}
            size = 32
            c = 0x7edff8d3f200
            slots = 128
            slab_size = 4096
            metadata = 0x0
            slot = 0
            slab = 0x0
            p = 0x0
    #3  0x00007f0ab6209809 in allocate (arena=4, size=24) at h_malloc.c:1252
    No locals.
    #4  0x00007f0ab6208e26 in realloc (old=0x72b138199120, size=24) at h_malloc.c:1499
            vma_merging_reliable = false
            old_size = 16
            new = 0x0
            copy_size = 139683981990973
    #5  0x00007299f919e556 in attach_shader (ctx=0x7299e9ef9000, shProg=0x7370c9277d30, sh=0x7370c9278230) at ../src/mesa/main/shaderapi.c:336
            n = 1
    #6  0x00007299f904223e in _mesa_unmarshal_AttachShader (ctx=<optimized out>, cmd=<optimized out>) at src/mapi/glapi/gen/marshal_generated2.c:1539
            program = <optimized out>
            shader = <optimized out>
            cmd_size = 2
    #7  0x00007299f8f2e3b2 in glthread_unmarshal_batch (job=job@entry=0x7299e9ef9168, gdata=gdata@entry=0x0, thread_index=thread_index@entry=0) at ../src/mesa/main/glthread.c:139
            cmd = 0x7299e9ef9180
            batch = 0x7299e9ef9168
            ctx = 0x7299e9ef9000
            pos = 0
            used = 3
            buffer = 0x7299e9ef9180
            shared = <optimized out>
            lock_mutexes = <optimized out>
            batch_index = <optimized out>
    #8  0x00007299f8ecc2d9 in util_queue_thread_func (input=input@entry=0x72c1160e5580) at ../src/util/u_queue.c:309
            job = {job = 0x7299e9ef9168, global_data = 0x0, job_size = 0, fence = 0x7299e9ef9168, execute = <optimized out>, cleanup = <optimized out>}
            queue = 0x7299e9ef9058
            thread_index = 0
    #9  0x00007299f8f1bcbb in impl_thrd_routine (p=<optimized out>) at ../src/c11/impl/threads_posix.c:67
            pack = {func = 0x7299f8ecc190 <util_queue_thread_func>, arg = 0x72c1160e5580}
    #10 0x00007f0ab5aa63ec in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:444
            ret = <optimized out>
            pd = <optimized out>
            out = <optimized out>
            unwind_buf = {cancel_jmp_buf = {{jmp_buf = {139683974242608, 2767510063778797177, -168, 11, 140727286820160, 126005371879424, -4369625917767903623, -2847048016936659335}, mask_was_saved = 0}}, priv = {pad = {0x0, 0x0, 0x0,
              0x0}, data = {prev = 0x0, cleanup = 0x0, canceltype = 0}}}
            not_first_call = <optimized out>
    #11 0x00007f0ab5b26a2c in clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:81
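
A self-contained sketch in the spirit of the regression test added alongside this fix (not the actual test code): the new thread's very first allocator call is a realloc() that has to move a small allocation, which is what reaches allocate(thread_arena, ...) before init() has run.

    #include <pthread.h>
    #include <stdlib.h>

    static void *first_call_is_realloc(void *arg) {
        // the first allocator call on this thread is realloc(); growing the
        // 16 byte allocation to 24 bytes forces a move to another size class,
        // which previously used the uninitialized thread_arena
        void *p = realloc(arg, 24);
        free(p);
        return NULL;
    }

    int main(void) {
        void *p = malloc(16);
        if (p == NULL) {
            return 1;
        }
        pthread_t thread;
        if (pthread_create(&thread, NULL, first_call_is_realloc, p) != 0) {
            return 1;
        }
        pthread_join(thread, NULL);
        return 0;
    }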
2023-09-26 20:03:02 -04:00
dependabot[bot]
8696431b88 Bump actions/checkout from 3 to 4
Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v3...v4)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-09-04 15:37:49 -04:00
Daniel Micay
2d302f7d85 enable -Wundef 2023-06-10 14:58:33 -04:00
Daniel Micay
d5f9909eca add missing include 2023-06-10 14:58:20 -04:00
Daniel Micay
5e1901e85d silence unwanted tidy warning 2023-06-10 14:52:08 -04:00
Daniel Micay
462c2c5293 conditionally include bits/functexcept.h 2023-06-10 14:20:20 -04:00
Daniel Micay
8f3281ed6a enable strict prototypes warning 2023-06-10 14:18:27 -04:00
Christian Göttsche
7d75acc62a use strict prototype
h_malloc.c:83:21: error: function declaration isn’t a prototype [-Werror=strict-prototypes]
       83 | static inline void *get_slab_region_end() {
          |                     ^~~~~~~~~~~~~~~~~~~
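
The fix is the usual one for this warning (the body shown here is a placeholder; the real implementation is in h_malloc.c): an empty parameter list `()` declares a function without a prototype in C, so it has to be spelled `(void)`:

    static inline void *get_slab_region_end(void) {
        return NULL; // placeholder body for illustration only
    }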
2023-06-10 14:18:27 -04:00
Christian Göttsche
af866a7faa support versioned Clang
make CC=clang-14
    clang-14  -std=c17 -O3 -flto -fPIC -fvisibility=hidden -fno-plt -fstack-clash-protection -fstack-protector-strong -pipe -Wall -Wcast-qual -Wextra -Wfloat-equal -Wformat=2 -Winit-self -Wmissing-format-attribute -Wmissing-noreturn -Wmissing-prototypes -Wnull-dereference -Wpointer-arith -Wshadow -Wstrict-prototypes -Wundef -Wunused -Wwrite-strings -Wcast-align=strict -Wcast-qual -Wwrite-strings -Werror -march=native -Wmissing-prototypes  -D_GNU_SOURCE -I include -DCONFIG_SEAL_METADATA=false -DZERO_ON_FREE=true -DWRITE_AFTER_FREE_CHECK=true -DSLOT_RANDOMIZE=true -DSLAB_CANARY=true -DSLAB_QUARANTINE_RANDOM_LENGTH=1 -DSLAB_QUARANTINE_QUEUE_LENGTH=1 -DCONFIG_EXTENDED_SIZE_CLASSES=true -DCONFIG_LARGE_SIZE_CLASSES=true -DGUARD_SLABS_INTERVAL=1 -DGUARD_SIZE_DIVISOR=2 -DREGION_QUARANTINE_RANDOM_LENGTH=256 -DREGION_QUARANTINE_QUEUE_LENGTH=1024 -DREGION_QUARANTINE_SKIP_THRESHOLD=33554432  -DFREE_SLABS_QUARANTINE_RANDOM_LENGTH=32 -DCONFIG_CLASS_REGION_SIZE=34359738368  -DN_ARENA=4 -DCONFIG_STATS=false  -c -o out/chacha.o chacha.c
    error: unknown warning option '-Wcast-align=strict'; did you mean '-Wcast-align'? [-Werror,-Wunknown-warning-option]
    make: *** [Makefile:114: out/chacha.o] Error 1
2023-06-10 14:18:27 -04:00
Daniel Micay
64dad0a69f drop legacy glibc support 2023-06-10 14:04:46 -04:00
Daniel Micay
95c4b40caf update minimum dependency version list 2023-06-10 14:02:55 -04:00
Daniel Micay
cc70583beb drop info on MPK with unsupported kernels 2023-06-10 13:59:56 -04:00
Daniel Micay
62a98efb13 update supported Android branch 2023-06-10 13:59:36 -04:00
Daniel Micay
d3152b8e8f preserve errno for free calls
This is a future POSIX requirement recently implemented by musl and
glibc.
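
A sketch of the pattern (hypothetical internal helper name, not the verbatim diff): the free() entry point saves and restores errno around work that may legitimately set it, such as munmap() or madvise() calls.

    #include <errno.h>

    // stand-in for the internal deallocation path, which may call
    // munmap()/madvise() and therefore set errno on failure
    static void deallocate(void *p) {
        (void)p; // real work elided in this sketch
    }

    void h_free(void *p) {
        // free() must leave errno untouched, per the upcoming POSIX requirement
        int saved_errno = errno;
        deallocate(p);
        errno = saved_errno;
    }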
2023-02-17 13:07:26 -05:00
Daniel Micay
2e9daf3122 merge fprintf/fputs calls in malloc_info 2023-02-17 13:07:26 -05:00
Daniel Micay
6038030d0b no need to check for -fstack-clash-protection
This is supported by the compiler versions listed as minimum
requirements in the README.
2023-02-17 13:07:26 -05:00
Daniel Micay
4d23fa37ad enable Intel CET support 2023-02-17 13:07:26 -05:00
Daniel Micay
6d36e758f5 update copyright notice 2023-02-17 13:07:26 -05:00
Daniel Micay
cd9b875297 reorganize compiler switches 2023-02-17 13:07:24 -05:00
Daniel Micay
2250130c53 remove unnecessary UNUSED marker 2022-09-16 01:03:47 -04:00
Daniel Micay
72dba6765f disable tidy identifier length lint 2022-09-16 00:57:08 -04:00
Daniel Micay
8f38bbdee6 add configuration for self-init
This needs to be disabled for compatibility with the exploit protection
compatibility mode on GrapheneOS. hardened_malloc shouldn't be trying to
initialize itself when exploit protection compatibility mode is enabled.
This has to be handled in our Bionic integration instead.
2022-09-14 03:41:31 -04:00
Daniel Micay
dd427cb3b8 arm64 page table / page size docs 2022-09-08 23:17:25 -04:00
Daniel Micay
b5dd9d11d9 raise class region size to 32GB for arm64 Android 2022-09-08 23:13:15 -04:00
Daniel Micay
72fb3576f5 Android 13 is now all we'll be supporting 2022-08-16 07:48:47 -04:00
Dmitry Muhomor
f8fec401c7 update Android.bp for Android 13 2022-08-16 07:46:44 -04:00
Daniel Micay
0d6d63cbe7 improve package installation for CI 2022-03-11 22:09:13 -05:00
dependabot[bot]
8fd31e4bc1 Bump actions/checkout from 2 to 3
Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 3.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v2...v3)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-03-02 13:44:16 -05:00
jvoisin
b511696c55 clean up includes and remove non-portable includes
This marginally increases the portability of hardened_malloc,
e.g. on OSX.
2022-02-07 07:14:51 -05:00
jvoisin
943704de7c Remove __pycache__ as well in make clean 2022-02-06 18:56:56 -05:00
jvoisin
04a86566c3 Don't depend on gcc_s 2022-01-28 14:59:58 -05:00
Daniel Micay
448170a412 fix case for non-macro constant 2022-01-21 23:59:37 -05:00
Daniel Micay
995ce07d45 add is_init likely/unlikely markers 2022-01-21 19:46:49 -05:00
Daniel Micay
c9d1abcd7e explicitly mark fatal error conditions unlikely 2022-01-21 19:45:05 -05:00
Daniel Micay
8f0b252c33 mark more out-of-memory conditions as unlikely 2022-01-21 19:03:02 -05:00
Daniel Micay
3cffc1e1af treat zero size malloc as unlikely
Calls to malloc with a zero size are extremely rare relative to normal
usage of the API. It's generally only done by inefficient C code with
open coded dynamic array implementations where they aren't handling zero
size as a special case for their usage of malloc/realloc. Efficient code
wouldn't be making these allocations. It doesn't make sense to optimize
for the performance of rare edge cases caused by inefficient code.
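
A sketch of the pattern being described (function name and the zero-size behaviour shown are illustrative, not the exact diff): the zero-size branch is wrapped in an unlikely() hint so code layout and branch prediction favour normal allocations.

    #include <stddef.h>

    #define unlikely(x) __builtin_expect(!!(x), 0)

    size_t adjust_zero_size(size_t size) {
        if (unlikely(size == 0)) {
            // rare path: treat a zero-size request as a minimal allocation
            // (illustrative behaviour, not necessarily the allocator's)
            return 1;
        }
        return size;
    }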
2022-01-21 18:27:04 -05:00
Daniel Micay
ae2524bf88 ignore environment for test Makefile variables 2022-01-21 16:24:49 -05:00
Daniel Micay
e28addda19 add back gitignore entries 2022-01-21 15:07:13 -05:00
Daniel Micay
9d89712386 remove extra newline 2022-01-21 15:06:29 -05:00
jvoisin
84eadd8568 Move memory corruption tests up a directory 2022-01-21 15:00:18 -05:00
Daniel Micay
0bbcc5d610 malloc.c was renamed to h_malloc.c 2022-01-19 16:42:12 -05:00
jvoisin
3fa30842ed Use $(MAKE) instead of make in Makefiles
This will pass the correct flags to the make
invocations.
2022-01-17 16:21:00 -05:00
Daniel Micay
b3d78bd5f6 use static const for local constants 2022-01-16 21:02:17 -05:00
Daniel Micay
8d61e63274 add comment about special small size classes 2022-01-16 20:50:49 -05:00
Daniel Micay
422ee78b3e reorganize pages.h header 2022-01-16 16:57:22 -05:00
Daniel Micay
3e312695e1 document clz64/log2u64 not being able to handle 0 2022-01-16 16:28:49 -05:00
Daniel Micay
81cf2f27a0 calculate slab size class instead of array loop 2022-01-16 16:18:14 -05:00
Daniel Micay
d8cb2d9f7a use consistent wrappers around clz/ffs 2022-01-16 15:39:59 -05:00
Daniel Micay
86f9c739ee define constant for u64 bit width 2022-01-16 15:06:36 -05:00
Daniel Micay
536f852538 reuse a single size alignment implementation 2022-01-16 14:44:28 -05:00
Daniel Micay
e814cf4f5c enable linking optimization for GNU linker 2022-01-16 12:18:00 -05:00
Daniel Micay
705211ef49 define UBSan flags for SHARED_FLAGS to reuse it 2022-01-16 11:50:55 -05:00
Daniel Micay
189d3362d5 enable sized deallocation ABI for consistency 2022-01-16 11:49:51 -05:00
Daniel Micay
e2bcf4a356 stop silencing constant logical operand warning
This was resolved by 3696f071a4.
2022-01-13 14:51:22 -05:00
Daniel Micay
d470ae56a5 switch Android build to C17 from C11 2022-01-13 14:48:56 -05:00
Daniel Micay
42b097f3b0 CONFIG_SEAL_METADATA is regularly tested now 2022-01-13 14:25:41 -05:00
Daniel Micay
17891d743e switch from c11 to c17 standard 2022-01-12 10:20:47 -05:00
Daniel Micay
efd71e70c7 update documentation based on light configuration 2022-01-12 08:58:00 -05:00
Daniel Micay
a6d27848af wrap overly long line 2022-01-12 08:44:39 -05:00
Daniel Micay
110126d7f0 README: fix path to configuration templates 2022-01-12 08:43:36 -05:00
Daniel Micay
a2bdb4da27 update gitignore for renamed / added tests 2022-01-12 08:41:21 -05:00
Daniel Micay
0c0561e563 update gitignore for config template system 2022-01-12 08:41:12 -05:00
Daniel Micay
5a577e9ee0 document configuration template system 2022-01-12 08:38:33 -05:00
Daniel Micay
b3372e1576 add configuration template system 2022-01-10 04:47:01 -05:00
jvoisin
052b756840 Fix two warnings 2022-01-09 08:50:46 -05:00
jvoisin
001eb0687b Fix an unused parameter warning 2022-01-04 12:16:53 -05:00
Daniel Micay
2a5662948e rename bitmap manipulation functions 2022-01-04 12:14:55 -05:00
Daniel Micay
d1c39edc9b use const for malloc_object_size API 2022-01-04 10:14:41 -05:00
Daniel Micay
aa1746a90d alloc_size attribute for legacy valloc function 2022-01-04 10:04:26 -05:00
Daniel Micay
f3efc26638 add malloc attribute where appropriate 2022-01-04 09:56:29 -05:00
jvoisin
78cbb964d4 Add alloc_size and alloc_align attributes
This should help the compiler emit better diagnostics and improve the
correctness of `__builtin_object_size`.

See https://clang.llvm.org/docs/AttributeReference.html#alloc-size
and https://clang.llvm.org/docs/AttributeReference.html#alloc-align
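
A sketch of how these attributes look on allocator declarations (the exact set of functions annotated in the diff isn't reproduced here):

    #include <stddef.h>

    // alloc_size tells the compiler which argument(s) determine the size of
    // the returned object, improving __builtin_object_size and related
    // diagnostics; alloc_align does the same for the alignment argument
    __attribute__((malloc, alloc_size(1))) void *h_malloc(size_t size);
    __attribute__((malloc, alloc_size(1, 2))) void *h_calloc(size_t nmemb, size_t size);
    __attribute__((malloc, alloc_size(2), alloc_align(1)))
    void *h_aligned_alloc(size_t alignment, size_t size);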
2022-01-04 09:45:20 -05:00
jvoisin
36dfed3354 Add aarch64 to the CI 2022-01-04 09:45:00 -05:00
Daniel Micay
8a500088c6 add missing include for overflow tests 2022-01-03 21:24:31 -05:00
Daniel Micay
c50d06bc6a comment explaining XOR for 8 byte overflow test 2022-01-03 21:23:14 -05:00
Daniel Micay
645414cc9f add 1 byte overflow tests 2022-01-03 21:20:15 -05:00
Daniel Micay
13a1f578cb use calculated size for overflow tests
This greatly reduces how much these tests depend on hard-wired knowledge
about the size classes.
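
A sketch of the idea (illustrative test, not the actual one): derive the overflow offset from malloc_usable_size() instead of hard-coding size class knowledge.

    #include <malloc.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void) {
        char *p = malloc(16);
        if (p == NULL) {
            return 1;
        }
        // write just past the reported usable size rather than at a
        // hard-coded offset, so the test tracks size class changes
        size_t usable = malloc_usable_size(p);
        memset(p + usable, 'X', 8); // expected to be caught by the canary check on free
        free(p);
        return 0;
    }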
2022-01-03 21:11:31 -05:00
Daniel Micay
acda766e2c fix small allocation canary overwrite test
Overwriting one byte of a canary with 0 has a 1/256 chance of not
triggering the expected failure.
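
A sketch of the distinction (illustrative): assigning a constant 0 silently passes whenever the targeted canary byte already happens to be 0, while flipping a bit is guaranteed to change it.

    #include <stddef.h>

    void corrupt_one_canary_byte(unsigned char *p, size_t usable_size) {
        // XOR can never leave the byte unchanged, so the canary check must
        // fire; `p[usable_size] = 0;` has a 1/256 chance of being a no-op
        p[usable_size] ^= 1;
    }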
2022-01-03 21:08:14 -05:00
Daniel Micay
5f32942263 get rid of canary_value when canaries are disabled 2022-01-03 20:39:30 -05:00
Daniel Micay
346529574d check whole allocation for uninit read large test 2022-01-03 17:55:05 -05:00
Daniel Micay
16c991b8f7 use 256k for large allocation tests 2022-01-03 16:11:16 -05:00
jvoisin
5f59ee3935 Add two tests to check that uninitialized reads are zeroed 2022-01-03 16:10:01 -05:00
Daniel Micay
3696f071a4 use SLAB_CANARY for conditional checks 2022-01-03 02:17:04 -05:00
Daniel Micay
7d6663ed80 update copyright notice 2022-01-03 01:41:27 -05:00
Daniel Micay
c6af50d088 use unsigned for ffzl definition
This makes more sense and avoids clang tidy conversion warnings.
2022-01-03 01:29:12 -05:00
Daniel Micay
8ae78237ae avoid unnecessarily mixing 32-bit and 64-bit ints
It's ever so slightly faster to stick to 64-bit arithmetic and it avoids
clang tidy being unhappy about the implicit widening.
2022-01-03 00:54:43 -05:00
Daniel Micay
3f8e9d3184 make MREMAP_MOVE_THRESHOLD into size_t constant
This avoids a clang-tidy warning and is a bit cleaner.
2022-01-03 00:32:06 -05:00
Daniel Micay
1e526fc36b disable incredibly impractical clang-tidy check
bugprone-easily-swappable-parameters is completely impractical for real
world usage. It's a reasonable thing to consider as part of API design
but it mostly applies to having APIs taking a lot of parameters. It's
unreasonable to disallow APIs simply taking 2 integer parameters even as
a loose guideline.
2022-01-03 00:27:49 -05:00
jvoisin
c5be4b1888 Fix two mundane clang warnings in the testsuite 2022-01-02 08:27:46 -05:00
jvoisin
ffdf7b1ee1 Make the testsuite work for read-after-free
This commit makes the testsuite fail if
the read-after-free tests are failing, instead
of simply printing some info.
2022-01-02 08:25:08 -05:00
jvoisin
2d56c1de01 Fix a couple of mundane typos in the readme 2022-01-02 08:20:13 -05:00
jvoisin
3878f4a5f4 Don't ignore the return value of the testsuite 2022-01-02 00:55:21 -05:00
Daniel Micay
de7a3b6e5a enable sized deallocation for sized deletion test
Clang doesn't currently enable sized deallocation by default like GCC.
2022-01-01 23:18:52 -05:00
jvoisin
9142a9376b Add a bunch of const qualifiers 2021-12-30 21:25:16 -05:00
Daniel Micay
75e26afdb6 remove legacy safe_flag check for -fno-plt
This is supported by the minimum versions of the dependencies.
2021-12-30 19:17:33 -05:00
jvoisin
cff1d6d4b5 Add a test to prove that hardened_malloc handles too-large-to-be-true allocations
This pattern, used by https://github.com/kaist-hacking/HardsHeap,
uncovered bugs in other memory allocators.
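
A sketch of the pattern (illustrative, not the actual test): an allocation request near SIZE_MAX must fail cleanly with ENOMEM rather than having internal rounding or padding arithmetic wrap around and "succeed".

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        // computed through a volatile so the compiler doesn't flag the
        // obviously impossible constant request at build time
        volatile size_t huge = SIZE_MAX - 64;
        void *p = malloc(huge);
        if (p != NULL || errno != ENOMEM) {
            fprintf(stderr, "unexpected result for huge allocation\n");
            return 1;
        }
        return 0;
    }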
2021-12-28 19:47:05 -05:00
jvoisin
75952581ee Silence a GCC warning
As suggested in https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425#c34
2021-12-27 06:22:32 -05:00
jvoisin
a84d3f5310 Run the testsuite on musl as well in the CI 2021-12-27 06:22:32 -05:00
jvoisin
0655c1d024 Add a missing const 2021-12-26 18:19:59 -05:00
jvoisin
2b25c791ee Run the CI every day at 2am UTC
This should help to catch issues in newer versions
of distributions/packages.
2021-12-26 17:02:51 -05:00
jvoisin
e816c545ea Run the CI in clang 2021-12-26 16:29:18 -05:00
jvoisin
06192ae499 make clean is now thorough 2021-12-26 16:28:03 -05:00
Daniel Micay
4ccd6f16df always enable C++17
The safe_flag check doesn't work properly for C++ flags with Clang so
this wasn't getting enabled despite the conditional compilation being
removed from the code, which broke Clang builds.
2021-12-26 16:26:38 -05:00
jvoisin
9966adbdad Add another ifdef for GNU extension 2021-12-23 14:45:43 -05:00
jvoisin
769e01fc4b Don't use mallinfo on non-android and non-glibc 2021-12-23 14:38:29 -05:00
Daniel Micay
460fef456d only Android 12 is supported 2021-12-13 19:42:40 -05:00
Daniel Micay
1a650b0317 update copyright notice 2021-12-13 19:42:33 -05:00
Lelmister101
fa46a7a85d small typo fix
“expanded cover” changed to “expanded to cover”
2021-12-05 09:52:50 -05:00
Daniel Micay
d8817417cc use compiler extension instead of C11 noreturn
C11 noreturn isn't available in C++.
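
A sketch of the portable spelling (illustrative): the GNU attribute is accepted by both the C and C++ translation units, unlike C11's _Noreturn / <stdnoreturn.h>.

    // usable from both h_malloc.c (C) and new.cc (C++)
    __attribute__((noreturn)) void fatal_error(const char *s);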
2021-11-23 16:00:06 -05:00
Daniel Micay
7106bff27f update required dependencies 2021-11-23 15:53:03 -05:00
Lelmister101
1bdbb2d3f7 minor typo fix
“entirely independently arenas” changed to “entirely independent arenas”
2021-11-23 15:39:53 -05:00
Thibaut Sautereau
a33d2ca97d Fix CPPFLAGS in test Makefile
In particular, the _GNU_SOURCE feature test macro needs to be set in
order to correctly define mmap(2) flags such as MAP_ANONYMOUS.
Otherwise, compilation of some test files fails when CPPFLAGS is not
defined in the initial user environment, as Make then does not export it
from the root Makefile to the sub-make.
2021-11-02 16:13:09 -04:00
Daniel Micay
aa94408cc2 fix wording 2021-10-19 23:52:18 -04:00
Daniel Micay
8f9305df57 Android 12 is supported 2021-10-08 13:44:28 -04:00
anupritaisno1
cc0a1e1736 make hardened malloc vendor ramdisk available
Signed-off-by: anupritaisno1 <www.anuprita804@gmail.com>
2021-10-08 13:43:11 -04:00
Daniel Micay
3b72a4f810 set C / C++ standard parameter for clang-tidy 2021-09-30 11:05:47 -04:00
Daniel Micay
e41d37c3de remove unnecessary else 2021-09-30 10:57:05 -04:00
Daniel Micay
23969727d8 disable readability-function-cognitive-complexity 2021-09-30 10:55:12 -04:00
Daniel Micay
4d30b491e3 set up dependabot for GitHub Actions 2021-09-30 01:18:36 -04:00
Daniel Micay
11207a9c98 add GitHub workflow for building and running tests 2021-09-30 01:17:08 -04:00
Daniel Micay
801e8d959f disable mallinfo2 test for old glibc 2021-09-30 01:16:10 -04:00
Daniel Micay
8dfea34fc0 current AOSP stable branch is Android 11 QPR3 2021-09-05 01:59:45 -04:00
Daniel Micay
4d6456cf58 update libdivide to 5.0.0 2021-07-17 14:58:47 -04:00
Daniel Micay
be6dde66f9 fix missing include for Intel MPK support 2021-05-21 09:07:28 -04:00
Daniel Micay
e0ecacff45 drop support for legacy C++ versions 2021-05-21 09:00:39 -04:00
Daniel Micay
050871122b update non-Android max_map_count recommendation 2021-05-13 19:50:35 -04:00
Daniel Micay
27fcfccb67 make __GLIBC_PREREQ check for mallinfo2 portable 2021-05-12 22:53:20 -04:00
Daniel Micay
93a87ce30b add new mallinfo2 test to gitignore too 2021-05-12 21:13:06 -04:00
Daniel Micay
da190f1469 mark pvalloc error path as unlikely 2021-05-12 21:01:13 -04:00
Daniel Micay
b0f81365a8 reuse code for aligned allocation API entry points 2021-05-12 20:59:04 -04:00
Daniel Micay
c9820b6e37 mark alloc_aligned_simple error path unlikely 2021-05-12 20:41:46 -04:00
Daniel Micay
f1cdc1e484 remove disconcerting newline 2021-05-12 20:34:18 -04:00
Daniel Micay
26b74b87bf improve code reuse for malloc API entry points 2021-05-12 20:28:50 -04:00
Daniel Micay
89faba4232 set errno in malloc_get_state to match glibc 2021-05-12 20:19:12 -04:00
Daniel Micay
a45dacc57b add support for glibc mallinfo2 2021-05-12 20:07:15 -04:00
Daniel Micay
a71ab1a2eb MREMAP_DONTUNMAP may be usable in realloc 2021-05-12 10:51:52 -04:00
Daniel Micay
96a322bcbe raise max_map_count recommendation 2021-05-12 10:32:59 -04:00
Daniel Micay
92a1e456d2 use normal class region size on x86_64 Android 2021-05-12 05:11:30 -04:00
Daniel Micay
9706f5a311 increase class region size on Android to 2GiB 2021-05-12 03:07:33 -04:00
Daniel Micay
440489af67 purge memory if munmap fails 2021-05-12 01:05:39 -04:00
Daniel Micay
f9a8e7216b purge slab memory even if using MAP_FIXED fails 2021-05-12 00:45:19 -04:00
Daniel Micay
5c974bdf82 use region quarantine even if MAP_FIXED call fails
This is a more sensible way of handling an out-of-memory failure in this
edge case. It doesn't matter much in practice.
2021-05-12 00:20:03 -04:00
Daniel Micay
2335f56713 add wrapper function for getting slot count 2021-05-10 07:04:50 -04:00
Daniel Micay
13a3aa16d0 improve naming of adjust_size_for_canaries 2021-05-07 04:23:49 -04:00
Daniel Micay
8bfa1a7dd5 use 1 slot for all extended size classes
This reduces memory usage and improves security in combination with the
guard slab feature.
2021-05-01 22:10:20 -04:00
Daniel Micay
3952645318 avoid unused variable for some configurations 2021-03-31 12:12:49 -04:00
Daniel Micay
1d15d34c7e return errors from memory_set_name too 2021-03-22 14:19:02 -04:00
Daniel Micay
29ffcdf810 portable error reporting for memory API 2021-03-22 13:59:16 -04:00
Daniel Micay
f773a96b59 remove unnecessary sys/mman.h include 2021-03-22 12:25:22 -04:00
Daniel Micay
b84af9b499 add wrapper for madvise 2021-03-22 12:24:26 -04:00
Daniel Micay
73b78a8adb document madvise for malloc_trim of quarantines 2021-03-22 11:17:00 -04:00
Daniel Micay
e77ffa76d9 add initial malloc_trim slab quarantine purging
This currently only purges the quarantines for extended size classes.
2021-03-22 11:16:57 -04:00
Daniel Micay
86b0b3e452 fix !CONFIG_EXTENDED_SIZE_CLASSES configuration 2021-03-21 18:09:02 -04:00
Daniel Micay
7b03b5c629 update README for region quarantine change 2021-03-18 07:35:38 -04:00
Daniel Micay
db21ecd529 use longer default region quarantine random array 2021-03-18 06:27:46 -04:00
Daniel Micay
ee55acf116 update libdivide to 4.0.0 2021-03-09 02:38:16 -05:00
Daniel Micay
a3b4c163eb drop unused header 2021-03-05 00:35:10 -05:00
Daniel Micay
325b82f1bd update to Android qpr2 branch as minimum 2021-03-01 16:52:30 -05:00
Daniel Micay
ddd14bc421 avoid type comparison warning on some platforms 2021-02-16 17:18:35 -05:00
Daniel Micay
29b09648d6 avoid undefined clz and shift in edge cases
This is triggered when get_large_size_class is called with a size in the
range [1,4]. This can occur with aligned_alloc(8192, size). In practice,
it doesn't appear to cause any harm, but we shouldn't have any undefined
behavior for well-defined usage of the API. It also occurs if the caller
passes a pointer outside the slab region to free_sized but the expected
size is in the range [1,4]. That usage of free_sized is already considered
undefined, but the caller's undefined behavior shouldn't be allowed to
trigger more undefined behavior here when it's avoidable.
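
An illustrative breakdown of the edge case (helper names and the clamp are assumptions, not the verbatim fix): for sizes in [1,4] the log2 helper can be handed 0, where clz is undefined, and the subsequent subtraction can wrap, making a later shift by the result undefined as well.

    #include <stdint.h>

    static uint64_t log2u64(uint64_t x) {
        // undefined for x == 0 because __builtin_clzll(0) is undefined
        return 63 - (uint64_t)__builtin_clzll(x);
    }

    uint64_t large_spacing_shift(uint64_t size) {
        // clamp sizes in [1,4]: size - 1 could be 0 (undefined clz) and
        // log2u64(size - 1) could be less than 2, so the subtraction would
        // wrap and any shift by the result would be undefined behavior
        if (size < 5) {
            size = 5;
        }
        return log2u64(size - 1) - 2;
    }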
2021-02-16 08:31:17 -05:00
Thibaut Sautereau
1984cb3b3d malloc_object_size: avoid fault for invalid region
It's the region pointer that can be NULL here, and p was checked at the
beginning of the function.
2021-02-10 17:43:36 -05:00
Thibaut Sautereau
76860c72e1 malloc_usable_size: clean abort on invalid region
It's the region pointer that can be NULL here, and p was checked at the
beginning of the function. Also fix the test accordingly.
2021-02-10 17:41:17 -05:00
Daniel Micay
5c8b686370 update copyright notice 2021-01-06 20:38:55 -05:00
Daniel Micay
5275563252 fix C++ sized deallocation check false positive
This is a compatibility issue triggered when both slab canaries and the
C++ allocator overloads providing sized deallocation checks are enabled.

The boundary where slab allocations are turned into large allocations
due to not having room for the canary in the largest slab allocation
size class triggers a false positive in the sized deallocation check.
2021-01-06 00:18:59 -05:00
Daniel Micay
e9d9f70ad4 update supported Android 11 branch 2020-12-07 18:14:25 -05:00
Daniel Micay
10c5d61187 work around glibc bug in mallinfo test 2020-11-10 14:14:08 -05:00
Daniel Micay
b90f650153 fix sized deallocation check with large sizes
The CONFIG_CXX_ALLOCATOR feature enables sanity checks for sized
deallocation and this wasn't updated to handle the introduction of
performing size class rounding for large sizes.
2020-11-10 13:53:32 -05:00
Daniel Micay
8d0314295e support Android's logging system for fatal_error 2020-10-19 07:51:00 -04:00
Daniel Micay
b072022022 perform init sanity checks before MPK unsealing 2020-10-06 17:34:35 -04:00
Daniel Micay
2bb1c39d31 add MPK support for stats retrieval functions 2020-10-06 17:32:25 -04:00
Daniel Micay
0bf18b7c26 optimize malloc_usable_size enforce_init 2020-10-03 15:10:49 -04:00
Daniel Micay
178d4f320f harden checks for uninitialized usage 2020-10-02 15:06:29 -04:00
Daniel Micay
b9ebf47c7c explicitly use python3 to run tests 2020-09-19 09:40:56 -04:00
Daniel Micay
8906c0941a improve mallinfo test 2020-09-18 11:38:06 -04:00
Daniel Micay
59e174eee0 update stats documentation 2020-09-17 17:44:01 -04:00
Daniel Micay
483b1d7b8b empty malloc_info output when stats are disabled 2020-09-17 17:42:18 -04:00
Daniel Micay
96eca21ac5 remove thread_local macro workaround glibc < 2.28 2020-09-17 17:38:40 -04:00
Daniel Micay
022b64791e whitespace fixes 2020-09-17 17:23:13 -04:00
Daniel Micay
b4bbd09f07 change label for quarantined large allocations 2020-09-17 16:56:01 -04:00
Daniel Micay
a88305c01b support disabling region quarantine 2020-09-17 16:53:34 -04:00
Daniel Micay
85c5c3736c add stats tracking to special large realloc paths 2020-09-17 16:29:13 -04:00
Daniel Micay
96a9bcf3a1 move deprecated glibc extensions to the bottom 2020-09-17 16:20:05 -04:00
Daniel Micay
41fb89517a simplify malloc_info code 2020-09-17 16:10:02 -04:00
Daniel Micay
50e0f1334c add is_init check to malloc_info 2020-09-17 16:07:10 -04:00
Daniel Micay
9fb2791af2 add is_init check to h_mallinfo_arena_info 2020-09-17 16:00:03 -04:00
anupritaisno1
8974af86d1 hardened malloc: iterate -> malloc_iterate
Signed-off-by: anupritaisno1 <www.anuprita804@gmail.com>
2020-09-15 00:37:23 -04:00
anupritaisno1
d203d6c445 Android.bp: export hardened malloc headers
Signed-off-by: anupritaisno1 <www.anuprita804@gmail.com>
2020-09-14 07:21:57 -04:00
Daniel Micay
9f5e1f6eb9 master is only going to support Android 11 2020-09-14 05:57:39 -04:00
Daniel Micay
1cba254452 move external API to include directory 2020-09-14 05:52:53 -04:00
anupritaisno1
730f148647 Android.bp: make hardened malloc ramdisk available
Signed-off-by: anupritaisno1 <www.anuprita804@gmail.com>
2020-09-13 05:14:25 -04:00
Daniel Micay
dd7291ebfe better wording for page size mismatch error 2020-08-05 18:10:53 -04:00
Daniel Micay
bcb93cab63 avoid an ifdef 2020-08-04 17:22:03 -04:00
rwarr627
f214bd541a added check for if small allocations are free 2020-06-17 23:29:30 -04:00
rwarr627
7804e263e9 added tests for if malloc_object_size small allocations are free 2020-06-17 23:29:30 -04:00
Daniel Micay
de3fb50dcc tests: make no-optimize attribute Clang compatible 2020-06-17 20:08:46 -04:00
Daniel Micay
b404d6da6e fix out-of-memory check in offset test 2020-06-17 16:27:59 -04:00
Daniel Micay
c9c7dca498 use size_t in offset test to fix warning 2020-06-17 16:26:08 -04:00
Daniel Micay
dcc80a01db android10-qpr3-release 2020-06-16 11:10:13 -04:00
Daniel Micay
722974f4e9 remove trailing whitespace 2020-06-13 09:59:50 -04:00
rwarr627
195bc8c92a added tests for malloc_object_size
LDFLAGS is on single line
2020-06-13 01:27:32 -04:00
rwarr627
577524798e calculates offset from start for small allocations 2020-06-13 01:27:32 -04:00
Daniel Micay
467ba8440f add comment explaining slab cache size 2020-05-24 09:36:43 -04:00
Daniel Micay
067b3c864f set slab cache sizes based on the largest slab 2020-05-24 09:31:02 -04:00
Daniel Micay
82440e78d9 silence annoying tidy checks 2020-05-18 19:10:14 -04:00
Daniel Micay
fc0bd78215 default number of arenas is currently 4 2020-05-13 03:48:44 -04:00
Daniel Micay
08a5f5ee0b reduces empty slab cache size now too 2020-05-13 03:19:17 -04:00
Daniel Micay
e82367e1bf include leaner sample configurations 2020-05-13 03:11:23 -04:00
Daniel Micay
4a6bbe445c limit cached slabs based on max size class 2020-05-13 01:05:37 -04:00
Daniel Micay
cf55ac0f6d disable annoying Clang warning from avoiding ifdef 2020-04-30 18:14:40 -04:00
Daniel Micay
b672316bc7 use const for memory_corruption_check_small
This currently causes a warning (treated as an error) on Android where
malloc_usable_size uses a const pointer.
2020-04-30 16:06:32 -04:00
Daniel Micay
029a2edf28 remove trailing whitespace 2020-04-30 16:03:45 -04:00
rwarr627
35bd7cd76d added memory corruption checking to malloc_usable_size for slab allocations 2020-04-29 18:06:15 -04:00
rwarr627
0a3a726c93 added tests for malloc_usable_size 2020-04-29 18:06:15 -04:00
Daniel Micay
19365c25d6 remove workaround for Linux kernel MPK fork bug 2020-04-24 02:51:39 -04:00
Daniel Micay
c75dcb9d9c compare with negative integers to check signals 2020-04-11 15:26:24 -04:00
Daniel Micay
d757835d90 increase size for eight_byte_overflow_large test
This was not working as expected due to CONFIG_EXTENDED_SIZE_CLASSES
resulting in 128k being a slab allocation size class. The addition of
padding for the canary pushes it into the next size class, so the test
was writing over size class rounding padding rather than actually
overflowing as intended.
2020-04-11 15:25:34 -04:00
rwarr627
2c421590b5 added testing headers to table of contents 2020-04-11 15:08:30 -04:00
rwarr627
9f661f945c added testing instructions 2020-04-11 15:08:30 -04:00
rwarr627
b160f723e0 ignores __pycache__ 2020-04-11 15:08:30 -04:00
rwarr627
ec6854c71b added test rule 2020-04-11 15:08:30 -04:00
rwarr627
6b987e644b verifies the results of the simple-memory-corruption tests 2020-04-11 15:08:30 -04:00
Daniel Micay
2a87f52fc1 use organization funding metadata 2020-04-08 11:53:58 -04:00
Daniel Micay
466d351e93 drop legacy glibc version check for pkey support 2020-03-29 11:40:12 -04:00
Daniel Micay
0436227092 no longer need glibc pthread_atfork workaround 2020-03-29 11:40:12 -04:00
Daniel Micay
3af44d2e6a can now depend on libc having a getrandom wrapper 2020-03-29 11:40:12 -04:00
Daniel Micay
a5abe5add6 increase minimum dependency versions 2020-03-29 11:40:12 -04:00
Daniel Micay
26134f9aaa update copyright notice 2020-02-07 18:14:28 -05:00
Daniel Micay
449962e044 disable obsolete glibc extensions elsewhere 2020-02-03 08:39:19 -05:00
Valentin Churavy
bee398f860 replace __BEGIN_DECLS with extern "C" 2019-12-29 20:48:55 -05:00
Daniel Micay
7c5c768e2f update libdivide copyright 2019-11-06 06:04:16 -05:00
Daniel Micay
7945b3f109 update libdivide to 3.0 2019-11-06 05:41:58 -05:00
Daniel Micay
dfa49481e5 random: fix whitespace issue 2019-11-06 05:28:35 -05:00
Daniel Micay
2fbf7bb25e fix another README typo 2019-11-06 05:12:50 -05:00
Daniel Micay
ac95820fae remove duplicate word from README 2019-11-06 04:57:41 -05:00
Daniel Micay
b48ac93b03 current supported branch is android10-release 2019-11-06 03:33:28 -05:00
Daniel Micay
eff5037d64 add missing word to README 2019-11-06 03:33:28 -05:00
Daniel Micay
97ea85f55d README: add Compatibility to TOC 2019-10-11 09:06:40 -04:00
Daniel Micay
6f4de3971e document OpenSSH compatibility fix 2019-10-09 23:27:25 -04:00
Daniel Micay
cb73bccf35 add GitHub sponsors metadata 2019-09-26 18:53:46 -04:00
Daniel Micay
74eb614f16 inherit base CXXFLAGS from the environment too 2019-09-26 14:50:58 -04:00
Daniel Micay
a28da3c65a use prefix for extended mallinfo functions 2019-09-07 18:33:24 -04:00
Daniel Micay
fb9f5d630b use cc_library instead of cc_library_static 2019-09-07 18:00:42 -04:00
Daniel Micay
8d648e2b25 make it available in recovery for Android 10 2019-09-07 17:38:43 -04:00
Daniel Micay
6d78dec42a clarify malloc_info format is a bit different 2019-08-20 15:57:59 -04:00
Daniel Micay
2e4ab73fb6 basic design -> core design 2019-08-19 06:11:10 -04:00
Daniel Micay
0e4ea0090b expand design documentation further 2019-08-19 06:10:40 -04:00
Daniel Micay
5b3d59ec7d Makefile: split long line for SHARED_FLAGS 2019-08-18 08:41:12 -04:00
Daniel Micay
7c455c3956 update libdivide to 2.0 2019-08-18 06:56:52 -04:00
Daniel Micay
efda950994 fix width of cell in memory tagging section 2019-08-18 06:52:09 -04:00
Daniel Micay
40be77003e fix OS integration header level 2019-08-18 06:48:03 -04:00
Daniel Micay
0af33616f0 cleaner wording in introduction 2019-08-18 06:47:25 -04:00
Daniel Micay
c66cf10894 clarify malloc_object_size state further 2019-08-18 06:45:53 -04:00
Daniel Micay
0129d8e470 move point about delayed free for slab allocations 2019-08-18 06:44:35 -04:00
Daniel Micay
5eefcd39b4 the design / implementation is fairly complete now 2019-08-18 06:24:21 -04:00
Daniel Micay
2288b3a754 add information on stats 2019-08-18 06:21:39 -04:00
Daniel Micay
f4afedb137 add links to size class section from configuration 2019-08-18 05:48:29 -04:00
Daniel Micay
ac70e2c250 use full sentences to describe compile options 2019-08-18 05:47:13 -04:00
Daniel Micay
d0b466beb8 elaborate on the cost of GUARD_SLABS_INTERVAL 2019-08-18 05:46:20 -04:00
Daniel Micay
7a8c57d0f5 stop marking MPK support 'extremely experimental' 2019-08-18 05:37:42 -04:00
Daniel Micay
c4fc025fde clarify ZERO_ON_FREE / WRITE_AFTER_FREE_CHECK 2019-08-18 05:35:48 -04:00
Daniel Micay
58b56f10ea avoid -shared-libgcc due to old Clang versions 2019-08-18 04:53:51 -04:00
Daniel Micay
125efe99db fix wording in note about malloc_object_size 2019-08-18 02:51:32 -04:00
Daniel Micay
77b242ea3f enable misc tidy checks 2019-08-18 02:43:49 -04:00
Daniel Micay
d37657e125 enable llvm-include-order tidy check 2019-08-18 02:39:55 -04:00
Daniel Micay
3c67708c3a reorder clang-tidy checks parameters 2019-08-18 02:34:51 -04:00
Daniel Micay
2ad74515b1 enable -Werror for Android build system 2019-08-18 02:31:05 -04:00
Daniel Micay
abece7656b add enabled-by-default option to use -Werror 2019-08-18 02:28:23 -04:00
Daniel Micay
c70745ab15 Makefile: add check target depending on tidy 2019-08-18 02:20:06 -04:00
Daniel Micay
7d4d2ef0fb treat clang-tidy warnings as errors for automation 2019-08-18 02:18:04 -04:00
Daniel Micay
8133444f43 move clang-tidy configuration to .clang-tidy 2019-08-18 02:12:18 -04:00
Daniel Micay
8f9f2521a0 disable sanitizer recovery in UBSan debug builds
This makes it harder to miss that an error occurred and avoids spamming
output. There should never be even a single error, so it doesn't make sense
to gather as many errors as possible when a single error is already a
serious issue that would need to be fixed.
2019-08-18 02:00:13 -04:00
Daniel Micay
d8ebdea05f handle CONFIG_SEAL_METADATA option like the others 2019-08-18 01:56:20 -04:00
Daniel Micay
defd55f302 provide link to Bionic integration commit 2019-08-18 01:44:40 -04:00
Daniel Micay
04f69d9f0d update supported Android branches 2019-08-18 01:41:56 -04:00
Daniel Micay
995d0580d1 remove extra spaces inserted by vim joinspaces 2019-08-18 01:39:22 -04:00
Daniel Micay
8d2df1deb8 use CC as CXX to make sure LTO is compatible 2019-08-18 01:28:59 -04:00
Daniel Micay
1bc201c4c1 use -Wcast-align if -Wcast-align=strict is missing 2019-08-18 01:15:54 -04:00
Daniel Micay
cc8c4459e1 make safe_flag treat unknown warnings as missing 2019-08-18 01:15:54 -04:00
Daniel Micay
b6b910f032 add table of contents to README 2019-08-18 01:15:54 -04:00
Daniel Micay
24de5aab05 still need to finish up initial malloc_object_size 2019-08-18 01:15:54 -04:00
Daniel Micay
71e4577367 fix some inconsistencies in the tagging examples 2019-08-13 21:44:16 -04:00
Patrick Schleizer
75e86914aa respect existing CFLAGS, CPPFLAGS and LDFLAGS 2019-07-29 13:52:55 -04:00
Daniel Micay
90d12fb340 override local default to -fstack-protector-strong
This is a no-op on a toolchain compiled with the basic mitigations
enabled by default, so this is generally a no-op anywhere this project
is likely to be deployed. SSP has a very low performance cost so there's
little reason to avoid it, even though it also has zero value for this
code in practice. It would be great if one of the more modern approaches
was widely adopted, but unfortunately SSP is as good as it gets for
portable options. It doesn't provide any protection against external
writes to the stack data, which is all that's really needed here.

ShadowCallStack is a great option for arm64, but it's substantially more
difficult to protect return addresses well on x86_64 due to the design of
the ISA and ABI.
2019-07-19 11:53:55 -04:00
Daniel Micay
77743e5a36 use -fstack-clash-protection for completeness
This is a no-op for the current code and will likely remain that way so
there's no benefit but also no performance cost.
2019-07-19 11:18:49 -04:00
Daniel Micay
3ed6e546c8 OS integration guide 2019-07-18 07:22:29 -04:00
Daniel Micay
d80919fa1e substantially raise the arbitrary arena limit 2019-07-12 03:43:33 -04:00
Daniel Micay
410e9efb93 extend configuration sanity checks 2019-07-11 17:09:48 -04:00
Daniel Micay
7bcfa500be remove note about lack of sanity checks for config 2019-07-11 15:50:45 -04:00
Daniel Micay
72a08f88fb supports Debian oldstable due to Buster release 2019-07-10 18:08:14 -04:00
Daniel Micay
a32e26b8e9 avoid trying to use mremap outside of Linux 2019-07-05 21:59:44 -04:00
Daniel Micay
934ab4cb59 explain extended size classes impact on quarantine 2019-07-05 17:57:41 -04:00
Daniel Micay
060f74b993 extended size classes now go up to 128k not 64k 2019-07-05 17:55:25 -04:00
Daniel Micay
4d4277319a clarifications to randomization documentation 2019-06-23 19:20:16 -04:00
Daniel Micay
a579257a26 update libdivide to 1.1 2019-06-23 00:39:35 -04:00
78 changed files with 4294 additions and 1968 deletions

2
.clang-tidy Normal file

@ -0,0 +1,2 @@
Checks: 'bugprone-*,-bugprone-easily-swappable-parameters,-bugprone-macro-parentheses,-bugprone-too-small-loop-variable,cert-*,-cert-err33-c,clang-analyzer-*,-clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling,-clang-diagnostic-constant-logical-operand,readability-*,-readability-function-cognitive-complexity,-readability-identifier-length,-readability-inconsistent-declaration-parameter-name,-readability-magic-numbers,-readability-named-parameter,llvm-include-order,misc-*'
WarningsAsErrors: '*'

1
.github/FUNDING.yml vendored

@ -1 +0,0 @@
custom: https://grapheneos.org/donate

7
.github/dependabot.yml vendored Normal file

@ -0,0 +1,7 @@
version: 2
updates:
- package-ecosystem: github-actions
directory: "/"
schedule:
interval: daily
target-branch: main

39
.github/workflows/build-and-test.yml vendored Normal file

@ -0,0 +1,39 @@
name: Build and run tests
on:
push:
pull_request:
schedule:
- cron: '0 2 * * *'
jobs:
build-ubuntu-gcc:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Build
run: make test
build-ubuntu-clang:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Build
run: CC=clang CXX=clang++ make test
build-musl:
runs-on: ubuntu-latest
container:
image: alpine:latest
steps:
- uses: actions/checkout@v4
- name: Install dependencies
run: apk update && apk add build-base python3
- name: Build
run: make test
build-ubuntu-gcc-aarch64:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install dependencies
run: sudo apt-get update && sudo apt-get install -y --no-install-recommends gcc-aarch64-linux-gnu g++-aarch64-linux-gnu libgcc-s1-arm64-cross cpp-aarch64-linux-gnu
- name: Build
run: CC=aarch64-linux-gnu-gcc CXX=aarch64-linux-gnu-gcc++ make CONFIG_NATIVE=false

4
.gitignore vendored

@ -1,2 +1,2 @@
*.o
*.so
out/
out-light/

View File

@ -1,15 +1,16 @@
common_cflags = [
"-pipe",
"-O3",
//"-flto",
"-fPIC",
"-fvisibility=hidden",
//"-fno-plt",
"-pipe",
"-Wall",
"-Wextra",
"-Wcast-align",
"-Wcast-qual",
"-Wwrite-strings",
"-Werror",
"-DH_MALLOC_PREFIX",
"-DZERO_ON_FREE=true",
"-DWRITE_AFTER_FREE_CHECK=true",
@ -21,20 +22,21 @@ common_cflags = [
"-DCONFIG_LARGE_SIZE_CLASSES=true",
"-DGUARD_SLABS_INTERVAL=1",
"-DGUARD_SIZE_DIVISOR=2",
"-DREGION_QUARANTINE_RANDOM_LENGTH=128",
"-DREGION_QUARANTINE_RANDOM_LENGTH=256",
"-DREGION_QUARANTINE_QUEUE_LENGTH=1024",
"-DREGION_QUARANTINE_SKIP_THRESHOLD=33554432", // 32MiB
"-DFREE_SLABS_QUARANTINE_RANDOM_LENGTH=32",
"-DCONFIG_CLASS_REGION_SIZE=1073741824", // 1GiB
"-DCONFIG_CLASS_REGION_SIZE=34359738368", // 32GiB
"-DN_ARENA=1",
"-DCONFIG_STATS=true",
"-DCONFIG_SELF_INIT=false",
]
cc_defaults {
name: "hardened_malloc_defaults",
defaults: ["linux_bionic_supported"],
cflags: common_cflags,
conlyflags: ["-std=c11", "-Wmissing-prototypes"],
conlyflags: ["-std=c17", "-Wmissing-prototypes"],
stl: "none",
}
@ -47,13 +49,32 @@ lib_src_files = [
"util.c",
]
cc_library_static {
cc_library {
name: "libhardened_malloc",
ramdisk_available: true,
vendor_ramdisk_available: true,
recovery_available: true,
defaults: ["hardened_malloc_defaults"],
srcs: lib_src_files,
export_include_dirs: ["include"],
static_libs: ["libasync_safe"],
target: {
android: {
shared: {
enabled: false,
},
system_shared_libs: [],
},
linux_bionic: {
system_shared_libs: [],
},
},
product_variables: {
debuggable: {
cflags: ["-DLABEL_MEMORY"],
},
},
apex_available: [
"com.android.runtime",
],
}

View File

@ -4,7 +4,7 @@ chacha.c is a simple conversion of chacha-merged.c to a keystream-only implement
D. J. Bernstein
Public domain.
malloc.c open-addressed hash table (regions_grow, regions_insert, regions_find, regions_delete):
h_malloc.c open-addressed hash table (regions_grow, regions_insert, regions_find, regions_delete):
Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@ -25,7 +25,8 @@ malloc.c open-addressed hash table (regions_grow, regions_insert, regions_find,
libdivide:
Copyright (C) 2010 ridiculous_fish
Copyright (C) 2010 - 2019 ridiculous_fish, <libdivide@ridiculousfish.com>
Copyright (C) 2016 - 2019 Kim Walisch, <kim.walisch@gmail.com>
Boost Software License - Version 1.0 - August 17th, 2003

View File

@ -16,6 +16,8 @@ Somewhat important and an easy sell:
* also needed by jemalloc for different reasons
* not needed if the kernel gets first class support for arbitrarily sized
guard pages and a virtual memory quarantine feature
* `MREMAP_DONTUNMAP` is now available but doesn't support expanding the
mapping which may be an issue due to VMA merging being unreliable
Fairly infeasible to land but could reduce overhead and extend coverage of
security features to other code directly using mmap:

View File

@ -1,4 +1,4 @@
Copyright (c) 2019 Daniel Micay
Copyright © 2018-2023 GrapheneOS
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

119
Makefile

@ -1,57 +1,60 @@
CONFIG_NATIVE := true
CONFIG_CXX_ALLOCATOR := true
CONFIG_UBSAN := false
CONFIG_SEAL_METADATA := false
CONFIG_ZERO_ON_FREE := true
CONFIG_WRITE_AFTER_FREE_CHECK := true
CONFIG_SLOT_RANDOMIZE := true
CONFIG_SLAB_CANARY := true
CONFIG_SLAB_QUARANTINE_RANDOM_LENGTH := 1
CONFIG_SLAB_QUARANTINE_QUEUE_LENGTH := 1
CONFIG_EXTENDED_SIZE_CLASSES := true
CONFIG_LARGE_SIZE_CLASSES := true
CONFIG_GUARD_SLABS_INTERVAL := 1
CONFIG_GUARD_SIZE_DIVISOR := 2
CONFIG_REGION_QUARANTINE_RANDOM_LENGTH := 128
CONFIG_REGION_QUARANTINE_QUEUE_LENGTH := 1024
CONFIG_REGION_QUARANTINE_SKIP_THRESHOLD := 33554432 # 32MiB
CONFIG_FREE_SLABS_QUARANTINE_RANDOM_LENGTH := 32
CONFIG_CLASS_REGION_SIZE := 34359738368 # 32GiB
CONFIG_N_ARENA := 4
CONFIG_STATS := false
VARIANT := default
ifneq ($(VARIANT),)
CONFIG_FILE := config/$(VARIANT).mk
include config/$(VARIANT).mk
endif
ifeq ($(VARIANT),default)
SUFFIX :=
else
SUFFIX := -$(VARIANT)
endif
OUT := out$(SUFFIX)
define safe_flag
$(shell $(CC) -E $1 - </dev/null >/dev/null 2>&1 && echo $1 || echo $2)
$(shell $(CC) $(if $(filter clang%,$(CC)),-Werror=unknown-warning-option) -E $1 - </dev/null >/dev/null 2>&1 && echo $1 || echo $2)
endef
CPPFLAGS := -D_GNU_SOURCE
SHARED_FLAGS := -O3 -flto -fPIC -fvisibility=hidden $(call safe_flag,-fno-plt) -pipe -Wall -Wextra $(call safe_flag,-Wcast-align=strict) -Wcast-qual -Wwrite-strings
CPPFLAGS := $(CPPFLAGS) -D_GNU_SOURCE -I include
SHARED_FLAGS := -pipe -O3 -flto -fPIC -fvisibility=hidden -fno-plt \
-fstack-clash-protection $(call safe_flag,-fcf-protection) -fstack-protector-strong \
-Wall -Wextra $(call safe_flag,-Wcast-align=strict,-Wcast-align) -Wcast-qual -Wwrite-strings \
-Wundef
ifeq ($(CONFIG_WERROR),true)
SHARED_FLAGS += -Werror
endif
ifeq ($(CONFIG_NATIVE),true)
SHARED_FLAGS += -march=native
endif
CFLAGS := -std=c11 $(SHARED_FLAGS) -Wmissing-prototypes
CXXFLAGS := $(call safe_flag,-std=c++17,-std=c++14) $(SHARED_FLAGS)
LDFLAGS := -Wl,--as-needed,-z,defs,-z,relro,-z,now,-z,nodlopen,-z,text
TIDY_CHECKS := -checks=bugprone-*,-bugprone-macro-parentheses,cert-*,clang-analyzer-*,readability-*,-readability-inconsistent-declaration-parameter-name,-readability-magic-numbers,-readability-named-parameter,-bugprone-too-small-loop-variable
ifeq ($(CONFIG_UBSAN),true)
SHARED_FLAGS += -fsanitize=undefined -fno-sanitize-recover=undefined
endif
CFLAGS := $(CFLAGS) -std=c17 $(SHARED_FLAGS) -Wmissing-prototypes -Wstrict-prototypes
CXXFLAGS := $(CXXFLAGS) -std=c++17 -fsized-deallocation $(SHARED_FLAGS)
LDFLAGS := $(LDFLAGS) -Wl,-O1,--as-needed,-z,defs,-z,relro,-z,now,-z,nodlopen,-z,text
SOURCES := chacha.c h_malloc.c memory.c pages.c random.c util.c
OBJECTS := $(SOURCES:.c=.o)
ifeq ($(CONFIG_CXX_ALLOCATOR),true)
# make sure LTO is compatible in case CC and CXX don't match (such as clang and g++)
CXX := $(CC)
LDLIBS += -lstdc++
SOURCES += new.cc
OBJECTS += new.o
endif
ifeq ($(CONFIG_UBSAN),true)
CFLAGS += -fsanitize=undefined
CXXFLAGS += -fsanitize=undefined
endif
OBJECTS := $(addprefix $(OUT)/,$(OBJECTS))
ifeq ($(CONFIG_SEAL_METADATA),true)
CPPFLAGS += -DCONFIG_SEAL_METADATA
ifeq (,$(filter $(CONFIG_SEAL_METADATA),true false))
$(error CONFIG_SEAL_METADATA must be true or false)
endif
ifeq (,$(filter $(CONFIG_ZERO_ON_FREE),true false))
@ -82,7 +85,12 @@ ifeq (,$(filter $(CONFIG_STATS),true false))
$(error CONFIG_STATS must be true or false)
endif
ifeq (,$(filter $(CONFIG_SELF_INIT),true false))
$(error CONFIG_SELF_INIT must be true or false)
endif
CPPFLAGS += \
-DCONFIG_SEAL_METADATA=$(CONFIG_SEAL_METADATA) \
-DZERO_ON_FREE=$(CONFIG_ZERO_ON_FREE) \
-DWRITE_AFTER_FREE_CHECK=$(CONFIG_WRITE_AFTER_FREE_CHECK) \
-DSLOT_RANDOMIZE=$(CONFIG_SLOT_RANDOMIZE) \
@ -99,23 +107,42 @@ CPPFLAGS += \
-DFREE_SLABS_QUARANTINE_RANDOM_LENGTH=$(CONFIG_FREE_SLABS_QUARANTINE_RANDOM_LENGTH) \
-DCONFIG_CLASS_REGION_SIZE=$(CONFIG_CLASS_REGION_SIZE) \
-DN_ARENA=$(CONFIG_N_ARENA) \
-DCONFIG_STATS=$(CONFIG_STATS)
-DCONFIG_STATS=$(CONFIG_STATS) \
-DCONFIG_SELF_INIT=$(CONFIG_SELF_INIT)
libhardened_malloc.so: $(OBJECTS)
$(OUT)/libhardened_malloc$(SUFFIX).so: $(OBJECTS) | $(OUT)
$(CC) $(CFLAGS) $(LDFLAGS) -shared $^ $(LDLIBS) -o $@
chacha.o: chacha.c chacha.h util.h
h_malloc.o: h_malloc.c h_malloc.h mutex.h memory.h pages.h random.h util.h
memory.o: memory.c memory.h util.h
new.o: new.cc h_malloc.h util.h
pages.o: pages.c pages.h memory.h util.h
random.o: random.c random.h chacha.h util.h
util.o: util.c util.h
$(OUT):
mkdir -p $(OUT)
$(OUT)/chacha.o: chacha.c chacha.h util.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.c) $(OUTPUT_OPTION) $<
$(OUT)/h_malloc.o: h_malloc.c include/h_malloc.h mutex.h memory.h pages.h random.h util.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.c) $(OUTPUT_OPTION) $<
$(OUT)/memory.o: memory.c memory.h util.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.c) $(OUTPUT_OPTION) $<
$(OUT)/new.o: new.cc include/h_malloc.h util.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.cc) $(OUTPUT_OPTION) $<
$(OUT)/pages.o: pages.c pages.h memory.h util.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.c) $(OUTPUT_OPTION) $<
$(OUT)/random.o: random.c random.h chacha.h util.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.c) $(OUTPUT_OPTION) $<
$(OUT)/util.o: util.c util.h $(CONFIG_FILE) | $(OUT)
$(COMPILE.c) $(OUTPUT_OPTION) $<
check: tidy
tidy:
clang-tidy $(TIDY_CHECKS) $(SOURCES) -- $(CPPFLAGS)
clang-tidy --extra-arg=-std=c17 $(filter %.c,$(SOURCES)) -- $(CPPFLAGS)
clang-tidy --extra-arg=-std=c++17 $(filter %.cc,$(SOURCES)) -- $(CPPFLAGS)
clean:
rm -f libhardened_malloc.so $(OBJECTS)
rm -f $(OUT)/libhardened_malloc.so $(OBJECTS)
$(MAKE) -C test/ clean
.PHONY: clean tidy
test: $(OUT)/libhardened_malloc$(SUFFIX).so
$(MAKE) -C test/
python3 -m unittest discover --start-directory test/
.PHONY: check clean tidy test

562
README.md

@ -1,5 +1,30 @@
# Hardened malloc
* [Introduction](#introduction)
* [Dependencies](#dependencies)
* [Testing](#testing)
* [Individual Applications](#individual-applications)
* [Automated Test Framework](#automated-test-framework)
* [Compatibility](#compatibility)
* [OS integration](#os-integration)
* [Android-based operating systems](#android-based-operating-systems)
* [Traditional Linux-based operating systems](#traditional-linux-based-operating-systems)
* [Configuration](#configuration)
* [Core design](#core-design)
* [Security properties](#security-properties)
* [Randomness](#randomness)
* [Size classes](#size-classes)
* [Scalability](#scalability)
* [Small (slab) allocations](#small-slab-allocations)
* [Thread caching (or lack thereof)](#thread-caching-or-lack-thereof)
* [Large allocations](#large-allocations)
* [Memory tagging](#memory-tagging)
* [API extensions](#api-extensions)
* [Stats](#stats)
* [System calls](#system-calls)
## Introduction
This is a security-focused general purpose memory allocator providing the
malloc API along with various extensions. It provides substantial hardening
against heap corruption vulnerabilities. The security-focused design also leads
@ -7,7 +32,7 @@ to much less metadata overhead and memory waste from fragmentation than a more
traditional allocator design. It aims to provide decent overall performance
with a focus on long-term performance and memory usage rather than allocator
micro-benchmarks. It offers scalability via a configurable number of entirely
independently arenas, with the internal locking within arenas further divided
independent arenas, with the internal locking within arenas further divided
up per size class.
This project currently supports Bionic (Android), musl and glibc. It may
@ -20,17 +45,17 @@ and can cover the same use cases.
This allocator is intended as a successor to a previous implementation based on
extending OpenBSD malloc with various additional security features. It's still
heavily based on the OpenBSD malloc design, albeit not on the existing code
other than reusing the hash table implementation for the time being. The main
differences in the design are that it is solely focused on hardening rather
than finding bugs, uses finer-grained size classes along with slab sizes going
beyond 4k to reduce internal fragmentation, doesn't rely on the kernel having
fine-grained mmap randomization and only targets 64-bit to make aggressive use
of the large address space. There are lots of smaller differences in the
implementation approach. It incorporates the previous extensions made to
OpenBSD malloc including adding padding to allocations for canaries (distinct
from the current OpenBSD malloc canaries), write-after-free detection tied to
the existing clearing on free, queues alongside the existing randomized arrays
for quarantining allocations and proper double-free detection for quarantined
other than reusing the hash table implementation. The main differences in the
design are that it's solely focused on hardening rather than finding bugs, uses
finer-grained size classes along with slab sizes going beyond 4k to reduce
internal fragmentation, doesn't rely on the kernel having fine-grained mmap
randomization and only targets 64-bit to make aggressive use of the large
address space. There are lots of smaller differences in the implementation
approach. It incorporates the previous extensions made to OpenBSD malloc
including adding padding to allocations for canaries (distinct from the current
OpenBSD malloc canaries), write-after-free detection tied to the existing
clearing on free, queues alongside the existing randomized arrays for
quarantining allocations and proper double-free detection for quarantined
allocations. The per-size-class memory regions with their own random bases were
loosely inspired by the size and type-based partitioning in PartitionAlloc. The
planned changes to OpenBSD malloc ended up being too extensive and invasive so
@ -40,11 +65,14 @@ used instead as this allocator fundamentally doesn't support that environment.
## Dependencies
Debian stable determines the most ancient set of supported dependencies:
Debian stable (currently Debian 12) determines the most ancient set of
supported dependencies:
* glibc 2.24
* Linux 4.9
* Clang 3.8 or GCC 6.3
* glibc 2.36
* Linux 6.1
* Clang 14.0.6 or GCC 12.2.0
For Android, the Linux GKI 5.10, 5.15 and 6.1 branches are supported.
However, using more recent releases is highly recommended. Older versions of
the dependencies may be compatible at the moment but are not tested and will
@ -54,18 +82,20 @@ For external malloc replacement with musl, musl 1.1.20 is required. However,
there will be custom integration offering better performance in the future
along with other hardening for the C standard library implementation.
For Android, only current generation Android Open Source Project branches will
be supported, which currently means pie-qpr2-release.
For Android, only the current generation, actively developed maintenance branch of the Android
Open Source Project will be supported, which currently means `android13-qpr2-release`.
## Testing
### Individual Applications
The `preload.sh` script can be used for testing with dynamically linked
executables using glibc or musl:
./preload.sh krita --new-image RGBA,U8,500,500
It can be necessary to substantially increase the `vm.max_map_count` sysctl to
accomodate the large number of mappings caused by guard slabs and large
accommodate the large number of mappings caused by guard slabs and large
allocation guard regions. The number of mappings can also be drastically
reduced via a significant increase to `CONFIG_GUARD_SLABS_INTERVAL` but the
feature has a low performance and memory usage cost so that isn't recommended.
@ -78,6 +108,79 @@ this allocator offers across different size classes. The intention is that this
will be offered as part of hardened variants of the Bionic and musl C standard
libraries.
### Automated Test Framework
A collection of simple, automated tests are provided and can be run with the
make command as follows:
make test
## Compatibility
OpenSSH 8.1 or higher is required to allow the mprotect `PROT_READ|PROT_WRITE`
system calls in the seccomp-bpf filter rather than killing the process.
## OS integration
### Android-based operating systems
On GrapheneOS, hardened\_malloc is integrated into the standard C library as
the standard malloc implementation. Other Android-based operating systems can
reuse [the integration
code](https://github.com/GrapheneOS/platform_bionic/commit/20160b81611d6f2acd9ab59241bebeac7cf1d71c)
to provide it. If desired, jemalloc can be left as a runtime configuration
option by only conditionally using hardened\_malloc to give users the choice
between performance and security. However, this reduces security for threat
models where persistent state is untrusted, i.e. verified boot and attestation
(see the [attestation sister project](https://attestation.app/about)).
Make sure to raise `vm.max_map_count` substantially too to accommodate the very
large number of guard pages created by hardened\_malloc. This can be done in
`init.rc` (`system/core/rootdir/init.rc`) near the other virtual memory
configuration:
write /proc/sys/vm/max_map_count 1048576
This is unnecessary if you set `CONFIG_GUARD_SLABS_INTERVAL` to a very large
value in the build configuration.
### Traditional Linux-based operating systems
On traditional Linux-based operating systems, hardened\_malloc can either be
integrated into the libc implementation as a replacement for the standard
malloc implementation or loaded as a dynamic library. Rather than rebuilding
each executable to be linked against it, it can be added as a preloaded
library to `/etc/ld.so.preload`. For example, with `libhardened_malloc.so`
installed to `/usr/local/lib/libhardened_malloc.so`, add that full path as a
line to the `/etc/ld.so.preload` configuration file:
/usr/local/lib/libhardened_malloc.so
The format of this configuration file is a whitespace-separated list, so it's
good practice to put each library on a separate line.
Using the `LD_PRELOAD` environment variable to load it on a case-by-case basis
will not work when `AT_SECURE` is set such as with setuid binaries. It's also
generally not a recommended approach for production usage. The recommendation
is to enable it globally and make exceptions for performance critical cases by
running the application in a container / namespace without it enabled.
Make sure to raise `vm.max_map_count` substantially too to accommodate the very
large number of guard pages created by hardened\_malloc. As an example, in
`/etc/sysctl.d/hardened_malloc.conf`:
vm.max_map_count = 1048576
This is unnecessary if you set `CONFIG_GUARD_SLABS_INTERVAL` to a very large
value in the build configuration.
On arm64, make sure your kernel is configured to use 4k pages since we haven't
yet added support for 16k and 64k pages. The kernel also has to be configured
to use 4 level page tables for the full 48 bit address space instead of only
having a 39 bit address space for the default hardened\_malloc configuration.
It's possible to reduce the class region size substantially to make a 39 bit
address space workable but the defaults won't work.
## Configuration
You can set some configuration options at compile-time via arguments to the
@ -90,8 +193,45 @@ between portability, performance, memory usage or security. The core design
choices are not configurable and the allocator remains very security-focused
even with all the optional features disabled.
The configuration system supports a configuration template system with two
standard presets: the default configuration (`config/default.mk`) and a light
configuration (`config/light.mk`). Packagers are strongly encouraged to ship
both the standard `default` and `light` configuration. You can choose the
configuration to build using `make VARIANT=light` where `make VARIANT=default`
is the same as `make`. Non-default configuration templates will build a library
with the suffix `-variant` such as `libhardened_malloc-light.so` and will use
an `out-variant` directory instead of `out` for the build.
The `default` configuration template has all normal optional security features
enabled (just not the niche `CONFIG_SEAL_METADATA`) and is quite aggressive in
terms of sacrificing performance and memory usage for security. The `light`
configuration template disables the slab quarantines, write after free check,
slot randomization and raises the guard slab interval from 1 to 8 but leaves
zero-on-free and slab canaries enabled. The `light` configuration has solid
performance and memory usage while still providing much better security
properties than mainstream allocators. Disabling zero-on-free would
gain more performance but doesn't make much difference for small allocations
without also disabling slab canaries. Slab canaries slightly raise memory use
and slightly slow down performance but are quite important to mitigate small
overflows and C string overflows. Disabling slab canaries is not recommended
in most cases since it would no longer be a strict upgrade over traditional
allocators with headers on allocations and basic consistency checks for them.
For reduced memory usage at the expense of performance (this will also reduce
the size of the empty slab caches and quarantines, saving a lot of memory,
since those are currently based on the size of the largest size class):
make \
N_ARENA=1 \
CONFIG_EXTENDED_SIZE_CLASSES=false
The following boolean configuration options are available:
* `CONFIG_WERROR`: `true` (default) or `false` to control whether compiler
warnings are treated as errors. This is highly recommended, but it can be
disabled to avoid patching the Makefile if a compiler version not tested by
the project is being used and has warnings. Investigating these warnings is
still recommended and the intention is to always be free of any warnings.
* `CONFIG_NATIVE`: `true` (default) or `false` to control whether the code is
optimized for the detected CPU on the host. If this is disabled, setting up a
custom `-march` higher than the baseline architecture is highly recommended
@ -104,12 +244,15 @@ The following boolean configuration options are available:
allocations are zeroed on free, to mitigate use-after-free and uninitialized
use vulnerabilities along with purging lots of potentially sensitive data
from the process as soon as possible. This has a performance cost scaling to
the size of the allocation, which is usually acceptable.
the size of the allocation, which is usually acceptable. This is not relevant
to large allocations because the pages are given back to the kernel.
* `CONFIG_WRITE_AFTER_FREE_CHECK`: `true` (default) or `false` to control
sanity checking that new allocations contain zeroed memory. This can detect
writes caused by a write-after-free vulnerability and mixes well with the
features for making memory reuse randomized / delayed. This has a performance
cost scaling to the size of the allocation, which is usually acceptable.
sanity checking that new small allocations contain zeroed memory. This can
detect writes caused by a write-after-free vulnerability and mixes well with
the features for making memory reuse randomized / delayed. This has a
performance cost scaling to the size of the allocation, which is usually
acceptable. This is not relevant to large allocations because they're always
a fresh memory mapping from the kernel.
* `CONFIG_SLOT_RANDOMIZE`: `true` (default) or `false` to randomize selection
of free slots within slabs. This has a measurable performance cost and isn't
one of the important security features, but the cost has been deemed more
@ -126,84 +269,112 @@ The following boolean configuration options are available:
* `CONFIG_SEAL_METADATA`: `true` or `false` (default) to control whether Memory
Protection Keys are used to disable access to all writable allocator state
outside of the memory allocator code. It's currently disabled by default due
to being extremely experimental and a significant performance cost for this
use case on current generation hardware, which may become drastically lower
in the future. Whether or not this feature is enabled, the metadata is all
contained within an isolated memory region with high entropy random guard
regions around it.
to a significant performance cost for this use case on current generation
hardware, which may become drastically lower in the future. Whether or not
this feature is enabled, the metadata is all contained within an isolated
memory region with high entropy random guard regions around it.
The following integer configuration options are available. Proper sanity checks
for the chosen values are not written yet, so use them at your own peril:
The following integer configuration options are available:
* `CONFIG_SLAB_QUARANTINE_RANDOM_LENGTH`: `1` (default) to control the number
of slots in the random array used to randomize reuse for small memory
allocations. This sets the length for the largest size class (currently
16384) and the quarantine length for smaller size classes is scaled to match
the total memory of the quarantined allocations (1 becomes 1024 for 16 byte
allocations).
allocations. This sets the length for the largest size class (either 16kiB
or 128kiB based on `CONFIG_EXTENDED_SIZE_CLASSES`) and the quarantine length
for smaller size classes is scaled to match the total memory of the
quarantined allocations (1 becomes 1024 for 16 byte allocations with 16kiB
as the largest size class, or 8192 with 128kiB as the largest; see the worked
example below this list).
* `CONFIG_SLAB_QUARANTINE_QUEUE_LENGTH`: `1` (default) to control the number of
slots in the queue used to delay reuse for small memory allocations. This
sets the length for the largest size class (currently 16384) and the
quarantine length for smaller size classes is scaled to match the total
memory of the quarantined allocations (1 becomes 1024 for 16 byte
allocations).
sets the length for the largest size class (either 16kiB or 128kiB based on
`CONFIG_EXTENDED_SIZE_CLASSES`) and the quarantine length for smaller size
classes is scaled to match the total memory of the quarantined allocations (1
becomes 1024 for 16 byte allocations with 16kiB as the largest size class, or
8192 with 128kiB as the largest).
* `CONFIG_GUARD_SLABS_INTERVAL`: `1` (default) to control the number of slabs
before a slab is skipped and left as an unused memory protected guard slab
before a slab is skipped and left as an unused memory protected guard slab.
The default of `1` leaves a guard slab between every slab. This feature does
not have a *direct* performance cost, but it makes the address space usage
sparser which can indirectly hurt performance. The kernel also needs to track
a lot more memory mappings, which uses a bit of extra memory and slows down
memory mapping and memory protection changes in the process. The kernel uses
O(log n) algorithms for this and system calls are already fairly slow anyway,
so having many extra mappings doesn't usually add up to a significant cost.
* `CONFIG_GUARD_SIZE_DIVISOR`: `2` (default) to control the maximum size of the
guard regions placed on both sides of large memory allocations, relative to
the usable size of the memory allocation
* `CONFIG_REGION_QUARANTINE_RANDOM_LENGTH`: `128` (default) to control the
the usable size of the memory allocation.
* `CONFIG_REGION_QUARANTINE_RANDOM_LENGTH`: `256` (default) to control the
number of slots in the random array used to randomize region reuse for large
memory allocations
memory allocations.
* `CONFIG_REGION_QUARANTINE_QUEUE_LENGTH`: `1024` (default) to control the
number of slots in the queue used to delay region reuse for large memory
allocations
allocations.
* `CONFIG_REGION_QUARANTINE_SKIP_THRESHOLD`: `33554432` (default) to control
the size threshold where large allocations will not be quarantined
the size threshold where large allocations will not be quarantined.
* `CONFIG_FREE_SLABS_QUARANTINE_RANDOM_LENGTH`: `32` (default) to control the
number of slots in the random array used to randomize free slab reuse
number of slots in the random array used to randomize free slab reuse.
* `CONFIG_CLASS_REGION_SIZE`: `34359738368` (default) to control the size of
the size class regions
* `CONFIG_N_ARENA`: `1` (default) to control the number of arenas
the size class regions.
* `CONFIG_N_ARENA`: `4` (default) to control the number of arenas
* `CONFIG_STATS`: `false` (default) to control whether stats on allocation /
deallocation count and active allocations are tracked. This is currently only
exposed via the mallinfo APIs on Android.
deallocation count and active allocations are tracked. See the [section on
stats](#stats) for more details.
* `CONFIG_EXTENDED_SIZE_CLASSES`: `true` (default) to control whether small
size class go up to 64k instead of the minimum requirement for avoiding
memory waste of 16k. The option to extend it even further will be offered in
the future when better support for larger slab allocations is added.
size classes go up to 128kiB instead of the minimum requirement for avoiding
memory waste of 16kiB. The option to extend it even further will be offered
in the future when better support for larger slab allocations is added. See
the [section on size classes](#size-classes) below for details.
* `CONFIG_LARGE_SIZE_CLASSES`: `true` (default) to control whether large
allocations use the slab allocation size class scheme instead of page size
granularity (see the section on size classes below)
granularity. See the [section on size classes](#size-classes) below for
details.
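As a rough worked example of the quarantine length scaling described for
`CONFIG_SLAB_QUARANTINE_RANDOM_LENGTH` and `CONFIG_SLAB_QUARANTINE_QUEUE_LENGTH`
above, the following sketch computes the per-class lengths. The formula is
inferred from the description rather than taken from the implementation, and
the sampled size class values are only a small illustrative subset:
```c
#include <stdio.h>

int main(void) {
    const size_t configured_length = 1;       // value of the configuration option
    const size_t largest_size_class = 131072; // 128kiB with CONFIG_EXTENDED_SIZE_CLASSES=true
    const size_t sample_classes[] = {16, 256, 16384, 131072};

    for (size_t i = 0; i < sizeof(sample_classes) / sizeof(sample_classes[0]); i++) {
        // each size class quarantines roughly the same total memory as the largest class
        size_t length = configured_length * largest_size_class / sample_classes[i];
        printf("%zu byte size class: %zu quarantine slots\n", sample_classes[i], length);
    }
    // prints 8192 slots for the 16 byte class and 1 slot for the 128kiB class,
    // matching the figures quoted above
    return 0;
}
```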
There will be more control over enabled features in the future along with
control over fairly arbitrarily chosen values like the size of empty slab
caches (making them smaller improves security and reduces memory usage while
larger caches can substantially improve performance).
## Basic design
## Core design
The current design is very simple and will become a bit more sophisticated as
the basic features are completed and the implementation is hardened and
optimized. The allocator is exclusive to 64-bit platforms in order to take full
advantage of the abundant address space without being constrained by needing to
keep the design compatible with 32-bit.
The core design of the allocator is very simple / minimalist. The allocator is
exclusive to 64-bit platforms in order to take full advantage of the abundant
address space without being constrained by needing to keep the design
compatible with 32-bit.
The mutable allocator state is entirely located within a dedicated metadata
region, and the allocator is designed around this approach for both small
(slab) allocations and large allocations. This provides reliable, deterministic
protections against invalid free including double frees, and protects metadata
from attackers. Traditional allocator exploitation techniques do not work with
the hardened\_malloc implementation.
Small allocations are always located in a large memory region reserved for slab
allocations. It can be determined that an allocation is one of the small size
classes from the address range. Each small size class has a separate reserved
region within the larger region, and the size of a small allocation can simply
be determined from the range. Each small size class has a separate out-of-line
metadata array outside of the overall allocation region, with the index of the
metadata struct within the array mapping to the index of the slab within the
dedicated size class region. Slabs are a multiple of the page size and are
page aligned. The entire small size class region starts out memory protected
and becomes readable / writable as it gets allocated, with idle slabs beyond
the cache limit having their pages dropped and the memory protected again.
allocations. On free, it can be determined that an allocation is one of the
small size classes from the address range. If arenas are enabled, the arena is
also determined from the address range as each arena has a dedicated sub-region
in the slab allocation region. Arenas provide totally independent slab
allocators with their own allocator state and no coordination between them.
Once the base region is determined (simply the slab allocation region as a
whole without any arenas enabled), the size class is determined from the
address range too, since it's divided up into a sub-region for each size class.
There's a top level slab allocation region, divided up into arenas, with each
of those divided up into size class regions. The size class regions each have a
random base within a large guard region. Once the size class is determined, the
slab size is known, and the index of the slab is calculated and used to obtain
the slab metadata for the slab from the slab metadata array. Finally, the index
of the slot within the slab provides the index of the bit tracking the slot in
the bitmap. Every slab allocation slot has a dedicated bit in a bitmap tracking
whether it's free, along with a separate bitmap for tracking allocations in the
quarantine. The slab metadata entries in the array have intrusive lists
threaded through them to track partial slabs (partially filled, and these are
the first choice for allocation), empty slabs (limited amount of cached free
memory) and free slabs (purged / memory protected).
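To make the lookup above more concrete, here is a simplified sketch of deriving
the size class, slab index and slot index purely from an address. The constants,
names and contiguous layout are assumptions for illustration only; the real
implementation also handles arenas and the random base of each size class
region:
```c
#include <stddef.h>
#include <stdint.h>

// toy layout: a handful of size classes, page-sized slabs, fixed-size class regions
#define N_CLASSES 4
static const size_t size_classes[N_CLASSES] = {16, 32, 48, 64};
static const size_t slab_size = 4096;
static const size_t class_region_size = (size_t)1 << 30;

struct slot_location {
    size_t size_class; // index of the size class sub-region containing the address
    size_t slab_index; // index into that class's out-of-line slab metadata array
    size_t slot_index; // index of the slot's bit in the slab bitmap
};

// assumes p lies inside one of the toy size class regions starting at region_start
static struct slot_location locate(uintptr_t p, uintptr_t region_start) {
    uintptr_t offset = p - region_start;
    struct slot_location loc;
    loc.size_class = offset / class_region_size;          // which size class region
    uintptr_t class_offset = offset % class_region_size;  // offset within that region
    loc.slab_index = class_offset / slab_size;             // which slab -> metadata entry
    loc.slot_index = (class_offset % slab_size) / size_classes[loc.size_class]; // which slot bit
    return loc;
}
```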
Large allocations are tracked via a global hash table mapping their address to
their size and guard size. They're simply memory mappings and get mapped on
allocation and then unmapped on free.
their size and random guard size. They're simply memory mappings and get mapped
on allocation and then unmapped on free. Large allocations are the only dynamic
memory mappings made by the allocator, since the address space for allocator
state (including both small / large allocation metadata) and slab allocations
is statically reserved.
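As a sketch of the kind of record the hash table described above keeps per
large allocation (the field names are illustrative, not necessarily the actual
metadata layout):
```c
#include <stddef.h>

struct large_allocation {
    void *usable;      // address returned to the caller
    size_t size;       // usable size of the mapping
    size_t guard_size; // size of the random guard region on each side
};
```
On free, the record is looked up by address, the mapping including its guards is
unmapped or quarantined, and the entry is removed, which is what makes invalid
and double frees of large allocations reliably detectable.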
This allocator is aimed at production usage, not aiding with finding and fixing
memory corruption bugs for software development. It does find many latent bugs
@ -268,6 +439,7 @@ was a bit less important and if a core goal was finding latent bugs.
* Slab allocations are zeroed on free
* Detection of write-after-free for slab allocations by verifying zero filling
is intact at allocation time
* Delayed free via a combination of FIFO and randomization for slab allocations
* Large allocations are purged and memory protected on free with the memory
mapping kept reserved in a quarantine to detect use-after-free
* The quarantine is primarily based on a FIFO ring buffer, with the oldest
@ -278,7 +450,6 @@ was a bit less important and if a core goal was finding latent bugs.
of the quarantine
* Memory in fresh allocations is consistently zeroed due to it either being
fresh pages or zeroed on free after previous usage
* Delayed free via a combination of FIFO and randomization for slab allocations
* Random canaries placed after each slab allocation to *absorb*
and then later detect overflows/underflows
* High entropy per-slab random values
@ -287,8 +458,9 @@ was a bit less important and if a core goal was finding latent bugs.
size class regions interspersed with guard pages
* Zero size allocations are a dedicated size class with the entire region
remaining non-readable and non-writable
* Extension for retrieving the size of allocations with fallback
to a sentinel for pointers not managed by the allocator
* Extension for retrieving the size of allocations with fallback to a sentinel
for pointers not managed by the allocator [in-progress, full implementation
needs to be ported from the previous OpenBSD malloc-based allocator]
* Can also return accurate values for pointers *within* small allocations
* The same applies to pointers within the first page of large allocations,
otherwise it currently has to return a sentinel
@ -313,23 +485,24 @@ was a bit less important and if a core goal was finding latent bugs.
The current implementation of random number generation for randomization-based
mitigations is based on generating a keystream from a stream cipher (ChaCha8)
in small chunks. A separate CSPRNG is used for each small size class, large
allocations, etc. in order to fit into the existing fine-grained locking model
without needing to waste memory per thread by having the CSPRNG state in Thread
Local Storage. Similarly, it's protected via the same approach taken for the
rest of the metadata. The stream cipher is regularly reseeded from the OS to
provide backtracking and prediction resistance with a negligible cost. The
reseed interval simply needs to be adjusted to the point that it stops
registering as having any significant performance impact. The performance
impact on recent Linux kernels is primarily from the high cost of system calls
and locking since the implementation is quite efficient (ChaCha20), especially
for just generating the key and nonce for another stream cipher (ChaCha8).
in small chunks. Separate CSPRNGs are used for each small size class in each
arena, large allocations and initialization in order to fit into the
fine-grained locking model without needing to waste memory per thread by
having the CSPRNG state in Thread Local Storage. Similarly, it's protected via
the same approach taken for the rest of the metadata. The stream cipher is
regularly reseeded from the OS to provide backtracking and prediction
resistance with a negligible cost. The reseed interval simply needs to be
adjusted to the point that it stops registering as having any significant
performance impact. The performance impact on recent Linux kernels is
primarily from the high cost of system calls and locking since the
implementation is quite efficient (ChaCha20), especially for just generating
the key and nonce for another stream cipher (ChaCha8).
ChaCha8 is a great fit because it's extremely fast across platforms without
relying on hardware support or complex platform-specific code. The security
margins of ChaCha20 would be completely overkill for the use case. Using
ChaCha8 avoids needing to resort to a non-cryptographically secure PRNG or
something without a lot of scrunity. The current implementation is simply the
something without a lot of scrutiny. The current implementation is simply the
reference implementation of ChaCha8 converted into a pure keystream by ripping
out the XOR of the message into the keystream.
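A minimal sketch of the chunked keystream arrangement described above, with a
stand-in generator in place of ChaCha8 and assumed chunk / reseed sizes; the
real allocator keeps one such state per small size class per arena, plus one
for large allocations:
```c
#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/random.h>

#define CHUNK 256                    // keystream is generated in small chunks
#define RESEED_INTERVAL (64 * 1024)  // output bytes between reseeds from the OS

// initialize with .index = CHUNK and .since_reseed = RESEED_INTERVAL so the
// first use reseeds and refills
struct rng_state {
    uint8_t buf[CHUNK];
    size_t index;          // next unused byte in the buffer
    size_t since_reseed;
    uint64_t cipher_state; // stand-in; the real state is a ChaCha8 key/nonce/counter
};

static void refill(struct rng_state *s) {
    if (s->since_reseed >= RESEED_INTERVAL) {
        // the kernel CSPRNG only has to produce a small fresh seed, giving
        // backtracking and prediction resistance at a low amortized cost
        if (getrandom(&s->cipher_state, sizeof(s->cipher_state), 0) !=
                (ssize_t)sizeof(s->cipher_state)) {
            abort();
        }
        s->since_reseed = 0;
    }
    // stand-in for expanding the ChaCha8 keystream into the buffer
    for (size_t i = 0; i < CHUNK; i++) {
        s->buf[i] = (uint8_t)(s->cipher_state += 0x9e3779b97f4a7c15ull);
    }
    s->index = 0;
    s->since_reseed += CHUNK;
}

static uint8_t get_random_byte(struct rng_state *s) {
    if (s->index == CHUNK) {
        refill(s);
    }
    return s->buf[s->index++];
}
```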
@ -423,10 +596,10 @@ retaining the isolation.
| size class | worst case internal fragmentation | slab slots | slab size | internal fragmentation for slabs |
| - | - | - | - | - |
| 20480 | 20.0% | 2 | 40960 | 0.0% |
| 24576 | 16.66% | 2 | 49152 | 0.0% |
| 28672 | 14.28% | 2 | 57344 | 0.0% |
| 32768 | 12.5% | 2 | 65536 | 0.0% |
| 20480 | 20.0% | 1 | 20480 | 0.0% |
| 24576 | 16.66% | 1 | 24576 | 0.0% |
| 28672 | 14.28% | 1 | 28672 | 0.0% |
| 32768 | 12.5% | 1 | 32768 | 0.0% |
| 40960 | 20.0% | 1 | 40960 | 0.0% |
| 49152 | 16.66% | 1 | 49152 | 0.0% |
| 57344 | 14.28% | 1 | 57344 | 0.0% |
@ -461,7 +634,7 @@ to finding the per-size-class metadata. The part that's still open to different
design choices is how arenas are assigned to threads. One approach is
statically assigning arenas via round-robin like the standard jemalloc
implementation, or statically assigning to a random arena which is essentially
the current implementation. Another option is dynamic load balancing via a
the current implementation. Another option is dynamic load balancing via a
heuristic like `sched_getcpu` for per-CPU arenas, which would offer better
performance than randomly choosing an arena each time while being more
predictable for an attacker. There are actually some security benefits from
@ -472,7 +645,7 @@ varying usage of size classes.
When there's substantial allocation or deallocation pressure, the allocator
does end up calling into the kernel to purge / protect unused slabs by
replacing them with fresh `PROT_NONE` regions along with unprotecting slabs
when partially filled and cached empty slabs are depleted. There will be
when partially filled and cached empty slabs are depleted. There will be
configuration over the amount of cached empty slabs, but it's not entirely a
performance vs. memory trade-off since memory protecting unused slabs is a nice
opportunistic boost to security. However, it's not really part of the core
@ -549,7 +722,7 @@ freeing as there would be if the kernel supported these features directly.
## Memory tagging
Integrating extensive support for ARMv8.5 memory tagging is planned and this
section will be expanded cover the details on the chosen design. The approach
section will be expanded to cover the details on the chosen design. The approach
for slab allocations is currently covered, but it can also be used for the
allocator metadata region and large allocations.
@ -587,38 +760,38 @@ reuse after a certain number of allocation cycles. Similarly to the initial tag
generation, tag values for adjacent allocations will be skipped by incrementing
past them.
For example, consider this slab of allocations that are not yet used with 16
For example, consider this slab of allocations that are not yet used with 15
representing the tag for free memory. For the sake of simplicity, there will be
no quarantine or other slabs for this example:
| 16 | 16 | 16 | 16 | 16 | 16 |
| 15 | 15 | 15 | 15 | 15 | 15 |
Three slots are randomly chosen for allocations, with random tags assigned (2,
15, 7) since these slots haven't ever been used and don't have saved values:
7, 14) since these slots haven't ever been used and don't have saved values:
| 16 | 2 | 16 | 15 | 7 | 16 |
| 15 | 2 | 15 | 7 | 14 | 15 |
The 2nd allocation slot is freed, and is set back to the tag for free memory
(16), but with the previous tag value stored in the freed space:
(15), but with the previous tag value stored in the freed space:
| 16 | 16 | 16 | 7 | 15 | 16 |
| 15 | 15 | 15 | 7 | 14 | 15 |
The first slot is allocated for the first time, receiving the random value 3:
| 3 | 16 | 16 | 7 | 15 | 16 |
| 3 | 15 | 15 | 7 | 14 | 15 |
The 2nd slot is randomly chosen again, so the previous tag (2) is retrieved and
incremented to 3 as part of the use-after-free mitigation. An adjacent
allocation already uses the tag 3, so the tag is further incremented to 4 (it
would be incremented to 5 if one of the adjacent tags was 4):
| 3 | 4 | 16 | 7 | 15 | 16 |
| 3 | 4 | 15 | 7 | 14 | 15 |
The last slot is randomly chosen for the next alocation, and is assigned the
random value 15. However, it's placed next to an allocation with the tag 15 so
The last slot is randomly chosen for the next allocation, and is assigned the
random value 14. However, it's placed next to an allocation with the tag 14 so
the tag is incremented and wraps around to 0:
| 3 | 4 | 16 | 7 | 15 | 0 |
| 3 | 4 | 15 | 7 | 14 | 0 |
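A small sketch of the tag selection rule walked through above, assuming 4-bit
tags with 15 reserved for free memory and hypothetical names; this illustrates
the described rule rather than the eventual implementation:
```c
#include <stdint.h>

#define FREE_TAG 15u    // tag reserved for free slots in this example
#define TAG_MODULUS 15u // allocation tags cycle through 0..14, skipping 15

// previous_tag is FREE_TAG when the slot has never been used before;
// random_tag is a freshly generated value in 0..14 used only in that case
static uint8_t pick_tag(uint8_t previous_tag, uint8_t random_tag,
                        uint8_t left_neighbor_tag, uint8_t right_neighbor_tag) {
    uint8_t tag = previous_tag == FREE_TAG
        ? random_tag                                   // first use: random tag
        : (uint8_t)((previous_tag + 1) % TAG_MODULUS); // reuse: increment the saved tag
    // skip tags currently used by adjacent slots, wrapping from 14 back to 0
    while (tag == left_neighbor_tag || tag == right_neighbor_tag) {
        tag = (uint8_t)((tag + 1) % TAG_MODULUS);
    }
    return tag;
}
```
Applied to the example above, reusing the slot whose saved tag is 2 yields 3,
then 4 after skipping the adjacent tag 3, and a random 14 placed next to an
existing 14 wraps around to 0.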
## API extensions
@ -647,6 +820,183 @@ this implementation, it retrieves an upper bound on the size for small memory
allocations based on calculating the size class region. This function is safe
to use from signal handlers already.
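For reference, a small usage example of these size extensions in the style of
the hardened\_malloc tests, which declare the prototypes directly since these
are allocator extensions rather than standard malloc API:
```c
#include <stdio.h>
#include <stdlib.h>

size_t malloc_object_size(void *ptr);
size_t malloc_object_size_fast(void *ptr);

int main(void) {
    char *p = malloc(16);
    if (!p) {
        return 1;
    }
    // upper bound on the object size; also works for pointers *within* small allocations
    printf("%zu\n", malloc_object_size(p));
    printf("%zu\n", malloc_object_size(p + 5));
    // lock-free variant returning a more limited upper bound
    printf("%zu\n", malloc_object_size_fast(p));
    free(p);
    return 0;
}
```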
## Stats
If stats are enabled, hardened\_malloc keeps track of allocator statistics in
order to provide implementations of `mallinfo` and `malloc_info`.
On Android, `mallinfo` is used for [mallinfo-based garbage collection
triggering](https://developer.android.com/preview/features#mallinfo) so
hardened\_malloc enables `CONFIG_STATS` by default. The `malloc_info`
implementation on Android is the standard one in Bionic, with the information
provided to Bionic via Android's internal extended `mallinfo` API with support
for arenas and size class bins. This means the `malloc_info` output is fully
compatible, including still having `jemalloc-1` as the version of the data
format to retain compatibility with existing tooling.
On non-Android Linux, `mallinfo` has zeroed fields even with `CONFIG_STATS`
enabled because glibc `mallinfo` is inherently broken. It defines the fields as
`int` instead of `size_t`, resulting in undefined signed overflows. It also
misuses the fields and provides a strange, idiosyncratic set of values rather
than following the SVID/XPG `mallinfo` definition. The `malloc_info` function
is still provided, with a similar format as what Android uses, with tweaks for
hardened\_malloc and the version set to `hardened_malloc-1`. The data format
may be changed in the future.
As an example, consider the following program from the hardened\_malloc tests:
```c
#include <pthread.h>
#include <malloc.h>
__attribute__((optimize(0)))
void leak_memory(void) {
(void)malloc(1024 * 1024 * 1024);
(void)malloc(16);
(void)malloc(32);
(void)malloc(4096);
}
void *do_work(void *p) {
leak_memory();
return NULL;
}
int main(void) {
pthread_t thread[4];
for (int i = 0; i < 4; i++) {
pthread_create(&thread[i], NULL, do_work, NULL);
}
for (int i = 0; i < 4; i++) {
pthread_join(thread[i], NULL);
}
malloc_info(0, stdout);
}
```
This produces the following output when piped through `xmllint --format -`:
```xml
<?xml version="1.0"?>
<malloc version="hardened_malloc-1">
<heap nr="0">
<bin nr="2" size="32">
<nmalloc>1</nmalloc>
<ndalloc>0</ndalloc>
<slab_allocated>4096</slab_allocated>
<allocated>32</allocated>
</bin>
<bin nr="3" size="48">
<nmalloc>1</nmalloc>
<ndalloc>0</ndalloc>
<slab_allocated>4096</slab_allocated>
<allocated>48</allocated>
</bin>
<bin nr="13" size="320">
<nmalloc>4</nmalloc>
<ndalloc>0</ndalloc>
<slab_allocated>20480</slab_allocated>
<allocated>1280</allocated>
</bin>
<bin nr="29" size="5120">
<nmalloc>2</nmalloc>
<ndalloc>0</ndalloc>
<slab_allocated>40960</slab_allocated>
<allocated>10240</allocated>
</bin>
<bin nr="45" size="81920">
<nmalloc>1</nmalloc>
<ndalloc>0</ndalloc>
<slab_allocated>81920</slab_allocated>
<allocated>81920</allocated>
</bin>
</heap>
<heap nr="1">
<bin nr="2" size="32">
<nmalloc>1</nmalloc>
<ndalloc>0</ndalloc>
<slab_allocated>4096</slab_allocated>
<allocated>32</allocated>
</bin>
<bin nr="3" size="48">
<nmalloc>1</nmalloc>
<ndalloc>0</ndalloc>
<slab_allocated>4096</slab_allocated>
<allocated>48</allocated>
</bin>
<bin nr="29" size="5120">
<nmalloc>1</nmalloc>
<ndalloc>0</ndalloc>
<slab_allocated>40960</slab_allocated>
<allocated>5120</allocated>
</bin>
</heap>
<heap nr="2">
<bin nr="2" size="32">
<nmalloc>1</nmalloc>
<ndalloc>0</ndalloc>
<slab_allocated>4096</slab_allocated>
<allocated>32</allocated>
</bin>
<bin nr="3" size="48">
<nmalloc>1</nmalloc>
<ndalloc>0</ndalloc>
<slab_allocated>4096</slab_allocated>
<allocated>48</allocated>
</bin>
<bin nr="29" size="5120">
<nmalloc>1</nmalloc>
<ndalloc>0</ndalloc>
<slab_allocated>40960</slab_allocated>
<allocated>5120</allocated>
</bin>
</heap>
<heap nr="3">
<bin nr="2" size="32">
<nmalloc>1</nmalloc>
<ndalloc>0</ndalloc>
<slab_allocated>4096</slab_allocated>
<allocated>32</allocated>
</bin>
<bin nr="3" size="48">
<nmalloc>1</nmalloc>
<ndalloc>0</ndalloc>
<slab_allocated>4096</slab_allocated>
<allocated>48</allocated>
</bin>
<bin nr="29" size="5120">
<nmalloc>1</nmalloc>
<ndalloc>0</ndalloc>
<slab_allocated>40960</slab_allocated>
<allocated>5120</allocated>
</bin>
</heap>
<heap nr="4">
<allocated_large>4294967296</allocated_large>
</heap>
</malloc>
```
The heap entries correspond to the arenas. Unlike jemalloc, hardened\_malloc
doesn't handle large allocations within the arenas, so it presents those in the
`malloc_info` statistics as a separate arena dedicated to large allocations.
For example, with 4 arenas enabled, there will be a 5th arena in the statistics
for the large allocations.
The `nmalloc` / `ndalloc` fields are 64-bit integers tracking allocation and
deallocation count. These are defined as wrapping on overflow, per the jemalloc
implementation.
See the [section on size classes](#size-classes) to map the size class bin
number to the corresponding size class. The bin index begins at 0, mapping to
the 0 byte size class, followed by 1 for the 16 byte size class, 2 for 32 bytes, etc., and
large allocations are treated as one group.
When stats aren't enabled, the `malloc_info` output will be an empty `malloc`
element.
## System calls
This is intended to aid with creating system call whitelists via seccomp-bpf
@ -665,6 +1015,7 @@ System calls used by all build configurations:
* `mremap(old, old_size, new_size, MREMAP_MAYMOVE|MREMAP_FIXED, new)`
* `munmap`
* `write(STDERR_FILENO, buf, len)` (before aborting due to memory corruption)
* `madvise(ptr, size, MADV_DONTNEED)`
The main distinction from a typical malloc implementation is the use of
getrandom. A common compatibility issue is that existing system call whitelists
@ -677,7 +1028,6 @@ Additional system calls when `CONFIG_SEAL_METADATA=true` is set:
* `pkey_alloc`
* `pkey_mprotect` instead of `mprotect` with an additional `pkey` parameter,
but otherwise the same (regular `mprotect` is never called)
* `uname` (to detect old buggy kernel versions)
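As an illustration of how those `pkey_*` calls are typically used for this kind
of metadata sealing, here is a hedged sketch of the general MPK pattern; the
names are hypothetical and this is not the allocator's actual code:
```c
#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/mman.h>

static int metadata_key = -1;

static void seal_metadata_init(void *metadata, size_t size) {
    // allocate a protection key whose default rights deny all access
    metadata_key = pkey_alloc(0, PKEY_DISABLE_ACCESS);
    if (metadata_key < 0) {
        abort();
    }
    // page protections stay PROT_READ|PROT_WRITE, but loads/stores are
    // additionally gated by the per-thread rights for metadata_key
    if (pkey_mprotect(metadata, size, PROT_READ | PROT_WRITE, metadata_key)) {
        abort();
    }
}

// allocator entry points would temporarily grant this thread access to the
// metadata, do their work, then drop access again on the way out
static void metadata_access_enable(void) {
    pkey_set(metadata_key, 0);
}

static void metadata_access_disable(void) {
    pkey_set(metadata_key, PKEY_DISABLE_ACCESS);
}
```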
Additional system calls for Android builds with `LABEL_MEMORY`:

config/default.mk Normal file
@ -0,0 +1,23 @@
CONFIG_WERROR := true
CONFIG_NATIVE := true
CONFIG_CXX_ALLOCATOR := true
CONFIG_UBSAN := false
CONFIG_SEAL_METADATA := false
CONFIG_ZERO_ON_FREE := true
CONFIG_WRITE_AFTER_FREE_CHECK := true
CONFIG_SLOT_RANDOMIZE := true
CONFIG_SLAB_CANARY := true
CONFIG_SLAB_QUARANTINE_RANDOM_LENGTH := 1
CONFIG_SLAB_QUARANTINE_QUEUE_LENGTH := 1
CONFIG_EXTENDED_SIZE_CLASSES := true
CONFIG_LARGE_SIZE_CLASSES := true
CONFIG_GUARD_SLABS_INTERVAL := 1
CONFIG_GUARD_SIZE_DIVISOR := 2
CONFIG_REGION_QUARANTINE_RANDOM_LENGTH := 256
CONFIG_REGION_QUARANTINE_QUEUE_LENGTH := 1024
CONFIG_REGION_QUARANTINE_SKIP_THRESHOLD := 33554432 # 32MiB
CONFIG_FREE_SLABS_QUARANTINE_RANDOM_LENGTH := 32
CONFIG_CLASS_REGION_SIZE := 34359738368 # 32GiB
CONFIG_N_ARENA := 4
CONFIG_STATS := false
CONFIG_SELF_INIT := true

config/light.mk Normal file
@ -0,0 +1,23 @@
CONFIG_WERROR := true
CONFIG_NATIVE := true
CONFIG_CXX_ALLOCATOR := true
CONFIG_UBSAN := false
CONFIG_SEAL_METADATA := false
CONFIG_ZERO_ON_FREE := true
CONFIG_WRITE_AFTER_FREE_CHECK := false
CONFIG_SLOT_RANDOMIZE := false
CONFIG_SLAB_CANARY := true
CONFIG_SLAB_QUARANTINE_RANDOM_LENGTH := 0
CONFIG_SLAB_QUARANTINE_QUEUE_LENGTH := 0
CONFIG_EXTENDED_SIZE_CLASSES := true
CONFIG_LARGE_SIZE_CLASSES := true
CONFIG_GUARD_SLABS_INTERVAL := 8
CONFIG_GUARD_SIZE_DIVISOR := 2
CONFIG_REGION_QUARANTINE_RANDOM_LENGTH := 256
CONFIG_REGION_QUARANTINE_QUEUE_LENGTH := 1024
CONFIG_REGION_QUARANTINE_SKIP_THRESHOLD := 33554432 # 32MiB
CONFIG_FREE_SLABS_QUARANTINE_RANDOM_LENGTH := 32
CONFIG_CLASS_REGION_SIZE := 34359738368 # 32GiB
CONFIG_N_ARENA := 4
CONFIG_STATS := false
CONFIG_SELF_INIT := true

File diff suppressed because it is too large

View File

@ -5,7 +5,9 @@
#include <malloc.h>
__BEGIN_DECLS
#ifdef __cplusplus
extern "C" {
#endif
#ifndef H_MALLOC_PREFIX
#define h_malloc malloc
@ -21,6 +23,7 @@ __BEGIN_DECLS
#define h_malloc_trim malloc_trim
#define h_malloc_stats malloc_stats
#define h_mallinfo mallinfo
#define h_mallinfo2 mallinfo2
#define h_malloc_info malloc_info
#define h_memalign memalign
@ -30,7 +33,12 @@ __BEGIN_DECLS
#define h_malloc_get_state malloc_get_state
#define h_malloc_set_state malloc_set_state
#define h_iterate iterate
#define h_mallinfo_narenas mallinfo_narenas
#define h_mallinfo_nbins mallinfo_nbins
#define h_mallinfo_arena_info mallinfo_arena_info
#define h_mallinfo_bin_info mallinfo_bin_info
#define h_malloc_iterate malloc_iterate
#define h_malloc_disable malloc_disable
#define h_malloc_enable malloc_enable
@ -40,9 +48,10 @@ __BEGIN_DECLS
#endif
// C standard
void *h_malloc(size_t size);
void *h_calloc(size_t nmemb, size_t size);
void *h_realloc(void *ptr, size_t size);
__attribute__((malloc)) __attribute__((alloc_size(1))) void *h_malloc(size_t size);
__attribute__((malloc)) __attribute__((alloc_size(1, 2))) void *h_calloc(size_t nmemb, size_t size);
__attribute__((alloc_size(2))) void *h_realloc(void *ptr, size_t size);
__attribute__((malloc)) __attribute__((alloc_size(2))) __attribute__((alloc_align(1)))
void *h_aligned_alloc(size_t alignment, size_t size);
void h_free(void *ptr);
@ -68,10 +77,11 @@ int h_malloc_info(int options, FILE *fp);
#endif
// obsolete glibc extensions
__attribute__((malloc)) __attribute__((alloc_size(2))) __attribute__((alloc_align(1)))
void *h_memalign(size_t alignment, size_t size);
#ifndef __ANDROID__
void *h_valloc(size_t size);
void *h_pvalloc(size_t size);
__attribute__((malloc)) __attribute__((alloc_size(1))) void *h_valloc(size_t size);
__attribute__((malloc)) void *h_pvalloc(size_t size);
#endif
#ifdef __GLIBC__
void h_cfree(void *ptr) __THROW;
@ -81,11 +91,11 @@ int h_malloc_set_state(void *state);
// Android extensions
#ifdef __ANDROID__
size_t __mallinfo_narenas(void);
size_t __mallinfo_nbins(void);
struct mallinfo __mallinfo_arena_info(size_t arena);
struct mallinfo __mallinfo_bin_info(size_t arena, size_t bin);
int h_iterate(uintptr_t base, size_t size, void (*callback)(uintptr_t ptr, size_t size, void *arg),
size_t h_mallinfo_narenas(void);
size_t h_mallinfo_nbins(void);
struct mallinfo h_mallinfo_arena_info(size_t arena);
struct mallinfo h_mallinfo_bin_info(size_t arena, size_t bin);
int h_malloc_iterate(uintptr_t base, size_t size, void (*callback)(uintptr_t ptr, size_t size, void *arg),
void *arg);
void h_malloc_disable(void);
void h_malloc_enable(void);
@ -94,10 +104,10 @@ void h_malloc_enable(void);
// hardened_malloc extensions
// return an upper bound on object size for any pointer based on malloc metadata
size_t h_malloc_object_size(void *ptr);
size_t h_malloc_object_size(const void *ptr);
// similar to malloc_object_size, but avoiding locking so the results are much more limited
size_t h_malloc_object_size_fast(void *ptr);
size_t h_malloc_object_size_fast(const void *ptr);
// The free function with an extra parameter for passing the size requested at
// allocation time.
@ -111,6 +121,8 @@ size_t h_malloc_object_size_fast(void *ptr);
// passed size matches the allocated size.
void h_free_sized(void *ptr, size_t expected_size);
__END_DECLS
#ifdef __cplusplus
}
#endif
#endif

View File

@ -1,7 +1,10 @@
#include <errno.h>
#include <sys/mman.h>
#ifdef LABEL_MEMORY
#include <sys/prctl.h>
#endif
#ifndef PR_SET_VMA
#define PR_SET_VMA 0x53564d41
@ -25,30 +28,28 @@ void *memory_map(size_t size) {
return p;
}
int memory_map_fixed(void *ptr, size_t size) {
bool memory_map_fixed(void *ptr, size_t size) {
void *p = mmap(ptr, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0);
if (unlikely(p == MAP_FAILED)) {
if (errno != ENOMEM) {
fatal_error("non-ENOMEM MAP_FIXED mmap failure");
}
return 1;
bool ret = p == MAP_FAILED;
if (unlikely(ret) && errno != ENOMEM) {
fatal_error("non-ENOMEM MAP_FIXED mmap failure");
}
return 0;
return ret;
}
int memory_unmap(void *ptr, size_t size) {
int ret = munmap(ptr, size);
bool memory_unmap(void *ptr, size_t size) {
bool ret = munmap(ptr, size);
if (unlikely(ret) && errno != ENOMEM) {
fatal_error("non-ENOMEM munmap failure");
}
return ret;
}
static int memory_protect_prot(void *ptr, size_t size, int prot, UNUSED int pkey) {
static bool memory_protect_prot(void *ptr, size_t size, int prot, UNUSED int pkey) {
#ifdef USE_PKEY
int ret = pkey_mprotect(ptr, size, prot, pkey);
bool ret = pkey_mprotect(ptr, size, prot, pkey);
#else
int ret = mprotect(ptr, size, prot);
bool ret = mprotect(ptr, size, prot);
#endif
if (unlikely(ret) && errno != ENOMEM) {
fatal_error("non-ENOMEM mprotect failure");
@ -56,42 +57,50 @@ static int memory_protect_prot(void *ptr, size_t size, int prot, UNUSED int pkey
return ret;
}
int memory_protect_ro(void *ptr, size_t size) {
bool memory_protect_ro(void *ptr, size_t size) {
return memory_protect_prot(ptr, size, PROT_READ, -1);
}
int memory_protect_rw(void *ptr, size_t size) {
bool memory_protect_rw(void *ptr, size_t size) {
return memory_protect_prot(ptr, size, PROT_READ|PROT_WRITE, -1);
}
int memory_protect_rw_metadata(void *ptr, size_t size) {
bool memory_protect_rw_metadata(void *ptr, size_t size) {
return memory_protect_prot(ptr, size, PROT_READ|PROT_WRITE, get_metadata_key());
}
int memory_remap(void *old, size_t old_size, size_t new_size) {
#ifdef HAVE_COMPATIBLE_MREMAP
bool memory_remap(void *old, size_t old_size, size_t new_size) {
void *ptr = mremap(old, old_size, new_size, 0);
if (unlikely(ptr == MAP_FAILED)) {
if (errno != ENOMEM) {
fatal_error("non-ENOMEM mremap failure");
}
return 1;
bool ret = ptr == MAP_FAILED;
if (unlikely(ret) && errno != ENOMEM) {
fatal_error("non-ENOMEM mremap failure");
}
return 0;
return ret;
}
int memory_remap_fixed(void *old, size_t old_size, void *new, size_t new_size) {
bool memory_remap_fixed(void *old, size_t old_size, void *new, size_t new_size) {
void *ptr = mremap(old, old_size, new_size, MREMAP_MAYMOVE|MREMAP_FIXED, new);
if (unlikely(ptr == MAP_FAILED)) {
if (errno != ENOMEM) {
fatal_error("non-ENOMEM MREMAP_FIXED mremap failure");
}
return 1;
bool ret = ptr == MAP_FAILED;
if (unlikely(ret) && errno != ENOMEM) {
fatal_error("non-ENOMEM MREMAP_FIXED mremap failure");
}
return 0;
return ret;
}
#endif
bool memory_purge(void *ptr, size_t size) {
int ret = madvise(ptr, size, MADV_DONTNEED);
if (unlikely(ret) && errno != ENOMEM) {
fatal_error("non-ENOMEM MADV_DONTNEED madvise failure");
}
return ret;
}
void memory_set_name(UNUSED void *ptr, UNUSED size_t size, UNUSED const char *name) {
bool memory_set_name(UNUSED void *ptr, UNUSED size_t size, UNUSED const char *name) {
#ifdef LABEL_MEMORY
prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ptr, size, name);
return prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ptr, size, name);
#else
return false;
#endif
}

View File

@ -1,18 +1,26 @@
#ifndef MEMORY_H
#define MEMORY_H
#include <stdbool.h>
#include <stddef.h>
#ifdef __linux__
#define HAVE_COMPATIBLE_MREMAP
#endif
int get_metadata_key(void);
void *memory_map(size_t size);
int memory_map_fixed(void *ptr, size_t size);
int memory_unmap(void *ptr, size_t size);
int memory_protect_ro(void *ptr, size_t size);
int memory_protect_rw(void *ptr, size_t size);
int memory_protect_rw_metadata(void *ptr, size_t size);
int memory_remap(void *old, size_t old_size, size_t new_size);
int memory_remap_fixed(void *old, size_t old_size, void *new, size_t new_size);
void memory_set_name(void *ptr, size_t size, const char *name);
bool memory_map_fixed(void *ptr, size_t size);
bool memory_unmap(void *ptr, size_t size);
bool memory_protect_ro(void *ptr, size_t size);
bool memory_protect_rw(void *ptr, size_t size);
bool memory_protect_rw_metadata(void *ptr, size_t size);
#ifdef HAVE_COMPATIBLE_MREMAP
bool memory_remap(void *old, size_t old_size, size_t new_size);
bool memory_remap_fixed(void *old, size_t old_size, void *new, size_t new_size);
#endif
bool memory_purge(void *ptr, size_t size);
bool memory_set_name(void *ptr, size_t size, const char *name);
#endif

new.cc
@ -1,7 +1,9 @@
// needed with libstdc++ but not libc++
#if __has_include(<bits/functexcept.h>)
#include <bits/functexcept.h>
#include <new>
#endif
#define noreturn
#include <new>
#include "h_malloc.h"
#include "util.h"
@ -78,7 +80,6 @@ EXPORT void operator delete[](void *ptr, size_t size) noexcept {
h_free_sized(ptr, size);
}
#if __cplusplus >= 201703L
COLD static void *handle_out_of_memory(size_t size, size_t alignment, bool nothrow) {
void *ptr = nullptr;
@ -150,4 +151,3 @@ EXPORT void operator delete(void *ptr, size_t size, std::align_val_t) noexcept {
EXPORT void operator delete[](void *ptr, size_t size, std::align_val_t) noexcept {
h_free_sized(ptr, size);
}
#endif

pages.c
@ -9,10 +9,6 @@ static bool add_guards(size_t size, size_t guard_size, size_t *total_size) {
__builtin_add_overflow(*total_size, guard_size, total_size);
}
static uintptr_t alignment_ceiling(uintptr_t s, uintptr_t alignment) {
return ((s) + (alignment - 1)) & ((~alignment) + 1);
}
void *allocate_pages(size_t usable_size, size_t guard_size, bool unprotect, const char *name) {
size_t real_size;
if (unlikely(add_guards(usable_size, guard_size, &real_size))) {
@ -33,7 +29,7 @@ void *allocate_pages(size_t usable_size, size_t guard_size, bool unprotect, cons
}
void *allocate_pages_aligned(size_t usable_size, size_t alignment, size_t guard_size, const char *name) {
usable_size = PAGE_CEILING(usable_size);
usable_size = page_align(usable_size);
if (unlikely(!usable_size)) {
errno = ENOMEM;
return NULL;
@ -59,7 +55,7 @@ void *allocate_pages_aligned(size_t usable_size, size_t alignment, size_t guard_
void *usable = (char *)real + guard_size;
size_t lead_size = alignment_ceiling((uintptr_t)usable, alignment) - (uintptr_t)usable;
size_t lead_size = align((uintptr_t)usable, alignment) - (uintptr_t)usable;
size_t trail_size = alloc_size - lead_size - usable_size;
void *base = (char *)usable + lead_size;
@ -86,5 +82,7 @@ void *allocate_pages_aligned(size_t usable_size, size_t alignment, size_t guard_
}
void deallocate_pages(void *usable, size_t usable_size, size_t guard_size) {
memory_unmap((char *)usable - guard_size, usable_size + guard_size * 2);
if (unlikely(memory_unmap((char *)usable - guard_size, usable_size + guard_size * 2))) {
memory_purge(usable, usable_size);
}
}

View File

@ -5,16 +5,21 @@
#include <stddef.h>
#include <stdint.h>
#include "util.h"
#define PAGE_SHIFT 12
#ifndef PAGE_SIZE
#define PAGE_SIZE ((size_t)1 << PAGE_SHIFT)
#endif
#define PAGE_CEILING(s) (((s) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
void *allocate_pages(size_t usable_size, size_t guard_size, bool unprotect, const char *name);
void *allocate_pages_aligned(size_t usable_size, size_t alignment, size_t guard_size, const char *name);
void deallocate_pages(void *usable, size_t usable_size, size_t guard_size);
static inline size_t page_align(size_t size) {
return align(size, PAGE_SIZE);
}
static inline size_t hash_page(const void *p) {
uintptr_t u = (uintptr_t)p >> PAGE_SHIFT;
size_t sum = u;

View File

@ -5,17 +5,7 @@
#include "random.h"
#include "util.h"
#if __has_include(<sys/random.h>)
// glibc 2.25 and later
#include <sys/random.h>
#else
#include <unistd.h>
#include <sys/syscall.h>
static ssize_t getrandom(void *buf, size_t buflen, unsigned int flags) {
return syscall(SYS_getrandom, buf, buflen, flags);
}
#endif
static void get_random_seed(void *buf, size_t size) {
while (size) {
@ -102,7 +92,7 @@ u16 get_random_u16_uniform(struct random_state *state, u16 bound) {
if (leftover < bound) {
u16 threshold = -bound % bound;
while (leftover < threshold) {
random = get_random_u16(state);
random = get_random_u16(state);
multiresult = random * bound;
leftover = (u16)multiresult;
}
@ -129,7 +119,7 @@ u64 get_random_u64_uniform(struct random_state *state, u64 bound) {
if (leftover < bound) {
u64 threshold = -bound % bound;
while (leftover < threshold) {
random = get_random_u64(state);
random = get_random_u64(state);
multiresult = random * bound;
leftover = multiresult;
}

test/.gitignore
@ -1,4 +1,44 @@
large_array_growth
mallinfo
mallinfo2
malloc_info
offset
delete_type_size_mismatch
double_free_large
double_free_large_delayed
double_free_small
double_free_small_delayed
invalid_free_protected
invalid_free_small_region
invalid_free_small_region_far
invalid_free_unprotected
read_after_free_large
read_after_free_small
read_zero_size
string_overflow
unaligned_free_large
unaligned_free_small
uninitialized_free
uninitialized_malloc_usable_size
uninitialized_realloc
write_after_free_large
write_after_free_large_reuse
write_after_free_small
write_after_free_small_reuse
write_zero_size
unaligned_malloc_usable_size_small
invalid_malloc_usable_size_small
invalid_malloc_usable_size_small_quarantine
malloc_object_size
malloc_object_size_offset
invalid_malloc_object_size_small
invalid_malloc_object_size_small_quarantine
impossibly_large_malloc
overflow_large_1_byte
overflow_large_8_byte
overflow_small_1_byte
overflow_small_8_byte
uninitialized_read_large
uninitialized_read_small
realloc_init
__pycache__/

View File

@ -1,23 +1,76 @@
CONFIG_SLAB_CANARY := true
CONFIG_EXTENDED_SIZE_CLASSES := true
ifneq ($(VARIANT),)
$(error testing non-default variants not yet supported)
endif
ifeq (,$(filter $(CONFIG_SLAB_CANARY),true false))
$(error CONFIG_SLAB_CANARY must be true or false)
endif
LDLIBS := -lpthread
dir=$(dir $(realpath $(firstword $(MAKEFILE_LIST))))
CPPFLAGS += \
CPPFLAGS := \
-D_GNU_SOURCE \
-DSLAB_CANARY=$(CONFIG_SLAB_CANARY) \
-DCONFIG_EXTENDED_SIZE_CLASSES=$(CONFIG_EXTENDED_SIZE_CLASSES)
SHARED_FLAGS := -O3
CFLAGS := -std=c17 $(SHARED_FLAGS) -Wmissing-prototypes
CXXFLAGS := -std=c++17 -fsized-deallocation $(SHARED_FLAGS)
LDFLAGS := -Wl,-L$(dir)../out,-R,$(dir)../out
LDLIBS := -lpthread -lhardened_malloc
EXECUTABLES := \
offset \
mallinfo \
mallinfo2 \
malloc_info \
large_array_growth
large_array_growth \
double_free_large \
double_free_large_delayed \
double_free_small \
double_free_small_delayed \
unaligned_free_large \
unaligned_free_small \
read_after_free_large \
read_after_free_small \
write_after_free_large \
write_after_free_large_reuse \
write_after_free_small \
write_after_free_small_reuse \
read_zero_size \
write_zero_size \
invalid_free_protected \
invalid_free_unprotected \
invalid_free_small_region \
invalid_free_small_region_far \
uninitialized_read_small \
uninitialized_read_large \
uninitialized_free \
uninitialized_realloc \
uninitialized_malloc_usable_size \
overflow_large_1_byte \
overflow_large_8_byte \
overflow_small_1_byte \
overflow_small_8_byte \
string_overflow \
delete_type_size_mismatch \
unaligned_malloc_usable_size_small \
invalid_malloc_usable_size_small \
invalid_malloc_usable_size_small_quarantine \
malloc_object_size \
malloc_object_size_offset \
invalid_malloc_object_size_small \
invalid_malloc_object_size_small_quarantine \
impossibly_large_malloc \
realloc_init
all: $(EXECUTABLES)
clean:
rm -f $(EXECUTABLES)
rm -fr ./__pycache__

test/__init__.py Normal file
View File

@ -1,11 +1,12 @@
#include <stdint.h>
#include "test_util.h"
struct foo {
uint64_t a, b, c, d;
};
__attribute__((optimize(0)))
int main(void) {
OPTNONE int main(void) {
void *p = new char;
struct foo *c = (struct foo *)p;
delete c;

View File

@ -1,8 +1,9 @@
#include <stdlib.h>
__attribute__((optimize(0)))
int main(void) {
void *p = malloc(128 * 1024);
#include "test_util.h"
OPTNONE int main(void) {
void *p = malloc(256 * 1024);
if (!p) {
return 1;
}

View File

@ -1,12 +1,13 @@
#include <stdlib.h>
__attribute__((optimize(0)))
int main(void) {
void *p = malloc(128 * 1024);
#include "test_util.h"
OPTNONE int main(void) {
void *p = malloc(256 * 1024);
if (!p) {
return 1;
}
void *q = malloc(128 * 1024);
void *q = malloc(256 * 1024);
if (!q) {
return 1;
}

View File

@ -1,7 +1,8 @@
#include <stdlib.h>
__attribute__((optimize(0)))
int main(void) {
#include "test_util.h"
OPTNONE int main(void) {
void *p = malloc(16);
if (!p) {
return 1;

View File

@ -1,7 +1,8 @@
#include <stdlib.h>
__attribute__((optimize(0)))
int main(void) {
#include "test_util.h"
OPTNONE int main(void) {
void *p = malloc(16);
if (!p) {
return 1;

View File

@ -0,0 +1,8 @@
#include <stdlib.h>
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(-8);
return !(p == NULL);
}

View File

@ -2,8 +2,9 @@
#include <sys/mman.h>
__attribute__((optimize(0)))
int main(void) {
#include "test_util.h"
OPTNONE int main(void) {
free(malloc(16));
char *p = mmap(NULL, 4096 * 16, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
if (p == MAP_FAILED) {

View File

@ -1,7 +1,8 @@
#include <stdlib.h>
__attribute__((optimize(0)))
int main(void) {
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(16);
if (!p) {
return 1;

View File

@ -1,7 +1,8 @@
#include <stdlib.h>
__attribute__((optimize(0)))
int main(void) {
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(16);
if (!p) {
return 1;

View File

@ -2,8 +2,9 @@
#include <sys/mman.h>
__attribute__((optimize(0)))
int main(void) {
#include "test_util.h"
OPTNONE int main(void) {
free(malloc(16));
char *p = mmap(NULL, 4096 * 16, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
if (p == MAP_FAILED) {

View File

@ -0,0 +1,15 @@
#include <stdlib.h>
#include "test_util.h"
size_t malloc_object_size(void *ptr);
OPTNONE int main(void) {
char *p = malloc(16);
if (!p) {
return 1;
}
char *q = p + 4096 * 4;
malloc_object_size(q);
return 0;
}

View File

@ -0,0 +1,15 @@
#include <stdlib.h>
#include "test_util.h"
size_t malloc_object_size(void *ptr);
OPTNONE int main(void) {
void *p = malloc(16);
if (!p) {
return 1;
}
free(p);
malloc_object_size(p);
return 0;
}

View File

@ -0,0 +1,13 @@
#include <malloc.h>
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(16);
if (!p) {
return 1;
}
char *q = p + 4096 * 4;
malloc_usable_size(q);
return 0;
}

View File

@ -0,0 +1,13 @@
#include <malloc.h>
#include "test_util.h"
OPTNONE int main(void) {
void *p = malloc(16);
if (!p) {
return 1;
}
free(p);
malloc_usable_size(p);
return 0;
}

View File

@ -1,8 +1,9 @@
#include <stdlib.h>
#include <string.h>
__attribute__((optimize(0)))
int main(void) {
#include "test_util.h"
OPTNONE int main(void) {
void *p = NULL;
size_t size = 256 * 1024;

View File

@ -1,21 +1,44 @@
#include <stdlib.h>
#include <stdio.h>
#if defined(__GLIBC__) || defined(__ANDROID__)
#include <malloc.h>
#endif
__attribute__((optimize(0)))
int main(void) {
malloc(1024 * 1024 * 1024);
malloc(16);
malloc(32);
malloc(64);
#include "test_util.h"
static void print_mallinfo(void) {
#if defined(__GLIBC__) || defined(__ANDROID__)
struct mallinfo info = mallinfo();
printf("arena: %zu\n", info.arena);
printf("ordblks: %zu\n", info.ordblks);
printf("smblks: %zu\n", info.smblks);
printf("hblks: %zu\n", info.hblks);
printf("hblkhd: %zu\n", info.hblkhd);
printf("usmblks: %zu\n", info.usmblks);
printf("fsmblks: %zu\n", info.fsmblks);
printf("uordblks: %zu\n", info.uordblks);
printf("fordblks: %zu\n", info.fordblks);
printf("keepcost: %zu\n", info.keepcost);
printf("mallinfo:\n");
printf("arena: %zu\n", (size_t)info.arena);
printf("ordblks: %zu\n", (size_t)info.ordblks);
printf("smblks: %zu\n", (size_t)info.smblks);
printf("hblks: %zu\n", (size_t)info.hblks);
printf("hblkhd: %zu\n", (size_t)info.hblkhd);
printf("usmblks: %zu\n", (size_t)info.usmblks);
printf("fsmblks: %zu\n", (size_t)info.fsmblks);
printf("uordblks: %zu\n", (size_t)info.uordblks);
printf("fordblks: %zu\n", (size_t)info.fordblks);
printf("keepcost: %zu\n", (size_t)info.keepcost);
#endif
}
OPTNONE int main(void) {
void *a[4];
a[0] = malloc(1024 * 1024 * 1024);
a[1] = malloc(16);
a[2] = malloc(32);
a[3] = malloc(64);
print_mallinfo();
free(a[0]);
free(a[1]);
free(a[2]);
free(a[3]);
printf("\n");
print_mallinfo();
}

test/mallinfo2.c Normal file
@ -0,0 +1,44 @@
#include <stdio.h>
#include <stdlib.h>
#if defined(__GLIBC__)
#include <malloc.h>
#endif
#include "test_util.h"
static void print_mallinfo2(void) {
#if defined(__GLIBC__)
struct mallinfo2 info = mallinfo2();
printf("mallinfo2:\n");
printf("arena: %zu\n", (size_t)info.arena);
printf("ordblks: %zu\n", (size_t)info.ordblks);
printf("smblks: %zu\n", (size_t)info.smblks);
printf("hblks: %zu\n", (size_t)info.hblks);
printf("hblkhd: %zu\n", (size_t)info.hblkhd);
printf("usmblks: %zu\n", (size_t)info.usmblks);
printf("fsmblks: %zu\n", (size_t)info.fsmblks);
printf("uordblks: %zu\n", (size_t)info.uordblks);
printf("fordblks: %zu\n", (size_t)info.fordblks);
printf("keepcost: %zu\n", (size_t)info.keepcost);
#endif
}
OPTNONE int main(void) {
void *a[4];
a[0] = malloc(1024 * 1024 * 1024);
a[1] = malloc(16);
a[2] = malloc(32);
a[3] = malloc(64);
print_mallinfo2();
free(a[0]);
free(a[1]);
free(a[2]);
free(a[3]);
printf("\n");
print_mallinfo2();
}

View File

@ -1,16 +1,21 @@
#include <pthread.h>
#include <stdio.h>
#if defined(__GLIBC__) || defined(__ANDROID__)
#include <malloc.h>
#endif
__attribute__((optimize(0)))
void leak_memory(void) {
(void)malloc(1024 * 1024 * 1024);
(void)malloc(16);
(void)malloc(32);
(void)malloc(4096);
#include "test_util.h"
#include "../util.h"
OPTNONE static void leak_memory(void) {
(void)!malloc(1024 * 1024 * 1024);
(void)!malloc(16);
(void)!malloc(32);
(void)!malloc(4096);
}
void *do_work(void *p) {
static void *do_work(UNUSED void *p) {
leak_memory();
return NULL;
}
@ -24,5 +29,7 @@ int main(void) {
pthread_join(thread[i], NULL);
}
#if defined(__GLIBC__) || defined(__ANDROID__)
malloc_info(0, stdout);
#endif
}

test/malloc_object_size.c Normal file
@ -0,0 +1,12 @@
#include <stdbool.h>
#include <stdlib.h>
#include "test_util.h"
size_t malloc_object_size(void *ptr);
OPTNONE int main(void) {
char *p = malloc(16);
size_t size = malloc_object_size(p);
return size != (SLAB_CANARY ? 24 : 32);
}

View File

@ -0,0 +1,12 @@
#include <stdbool.h>
#include <stdlib.h>
#include "test_util.h"
size_t malloc_object_size(void *ptr);
OPTNONE int main(void) {
char *p = malloc(16);
size_t size = malloc_object_size(p + 5);
return size != (SLAB_CANARY ? 19 : 27);
}

View File

@ -3,7 +3,7 @@
#include <stdio.h>
#include <stdlib.h>
static unsigned size_classes[] = {
static size_t size_classes[] = {
/* large */ 4 * 1024 * 1024,
/* 0 */ 0,
/* 16 */ 16, 32, 48, 64, 80, 96, 112, 128,
@ -32,9 +32,9 @@ int main(void) {
void *p[N_SIZE_CLASSES];
for (unsigned i = 0; i < N_SIZE_CLASSES; i++) {
unsigned size = size_classes[i];
size_t size = size_classes[i];
p[i] = malloc(size);
if (!p) {
if (!p[i]) {
return 1;
}
void *q = malloc(size);

View File

@ -0,0 +1,15 @@
#include <malloc.h>
#include <stdlib.h>
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(256 * 1024);
if (!p) {
return 1;
}
size_t size = malloc_usable_size(p);
*(p + size) = 0;
free(p);
return 0;
}

View File

@ -0,0 +1,15 @@
#include <malloc.h>
#include <stdlib.h>
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(256 * 1024);
if (!p) {
return 1;
}
size_t size = malloc_usable_size(p);
*(p + size + 7) = 0;
free(p);
return 0;
}

View File

@ -0,0 +1,15 @@
#include <malloc.h>
#include <stdlib.h>
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(8);
if (!p) {
return 1;
}
size_t size = malloc_usable_size(p);
*(p + size) = 1;
free(p);
return 0;
}

View File

@ -0,0 +1,16 @@
#include <malloc.h>
#include <stdlib.h>
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(8);
if (!p) {
return 1;
}
size_t size = malloc_usable_size(p);
// XOR is used instead of a fixed store so the canary byte is guaranteed to change;
// a fixed value would match the random canary about 1 time in 256, leaving the
// overflow undetected and the test failing
*(p + size + 7) ^= 1;
free(p);
return 0;
}
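These four overflow tests split along the large/small boundary, and the expectations in test_smc.py later in this diff show why: the large variants die with SIGSEGV (the byte past malloc_usable_size() lands on an inaccessible guard page), while the small variants abort with "canary corrupted" when free() checks the random canary stored after the slot's usable bytes. The following is a simplified, self-contained sketch of that canary idea only; it is not hardened_malloc's layout or code, and the constants (24 usable bytes, 32-byte slot, fixed canary value) are stand-ins.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { USABLE = 24, SLOT = 32 };   // hypothetical: 32-byte slot, 8-byte canary

static void check_canary(const unsigned char *slot, uint64_t expected) {
    uint64_t found;
    memcpy(&found, slot + USABLE, sizeof(found));
    if (found != expected) {
        // the real allocator reports this via fatal_error()
        fprintf(stderr, "fatal allocator error: canary corrupted\n");
        abort();
    }
}

int main(void) {
    uint64_t canary = 0x0123456789abcdefULL;   // stand-in for a random per-slab value
    unsigned char slot[SLOT] = {0};
    memcpy(slot + USABLE, &canary, sizeof(canary));

    slot[USABLE] ^= 1;             // one-byte overflow past the usable bytes
    check_canary(slot, canary);    // aborts: the stored canary no longer matches
    return 0;
}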

View File

@ -0,0 +1,21 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(256 * 1024);
if (!p) {
return 1;
}
memset(p, 'a', 16);
free(p);
for (size_t i = 0; i < 256 * 1024; i++) {
printf("%x\n", p[i]);
if (p[i] != '\0') {
return 1;
}
}
return 0;
}

View File

@ -2,8 +2,9 @@
#include <stdlib.h>
#include <string.h>
__attribute__((optimize(0)))
int main(void) {
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(16);
if (!p) {
return 1;
@ -12,6 +13,9 @@ int main(void) {
free(p);
for (size_t i = 0; i < 16; i++) {
printf("%x\n", p[i]);
if (p[i] != '\0') {
return 1;
}
}
return 0;
}

View File

@ -1,8 +1,9 @@
#include <stdlib.h>
#include <stdio.h>
__attribute__((optimize(0)))
int main(void) {
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(0);
if (!p) {
return 1;

33
test/realloc_init.c Normal file
View File

@ -0,0 +1,33 @@
#include <pthread.h>
#include <stdlib.h>
static void *thread_func(void *arg) {
arg = realloc(arg, 1024);
if (!arg) {
exit(EXIT_FAILURE);
}
free(arg);
return NULL;
}
int main(void) {
void *mem = realloc(NULL, 12);
if (!mem) {
return EXIT_FAILURE;
}
pthread_t thread;
int r = pthread_create(&thread, NULL, thread_func, mem);
if (r != 0) {
return EXIT_FAILURE;
}
r = pthread_join(thread, NULL);
if (r != 0) {
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}

View File

@ -1,25 +0,0 @@
delete_type_size_mismatch
double_free_large
double_free_large_delayed
double_free_small
double_free_small_delayed
eight_byte_overflow_large
eight_byte_overflow_small
invalid_free_protected
invalid_free_small_region
invalid_free_small_region_far
invalid_free_unprotected
read_after_free_large
read_after_free_small
read_zero_size
string_overflow
unaligned_free_large
unaligned_free_small
uninitialized_free
uninitialized_malloc_usable_size
uninitialized_realloc
write_after_free_large
write_after_free_large_reuse
write_after_free_small
write_after_free_small_reuse
write_zero_size

View File

@ -1,31 +0,0 @@
EXECUTABLES := \
double_free_large \
double_free_large_delayed \
double_free_small \
double_free_small_delayed \
unaligned_free_large \
unaligned_free_small \
read_after_free_large \
read_after_free_small \
write_after_free_large \
write_after_free_large_reuse \
write_after_free_small \
write_after_free_small_reuse \
read_zero_size \
write_zero_size \
invalid_free_protected \
invalid_free_unprotected \
invalid_free_small_region \
invalid_free_small_region_far \
uninitialized_free \
uninitialized_realloc \
uninitialized_malloc_usable_size \
eight_byte_overflow_small \
eight_byte_overflow_large \
string_overflow \
delete_type_size_mismatch
all: $(EXECUTABLES)
clean:
rm -f $(EXECUTABLES)

View File

@ -1,12 +0,0 @@
#include <stdlib.h>
__attribute__((optimize(0)))
int main(void) {
char *p = malloc(128 * 1024);
if (!p) {
return 1;
}
*(p + 128 * 1024 + 7) = 0;
free(p);
return 0;
}

View File

@ -1,12 +0,0 @@
#include <stdlib.h>
__attribute__((optimize(0)))
int main(void) {
char *p = malloc(8);
if (!p) {
return 1;
}
*(p + 8 + 7) = 0;
free(p);
return 0;
}

View File

@ -1,17 +0,0 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
__attribute__((optimize(0)))
int main(void) {
char *p = malloc(128 * 1024);
if (!p) {
return 1;
}
memset(p, 'a', 16);
free(p);
for (size_t i = 0; i < 128 * 1024; i++) {
printf("%x\n", p[i]);
}
return 0;
}

View File

@ -1,14 +0,0 @@
#include <stdlib.h>
#include <string.h>
__attribute__((optimize(0)))
int main(void) {
char *p = malloc(128 * 1024);
if (!p) {
return 1;
}
free(p);
char *q = malloc(128 * 1024);
p[64 * 1024 + 1] = 'a';
return 0;
}

View File

@ -4,8 +4,9 @@
#include <malloc.h>
__attribute__((optimize(0)))
int main(void) {
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(16);
if (!p) {
return 1;

242
test/test_smc.py Normal file
View File

@ -0,0 +1,242 @@
import os
import subprocess
import unittest
class TestSimpleMemoryCorruption(unittest.TestCase):
@classmethod
def setUpClass(self):
self.dir = os.path.dirname(os.path.realpath(__file__))
def run_test(self, test_name):
sub = subprocess.Popen(self.dir + "/" + test_name,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = sub.communicate()
return stdout, stderr, sub.returncode
def test_delete_type_size_mismatch(self):
_stdout, stderr, returncode = self.run_test(
"delete_type_size_mismatch")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode(
"utf-8"), "fatal allocator error: sized deallocation mismatch (small)\n")
def test_double_free_large_delayed(self):
_stdout, stderr, returncode = self.run_test(
"double_free_large_delayed")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode("utf-8"),
"fatal allocator error: invalid free\n")
def test_double_free_large(self):
_stdout, stderr, returncode = self.run_test("double_free_large")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode("utf-8"),
"fatal allocator error: invalid free\n")
def test_double_free_small_delayed(self):
_stdout, stderr, returncode = self.run_test(
"double_free_small_delayed")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode("utf-8"),
"fatal allocator error: double free (quarantine)\n")
def test_double_free_small(self):
_stdout, stderr, returncode = self.run_test("double_free_small")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode("utf-8"),
"fatal allocator error: double free (quarantine)\n")
def test_overflow_large_1_byte(self):
_stdout, _stderr, returncode = self.run_test(
"overflow_large_1_byte")
self.assertEqual(returncode, -11)
def test_overflow_large_8_byte(self):
_stdout, _stderr, returncode = self.run_test(
"overflow_large_8_byte")
self.assertEqual(returncode, -11)
def test_overflow_small_1_byte(self):
_stdout, stderr, returncode = self.run_test(
"overflow_small_1_byte")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode("utf-8"),
"fatal allocator error: canary corrupted\n")
def test_overflow_small_8_byte(self):
_stdout, stderr, returncode = self.run_test(
"overflow_small_8_byte")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode("utf-8"),
"fatal allocator error: canary corrupted\n")
def test_invalid_free_protected(self):
_stdout, stderr, returncode = self.run_test("invalid_free_protected")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode("utf-8"),
"fatal allocator error: invalid free\n")
def test_invalid_free_small_region_far(self):
_stdout, stderr, returncode = self.run_test(
"invalid_free_small_region_far")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode(
"utf-8"), "fatal allocator error: invalid free within a slab yet to be used\n")
def test_invalid_free_small_region(self):
_stdout, stderr, returncode = self.run_test(
"invalid_free_small_region")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode("utf-8"),
"fatal allocator error: double free\n")
def test_invalid_free_unprotected(self):
_stdout, stderr, returncode = self.run_test("invalid_free_unprotected")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode("utf-8"),
"fatal allocator error: invalid free\n")
def test_invalid_malloc_usable_size_small_quarantene(self):
_stdout, stderr, returncode = self.run_test(
"invalid_malloc_usable_size_small_quarantine")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode(
"utf-8"), "fatal allocator error: invalid malloc_usable_size (quarantine)\n")
def test_invalid_malloc_usable_size_small(self):
_stdout, stderr, returncode = self.run_test(
"invalid_malloc_usable_size_small")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode(
"utf-8"), "fatal allocator error: invalid malloc_usable_size\n")
def test_read_after_free_large(self):
_stdout, _stderr, returncode = self.run_test("read_after_free_large")
self.assertEqual(returncode, -11)
def test_read_after_free_small(self):
stdout, _stderr, returncode = self.run_test("read_after_free_small")
self.assertEqual(returncode, 0)
self.assertEqual(stdout.decode("utf-8"),
"0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n")
def test_read_zero_size(self):
_stdout, _stderr, returncode = self.run_test("read_zero_size")
self.assertEqual(returncode, -11)
def test_string_overflow(self):
stdout, _stderr, returncode = self.run_test("string_overflow")
self.assertEqual(returncode, 0)
self.assertEqual(stdout.decode("utf-8"), "overflow by 0 bytes\n")
def test_unaligned_free_large(self):
_stdout, stderr, returncode = self.run_test("unaligned_free_large")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode("utf-8"),
"fatal allocator error: invalid free\n")
def test_unaligned_free_small(self):
_stdout, stderr, returncode = self.run_test("unaligned_free_small")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode("utf-8"),
"fatal allocator error: invalid unaligned free\n")
def test_unaligned_malloc_usable_size_small(self):
_stdout, stderr, returncode = self.run_test(
"unaligned_malloc_usable_size_small")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode("utf-8"),
"fatal allocator error: invalid unaligned malloc_usable_size\n")
def test_uninitialized_free(self):
_stdout, stderr, returncode = self.run_test("uninitialized_free")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode("utf-8"),
"fatal allocator error: invalid free\n")
def test_uninitialized_malloc_usable_size(self):
_stdout, stderr, returncode = self.run_test(
"uninitialized_malloc_usable_size")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode("utf-8"),
"fatal allocator error: invalid malloc_usable_size\n")
def test_uninitialized_realloc(self):
_stdout, stderr, returncode = self.run_test("uninitialized_realloc")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode("utf-8"),
"fatal allocator error: invalid realloc\n")
def test_write_after_free_large_reuse(self):
_stdout, _stderr, returncode = self.run_test(
"write_after_free_large_reuse")
self.assertEqual(returncode, -11)
def test_write_after_free_large(self):
_stdout, _stderr, returncode = self.run_test("write_after_free_large")
self.assertEqual(returncode, -11)
def test_write_after_free_small_reuse(self):
_stdout, stderr, returncode = self.run_test(
"write_after_free_small_reuse")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode("utf-8"),
"fatal allocator error: detected write after free\n")
def test_write_after_free_small(self):
_stdout, stderr, returncode = self.run_test("write_after_free_small")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode("utf-8"),
"fatal allocator error: detected write after free\n")
def test_write_zero_size(self):
_stdout, _stderr, returncode = self.run_test("write_zero_size")
self.assertEqual(returncode, -11)
def test_malloc_object_size(self):
_stdout, _stderr, returncode = self.run_test("malloc_object_size")
self.assertEqual(returncode, 0)
def test_malloc_object_size_offset(self):
_stdout, _stderr, returncode = self.run_test(
"malloc_object_size_offset")
self.assertEqual(returncode, 0)
def test_invalid_malloc_object_size_small(self):
_stdout, stderr, returncode = self.run_test(
"invalid_malloc_object_size_small")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode(
"utf-8"), "fatal allocator error: invalid malloc_object_size\n")
def test_invalid_malloc_object_size_small_quarantine(self):
_stdout, stderr, returncode = self.run_test(
"invalid_malloc_object_size_small_quarantine")
self.assertEqual(returncode, -6)
self.assertEqual(stderr.decode(
"utf-8"), "fatal allocator error: invalid malloc_object_size (quarantine)\n")
def test_impossibly_large_malloc(self):
_stdout, stderr, returncode = self.run_test(
"impossibly_large_malloc")
self.assertEqual(returncode, 0)
def test_uninitialized_read_small(self):
_stdout, stderr, returncode = self.run_test(
"uninitialized_read_small")
self.assertEqual(returncode, 0)
def test_uninitialized_read_large(self):
_stdout, stderr, returncode = self.run_test(
"uninitialized_read_large")
self.assertEqual(returncode, 0)
def test_realloc_init(self):
_stdout, _stderr, returncode = self.run_test(
"realloc_init")
self.assertEqual(returncode, 0)
if __name__ == '__main__':
unittest.main()
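The return-code assertions follow the POSIX convention surfaced by Python's subprocess: a child killed by a signal reports returncode == -signum, so -6 is SIGABRT (the abort() that fatal_error() ends with) and -11 is SIGSEGV (a fault on a guard page or on memory made inaccessible after free). The small C program below is illustrative only, not part of the suite, and shows where that 6 comes from at the waitpid() level.

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void) {
    pid_t pid = fork();
    if (pid < 0) {
        return 1;
    }
    if (pid == 0) {
        abort();   // what fatal_error() ultimately does
    }
    int status;
    if (waitpid(pid, &status, 0) < 0) {
        return 1;
    }
    if (WIFSIGNALED(status)) {
        printf("child killed by signal %d\n", WTERMSIG(status));   // prints 6 (SIGABRT)
    }
    return 0;
}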

10
test/test_util.h Normal file
View File

@ -0,0 +1,10 @@
#ifndef TEST_UTIL_H
#define TEST_UTIL_H
#ifdef __clang__
#define OPTNONE __attribute__((optnone))
#else
#define OPTNONE __attribute__((optimize(0)))
#endif
#endif
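For context, OPTNONE exists because these tests deliberately perform operations an optimizer is allowed to delete (stores to freed memory, allocations whose results are unused); clang spells the attribute optnone while GCC uses optimize(0), hence the two branches. The sketch below is illustrative only, inlines the same macro so it compiles on its own, and touch_after_free is a hypothetical name.

#include <stdlib.h>

#ifdef __clang__
#define OPTNONE __attribute__((optnone))
#else
#define OPTNONE __attribute__((optimize(0)))
#endif

// Without OPTNONE, an optimizing compiler may remove the write to freed memory
// below as a dead store, and the test would no longer exercise the allocator's
// write-after-free detection.
OPTNONE static void touch_after_free(void) {
    char *p = malloc(16);
    if (!p) {
        return;
    }
    free(p);
    p[0] = 'a';
}

int main(void) {
    touch_after_free();
    return 0;
}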

View File

@ -1,8 +1,9 @@
#include <stdlib.h>
__attribute__((optimize(0)))
int main(void) {
char *p = malloc(128 * 1024);
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(256 * 1024);
if (!p) {
return 1;
}

View File

@ -1,7 +1,8 @@
#include <stdlib.h>
__attribute__((optimize(0)))
int main(void) {
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(16);
if (!p) {
return 1;

View File

@ -0,0 +1,12 @@
#include <malloc.h>
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(16);
if (!p) {
return 1;
}
malloc_usable_size(p + 1);
return 0;
}

View File

@ -1,7 +1,8 @@
#include <stdlib.h>
__attribute__((optimize(0)))
int main(void) {
#include "test_util.h"
OPTNONE int main(void) {
free((void *)1);
return 0;
}

View File

@ -1,7 +1,8 @@
#include <malloc.h>
__attribute__((optimize(0)))
int main(void) {
#include "test_util.h"
OPTNONE int main(void) {
malloc_usable_size((void *)1);
return 0;
}

View File

@ -0,0 +1,14 @@
#include <stdlib.h>
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(256 * 1024);
for (unsigned i = 0; i < 256 * 1024; i++) {
if (p[i] != 0) {
return 1;
}
}
free(p);
return 0;
}

View File

@ -0,0 +1,14 @@
#include <stdlib.h>
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(8);
for (unsigned i = 0; i < 8; i++) {
if (p[i] != 0) {
return 1;
}
}
free(p);
return 0;
}

View File

@ -1,7 +1,8 @@
#include <stdlib.h>
__attribute__((optimize(0)))
int main(void) {
#include "test_util.h"
OPTNONE int main(void) {
void *p = realloc((void *)1, 16);
if (!p) {
return 1;

View File

@ -1,9 +1,9 @@
#include <stdlib.h>
#include <string.h>
__attribute__((optimize(0)))
int main(void) {
char *p = malloc(128 * 1024);
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(256 * 1024);
if (!p) {
return 1;
}

View File

@ -0,0 +1,16 @@
#include <stdlib.h>
#include <string.h>
#include "test_util.h"
#include "../util.h"
OPTNONE int main(void) {
char *p = malloc(256 * 1024);
if (!p) {
return 1;
}
free(p);
UNUSED char *q = malloc(256 * 1024);
p[64 * 1024 + 1] = 'a';
return 0;
}

View File

@ -1,8 +1,8 @@
#include <stdlib.h>
#include <string.h>
__attribute__((optimize(0)))
int main(void) {
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(128);
if (!p) {
return 1;

View File

@ -1,14 +1,15 @@
#include <stdlib.h>
#include <string.h>
__attribute__((optimize(0)))
int main(void) {
#include "test_util.h"
#include "../util.h"
OPTNONE int main(void) {
char *p = malloc(128);
if (!p) {
return 1;
}
free(p);
char *q = malloc(128);
UNUSED char *q = malloc(128);
p[65] = 'a';

View File

@ -1,8 +1,8 @@
#include <stdlib.h>
#include <stdio.h>
__attribute__((optimize(0)))
int main(void) {
#include "test_util.h"
OPTNONE int main(void) {
char *p = malloc(0);
if (!p) {
return 1;

3470
third_party/libdivide.h vendored

File diff suppressed because it is too large

10
util.c
View File

@ -4,8 +4,13 @@
#include <unistd.h>
#ifdef __ANDROID__
#include <async_safe/log.h>
#endif
#include "util.h"
#ifndef __ANDROID__
static int write_full(int fd, const char *buf, size_t length) {
do {
ssize_t bytes_written = write(fd, buf, length);
@ -21,11 +26,16 @@ static int write_full(int fd, const char *buf, size_t length) {
return 0;
}
#endif
COLD noreturn void fatal_error(const char *s) {
#ifdef __ANDROID__
async_safe_fatal("hardened_malloc: fatal allocator error: %s", s);
#else
const char *prefix = "fatal allocator error: ";
(void)(write_full(STDERR_FILENO, prefix, strlen(prefix)) != -1 &&
write_full(STDERR_FILENO, s, strlen(s)) != -1 &&
write_full(STDERR_FILENO, "\n", 1));
abort();
#endif
}
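The non-Android branch above is what produces the exact "fatal allocator error: ..." lines that test_smc.py matches on stderr, followed by abort(). write_full() (only the top of it is visible in this hunk) exists because a single write() may be interrupted or may write only part of the buffer. The function below is a generic sketch of such a loop under that assumption; write_all is a hypothetical name, not the project's write_full.

#include <errno.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>

static int write_all(int fd, const char *buf, size_t length) {
    while (length > 0) {
        ssize_t n = write(fd, buf, length);
        if (n < 0) {
            if (errno == EINTR) {
                continue;          // interrupted by a signal: retry
            }
            return -1;             // genuine error
        }
        buf += n;                  // partial write: advance and keep writing
        length -= (size_t)n;
    }
    return 0;
}

int main(void) {
    const char *msg = "fatal allocator error: example\n";
    return write_all(STDERR_FILENO, msg, strlen(msg)) == 0 ? 0 : 1;
}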

52
util.h
View File

@ -1,8 +1,12 @@
#ifndef UTIL_H
#define UTIL_H
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdnoreturn.h>
// C11 noreturn doesn't work in C++
#define noreturn __attribute__((noreturn))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
@ -26,40 +30,40 @@
#define STRINGIFY(s) #s
#define ALIAS(f) __attribute__((alias(STRINGIFY(f))))
static inline int ffzl(long x) {
return __builtin_ffsl(~x);
}
COLD noreturn void fatal_error(const char *s);
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef unsigned __int128 u128;
// use __register_atfork directly to avoid linking with libpthread for glibc < 2.28
#ifdef __GLIBC__
#if !__GLIBC_PREREQ(2, 28)
extern void *__dso_handle;
extern int __register_atfork(void (*)(void), void (*)(void), void (*)(void), void *);
#define atfork(prepare, parent, child) __register_atfork(prepare, parent, child, __dso_handle)
#endif
#endif
#define U64_WIDTH 64
#ifndef atfork
#define atfork pthread_atfork
#endif
static inline int ffz64(u64 x) {
return __builtin_ffsll(~x);
}
#ifdef CONFIG_SEAL_METADATA
// parameter must not be 0
static inline int clz64(u64 x) {
return __builtin_clzll(x);
}
// parameter must not be 0
static inline u64 log2u64(u64 x) {
return U64_WIDTH - clz64(x) - 1;
}
static inline size_t align(size_t size, size_t align) {
size_t mask = align - 1;
return (size + mask) & ~mask;
}
COLD noreturn void fatal_error(const char *s);
#if CONFIG_SEAL_METADATA
#ifdef __GLIBC__
#if __GLIBC_PREREQ(2, 27)
#define USE_PKEY
#endif
#endif
#ifndef USE_PKEY
#else
#error "CONFIG_SEAL_METADATA requires Memory Protection Key support"
#endif
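To make the bit-manipulation helpers above concrete: ffz64() returns the 1-based index of the lowest clear bit (ffs of the complement), log2u64() computes floor(log2 x) from count-leading-zeros, and align() rounds up to a power-of-two boundary by adding align - 1 and masking. The checks below transcribe those definitions so the example compiles on its own and verify a few values by hand.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t u64;
#define U64_WIDTH 64

static inline int ffz64(u64 x) {
    return __builtin_ffsll(~x);
}

// parameter must not be 0
static inline int clz64(u64 x) {
    return __builtin_clzll(x);
}

// parameter must not be 0
static inline u64 log2u64(u64 x) {
    return U64_WIDTH - clz64(x) - 1;
}

static inline size_t align(size_t size, size_t align) {
    size_t mask = align - 1;
    return (size + mask) & ~mask;
}

int main(void) {
    assert(ffz64(0x7) == 4);        // bits 0-2 set, so the lowest clear bit is bit 3 (1-based: 4)
    assert(log2u64(4096) == 12);    // 4096 == 2^12
    assert(align(13, 16) == 16);    // round 13 up to the next multiple of 16
    assert(align(32, 16) == 32);    // already-aligned sizes are unchanged
    return 0;
}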