DivestOS/Patches/LineageOS-17.1/android_bionic/0003-Graphene_Bionic_Hardening-16.patch
Tad 181519cf38 Add bionic hardening patchsets from GrapheneOS
11 b3a0c2c5db
11 5412c37195 #explicit zero
11 31456ac632 #brk
11 58ebc243ea #random
11 5323b39f7e #undefined
11 6a91d9dddb #merge
11 a042b5a0ba #vla formatting
11 9ec639de1b #pthread
11 49571a0a49 #read only
11 149cc5ccb8 #zero
11 2e613ccbe7 #fork mmap
11 e239c7dff8 #memprot pthread
11 0b03d92b7f #xor
11 de08419b82 #junk
11 897d4903e2 #guard
11 648cd68ca3 #pthread guard
11 0bc4dbcbd2 #stack rand
10 aa9cc05d07
10 a8cdbb6352 #explicit zero
10 b28302c668 #brk
10 9f8be7d07c #random
10 cb91a7ee3a #undefined
10 08279e2fdd #merge
10 6a18bd565d #vla formatting
10 2f392c2d08 #pthread
10 8bbce1bc50 #read only
10 725f61db82 #zero
10 4cd257135f #fork mmap
10 9220cf622b #memprot pthread
10 8ef71d1ffd #memprot exit
10 0eaef1abbd #xor
10 64f1cc2148 #junk
10 5c42a527cf #guard
10 5cc8c34e60 #pthread guard
10 7f61cc8a1c #stack rand
9  abdf523d26
9  e4b9b31e6f #explicit zero
9  a3a22a63d2 #brk
9  7444dbc3cf #random
9  dcd3b72ac9 #undefined
9  543e1df342 #merge
9  611e5691f7 #vla formatting
9  8de97ce864 #pthread
9  a475717042 #read only
9  7f0947cc0e #zero
9  e9751d3370 #fork mmap
9  83cd86d0d5 #memprot pthread
9  1ebb165455 #memprot exit
9  488ba483cf #xor
9  f9351d884b #junk
9  85e5bca0a5 #move

Signed-off-by: Tad <tad@spotco.us>
2022-03-15 16:56:46 -04:00

From 5cc8c34e60dbfeb1fd996bf83bb01a0443d93a8a Mon Sep 17 00:00:00 2001
From: Daniel Micay <danielmicay@gmail.com>
Date: Thu, 10 Oct 2019 22:52:49 -0400
Subject: [PATCH] move pthread_internal_t behind guard page

---
 libc/bionic/pthread_create.cpp | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/libc/bionic/pthread_create.cpp b/libc/bionic/pthread_create.cpp
index 6bbf970a0b..b4a044db18 100644
--- a/libc/bionic/pthread_create.cpp
+++ b/libc/bionic/pthread_create.cpp
@@ -218,10 +218,13 @@ int __init_thread(pthread_internal_t* thread) {
 ThreadMapping __allocate_thread_mapping(size_t stack_size, size_t stack_guard_size) {
   const StaticTlsLayout& layout = __libc_shared_globals()->static_tls_layout;
 
-  // Allocate in order: stack guard, stack, guard page, static TLS, guard page.
+  size_t thread_page_size = __BIONIC_ALIGN(sizeof(pthread_internal_t), PAGE_SIZE);
+
+  // Allocate in order: stack guard, stack, guard page, pthread_internal_t, static TLS, guard page.
   size_t mmap_size;
   if (__builtin_add_overflow(stack_size, stack_guard_size, &mmap_size)) return {};
   if (__builtin_add_overflow(mmap_size, PTHREAD_GUARD_SIZE, &mmap_size)) return {};
+  if (__builtin_add_overflow(mmap_size, thread_page_size, &mmap_size)) return {};
   if (__builtin_add_overflow(mmap_size, layout.size(), &mmap_size)) return {};
   if (__builtin_add_overflow(mmap_size, PTHREAD_GUARD_SIZE, &mmap_size)) return {};
 
@@ -250,9 +253,10 @@ ThreadMapping __allocate_thread_mapping(size_t stack_size, size_t stack_guard_si
     return {};
   }
 
-  char* const static_tls_space = space + stack_guard_size + stack_size + PTHREAD_GUARD_SIZE;
+  char* const thread = space + stack_guard_size + stack_size + PTHREAD_GUARD_SIZE;
+  char* const static_tls_space = thread + thread_page_size;
 
-  if (mprotect(static_tls_space, layout.size(), PROT_READ | PROT_WRITE) != 0) {
+  if (mprotect(thread, thread_page_size + layout.size(), PROT_READ | PROT_WRITE) != 0) {
     async_safe_format_log(ANDROID_LOG_WARN, "libc",
                           "pthread_create failed: couldn't mprotect R+W %zu-byte static TLS mapping region: %s",
                           layout.size(), strerror(errno));
@@ -295,13 +299,8 @@ static int __allocate_thread(pthread_attr_t* attr, bionic_tcb** tcbp, void** chi
     stack_top = static_cast<char*>(attr->stack_base) + attr->stack_size;
   }
 
-  // Carve out space from the stack for the thread's pthread_internal_t. This
-  // memory isn't counted in pthread_attr_getstacksize.
-
-  // To safely access the pthread_internal_t and thread stack, we need to find a 16-byte aligned boundary.
-  stack_top = align_down(stack_top - sizeof(pthread_internal_t), 16);
-
-  pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(stack_top);
+  pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(
+      mapping.static_tls - __BIONIC_ALIGN(sizeof(pthread_internal_t), PAGE_SIZE));
   if (!stack_clean) {
     // If thread was not allocated by mmap(), it may not have been cleared to zero.
     // So assume the worst and zero it.
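
For readers skimming the hunks above: the change gives pthread_internal_t its own page-aligned slice of the thread mapping, placed between the guard page that follows the stack and the static TLS area, instead of carving it out of the top of the stack. Below is a rough standalone sketch of the resulting size arithmetic; kGuardSize, kThreadStructSize and the values in main() are invented placeholders for PTHREAD_GUARD_SIZE, sizeof(pthread_internal_t) and real stack/TLS sizes, so this is illustrative only, not bionic's implementation.

// Sketch of the overflow-checked size computation for the layout
//   [stack guard][stack][guard page][pthread_internal_t page(s)][static TLS][guard page]
#include <cstddef>
#include <cstdio>

static constexpr size_t kPageSize = 4096;
static constexpr size_t kGuardSize = 4096;         // placeholder for PTHREAD_GUARD_SIZE
static constexpr size_t kThreadStructSize = 1024;  // placeholder for sizeof(pthread_internal_t)

static size_t page_align(size_t v) { return (v + kPageSize - 1) & ~(kPageSize - 1); }

// Returns 0 on overflow, otherwise the total size to mmap for one thread.
// (bionic additionally page-aligns the final mmap size.)
static size_t thread_mapping_size(size_t stack_size, size_t stack_guard_size, size_t tls_size) {
  const size_t thread_page_size = page_align(kThreadStructSize);
  size_t total;
  if (__builtin_add_overflow(stack_size, stack_guard_size, &total)) return 0;
  if (__builtin_add_overflow(total, kGuardSize, &total)) return 0;        // guard page after the stack
  if (__builtin_add_overflow(total, thread_page_size, &total)) return 0;  // pthread_internal_t page(s)
  if (__builtin_add_overflow(total, tls_size, &total)) return 0;          // static TLS
  if (__builtin_add_overflow(total, kGuardSize, &total)) return 0;        // trailing guard page
  return total;
}

int main() {
  size_t size = thread_mapping_size(512 * 1024, kGuardSize, 8 * 1024);
  printf("thread mapping: %zu bytes\n", size);
  return 0;
}

With that ordering, a linear overrun of the stack hits the guard page before it can reach the thread descriptor, which is the point of the "move pthread_internal_t behind guard page" change.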