Compare commits


No commits in common. "2024011600-redfin" and "13" have entirely different histories.

17 changed files with 77 additions and 1082 deletions

.github/workflows/build-and-test.yml

@@ -9,28 +9,14 @@ on:
 jobs:
   build-ubuntu-gcc:
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        version: [12]
     steps:
       - uses: actions/checkout@v4
-      - name: Setting up gcc version
-        run: |
-          sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-${{ matrix.version }} 100
-          sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${{ matrix.version }} 100
       - name: Build
         run: make test
   build-ubuntu-clang:
     runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        version: [14, 15]
    steps:
       - uses: actions/checkout@v4
-      - name: Setting up clang version
-        run: |
-          sudo update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-${{ matrix.version }} 100
-          sudo update-alternatives --install /usr/bin/clang clang /usr/bin/clang-${{ matrix.version }} 100
       - name: Build
         run: CC=clang CXX=clang++ make test
   build-musl:

Android.bp

@@ -73,9 +73,6 @@ cc_library {
         debuggable: {
             cflags: ["-DLABEL_MEMORY"],
         },
-        device_has_arm_mte: {
-            cflags: ["-DHAS_ARM_MTE", "-march=armv9-a+memtag"]
-        },
     },
     apex_available: [
         "com.android.runtime",

CREDITS

@@ -54,230 +54,3 @@ libdivide:
 random.c get_random_{type}_uniform functions are based on Fast Random Integer
 Generation in an Interval by Daniel Lemire
-
-arm_mte.h arm_mte_tag_and_clear_mem function contents were copied from storeTags function in scudo:
==============================================================================
The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
==============================================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
---- LLVM Exceptions to the Apache 2.0 License ----
As an exception, if, as a result of your compiling your source code, portions
of this Software are embedded into an Object form of such source code, you
may redistribute such embedded portions in such Object form without complying
with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
In addition, if you combine or link compiled forms of this Software with
software that is licensed under the GPLv2 ("Combined Software") and if a
court of competent jurisdiction determines that the patent provision (Section
3), the indemnity provision (Section 9) or other Section of the License
conflicts with the conditions of the GPLv2, you may retroactively and
prospectively choose to deem waived or otherwise exclude such Section(s) of
the License, but only in their entirety and only with respect to the Combined
Software.
==============================================================================

README.md

@@ -159,9 +159,6 @@ line to the `/etc/ld.so.preload` configuration file:
 The format of this configuration file is a whitespace-separated list, so it's
 good practice to put each library on a separate line.
 
-On Debian systems `libhardened_malloc.so` should be installed into `/usr/lib/`
-to avoid preload failures caused by AppArmor profile restrictions.
-
 Using the `LD_PRELOAD` environment variable to load it on a case-by-case basis
 will not work when `AT_SECURE` is set such as with setuid binaries. It's also
 generally not a recommended approach for production usage. The recommendation
@@ -473,16 +470,16 @@ was a bit less important and if a core goal was finding latent bugs.
 * Errors other than ENOMEM from mmap, munmap, mprotect and mremap treated
   as fatal, which can help to detect memory management gone wrong elsewhere
   in the process.
-* Memory tagging for slab allocations via MTE on ARMv8.5+
+* [future] Memory tagging for slab allocations via MTE on ARMv8.5+
     * random memory tags as the baseline, providing probabilistic protection
       against various forms of memory corruption
     * dedicated tag for free slots, set on free, for deterministic protection
      against accessing freed memory
-    * store previous random tag within freed slab allocations, and increment it
-      to get the next tag for that slot to provide deterministic use-after-free
-      detection through multiple cycles of memory reuse
     * guarantee distinct tags for adjacent memory allocations by incrementing
       past matching values for deterministic detection of linear overflows
+    * [future] store previous random tag and increment it to get the next tag
+      for that slot to provide deterministic use-after-free detection through
+      multiple cycles of memory reuse
 
 ## Randomness
@@ -724,48 +721,77 @@ freeing as there would be if the kernel supported these features directly.
 ## Memory tagging
 
-Random tags are set for all slab allocations when allocated, with 5 excluded values:
-
-1. the default `0` tag
-2. a statically *reserved free tag*
-3. the previous tag used for the slot
-4. the current (or previous) tag used for the slot to the left
-5. the current (or previous) tag used for the slot to the right
-
-When a slab allocation is freed, the *reserved free tag* is set for the slot.
-
-This ensures the following properties:
-
-- Linear overflows are deterministically detected.
-- Use-after-free are deterministically detected until the freed slot goes through
-  both the random and FIFO quarantines, gets allocated again, goes through both
-  quarantines again and then finally gets allocated again for a 2nd time.
-- Since the default `0` tag isn't used, untagged memory can't access malloc allocations
-  and vice versa, although it may make sense to reuse the default tag for free
-  data to avoid reducing the possible random tags from 15 to 14, since freed
-  data is always zeroed anyway.
-
-Slab allocations are done in a statically reserved region for each size class
-and all metadata is in a statically reserved region, so interactions between
-different uses of the same address space is not applicable.
-
-Large allocations beyond the largest slab allocation size class (128k by
-default) are guaranteed to have randomly sized guard regions to the left and
-right. Random and FIFO address space quarantines provide use-after-free
-detection. We need to test whether the cost of random tags is acceptable to
-enable them by default, since they would be useful for:
-
-- probabilistic detection of overflows
-- probabilistic detection of use-after-free once the address space is
-  out of the quarantine and reused for another allocation
-- deterministic detection of use-after-free for reuse by another allocator.
-
-When memory tagging is enabled, checking for write-after-free at allocation
-time and checking canaries are both disabled. Canaries will be more thoroughly
-disabled when using memory tagging in the future, but Android currently has
-[very dynamic memory tagging support](https://source.android.com/docs/security/test/memory-safety/arm-mte)
-where it can be enabled/disabled at any time which creates a barrier to
-optimizing by disabling redundant features.
+Integrating extensive support for ARMv8.5 memory tagging is planned and this
+section will be expanded to cover the details on the chosen design. The approach
+for slab allocations is currently covered, but it can also be used for the
+allocator metadata region and large allocations.
+
+Memory allocations are already always multiples of naturally aligned 16 byte
+units, so memory tags are a natural fit into a malloc implementation due to the
+16 byte alignment requirement. The only extra memory consumption will come from
+the hardware supported storage for the tag values (4 bits per 16 bytes).
+
+The baseline policy will be to generate random tags for each slab allocation
+slot on first use. The highest value will be reserved for marking freed memory
+allocations to detect any accesses to freed memory so it won't be part of the
+generated range. Adjacent slots will be guaranteed to have distinct memory tags
+in order to guarantee that linear overflows are detected. There are a few ways
+of implementing this and it will end up depending on the performance costs of
+different approaches. If there's an efficient way to fetch the adjacent tag
+values without wasting extra memory, it will be possible to check for them and
+skip them either by generating a new random value in a loop or incrementing
+past them since the tiny bit of bias wouldn't matter. Another approach would be
+alternating odd and even tag values but that would substantially reduce the
+overall randomness of the tags and there's very little entropy from the start.
+
+Once a slab allocation has been freed, the tag will be set to the reserved
+value for free memory and the previous tag value will be stored inside the
+allocation itself. The next time the slot is allocated, the chosen tag value
+will be the previous value incremented by one to provide use-after-free
+detection between generations of allocations. The stored tag will be wiped
+before retagging the memory, to avoid leaking it and as part of preserving the
+security property of newly allocated memory being zeroed due to zero-on-free.
+It will eventually wrap all the way around, but this ends up providing a strong
+guarantee for many allocation cycles due to the combination of 4 bit tags with
+the FIFO quarantine feature providing delayed free. It also benefits from
+random slot allocation and the randomized portion of delayed free, which result
+in a further delay along with preventing a deterministic bypass by forcing a
+reuse after a certain number of allocation cycles. Similarly to the initial tag
+generation, tag values for adjacent allocations will be skipped by incrementing
+past them.
+
+For example, consider this slab of allocations that are not yet used with 15
+representing the tag for free memory. For the sake of simplicity, there will be
+no quarantine or other slabs for this example:
+
+| 15 | 15 | 15 | 15 | 15 | 15 |
+
+Three slots are randomly chosen for allocations, with random tags assigned (2,
+7, 14) since these slots haven't ever been used and don't have saved values:
+
+| 15 | 2 | 15 | 7 | 14 | 15 |
+
+The 2nd allocation slot is freed, and is set back to the tag for free memory
+(15), but with the previous tag value stored in the freed space:
+
+| 15 | 15 | 15 | 7 | 14 | 15 |
+
+The first slot is allocated for the first time, receiving the random value 3:
+
+| 3 | 15 | 15 | 7 | 14 | 15 |
+
+The 2nd slot is randomly chosen again, so the previous tag (2) is retrieved and
+incremented to 3 as part of the use-after-free mitigation. An adjacent
+allocation already uses the tag 3, so the tag is further incremented to 4 (it
+would be incremented to 5 if one of the adjacent tags was 4):
+
+| 3 | 4 | 15 | 7 | 14 | 15 |
+
+The last slot is randomly chosen for the next allocation, and is assigned the
+random value 14. However, it's placed next to an allocation with the tag 14 so
+the tag is incremented and wraps around to 0:
+
+| 3 | 4 | 15 | 7 | 14 | 0 |
 
 ## API extensions
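
Both versions of the section above describe the same underlying tag-selection policy: a random tag on first use of a slot, a reserved tag for freed memory, incrementing the stored previous tag on reuse, and skipping past the tags of adjacent slots. A minimal C sketch of that policy, following the five exclusions listed in the removed (`-`) text; note the planned-design (`+`) walkthrough differs in that it permits the default 0 tag, as its final wrap-around example shows. `RESERVED_TAG` matches the removed memtag.h, and `random_tag` is assumed to come from the allocator's RNG:

    // Illustrative sketch only, not the allocator's exact code.
    #include <stdint.h>

    #define RESERVED_TAG 15

    // Pick a tag for a slab slot, excluding: the default 0 tag, the reserved
    // free tag, the slot's previous tag and the current tags of both
    // neighbors. Excluded values are skipped by incrementing past them.
    static uint8_t choose_tag(uint8_t random_tag, uint8_t prev,
                              uint8_t left, uint8_t right) {
        uint16_t excluded = (1u << 0) | (1u << RESERVED_TAG) |
                            (1u << prev) | (1u << left) | (1u << right);
        uint8_t tag = random_tag & 0xf;
        while (excluded & (1u << tag)) {
            tag = (uint8_t) ((tag + 1) & 0xf); // wrap within the 4-bit tag space
        }
        return tag;
    }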

androidtest/Android.bp

@@ -1,25 +0,0 @@
java_test_host {
    name: "HMallocTest",
    srcs: [
        "src/**/*.java",
    ],
    libs: [
        "tradefed",
        "compatibility-tradefed",
        "compatibility-host-util",
    ],
    static_libs: [
        "cts-host-utils",
        "frameworks-base-hostutils",
    ],
    test_suites: [
        "general-tests",
    ],
    data_device_bins_64: [
        "memtag_test",
    ],
}

androidtest/AndroidTest.xml

@@ -1,13 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<configuration description="hardened_malloc test">
    <target_preparer class="com.android.compatibility.common.tradefed.targetprep.FilePusher">
        <option name="cleanup" value="true" />
        <option name="push" value="memtag_test->/data/local/tmp/memtag_test" />
    </target_preparer>
    <test class="com.android.compatibility.common.tradefed.testtype.JarHostTest" >
        <option name="jar" value="HMallocTest.jar" />
    </test>
</configuration>

androidtest/memtag/Android.bp

@@ -1,16 +0,0 @@
cc_test {
    name: "memtag_test",
    srcs: ["memtag_test.cc"],
    cflags: [
        "-Wall",
        "-Werror",
        "-Wextra",
        "-O0",
    ],
    compile_multilib: "64",
    sanitize: {
        memtag_heap: true,
    },
}

androidtest/memtag/memtag_test.cc

@@ -1,297 +0,0 @@
// needed to unconditionally enable assertions
#undef NDEBUG
#include <assert.h>
#include <malloc.h>
#include <signal.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/utsname.h>
#include <unistd.h>
#include <climits>
#include <cmath>
#include <functional>
#include <map>
#include <set>
#include <string>
#include <unordered_map>
using namespace std;
using u8 = uint8_t;
using uptr = uintptr_t;
using u64 = uint64_t;
const size_t DEFAULT_ALLOC_SIZE = 8;
const size_t CANARY_SIZE = 8;
void do_context_switch() {
utsname s;
uname(&s);
}
u8 get_pointer_tag(void *ptr) {
return (((uptr) ptr) >> 56) & 0xf;
}
void *untag_pointer(void *ptr) {
const uintptr_t mask = UINTPTR_MAX >> 8;
return (void *) ((uintptr_t) ptr & mask);
}
// This test checks that slab slot allocation uses a tag that is distinct from the tags of its
// neighbors and from the tag of the previous allocation that used the same slot
void tag_distinctness() {
// 0 and 15 are reserved
const int min_tag = 1;
const int max_tag = 14;
struct SizeClass {
int size;
int slot_cnt;
};
// values from size_classes[] and size_class_slots[] in h_malloc.c
SizeClass size_classes[] = {
{ .size = 16, .slot_cnt = 256, },
{ .size = 32, .slot_cnt = 128, },
// this size class is used by allocations that are made by the addr_tag_map, which breaks
// tag distinctness checks
// { .size = 48, .slot_cnt = 85, },
{ .size = 64, .slot_cnt = 64, },
{ .size = 80, .slot_cnt = 51, },
{ .size = 96, .slot_cnt = 42, },
{ .size = 112, .slot_cnt = 36, },
{ .size = 128, .slot_cnt = 64, },
{ .size = 160, .slot_cnt = 51, },
{ .size = 192, .slot_cnt = 64, },
{ .size = 224, .slot_cnt = 54, },
{ .size = 10240, .slot_cnt = 6, },
{ .size = 20480, .slot_cnt = 1, },
};
int tag_usage[max_tag + 1] = {0};
for (size_t sc_idx = 0; sc_idx < sizeof(size_classes) / sizeof(SizeClass); ++sc_idx) {
SizeClass &sc = size_classes[sc_idx];
const size_t full_alloc_size = sc.size;
const size_t alloc_size = full_alloc_size - CANARY_SIZE;
// "tdc" is short for "tag distinctness check"
int left_neighbor_tdc_cnt = 0;
int right_neighbor_tdc_cnt = 0;
int prev_alloc_tdc_cnt = 0;
int iter_cnt = 600;
unordered_map<uptr, u8> addr_tag_map;
addr_tag_map.reserve(iter_cnt * sc.slot_cnt);
u64 seen_tags = 0;
for (int iter = 0; iter < iter_cnt; ++iter) {
uptr allocations[256]; // 256 is max slot count
for (int i = 0; i < sc.slot_cnt; ++i) {
u8 *p = (u8 *) malloc(alloc_size);
assert(p);
uptr addr = (uptr) untag_pointer(p);
u8 tag = get_pointer_tag(p);
assert(tag >= min_tag && tag <= max_tag);
seen_tags |= 1 << tag;
++tag_usage[tag];
// check most recent tags of left and right neighbors
auto left = addr_tag_map.find(addr - full_alloc_size);
if (left != addr_tag_map.end()) {
assert(left->second != tag);
++left_neighbor_tdc_cnt;
}
auto right = addr_tag_map.find(addr + full_alloc_size);
if (right != addr_tag_map.end()) {
assert(right->second != tag);
++right_neighbor_tdc_cnt;
}
// check previous tag of this slot
auto prev = addr_tag_map.find(addr);
if (prev != addr_tag_map.end()) {
assert(prev->second != tag);
++prev_alloc_tdc_cnt;
addr_tag_map.erase(addr);
}
addr_tag_map.emplace(addr, tag);
for (size_t j = 0; j < alloc_size; ++j) {
// check that slot is zeroed
assert(p[j] == 0);
// check that slot is readable and writable
p[j]++;
}
allocations[i] = addr;
}
// free some of allocations to allow their slots to be reused
for (int i = sc.slot_cnt - 1; i >= 0; i -= 2) {
free((void *) allocations[i]);
}
}
// check that all of the tags were used, except reserved ones
assert(seen_tags == (0xffff & ~(1 << 0 | 1 << 15)));
printf("size_class\t%i\t" "tdc_left %i\t" "tdc_right %i\t" "tdc_prev_alloc %i\n",
sc.size, left_neighbor_tdc_cnt, right_neighbor_tdc_cnt, prev_alloc_tdc_cnt);
// make sure tag distinctness checks were actually performed
int min_tdc_cnt = sc.slot_cnt * iter_cnt / 5;
assert(prev_alloc_tdc_cnt > min_tdc_cnt);
if (sc.slot_cnt > 1) {
assert(left_neighbor_tdc_cnt > min_tdc_cnt);
assert(right_neighbor_tdc_cnt > min_tdc_cnt);
}
// async tag check failures are reported on context switch
do_context_switch();
}
printf("\nTag use counters:\n");
int min = INT_MAX;
int max = 0;
double geomean = 0.0;
for (int i = min_tag; i <= max_tag; ++i) {
int v = tag_usage[i];
geomean += log(v);
min = std::min(min, v);
max = std::max(max, v);
printf("%i\t%i\n", i, tag_usage[i]);
}
int tag_cnt = 1 + max_tag - min_tag;
geomean = exp(geomean / tag_cnt);
double max_deviation = std::max((double) max - geomean, geomean - min);
printf("geomean: %.2f, max deviation from geomean: %.2f%%\n", geomean, (100.0 * max_deviation) / geomean);
}
u8* alloc_default() {
const size_t full_alloc_size = DEFAULT_ALLOC_SIZE + CANARY_SIZE;
set<uptr> addrs;
// make sure allocation has both left and right neighbors, otherwise overflow/underflow tests
// will fail when allocation is at the end/beginning of slab
for (;;) {
u8 *p = (u8 *) malloc(DEFAULT_ALLOC_SIZE);
assert(p);
uptr addr = (uptr) untag_pointer(p);
uptr left = addr - full_alloc_size;
if (addrs.find(left) != addrs.end()) {
uptr right = addr + full_alloc_size;
if (addrs.find(right) != addrs.end()) {
return p;
}
}
addrs.emplace(addr);
}
}
volatile u8 u8_var;
void read_after_free() {
u8 *p = alloc_default();
free(p);
volatile u8 v = p[0];
(void) v;
}
void write_after_free() {
u8 *p = alloc_default();
free(p);
p[0] = 1;
}
void underflow_read() {
u8 *p = alloc_default();
volatile u8 v = p[-1];
(void) v;
}
void underflow_write() {
u8 *p = alloc_default();
p[-1] = 1;
}
void overflow_read() {
u8 *p = alloc_default();
volatile u8 v = p[DEFAULT_ALLOC_SIZE + CANARY_SIZE];
(void) v;
}
void overflow_write() {
u8 *p = alloc_default();
p[DEFAULT_ALLOC_SIZE + CANARY_SIZE] = 1;
}
void untagged_read() {
u8 *p = alloc_default();
p = (u8 *) untag_pointer(p);
volatile u8 v = p[0];
(void) v;
}
void untagged_write() {
u8 *p = alloc_default();
p = (u8 *) untag_pointer(p);
p[0] = 1;
}
map<string, function<void()>> tests = {
#define TEST(s) { #s, s }
TEST(tag_distinctness),
TEST(read_after_free),
TEST(write_after_free),
TEST(overflow_read),
TEST(overflow_write),
TEST(underflow_read),
TEST(underflow_write),
TEST(untagged_read),
TEST(untagged_write),
#undef TEST
};
void segv_handler(int, siginfo_t *si, void *) {
fprintf(stderr, "SEGV_CODE %i", si->si_code);
exit(139); // standard exit code for SIGSEGV
}
int main(int argc, char **argv) {
setbuf(stdout, NULL);
assert(argc == 2);
auto test_name = string(argv[1]);
auto test_fn = tests[test_name];
assert(test_fn != nullptr);
assert(mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_ASYNC) == 1);
struct sigaction sa = {
.sa_sigaction = segv_handler,
.sa_flags = SA_SIGINFO,
};
assert(sigaction(SIGSEGV, &sa, nullptr) == 0);
test_fn();
do_context_switch();
return 0;
}
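
Given the `main()` above, each check is invoked with the test name as the sole argument, e.g. `/data/local/tmp/memtag_test read_after_free` on a device shell (the path the harness below pushes the binary to). Per the expectations in MemtagTest.java, a sync MTE fault prints `SEGV_CODE 9` on stderr and exits with status 139 (128 + SIGSEGV), while async faults surface as `SEGV_CODE 8` at a context switch, which is why `main()` ends with `do_context_switch()`.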

androidtest/src/grapheneos/hmalloc/MemtagTest.java

@@ -1,95 +0,0 @@
package grapheneos.hmalloc;
import com.android.tradefed.device.DeviceNotAvailableException;
import com.android.tradefed.testtype.DeviceJUnit4ClassRunner;
import com.android.tradefed.testtype.junit4.BaseHostJUnit4Test;
import org.junit.Test;
import org.junit.runner.RunWith;
import java.io.IOException;
import java.util.ArrayList;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
@RunWith(DeviceJUnit4ClassRunner.class)
public class MemtagTest extends BaseHostJUnit4Test {
private static final String TEST_BINARY = "/data/local/tmp/memtag_test";
enum Result {
SUCCESS(0, ""),
// it's expected that the device is configured to use asymm MTE tag checking mode
ASYNC_MTE_ERROR(139, "SEGV_CODE 8"),
SYNC_MTE_ERROR(139, "SEGV_CODE 9"),
;
public final int exitCode;
public final String stderr;
Result(int exitCode, String stderr) {
this.exitCode = exitCode;
this.stderr = stderr;
}
}
private static final int SEGV_EXIT_CODE = 139;
private void runTest(String name, Result expectedResult) throws DeviceNotAvailableException {
var args = new ArrayList<String>();
args.add(TEST_BINARY);
args.add(name);
String cmdLine = String.join(" ", args);
var result = getDevice().executeShellV2Command(cmdLine);
assertEquals("process exit code", expectedResult.exitCode, result.getExitCode().intValue());
assertEquals("stderr", expectedResult.stderr, result.getStderr());
}
@Test
public void tag_distinctness() throws DeviceNotAvailableException {
runTest("tag_distinctness", Result.SUCCESS);
}
@Test
public void read_after_free() throws DeviceNotAvailableException {
runTest("read_after_free", Result.SYNC_MTE_ERROR);
}
@Test
public void write_after_free() throws DeviceNotAvailableException {
runTest("write_after_free", Result.ASYNC_MTE_ERROR);
}
@Test
public void underflow_read() throws DeviceNotAvailableException {
runTest("underflow_read", Result.SYNC_MTE_ERROR);
}
@Test
public void underflow_write() throws DeviceNotAvailableException {
runTest("underflow_write", Result.ASYNC_MTE_ERROR);
}
@Test
public void overflow_read() throws DeviceNotAvailableException {
runTest("overflow_read", Result.SYNC_MTE_ERROR);
}
@Test
public void overflow_write() throws DeviceNotAvailableException {
runTest("overflow_write", Result.ASYNC_MTE_ERROR);
}
@Test
public void untagged_read() throws DeviceNotAvailableException {
runTest("untagged_read", Result.SYNC_MTE_ERROR);
}
@Test
public void untagged_write() throws DeviceNotAvailableException {
runTest("untagged_write", Result.ASYNC_MTE_ERROR);
}
}

arm_mte.h

@@ -1,91 +0,0 @@
#ifndef ARM_MTE_H
#define ARM_MTE_H
#include <arm_acle.h>
#include <util.h>
// Returns a tagged pointer.
// See https://developer.arm.com/documentation/ddi0602/2023-09/Base-Instructions/IRG--Insert-Random-Tag-
static inline void *arm_mte_create_random_tag(void *p, u64 exclusion_mask) {
return __arm_mte_create_random_tag(p, exclusion_mask);
}
// Tag the memory region with the tag specified in tag bits of tagged_ptr. Memory region itself is
// zeroed.
// tagged_ptr has to be aligned by 16, and len has to be a multiple of 16 (tag granule size).
//
// Arm's software optimization guide says:
// "it is recommended to use STZGM (or DCZGVA) to set tag if data is not a concern." (STZGM and
// DCZGVA are zeroing variants of tagging instructions).
//
// Contents of this function were copied from scudo:
// https://android.googlesource.com/platform/external/scudo/+/refs/tags/android-14.0.0_r1/standalone/memtag.h#167
//
// scudo is licensed under the Apache License v2.0 with LLVM Exceptions, which is compatible with
// hardened_malloc's MIT license.
static inline void arm_mte_tag_and_clear_mem(void *tagged_ptr, size_t len) {
uintptr_t Begin = (uintptr_t) tagged_ptr;
uintptr_t End = Begin + len;
uintptr_t LineSize, Next, Tmp;
__asm__ __volatile__(
".arch_extension memtag \n\t"
// Compute the cache line size in bytes (DCZID_EL0 stores it as the log2
// of the number of 4-byte words) and bail out to the slow path if DCZID_EL0
// indicates that the DC instructions are unavailable.
"DCZID .req %[Tmp] \n\t"
"mrs DCZID, dczid_el0 \n\t"
"tbnz DCZID, #4, 3f \n\t"
"and DCZID, DCZID, #15 \n\t"
"mov %[LineSize], #4 \n\t"
"lsl %[LineSize], %[LineSize], DCZID \n\t"
".unreq DCZID \n\t"
// Our main loop doesn't handle the case where we don't need to perform any
// DC GZVA operations. If the size of our tagged region is less than
// twice the cache line size, bail out to the slow path since it's not
// guaranteed that we'll be able to do a DC GZVA.
"Size .req %[Tmp] \n\t"
"sub Size, %[End], %[Cur] \n\t"
"cmp Size, %[LineSize], lsl #1 \n\t"
"b.lt 3f \n\t"
".unreq Size \n\t"
"LineMask .req %[Tmp] \n\t"
"sub LineMask, %[LineSize], #1 \n\t"
// STZG until the start of the next cache line.
"orr %[Next], %[Cur], LineMask \n\t"
"1:\n\t"
"stzg %[Cur], [%[Cur]], #16 \n\t"
"cmp %[Cur], %[Next] \n\t"
"b.lt 1b \n\t"
// DC GZVA cache lines until we have no more full cache lines.
"bic %[Next], %[End], LineMask \n\t"
".unreq LineMask \n\t"
"2: \n\t"
"dc gzva, %[Cur] \n\t"
"add %[Cur], %[Cur], %[LineSize] \n\t"
"cmp %[Cur], %[Next] \n\t"
"b.lt 2b \n\t"
// STZG until the end of the tagged region. This loop is also used to handle
// slow path cases.
"3: \n\t"
"cmp %[Cur], %[End] \n\t"
"b.ge 4f \n\t"
"stzg %[Cur], [%[Cur]], #16 \n\t"
"b 3b \n\t"
"4: \n\t"
: [Cur] "+&r"(Begin), [LineSize] "=&r"(LineSize), [Next] "=&r"(Next), [Tmp] "=&r"(Tmp)
: [End] "r"(End)
: "memory"
);
}
#endif
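
In outline, the removed assembly runs in three phases; a rough C rendering, illustrative only, with hypothetical `stzg()` and `dc_gzva()` helpers standing in for the STZG and DC GZVA instructions and the DCZID_EL0 availability check omitted:

    // Head: STZG 16-byte granules up to the next zeroing-cache-line boundary;
    // body: DC GZVA one whole line at a time; tail: STZG to the end. Assumes
    // the region spans at least two cache lines; the real code branches
    // straight to the tail loop otherwise (the slow path).
    static void tag_and_clear_outline(uintptr_t cur, uintptr_t end, uintptr_t line_size) {
        uintptr_t next = cur | (line_size - 1);   // last byte of the current line
        while (cur <= next) { stzg(cur); cur += 16; }
        next = end & ~(line_size - 1);            // start of the final partial line
        while (cur < next) { dc_gzva(cur); cur += line_size; }
        while (cur < end) { stzg(cur); cur += 16; }
    }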

h_malloc.c

@@ -14,7 +14,6 @@
 #include "h_malloc.h"
 #include "memory.h"
-#include "memtag.h"
 #include "mutex.h"
 #include "pages.h"
 #include "random.h"
@@ -76,9 +75,6 @@ static union {
         struct region_metadata *regions[2];
 #ifdef USE_PKEY
         int metadata_pkey;
-#endif
-#ifdef MEMTAG
-        bool is_memtag_disabled;
 #endif
     };
     char padding[PAGE_SIZE];
@@ -88,12 +84,6 @@ static inline void *get_slab_region_end(void) {
     return atomic_load_explicit(&ro.slab_region_end, memory_order_acquire);
 }
 
-#ifdef MEMTAG
-static inline bool is_memtag_enabled(void) {
-    return !ro.is_memtag_disabled;
-}
-#endif
-
 #define SLAB_METADATA_COUNT
 
 struct slab_metadata {
@@ -109,18 +99,6 @@ struct slab_metadata {
 #if SLAB_QUARANTINE
     u64 quarantine_bitmap[4];
 #endif
-#ifdef HAS_ARM_MTE
-    // arm_mte_tags is used as a u4 array (MTE tags are 4-bit wide)
-    //
-    // Its size is calculated by the following formula:
-    // (MAX_SLAB_SLOT_COUNT + 2) / 2
-    // MAX_SLAB_SLOT_COUNT is currently 256, 2 extra slots are needed for branchless handling of
-    // edge slots in tag_and_clear_slab_slot()
-    //
-    // It's intentionally placed at the end of struct to improve locality: for most size classes,
-    // slot count is far lower than MAX_SLAB_SLOT_COUNT.
-    u8 arm_mte_tags[129];
-#endif
 };
 
 static const size_t min_align = 16;
@@ -469,12 +447,6 @@ static void write_after_free_check(const char *p, size_t size) {
         return;
     }
 
-#ifdef HAS_ARM_MTE
-    if (likely(is_memtag_enabled())) {
-        return;
-    }
-#endif
-
     for (size_t i = 0; i < size; i += sizeof(u64)) {
         if (unlikely(*(const u64 *)(const void *)(p + i))) {
             fatal_error("detected write after free");
@@ -489,48 +461,19 @@ static void set_slab_canary_value(UNUSED struct slab_metadata *metadata, UNUSED
         0x00ffffffffffffffUL;
     metadata->canary_value = get_random_u64(rng) & canary_mask;
-
-#ifdef HAS_ARM_MTE
-    if (unlikely(metadata->canary_value == 0)) {
-        // 0 is reserved to support disabling MTE at runtime (this is required on Android).
-        // When MTE is enabled, writing and reading of canaries is disabled, i.e. canary remains zeroed.
-        // After MTE is disabled, canaries that are set to 0 are ignored, since they wouldn't match
-        // slab's metadata->canary_value.
-        // 0x100 was chosen arbitrarily, and can be encoded as an immediate value on ARM by the compiler.
-        metadata->canary_value = 0x100;
-    }
-#endif
 #endif
 }
 
 static void set_canary(UNUSED const struct slab_metadata *metadata, UNUSED void *p, UNUSED size_t size) {
 #if SLAB_CANARY
-#ifdef HAS_ARM_MTE
-    if (likely(is_memtag_enabled())) {
-        return;
-    }
-#endif
-
     memcpy((char *)p + size - canary_size, &metadata->canary_value, canary_size);
 #endif
 }
 
 static void check_canary(UNUSED const struct slab_metadata *metadata, UNUSED const void *p, UNUSED size_t size) {
 #if SLAB_CANARY
-#ifdef HAS_ARM_MTE
-    if (likely(is_memtag_enabled())) {
-        return;
-    }
-#endif
-
     u64 canary_value;
     memcpy(&canary_value, (const char *)p + size - canary_size, canary_size);
 
-#ifdef HAS_ARM_MTE
-    if (unlikely(canary_value == 0)) {
-        return;
-    }
-#endif
-
     if (unlikely(canary_value != metadata->canary_value)) {
         fatal_error("canary corrupted");
     }
@@ -563,39 +506,6 @@ static inline void stats_slab_deallocate(UNUSED struct size_class *c, UNUSED siz
 #endif
 }
 
-#ifdef HAS_ARM_MTE
-static void *tag_and_clear_slab_slot(struct slab_metadata *metadata, void *slot_ptr, size_t slot_idx, size_t slot_size) {
-    // arm_mte_tags is an array of 4-bit unsigned integers stored as u8 array (MTE tags are 4-bit wide)
-    //
-    // It stores the most recent tag for each slab slot, or 0 if the slot was never used.
-    // Slab indices in arm_mte_tags array are shifted to the right by 1, and size of this array
-    // is (MAX_SLAB_SLOT_COUNT + 2). This means that first and last values of arm_mte_tags array
-    // are always 0, which allows to handle edge slots in a branchless way when tag exclusion mask
-    // is constructed.
-    u8 *slot_tags = metadata->arm_mte_tags;
-
-    // Tag exclusion mask. 0 tag is always excluded to detect accesses to slab memory via untagged
-    // pointers. Moreover, 0 tag is excluded in bionic via PR_MTE_TAG_MASK prctl
-    u64 tem = (1 << 0) | (1 << RESERVED_TAG);
-
-    // current or previous tag of left neighbor or 0 if there's no left neighbor or if it was never used
-    tem |= (1 << u4_arr_get(slot_tags, slot_idx));
-    // previous tag of this slot or 0 if it was never used
-    tem |= (1 << u4_arr_get(slot_tags, slot_idx + 1));
-    // current or previous tag of right neighbor or 0 if there's no right neighbor or if it was never used
-    tem |= (1 << u4_arr_get(slot_tags, slot_idx + 2));
-
-    void *tagged_ptr = arm_mte_create_random_tag(slot_ptr, tem);
-    // slot addresses and sizes are always aligned by 16
-    arm_mte_tag_and_clear_mem(tagged_ptr, slot_size);
-
-    // store new tag of this slot
-    u4_arr_set(slot_tags, slot_idx + 1, get_pointer_tag(tagged_ptr));
-
-    return tagged_ptr;
-}
-#endif
-
 static inline void *allocate_small(unsigned arena, size_t requested_size) {
     struct size_info info = get_size_info(requested_size);
     size_t size = likely(info.size) ? info.size : 16;
@@ -624,11 +534,6 @@ static inline void *allocate_small(unsigned arena, size_t requested_size) {
             if (requested_size) {
                 write_after_free_check(p, size - canary_size);
                 set_canary(metadata, p, size);
-#ifdef HAS_ARM_MTE
-                if (likely(is_memtag_enabled())) {
-                    p = tag_and_clear_slab_slot(metadata, p, slot, size);
-                }
-#endif
             }
 
             stats_small_allocate(c, size);
@@ -661,11 +566,6 @@ static inline void *allocate_small(unsigned arena, size_t requested_size) {
             void *p = slot_pointer(size, slab, slot);
             if (requested_size) {
                 set_canary(metadata, p, size);
-#ifdef HAS_ARM_MTE
-                if (likely(is_memtag_enabled())) {
-                    p = tag_and_clear_slab_slot(metadata, p, slot, size);
-                }
-#endif
             }
 
             stats_slab_allocate(c, slab_size);
             stats_small_allocate(c, size);
@@ -688,11 +588,6 @@ static inline void *allocate_small(unsigned arena, size_t requested_size) {
         void *p = slot_pointer(size, slab, slot);
         if (requested_size) {
             set_canary(metadata, p, size);
-#ifdef HAS_ARM_MTE
-            if (likely(is_memtag_enabled())) {
-                p = tag_and_clear_slab_slot(metadata, p, slot, size);
-            }
-#endif
         }
 
         stats_slab_allocate(c, slab_size);
         stats_small_allocate(c, size);
@@ -717,11 +612,6 @@ static inline void *allocate_small(unsigned arena, size_t requested_size) {
     if (requested_size) {
         write_after_free_check(p, size - canary_size);
         set_canary(metadata, p, size);
-#ifdef HAS_ARM_MTE
-        if (likely(is_memtag_enabled())) {
-            p = tag_and_clear_slab_slot(metadata, p, slot, size);
-        }
-#endif
     }
 
     stats_small_allocate(c, size);
@@ -804,16 +694,7 @@ static inline void deallocate_small(void *p, const size_t *expected_size) {
     if (likely(!is_zero_size)) {
         check_canary(metadata, p, size);
 
-        bool skip_zero = false;
-#ifdef HAS_ARM_MTE
-        if (likely(is_memtag_enabled())) {
-            arm_mte_tag_and_clear_mem(set_pointer_tag(p, RESERVED_TAG), size);
-            // metadata->arm_mte_tags is intentionally not updated, see tag_and_clear_slab_slot()
-            skip_zero = true;
-        }
-#endif
-
-        if (ZERO_ON_FREE && !skip_zero) {
+        if (ZERO_ON_FREE) {
             memset(p, 0, size - canary_size);
         }
     }
@@ -1193,14 +1074,13 @@ static inline void enforce_init(void) {
     }
 }
 
-static struct mutex init_lock = MUTEX_INITIALIZER;
-
 COLD static void init_slow_path(void) {
-    mutex_lock(&init_lock);
+    static struct mutex lock = MUTEX_INITIALIZER;
+
+    mutex_lock(&lock);
 
     if (unlikely(is_init())) {
-        mutex_unlock(&init_lock);
+        mutex_unlock(&lock);
         return;
     }
@@ -1243,15 +1123,8 @@ COLD static void init_slow_path(void) {
     if (unlikely(memory_protect_rw_metadata(ra->regions, ra->total * sizeof(struct region_metadata)))) {
         fatal_error("failed to unprotect memory for regions table");
     }
 
-#ifdef HAS_ARM_MTE
-    if (likely(is_memtag_enabled())) {
-        ro.slab_region_start = memory_map_mte(slab_region_size);
-    } else {
     ro.slab_region_start = memory_map(slab_region_size);
-    }
-#else
-    ro.slab_region_start = memory_map(slab_region_size);
-#endif
     if (unlikely(ro.slab_region_start == NULL)) {
         fatal_error("failed to allocate slab region");
     }
@@ -1291,7 +1164,7 @@ COLD static void init_slow_path(void) {
     }
 
     memory_set_name(&ro, sizeof(ro), "malloc read-only after init");
 
-    mutex_unlock(&init_lock);
+    mutex_unlock(&lock);
 
     // may allocate, so wait until the allocator is initialized to avoid deadlocking
     if (unlikely(pthread_atfork(full_lock, full_unlock, post_fork_child))) {
@@ -1495,11 +1368,6 @@ EXPORT void *h_calloc(size_t nmemb, size_t size) {
     if (!ZERO_ON_FREE && likely(p != NULL) && total_size && total_size <= max_slab_size_class) {
         memset(p, 0, total_size - canary_size);
     }
-#ifdef HAS_ARM_MTE
-    // use an assert instead of adding a conditional to memset() above (freed memory is always
-    // zeroed when MTE is enabled)
-    static_assert(ZERO_ON_FREE, "disabling ZERO_ON_FREE reduces performance when ARM MTE is enabled");
-#endif
     return p;
 }
@@ -1517,14 +1385,11 @@ EXPORT void *h_realloc(void *old, size_t size) {
         }
     }
 
-    void *old_orig = old;
-    old = untag_pointer(old);
-
     size_t old_size;
     if (old < get_slab_region_end() && old >= ro.slab_region_start) {
         old_size = slab_usable_size(old);
         if (size <= max_slab_size_class && get_size_info(size).size == old_size) {
-            return old_orig;
+            return old;
         }
         thread_unseal_metadata();
     } else {
@@ -1637,7 +1502,7 @@ EXPORT void *h_realloc(void *old, size_t size) {
         if (copy_size > 0 && copy_size <= max_slab_size_class) {
             copy_size -= canary_size;
         }
-        memcpy(new, old_orig, copy_size);
+        memcpy(new, old, copy_size);
         if (old_size <= max_slab_size_class) {
             deallocate_small(old, NULL);
         } else {
@@ -1678,8 +1543,6 @@ EXPORT void h_free(void *p) {
         return;
     }
 
-    p = untag_pointer(p);
-
     if (p < get_slab_region_end() && p >= ro.slab_region_start) {
         thread_unseal_metadata();
         deallocate_small(p, NULL);
@@ -1703,8 +1566,6 @@ EXPORT void h_free_sized(void *p, size_t expected_size) {
         return;
     }
 
-    p = untag_pointer(p);
-
     expected_size = adjust_size_for_canary(expected_size);
 
     if (p < get_slab_region_end() && p >= ro.slab_region_start) {
@@ -1758,13 +1619,11 @@ static inline void memory_corruption_check_small(const void *p) {
     mutex_unlock(&c->lock);
 }
 
-EXPORT size_t h_malloc_usable_size(H_MALLOC_USABLE_SIZE_CONST void *arg) {
-    if (arg == NULL) {
+EXPORT size_t h_malloc_usable_size(H_MALLOC_USABLE_SIZE_CONST void *p) {
+    if (p == NULL) {
         return 0;
     }
 
-    const void *p = untag_const_pointer(arg);
-
     if (p < get_slab_region_end() && p >= ro.slab_region_start) {
         thread_unseal_metadata();
         memory_corruption_check_small(p);
@@ -2166,26 +2025,3 @@ COLD EXPORT int h_malloc_set_state(UNUSED void *state) {
     return -2;
 }
 #endif
-
-#ifdef __ANDROID__
-COLD EXPORT void h_malloc_disable_memory_tagging(void) {
-#ifdef HAS_ARM_MTE
-    mutex_lock(&init_lock);
-    if (!ro.is_memtag_disabled) {
-        if (is_init()) {
-            if (unlikely(memory_protect_rw(&ro, sizeof(ro)))) {
-                fatal_error("failed to unprotect allocator data");
-            }
-            ro.is_memtag_disabled = true;
-            if (unlikely(memory_protect_ro(&ro, sizeof(ro)))) {
-                fatal_error("failed to protect allocator data");
-            }
-        } else {
-            // bionic calls this function very early in some cases
-            ro.is_memtag_disabled = true;
-        }
-    }
-    mutex_unlock(&init_lock);
-#endif
-}
-#endif

include/h_malloc.h

@@ -99,7 +99,6 @@ int h_malloc_iterate(uintptr_t base, size_t size, void (*callback)(uintptr_t ptr
                      void *arg);
 void h_malloc_disable(void);
 void h_malloc_enable(void);
-void h_malloc_disable_memory_tagging(void);
 #endif
 
 // hardened_malloc extensions

memory.c

@@ -28,20 +28,6 @@ void *memory_map(size_t size) {
     return p;
 }
 
-#ifdef HAS_ARM_MTE
-// Note that PROT_MTE can't be cleared via mprotect
-void *memory_map_mte(size_t size) {
-    void *p = mmap(NULL, size, PROT_MTE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
-    if (unlikely(p == MAP_FAILED)) {
-        if (errno != ENOMEM) {
-            fatal_error("non-ENOMEM MTE mmap failure");
-        }
-        return NULL;
-    }
-    return p;
-}
-#endif
-
 bool memory_map_fixed(void *ptr, size_t size) {
     void *p = mmap(ptr, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0);
     bool ret = p == MAP_FAILED;

memory.h

@@ -11,9 +11,6 @@
 int get_metadata_key(void);
 
 void *memory_map(size_t size);
-#ifdef HAS_ARM_MTE
-void *memory_map_mte(size_t size);
-#endif
 bool memory_map_fixed(void *ptr, size_t size);
 bool memory_unmap(void *ptr, size_t size);
 bool memory_protect_ro(void *ptr, size_t size);

memtag.h

@@ -1,49 +0,0 @@
#ifndef MEMTAG_H
#define MEMTAG_H
#include "util.h"
#ifdef HAS_ARM_MTE
#include "arm_mte.h"
#define MEMTAG 1
#define RESERVED_TAG 15
#define TAG_WIDTH 4
#endif
static inline void *untag_pointer(void *ptr) {
#ifdef HAS_ARM_MTE
const uintptr_t mask = UINTPTR_MAX >> 8;
return (void *) ((uintptr_t) ptr & mask);
#else
return ptr;
#endif
}
static inline const void *untag_const_pointer(const void *ptr) {
#ifdef HAS_ARM_MTE
const uintptr_t mask = UINTPTR_MAX >> 8;
return (const void *) ((uintptr_t) ptr & mask);
#else
return ptr;
#endif
}
static inline void *set_pointer_tag(void *ptr, u8 tag) {
#ifdef HAS_ARM_MTE
return (void *) (((uintptr_t) tag << 56) | (uintptr_t) untag_pointer(ptr));
#else
(void) tag;
return ptr;
#endif
}
static inline u8 get_pointer_tag(void *ptr) {
#ifdef HAS_ARM_MTE
return (((uintptr_t) ptr) >> 56) & 0xf;
#else
(void) ptr;
return 0;
#endif
}
#endif
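
As a quick illustration of the helpers above (hypothetical standalone snippet; it assumes `p` carries no tag bits, and actually dereferencing `tagged` on MTE hardware would fault unless the underlying tag granules match):

    void *p = malloc(16);
    void *tagged = set_pointer_tag(p, 5); // tag 5 placed into bits 56-59
    // get_pointer_tag(tagged) == 5
    // untag_pointer(tagged) == p (the top byte's tag bits are cleared)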

util.c

@@ -6,8 +6,6 @@
 #ifdef __ANDROID__
 #include <async_safe/log.h>
 
-int mallopt(int param, int value);
-#define M_BIONIC_RESTORE_DEFAULT_SIGABRT_HANDLER (-1003)
 #endif
 
 #include "util.h"
@@ -32,7 +30,6 @@ static int write_full(int fd, const char *buf, size_t length) {
 
 COLD noreturn void fatal_error(const char *s) {
 #ifdef __ANDROID__
-    mallopt(M_BIONIC_RESTORE_DEFAULT_SIGABRT_HANDLER, 0);
     async_safe_fatal("hardened_malloc: fatal allocator error: %s", s);
 #else
     const char *prefix = "fatal allocator error: ";

util.h

@@ -57,22 +57,6 @@ static inline size_t align(size_t size, size_t align) {
     return (size + mask) & ~mask;
 }
 
-// u4_arr_{set,get} are helper functions for using u8 array as an array of unsigned 4-bit values.
-// val is treated as a 4-bit value
-static inline void u4_arr_set(u8 *arr, size_t idx, u8 val) {
-    size_t off = idx >> 1;
-    size_t shift = (idx & 1) << 2;
-    u8 mask = (u8) (0xf0 >> shift);
-    arr[off] = (arr[off] & mask) | (val << shift);
-}
-
-static inline u8 u4_arr_get(const u8 *arr, size_t idx) {
-    size_t off = idx >> 1;
-    size_t shift = (idx & 1) << 2;
-    return (u8) ((arr[off] >> shift) & 0xf);
-}
-
 COLD noreturn void fatal_error(const char *s);
 
 #if CONFIG_SEAL_METADATA
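
For reference, a short worked example of the removed 4-bit array helpers (hypothetical standalone snippet): even indices occupy the low nibble of each byte and odd indices the high nibble:

    u8 arr[2] = {0};         // room for four 4-bit values
    u4_arr_set(arr, 0, 0x3); // arr[0] == 0x03
    u4_arr_set(arr, 1, 0xe); // arr[0] == 0xe3
    u4_arr_set(arr, 2, 0x7); // arr[1] == 0x07
    // u4_arr_get(arr, 1) == 0xe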