mirror of https://github.com/GrapheneOS/hardened_malloc.git
initialize size class CSPRNGs from init CSPRNG
This avoids making a huge number of getrandom system calls during initialization. The init CSPRNG is unmapped before initialization finishes, and the size class CSPRNGs are still reseeded from the OS afterwards. The purpose of the independent CSPRNGs is simply to avoid the massive performance hit of synchronization, and there's no harm in seeding them this way. Keeping the init CSPRNG around and reseeding from it would defeat the purpose of reseeding, and reseeding from the OS isn't a measurable performance issue since it can be tuned to happen less often.
parent c7e2cb82f4
commit a13db3fc68

3 changed files with 13 additions and 2 deletions
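As a rough illustration of the pattern described in the commit message, the toy program below seeds one master state from the OS and then derives each per-size-class state from it instead of issuing a getrandom call per class. The struct layout, the helper names (toy_init, toy_init_from_state) and the splitmix64 mixer are stand-ins chosen for the demo only; they are not the repository's ChaCha-based random.c.

#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/random.h>

struct toy_state {
    uint64_t s; /* the real CSPRNG keeps a ChaCha key/nonce plus an output cache */
};

/* splitmix64 step: good enough for a demo, not a CSPRNG */
static uint64_t toy_next(struct toy_state *state) {
    uint64_t z = (state->s += 0x9e3779b97f4a7c15ull);
    z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9ull;
    z = (z ^ (z >> 27)) * 0x94d049bb133111ebull;
    return z ^ (z >> 31);
}

/* seed directly from the OS: one getrandom() system call per state */
static void toy_init(struct toy_state *state) {
    ssize_t r = getrandom(&state->s, sizeof(state->s), 0);
    if (r != (ssize_t)sizeof(state->s)) {
        state->s = 1; /* demo fallback; real code would treat this as fatal */
    }
}

/* seed from an existing state instead: no system call; the derived state
   is still expected to be reseeded from the OS periodically later on */
static void toy_init_from_state(struct toy_state *state, struct toy_state *source) {
    state->s = toy_next(source);
}

int main(void) {
    struct toy_state init_rng;
    toy_init(&init_rng); /* the only getrandom() call during initialization */

    struct toy_state size_class_rng[4];
    for (int i = 0; i < 4; i++) {
        toy_init_from_state(&size_class_rng[i], &init_rng);
        printf("class %d first output: %016llx\n", i,
               (unsigned long long)toy_next(&size_class_rng[i]));
    }
    return 0;
}

In the real allocator, the init CSPRNG is additionally unmapped once initialization completes, so nothing continues to derive state from it afterwards.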
@@ -1093,7 +1093,7 @@ COLD static void init_slow_path(void) {
     struct region_allocator *ra = ro.region_allocator;

     mutex_init(&ra->lock);
-    random_state_init(&ra->rng);
+    random_state_init_from_random_state(&ra->rng, rng);
     ro.regions[0] = allocator_state->regions_a;
     ro.regions[1] = allocator_state->regions_b;
     ra->regions = ro.regions[0];

@@ -1116,7 +1116,7 @@ COLD static void init_slow_path(void) {
            struct size_class *c = &ro.size_class_metadata[arena][class];

            mutex_init(&c->lock);
-            random_state_init(&c->rng);
+            random_state_init_from_random_state(&c->rng, rng);

            size_t bound = (REAL_CLASS_REGION_SIZE - CLASS_REGION_SIZE) / PAGE_SIZE - 1;
            size_t gap = (get_random_u64_uniform(rng, bound) + 1) * PAGE_SIZE;
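The context lines in the second hunk also show how the randomness is consumed: get_random_u64_uniform(rng, bound) picks a random page-aligned guard gap in front of each class region. A bounded draw like this is commonly implemented with rejection sampling to avoid modulo bias; the sketch below demonstrates that general technique with a non-cryptographic stand-in generator and made-up sizes, and is not the repository's actual implementation.

#include <stdint.h>
#include <stdio.h>

/* stand-in for the allocator's CSPRNG output: a fixed-seed splitmix64
   stream, uniform over 64 bits but NOT cryptographic */
static uint64_t random_u64(void) {
    static uint64_t s = 0x123456789abcdef0ull;
    uint64_t z = (s += 0x9e3779b97f4a7c15ull);
    z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9ull;
    z = (z ^ (z >> 27)) * 0x94d049bb133111ebull;
    return z ^ (z >> 31);
}

/* uniform value in [0, bound) without modulo bias, via rejection sampling;
   a generic sketch of what a function like get_random_u64_uniform does */
static uint64_t random_u64_uniform(uint64_t bound) {
    if (bound < 2) {
        return 0;
    }
    /* largest multiple of bound representable; values at or above it would
       make small remainders slightly more likely, so they are rejected */
    uint64_t limit = UINT64_MAX - (UINT64_MAX % bound);
    uint64_t value;
    do {
        value = random_u64();
    } while (value >= limit);
    return value % bound;
}

int main(void) {
    /* mimic the diff's gap computation with placeholder sizes:
       gap = (uniform(bound) + 1) * PAGE_SIZE, i.e. 1..bound pages */
    const uint64_t PAGE_SIZE = 4096;
    const uint64_t bound = 255; /* placeholder, not the real class region math */
    uint64_t gap = (random_u64_uniform(bound) + 1) * PAGE_SIZE;
    printf("randomized guard gap: %llu bytes\n", (unsigned long long)gap);
    return 0;
}

In the diff's context, bound is the number of spare pages between the reserved region and the class region, so each class region ends up at a random page-aligned offset within its larger reservation.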