DivestOS/Patches/OLD/bacon/Kernel-All/ch-12.1/22.patch

From 0fd3d1cb2f2d6f989bdf7075a6df2bdd84f39084 Mon Sep 17 00:00:00 2001
From: Daniel Micay <danielmicay@gmail.com>
Date: Wed, 19 Nov 2014 14:25:58 -0500
Subject: [PATCH] WIP port from PaX 3.2 LTS -> 3.4
---
 Documentation/kernel-parameters.txt |  14 +++++
 arch/arm/include/asm/cache.h        |   2 +
 arch/arm/include/asm/page.h         |   1 +
 drivers/char/mem.c                  |   2 +-
 drivers/char/random.c               |   4 +-
 fs/binfmt_elf.c                     | 116 ++++++++++++++++++++++--------------
 fs/buffer.c                         |   2 +-
 fs/dcache.c                         |   3 +-
 fs/exec.c                           |   4 +-
 fs/xattr.c                          |  21 +++++++
 include/linux/sched.h               |   1 +
 include/linux/slab.h                |  23 ++++++-
 include/linux/slab_def.h            |   4 ++
 include/linux/xattr.h               |   3 +
 kernel/fork.c                       |   2 +-
 mm/mm_init.c                        |  25 ++++++++
 mm/rmap.c                           |   6 +-
 mm/slab.c                           |  50 ++++++++++++++--
 mm/slob.c                           |  50 ++++++++++------
 mm/slub.c                           |  29 ++++++++-
 net/core/skbuff.c                   |   6 +-
 security/Kconfig                    |  32 +++++++---
 22 files changed, 310 insertions(+), 90 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 9d56aae..11570d3 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2061,8 +2061,22 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
expand down segment used by UDEREF on X86-32 or the frequent
page table updates on X86-64.
+ pax_sanitize_slab=
+ Format: { 0 | 1 | off | fast | full }
+ Options '0' and '1' are only provided for backward
+ compatibility, 'off' or 'fast' should be used instead.
+ 0|off : disable slab object sanitization
+ 1|fast: enable slab object sanitization excluding
+ whitelisted slabs (default)
+ full : sanitize all slabs, even the whitelisted ones
+
pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
+ pax_extra_latent_entropy
+ Enable a very simple form of latent entropy extraction
+ from the first 4GB of memory as the bootmem allocator
+ passes the memory pages to the buddy allocator.
+
pcbit= [HW,ISDN]
pcd. [PARIDE]
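
Both knobs documented above are ordinary kernel command-line parameters; for example, booting with the line below enables sanitization of all slabs, including the whitelisted ones, and turns on the latent-entropy extraction:

    pax_sanitize_slab=full pax_extra_latent_entropy
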
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 73dab99..2255c86 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -4,6 +4,8 @@
#ifndef __ASMARM_CACHE_H
#define __ASMARM_CACHE_H
+#include <linux/const.h>
+
#define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index b848020..9b999166 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -23,6 +23,7 @@
#else
+#include <linux/compiler.h>
#include <asm/glue.h>
/*
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 52ed8d4..08e88db 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -400,7 +400,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf,
else
csize = count;
- rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
+ rc = copy_oldmem_page(pfn, (char __force_kernel *)buf, csize, offset, 1);
if (rc < 0)
return rc;
buf += csize;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index e54ef05..550a156 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -527,8 +527,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
input_rotate += i ? 7 : 14;
}
- ACCESS_ONCE(r->input_rotate) = input_rotate;
- ACCESS_ONCE(r->add_ptr) = i;
+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
+ ACCESS_ONCE_RW(r->add_ptr) = i;
smp_wmb();
if (out)
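
The ACCESS_ONCE_RW() conversions above come from PaX's constification of ACCESS_ONCE(): the plain macro becomes read-only and a writable variant is introduced for sites, such as this pool-mixing path, that legitimately store through the reference. The definitions live in a compiler.h hunk outside this patch, so treat the following restatement as an assumption based on the PaX 3.x series:

    /* Assumed PaX definitions (include/linux/compiler.h, not part of this diff): */
    #define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))  /* read-only */
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))        /* writable  */
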
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 5432fe8..3e624bc 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -566,7 +566,7 @@ static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf
#endif
#ifdef CONFIG_PAX_EMUTRAMP
- if (elf_phdata->p_flags & PF_EMUTRAMP)
+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
pax_flags |= MF_PAX_EMUTRAMP;
#endif
@@ -634,7 +634,7 @@ static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmo
#endif
#ifdef CONFIG_PAX_EMUTRAMP
- if (pax_flags_softmode & MF_PAX_EMUTRAMP)
+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
pax_flags |= MF_PAX_EMUTRAMP;
#endif
@@ -686,12 +686,48 @@ static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmo
#endif
#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
-static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
+static unsigned long pax_parse_defaults(void)
{
unsigned long pax_flags = 0UL;
+#ifdef CONFIG_PAX_SOFTMODE
+ if (pax_softmode)
+ return pax_flags;
+#endif
+
+#ifdef CONFIG_PAX_PAGEEXEC
+ pax_flags |= MF_PAX_PAGEEXEC;
+#endif
+
+#ifdef CONFIG_PAX_SEGMEXEC
+ pax_flags |= MF_PAX_SEGMEXEC;
+#endif
+
+#ifdef CONFIG_PAX_MPROTECT
+ pax_flags |= MF_PAX_MPROTECT;
+#endif
+
+#ifdef CONFIG_PAX_RANDMMAP
+ if (randomize_va_space)
+ pax_flags |= MF_PAX_RANDMMAP;
+#endif
+
+ return pax_flags;
+}
+
+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
+{
+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
+
#ifdef CONFIG_PAX_EI_PAX
+#ifdef CONFIG_PAX_SOFTMODE
+ if (pax_softmode)
+ return pax_flags;
+#endif
+
+ pax_flags = 0UL;
+
#ifdef CONFIG_PAX_PAGEEXEC
if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
pax_flags |= MF_PAX_PAGEEXEC;
@@ -717,28 +753,10 @@ static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
pax_flags |= MF_PAX_RANDMMAP;
#endif
-#else
-
-#ifdef CONFIG_PAX_PAGEEXEC
- pax_flags |= MF_PAX_PAGEEXEC;
-#endif
-
-#ifdef CONFIG_PAX_SEGMEXEC
- pax_flags |= MF_PAX_SEGMEXEC;
-#endif
-
-#ifdef CONFIG_PAX_MPROTECT
- pax_flags |= MF_PAX_MPROTECT;
-#endif
-
-#ifdef CONFIG_PAX_RANDMMAP
- if (randomize_va_space)
- pax_flags |= MF_PAX_RANDMMAP;
-#endif
-
#endif
return pax_flags;
+
}
static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
@@ -754,7 +772,7 @@ static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const
((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
- return ~0UL;
+ return PAX_PARSE_FLAGS_FALLBACK;
#ifdef CONFIG_PAX_SOFTMODE
if (pax_softmode)
@@ -767,7 +785,7 @@ static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const
}
#endif
- return ~0UL;
+ return PAX_PARSE_FLAGS_FALLBACK;
}
static unsigned long pax_parse_xattr_pax(struct file * const file)
@@ -775,23 +793,27 @@ static unsigned long pax_parse_xattr_pax(struct file * const file)
#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
ssize_t xattr_size, i;
- unsigned char xattr_value[5];
+ unsigned char xattr_value[sizeof("pemrs") - 1];
unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
- xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
- if (xattr_size <= 0)
- return ~0UL;
+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
+ return PAX_PARSE_FLAGS_FALLBACK;
for (i = 0; i < xattr_size; i++)
switch (xattr_value[i]) {
default:
- return ~0UL;
+ return PAX_PARSE_FLAGS_FALLBACK;
#define parse_flag(option1, option2, flag) \
case option1: \
+ if (pax_flags_hardmode & MF_PAX_##flag) \
+ return PAX_PARSE_FLAGS_FALLBACK;\
pax_flags_hardmode |= MF_PAX_##flag; \
break; \
case option2: \
+ if (pax_flags_softmode & MF_PAX_##flag) \
+ return PAX_PARSE_FLAGS_FALLBACK;\
pax_flags_softmode |= MF_PAX_##flag; \
break;
@@ -805,7 +827,7 @@ static unsigned long pax_parse_xattr_pax(struct file * const file)
}
if (pax_flags_hardmode & pax_flags_softmode)
- return ~0UL;
+ return PAX_PARSE_FLAGS_FALLBACK;
#ifdef CONFIG_PAX_SOFTMODE
if (pax_softmode)
@@ -815,27 +837,30 @@ static unsigned long pax_parse_xattr_pax(struct file * const file)
return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
#else
- return ~0UL;
+ return PAX_PARSE_FLAGS_FALLBACK;
#endif
}
static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
{
- unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
- pax_flags = pax_parse_ei_pax(elf_ex);
+ pax_flags = pax_parse_defaults();
+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
xattr_pax_flags = pax_parse_xattr_pax(file);
- if (pt_pax_flags == ~0UL)
- pt_pax_flags = xattr_pax_flags;
- else if (xattr_pax_flags == ~0UL)
- xattr_pax_flags = pt_pax_flags;
- if (pt_pax_flags != xattr_pax_flags)
+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
+ pt_pax_flags != xattr_pax_flags)
return -EINVAL;
- if (pt_pax_flags != ~0UL)
+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
+ pax_flags = xattr_pax_flags;
+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
pax_flags = pt_pax_flags;
+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
+ pax_flags = ei_pax_flags;
#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
@@ -871,7 +896,7 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
unsigned long random_variable = 0;
#ifdef CONFIG_PAX_RANDUSTACK
- if (randomize_va_space)
+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
return stack_top - current->mm->delta_stack;
#endif
@@ -908,7 +933,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
struct elfhdr elf_ex;
struct elfhdr interp_elf_ex;
} *loc;
- unsigned long pax_task_size = TASK_SIZE;
+ unsigned long pax_task_size;
loc = kmalloc(sizeof(*loc), GFP_KERNEL);
if (!loc) {
@@ -1044,6 +1069,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
goto out_free_dentry;
/* OK, This is the point of no return */
+ current->mm->def_flags = 0;
#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
current->mm->pax_flags = 0UL;
@@ -1062,8 +1088,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
current->mm->delta_stack = 0UL;
#endif
- current->mm->def_flags = 0;
-
#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
send_sig(SIGKILL, current, 0);
@@ -1081,7 +1105,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
current->mm->context.user_cs_limit = PAGE_SIZE;
- current->mm->def_flags |= VM_PAGEEXEC;
+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
}
#endif
@@ -1091,9 +1115,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
pax_task_size = SEGMEXEC_TASK_SIZE;
current->mm->def_flags |= VM_NOHUGEPAGE;
- }
+ } else
#endif
+ pax_task_size = TASK_SIZE;
+
#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
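
The net effect of the reworked pax_parse_pax_flags() above is a strict precedence chain with a single hard-error case. Restated as a standalone C sketch (names follow the patch; the helper itself is illustrative, not kernel code):

    /* Precedence: xattr > PT_PAX > EI_PAX > built-in defaults.  The only
     * fatal combination is an explicit PT_PAX/xattr disagreement. */
    static long resolve_pax_flags(unsigned long defaults, unsigned long ei,
                                  unsigned long pt, unsigned long xattr)
    {
            unsigned long flags = defaults;

            if (pt != PAX_PARSE_FLAGS_FALLBACK &&
                xattr != PAX_PARSE_FLAGS_FALLBACK && pt != xattr)
                    return -EINVAL;

            if (xattr != PAX_PARSE_FLAGS_FALLBACK)
                    flags = xattr;
            else if (pt != PAX_PARSE_FLAGS_FALLBACK)
                    flags = pt;
            else if (ei != PAX_PARSE_FLAGS_FALLBACK)
                    flags = ei;

            return (long)flags;
    }
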
diff --git a/fs/buffer.c b/fs/buffer.c
index fb6ad35..79c56c5 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3333,7 +3333,7 @@ void __init buffer_init(void)
bh_cachep = kmem_cache_create("buffer_head",
sizeof(struct buffer_head), 0,
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
- SLAB_MEM_SPREAD),
+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
NULL);
/*
diff --git a/fs/dcache.c b/fs/dcache.c
index 57689a2..8f792ff 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3108,7 +3108,8 @@ void __init vfs_caches_init(unsigned long mempages)
mempages -= reserve;
names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
+ SLAB_NO_SANITIZE, NULL);
dcache_init();
inode_init();
diff --git a/fs/exec.c b/fs/exec.c
index 90d4bdf..a103e85 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -2030,7 +2030,9 @@ void pax_report_refcount_overflow(struct pt_regs *regs)
printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
current->comm, task_pid_nr(current), current_uid(), current_euid());
print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
+ preempt_disable();
show_regs(regs);
+ preempt_enable();
force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
}
#endif
@@ -2101,7 +2103,7 @@ void check_object_size(const void *ptr, unsigned long n, bool to)
if (!n)
return;
- type = check_heap_object(ptr, n, to);
+ type = check_heap_object(ptr, n);
if (!type) {
if (check_stack_object(ptr, n) != -1)
return;
diff --git a/fs/xattr.c b/fs/xattr.c
index 3c8c1cc..e92d0af 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -226,6 +226,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
return rc;
}
+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
+ssize_t
+pax_getxattr(struct dentry *dentry, void *value, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ ssize_t error;
+
+ error = inode_permission(inode, MAY_EXEC);
+ if (error)
+ return error;
+
+ if (inode->i_op->getxattr)
+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
+ else
+ error = -EOPNOTSUPP;
+
+ return error;
+}
+EXPORT_SYMBOL(pax_getxattr);
+#endif
+
ssize_t
vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
{
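
For reference, the attribute read by pax_getxattr() is reachable from userspace through the regular xattr syscalls; XATTR_NAME_PAX_FLAGS expands to "user.pax.flags" in the PaX tree. A minimal userspace reader, sketched here and not part of the patch:

    /* pax-flags-read: print the PaX xattr marking of a file. */
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/xattr.h>

    int main(int argc, char **argv)
    {
            char value[6];  /* at most "pemrs" plus a terminator */
            ssize_t len;

            if (argc < 2)
                    return 1;

            len = getxattr(argv[1], "user.pax.flags", value, sizeof(value) - 1);
            if (len < 0) {
                    perror("getxattr");
                    return 1;
            }
            value[len] = '\0';
            printf("%s: %s\n", argv[1], value);
            return 0;
    }
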
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d04575f..c9bcebe 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1654,6 +1654,7 @@ extern int pax_softmode;
#endif
extern int pax_check_flags(unsigned long *);
+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
/* if tsk != current then task_lock must be held on it */
#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index bbd740b..e9ba911 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -27,6 +27,13 @@
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
+#else
+#define SLAB_NO_SANITIZE 0x00000000UL
+#endif
+
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
@@ -103,6 +110,20 @@
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+#ifdef CONFIG_X86_64
+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
+#else
+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
+#endif
+enum pax_sanitize_mode {
+ PAX_SANITIZE_SLAB_OFF = 0,
+ PAX_SANITIZE_SLAB_FAST,
+ PAX_SANITIZE_SLAB_FULL,
+};
+extern enum pax_sanitize_mode pax_sanitize_slab;
+#endif
+
/*
* struct kmem_cache related prototypes
*/
@@ -172,7 +193,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
-const char *check_heap_object(const void *ptr, unsigned long n, bool to);
+const char *check_heap_object(const void *ptr, unsigned long n);
bool is_usercopy_object(const void *ptr);
/*
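
Two details in the slab.h hunks above are worth noting. First, the fill value is architecture-dependent so that a use-after-free dereference of a wiped pointer faults: a 0xfe fill yields non-canonical addresses on x86-64, while 0xff is used elsewhere. Second, SLAB_NO_SANITIZE is how individual caches opt out under the default "fast" mode; a minimal sketch of creating such a cache (the cache name and object size are hypothetical):

    #include <linux/init.h>
    #include <linux/slab.h>

    static struct kmem_cache *scratch_cachep;

    static int __init scratch_cache_init(void)
    {
            /* Contents are non-sensitive and freed on hot paths, so skip the wipe. */
            scratch_cachep = kmem_cache_create("scratch_cache", 256, 0,
                                               SLAB_HWCACHE_ALIGN | SLAB_NO_SANITIZE,
                                               NULL);
            return scratch_cachep ? 0 : -ENOMEM;
    }
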
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 0a3d314..02dd4ad 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -70,6 +70,10 @@ struct kmem_cache {
atomic_unchecked_t allocmiss;
atomic_unchecked_t freehit;
atomic_unchecked_t freemiss;
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+ atomic_unchecked_t sanitized;
+ atomic_unchecked_t not_sanitized;
+#endif
/*
* If debugging is enabled, then the allocator can add additional
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index ef6e4069..cf5f26c 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -87,6 +87,9 @@ struct xattr {
};
ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
+ssize_t pax_getxattr(struct dentry *, void *, size_t);
+#endif
ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
diff --git a/kernel/fork.c b/kernel/fork.c
index c35da73..4badfbd 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1763,7 +1763,7 @@ void __init proc_caches_init(void)
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
mmap_init();
nsproxy_cache_init();
}
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 1ffd97a..ed75674 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -9,8 +9,33 @@
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
+#include <linux/slab.h>
#include "internal.h"
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
+static int __init pax_sanitize_slab_setup(char *str)
+{
+ if (!str)
+ return 0;
+
+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
+ pr_info("PaX slab sanitization: %s\n", "disabled");
+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
+ pr_info("PaX slab sanitization: %s\n", "fast");
+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
+ } else if (!strcmp(str, "full")) {
+ pr_info("PaX slab sanitization: %s\n", "full");
+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
+ } else
+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
+
+ return 0;
+}
+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
+#endif
+
#ifdef CONFIG_DEBUG_MEMORY_INIT
int mminit_loglevel;
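
With the early_param handler above in place, the chosen mode is echoed early during boot; booting with pax_sanitize_slab=full, for instance, logs

    PaX slab sanitization: full

and an unrecognized value is reported through the pr_err() rather than being silently ignored.
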
diff --git a/mm/rmap.c b/mm/rmap.c
index 396e273..a4cb6fb 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -505,8 +505,10 @@ static void anon_vma_ctor(void *data)
void __init anon_vma_init(void)
{
anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
+ anon_vma_ctor);
+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
+ SLAB_PANIC|SLAB_NO_SANITIZE);
}
/*
diff --git a/mm/slab.c b/mm/slab.c
index de7f15e..1bdb31b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -153,7 +153,7 @@
/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
-# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
+# define CREATE_MASK (SLAB_USERCOPY | SLAB_NO_SANITIZE | SLAB_RED_ZONE | \
SLAB_POISON | SLAB_HWCACHE_ALIGN | \
SLAB_CACHE_DMA | \
SLAB_STORE_USER | \
@@ -161,8 +161,8 @@
SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
#else
-# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
- SLAB_CACHE_DMA | \
+# define CREATE_MASK (SLAB_USERCOPY | SLAB_NO_SANITIZE | \
+ SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
@@ -395,6 +395,8 @@ static void kmem_list3_init(struct kmem_list3 *parent)
#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
#else
#define STATS_INC_ACTIVE(x) do { } while (0)
#define STATS_DEC_ACTIVE(x) do { } while (0)
@@ -411,6 +413,8 @@ static void kmem_list3_init(struct kmem_list3 *parent)
#define STATS_INC_ALLOCMISS(x) do { } while (0)
#define STATS_INC_FREEHIT(x) do { } while (0)
#define STATS_INC_FREEMISS(x) do { } while (0)
+#define STATS_INC_SANITIZED(x) do { } while (0)
+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
#endif
#if DEBUG
@@ -1392,7 +1396,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
return notifier_from_errno(err);
}
-static struct notifier_block __cpuinitdata cpucache_notifier = {
+static struct notifier_block cpucache_notifier = {
&cpuup_callback, NULL, 0
};
@@ -2366,6 +2370,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
*/
BUG_ON(flags & ~CREATE_MASK);
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
+ flags |= SLAB_NO_SANITIZE;
+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
+ flags &= ~SLAB_NO_SANITIZE;
+#endif
+
/*
* Check that size is in terms of words. This is needed to avoid
* unaligned accesses for some archs when redzoning is used, and makes
@@ -3745,6 +3756,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
+ STATS_INC_NOT_SANITIZED(cachep);
+ else {
+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, obj_size(cachep));
+
+ if (cachep->ctor)
+ cachep->ctor(objp);
+
+ STATS_INC_SANITIZED(cachep);
+ }
+#endif
+
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
@@ -3961,6 +3986,7 @@ void kfree(const void *objp)
if (unlikely(ZERO_OR_NULL_PTR(objp)))
return;
+ VM_BUG_ON(!virt_addr_valid(objp));
local_irq_save(flags);
kfree_debugcheck(objp);
c = virt_to_cache(objp);
@@ -4298,6 +4324,9 @@ static void print_slabinfo_header(struct seq_file *m)
seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
"<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
+#endif
#endif
seq_putc(m, '\n');
}
@@ -4415,6 +4444,14 @@ static int s_show(struct seq_file *m, void *p)
seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
allochit, allocmiss, freehit, freemiss);
}
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+ {
+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
+
+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
+ }
+#endif
#endif
seq_putc(m, '\n');
return 0;
@@ -4684,6 +4721,9 @@ bool is_usercopy_object(const void *ptr)
if (ZERO_OR_NULL_PTR(ptr))
return false;
+ if (!slab_is_available())
+ return false;
+
if (!virt_addr_valid(ptr))
return false;
@@ -4697,7 +4737,7 @@ bool is_usercopy_object(const void *ptr)
}
#ifdef CONFIG_PAX_USERCOPY
-const char *check_heap_object(const void *ptr, unsigned long n, bool to)
+const char *check_heap_object(const void *ptr, unsigned long n)
{
struct page *page;
struct kmem_cache *cachep;
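
A subtlety in the __cache_free() hunk above: SLAB guarantees that objects in a ctor-backed cache stay in their constructed state while on the freelist, so after the wipe the constructor must run again to restore that state (SLAB_POISON caches are skipped because debug poisoning already overwrites freed objects). A sketch of the invariant being preserved, with a hypothetical type and constructor:

    #include <linux/spinlock.h>

    struct foo {
            spinlock_t lock;        /* must stay initialized across free/alloc */
    };

    static void foo_ctor(void *obj)
    {
            struct foo *f = obj;

            spin_lock_init(&f->lock);
    }

    /* After memset(objp, PAX_MEMORY_SANITIZE_VALUE, ...), the ctor runs so
     * the next kmem_cache_alloc() still finds a valid lock. */
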
diff --git a/mm/slob.c b/mm/slob.c
index 3c15e57..f1ff1e1 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -92,6 +92,13 @@ struct slob_block {
};
typedef struct slob_block slob_t;
+struct kmem_cache {
+ unsigned int size, align;
+ unsigned long flags;
+ const char *name;
+ void (*ctor)(void *);
+};
+
/*
* We use struct page fields to manage some slob allocation aspects,
* however to avoid the horrible mess in include/linux/mm_types.h, we'll
@@ -101,9 +108,7 @@ struct slob_page {
union {
struct {
unsigned long flags; /* mandatory */
- atomic_t _count; /* mandatory */
slobidx_t units; /* free units left in page */
- unsigned long pad[1];
unsigned long size; /* size when >=PAGE_SIZE */
slob_t *free; /* first free slob_t in page */
struct list_head list; /* linked list of free pages */
@@ -393,7 +398,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
/*
* slob_free: entry point into the slob allocator.
*/
-static void slob_free(void *block, int size)
+static void slob_free(struct kmem_cache *c, void *block, int size)
{
struct slob_page *sp;
slob_t *prev, *next, *b = (slob_t *)block;
@@ -421,6 +426,11 @@ static void slob_free(void *block, int size)
return;
}
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
+#endif
+
if (!slob_page_free(sp)) {
/* This slob page is about to become partially free. Easy! */
sp->units = units;
@@ -544,11 +554,12 @@ void kfree(const void *block)
return;
kmemleak_free(block);
+ VM_BUG_ON(!virt_addr_valid(block));
sp = slob_page(block);
if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
slob_t *m = (slob_t *)(block - align);
- slob_free(m, m[0].units + align);
+ slob_free(NULL, m, m[0].units + align);
} else {
clear_slob_page(sp);
free_slob_page(sp);
@@ -560,11 +571,16 @@ EXPORT_SYMBOL(kfree);
bool is_usercopy_object(const void *ptr)
{
+ if (!slab_is_available())
+ return false;
+
+ // PAX: TODO
+
return false;
}
#ifdef CONFIG_PAX_USERCOPY
-const char *check_heap_object(const void *ptr, unsigned long n, bool to)
+const char *check_heap_object(const void *ptr, unsigned long n)
{
struct slob_page *sp;
const slob_t *free;
@@ -643,13 +659,6 @@ size_t ksize(const void *block)
}
EXPORT_SYMBOL(ksize);
-struct kmem_cache {
- unsigned int size, align;
- unsigned long flags;
- const char *name;
- void (*ctor)(void *);
-};
-
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *))
{
@@ -663,6 +672,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
#endif
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
+ flags |= SLAB_NO_SANITIZE;
+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
+ flags &= ~SLAB_NO_SANITIZE;
+#endif
+
if (c) {
c->name = name;
c->size = size;
@@ -691,7 +707,7 @@ void kmem_cache_destroy(struct kmem_cache *c)
kmemleak_free(c);
if (c->flags & SLAB_DESTROY_BY_RCU)
rcu_barrier();
- slob_free(c, sizeof(struct kmem_cache));
+ slob_free(NULL, c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);
@@ -731,12 +747,12 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
-static void __kmem_cache_free(void *b, int size)
+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
{
struct slob_page *sp = slob_page(b);
if (is_slob_page(sp))
- slob_free(b, size);
+ slob_free(c, b, size);
else {
clear_slob_page(sp);
free_slob_page(sp);
@@ -750,7 +766,7 @@ static void kmem_rcu_free(struct rcu_head *head)
struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
- __kmem_cache_free(b, slob_rcu->size);
+ __kmem_cache_free(NULL, b, slob_rcu->size);
}
void kmem_cache_free(struct kmem_cache *c, void *b)
@@ -771,7 +787,7 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
slob_rcu->size = size;
call_rcu(&slob_rcu->head, kmem_rcu_free);
} else {
- __kmem_cache_free(b, size);
+ __kmem_cache_free(c, b, size);
}
#ifdef CONFIG_PAX_USERCOPY_SLABS
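
Note that the SLOB paths above pass c == NULL wherever no cache is at hand (kfree() of a raw block, freeing the kmem_cache structure itself, and the RCU-deferred free); with no flags to consult, the object is wiped whenever sanitization is enabled at all. The decision in slob_free() restated:

    /* PAX_SANITIZE_SLAB_OFF is 0, so the enum doubles as a boolean here: */
    bool sanitize = pax_sanitize_slab != PAX_SANITIZE_SLAB_OFF &&
                    !(c && (c->flags & SLAB_NO_SANITIZE));
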
diff --git a/mm/slub.c b/mm/slub.c
index 8bb7580..5657a81 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -187,7 +187,7 @@ static enum {
PARTIAL, /* Kmem_cache_node works */
UP, /* Everything works but does not show up in sysfs */
SYSFS /* Sysfs up */
-} slab_state = DOWN;
+} slab_state __read_only = DOWN;
/* A list of all slab caches on the system */
static DECLARE_RWSEM(slub_lock);
@@ -2553,6 +2553,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
slab_free_hook(s, x);
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+ if (!(s->flags & SLAB_NO_SANITIZE)) {
+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->objsize);
+ if (s->ctor)
+ s->ctor(x);
+ }
+#endif
+
redo:
/*
* Determine the currently cpus per cpu slab.
@@ -2928,6 +2936,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
s->inuse = size;
if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+ (!(flags & SLAB_NO_SANITIZE)) ||
+#endif
s->ctor)) {
/*
* Relocate free pointer after the object if it is not
@@ -3411,6 +3422,9 @@ bool is_usercopy_object(const void *ptr)
if (ZERO_OR_NULL_PTR(ptr))
return false;
+ if (!slab_is_available())
+ return false;
+
if (!virt_addr_valid(ptr))
return false;
@@ -3424,7 +3438,7 @@ bool is_usercopy_object(const void *ptr)
}
#ifdef CONFIG_PAX_USERCOPY
-const char *check_heap_object(const void *ptr, unsigned long n, bool to)
+const char *check_heap_object(const void *ptr, unsigned long n)
{
struct page *page;
struct kmem_cache *s;
@@ -3517,6 +3531,7 @@ void kfree(const void *x)
if (unlikely(ZERO_OR_NULL_PTR(x)))
return;
+ VM_BUG_ON(!virt_addr_valid(x));
page = virt_to_head_page(x);
if (unlikely(!PageSlab(page))) {
BUG_ON(!PageCompound(page));
@@ -3998,6 +4013,14 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
return NULL;
down_write(&slub_lock);
+
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
+ flags |= SLAB_NO_SANITIZE;
+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
+ flags &= ~SLAB_NO_SANITIZE;
+#endif
+
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
atomic_inc(&s->refcount);
@@ -4080,7 +4103,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata slab_notifier = {
+static struct notifier_block slab_notifier = {
.notifier_call = slab_cpuup_callback
};
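
The calculate_sizes() change above mirrors the existing RCU/poison/ctor cases: once freed objects are wiped, SLUB can no longer keep the freelist pointer inside the object itself, so it is relocated past the payload. Roughly, for a sanitized cache the per-object layout becomes (a sketch, not actual SLUB debug output):

    | payload (filled with PAX_MEMORY_SANITIZE_VALUE on free) | free pointer | alignment padding |
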
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 771e325..5688637 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2956,13 +2956,15 @@ void __init skb_init(void)
skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
sizeof(struct sk_buff),
0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
+ SLAB_NO_SANITIZE,
NULL);
skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
(2*sizeof(struct sk_buff)) +
sizeof(atomic_t),
0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
+ SLAB_NO_SANITIZE,
NULL);
}
diff --git a/security/Kconfig b/security/Kconfig
index c140672..cd275f8 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -364,6 +364,10 @@ choice
Select the method used to instrument function pointer dereferences.
Note that binary modules cannot be instrumented by this approach.
+ Note that the implementation requires a gcc with plugin support,
+ i.e., gcc 4.5 or newer. You may need to install the supporting
+ headers explicitly in addition to the normal gcc package.
+
config PAX_KERNEXEC_PLUGIN_METHOD_BTS
bool "bts"
help
@@ -494,23 +498,35 @@ config PAX_MEMORY_SANITIZE
bool "Sanitize all freed memory"
depends on !HIBERNATION
help
- By saying Y here the kernel will erase memory pages as soon as they
- are freed. This in turn reduces the lifetime of data stored in the
- pages, making it less likely that sensitive information such as
- passwords, cryptographic secrets, etc stay in memory for too long.
+ By saying Y here the kernel will erase memory pages and slab objects
+ as soon as they are freed. This in turn reduces the lifetime of data
+ stored in them, making it less likely that sensitive information such
+ as passwords, cryptographic secrets, etc stay in memory for too long.
This is especially useful for programs whose runtime is short, long
lived processes and the kernel itself benefit from this as long as
- they operate on whole memory pages and ensure timely freeing of pages
- that may hold sensitive information.
+ they ensure timely freeing of memory that may hold sensitive
+ information.
+
+ A nice side effect of the sanitization of slab objects is the
+ reduction of possible info leaks caused by padding bytes within the
+ leaky structures. Use-after-free bugs for structures containing
+ pointers can also be detected as dereferencing the sanitized pointer
+ will generate an access violation.
The tradeoff is performance impact, on a single CPU system kernel
compilation sees a 3% slowdown, other systems and workloads may vary
and you are advised to test this feature on your expected workload
before deploying it.
- Note that this feature does not protect data stored in live pages,
- e.g., process memory swapped to disk may stay there for a long time.
+ The slab sanitization feature excludes a few slab caches per default
+ for performance reasons. To extend the feature to cover those as
+ well, pass "pax_sanitize_slab=full" as kernel command line parameter.
+
+ To reduce the performance penalty by sanitizing pages only, albeit
+ limiting the effectiveness of this feature at the same time, slab
+ sanitization can be disabled with the kernel command line parameter
+ "pax_sanitize_slab=off".
config PAX_MEMORY_STACKLEAK
bool "Sanitize kernel stack"