mirror of
https://github.com/Divested-Mobile/DivestOS-Build.git
synced 2024-12-24 15:09:34 -05:00
66918 lines
2.2 MiB
66918 lines
2.2 MiB
From 3fdbfd418fb113e6f54835dda212d6e4b8621ea7 Mon Sep 17 00:00:00 2001
|
|
From: Daniel Micay <danielmicay@gmail.com>
|
|
Date: Wed, 22 Jul 2015 23:36:13 -0400
|
|
Subject: [PATCH] apply old 3.4 PaX patch
|
|
|
|
---
|
|
Documentation/dontdiff | 39 +-
|
|
Documentation/kernel-parameters.txt | 7 +
|
|
Makefile | 93 +-
|
|
arch/alpha/include/asm/atomic.h | 10 +
|
|
arch/alpha/include/asm/elf.h | 7 +
|
|
arch/alpha/include/asm/pgalloc.h | 6 +
|
|
arch/alpha/include/asm/pgtable.h | 11 +
|
|
arch/alpha/kernel/module.c | 2 +-
|
|
arch/alpha/kernel/osf_sys.c | 10 +-
|
|
arch/alpha/mm/fault.c | 141 +-
|
|
arch/arm/include/asm/atomic.h | 211 +-
|
|
arch/arm/include/asm/atomic.h.rej | 321 +++
|
|
arch/arm/include/asm/cache.h | 2 +-
|
|
arch/arm/include/asm/cacheflush.h | 2 +-
|
|
arch/arm/include/asm/cmpxchg.h | 2 +
|
|
arch/arm/include/asm/elf.h.rej | 11 +
|
|
arch/arm/include/asm/kmap_types.h | 1 +
|
|
arch/arm/include/asm/outercache.h | 2 +-
|
|
arch/arm/include/asm/page.h | 2 +-
|
|
arch/arm/include/asm/pgalloc.h | 6 +
|
|
arch/arm/include/asm/uaccess.h | 27 +-
|
|
arch/arm/kernel/armksyms.c | 4 +-
|
|
arch/arm/kernel/process.c | 7 -
|
|
arch/arm/kernel/process.c.rej | 13 +
|
|
arch/arm/kernel/setup.c | 6 +-
|
|
arch/arm/lib/copy_from_user.S | 6 +-
|
|
arch/arm/lib/copy_page.S | 1 +
|
|
arch/arm/lib/copy_to_user.S | 6 +-
|
|
arch/arm/lib/uaccess.S | 12 +-
|
|
arch/arm/lib/uaccess_with_memcpy.c | 2 +-
|
|
arch/arm/mach-omap2/board-n8x0.c | 2 +-
|
|
arch/arm/mm/fault.c | 48 +
|
|
arch/arm/mm/mmap.c | 31 +-
|
|
arch/arm/plat-orion/include/plat/addr-map.h | 2 +-
|
|
arch/arm/plat-samsung/include/plat/dma-ops.h | 2 +-
|
|
arch/arm/plat-samsung/include/plat/ehci.h | 2 +-
|
|
arch/avr32/include/asm/elf.h | 8 +-
|
|
arch/avr32/include/asm/kmap_types.h | 3 +-
|
|
arch/avr32/mm/fault.c | 27 +
|
|
arch/frv/include/asm/atomic.h | 10 +
|
|
arch/frv/include/asm/kmap_types.h | 1 +
|
|
arch/frv/mm/elf-fdpic.c | 7 +-
|
|
arch/ia64/include/asm/atomic.h | 10 +
|
|
arch/ia64/include/asm/elf.h | 7 +
|
|
arch/ia64/include/asm/pgalloc.h | 12 +
|
|
arch/ia64/include/asm/pgtable.h | 13 +-
|
|
arch/ia64/include/asm/spinlock.h | 2 +-
|
|
arch/ia64/include/asm/uaccess.h | 4 +-
|
|
arch/ia64/kernel/module.c | 48 +-
|
|
arch/ia64/kernel/sys_ia64.c | 13 +-
|
|
arch/ia64/kernel/vmlinux.lds.S | 2 +-
|
|
arch/ia64/mm/fault.c | 33 +-
|
|
arch/ia64/mm/hugetlbpage.c | 2 +-
|
|
arch/ia64/mm/init.c | 13 +
|
|
arch/m32r/lib/usercopy.c | 6 +
|
|
arch/mips/include/asm/atomic.h | 14 +
|
|
arch/mips/include/asm/elf.h | 11 +-
|
|
arch/mips/include/asm/exec.h | 2 +-
|
|
arch/mips/include/asm/page.h | 2 +-
|
|
arch/mips/include/asm/pgalloc.h | 5 +
|
|
arch/mips/kernel/binfmt_elfn32.c | 7 +
|
|
arch/mips/kernel/binfmt_elfo32.c | 7 +
|
|
arch/mips/kernel/process.c | 12 -
|
|
arch/mips/mm/fault.c | 17 +
|
|
arch/mips/mm/mmap.c | 41 +-
|
|
arch/parisc/include/asm/atomic.h | 10 +
|
|
arch/parisc/include/asm/elf.h | 7 +
|
|
arch/parisc/include/asm/pgalloc.h | 6 +
|
|
arch/parisc/include/asm/pgtable.h | 11 +
|
|
arch/parisc/include/asm/uaccess.h | 4 +-
|
|
arch/parisc/kernel/module.c | 50 +-
|
|
arch/parisc/kernel/sys_parisc.c | 6 +-
|
|
arch/parisc/kernel/traps.c | 4 +-
|
|
arch/parisc/mm/fault.c | 140 +-
|
|
arch/powerpc/include/asm/atomic.h | 10 +
|
|
arch/powerpc/include/asm/elf.h | 18 +-
|
|
arch/powerpc/include/asm/exec.h | 2 +-
|
|
arch/powerpc/include/asm/kmap_types.h | 1 +
|
|
arch/powerpc/include/asm/mman.h | 2 +-
|
|
arch/powerpc/include/asm/page.h | 8 +-
|
|
arch/powerpc/include/asm/page_64.h | 7 +-
|
|
arch/powerpc/include/asm/pgalloc-64.h | 7 +
|
|
arch/powerpc/include/asm/pgtable.h | 1 +
|
|
arch/powerpc/include/asm/pte-hash32.h | 1 +
|
|
arch/powerpc/include/asm/reg.h | 1 +
|
|
arch/powerpc/include/asm/uaccess.h | 142 +-
|
|
arch/powerpc/kernel/exceptions-64e.S | 4 +-
|
|
arch/powerpc/kernel/exceptions-64s.S | 2 +-
|
|
arch/powerpc/kernel/module_32.c | 13 +-
|
|
arch/powerpc/kernel/process.c | 55 -
|
|
arch/powerpc/kernel/signal_32.c | 2 +-
|
|
arch/powerpc/kernel/signal_64.c | 2 +-
|
|
arch/powerpc/kernel/vdso.c | 5 +-
|
|
arch/powerpc/lib/usercopy_64.c | 18 -
|
|
arch/powerpc/mm/fault.c | 54 +-
|
|
arch/powerpc/mm/mmap_64.c | 12 +
|
|
arch/powerpc/mm/slice.c | 23 +-
|
|
arch/s390/include/asm/atomic.h | 10 +
|
|
arch/s390/include/asm/elf.h | 13 +-
|
|
arch/s390/include/asm/exec.h | 2 +-
|
|
arch/s390/include/asm/uaccess.h | 15 +-
|
|
arch/s390/kernel/module.c | 22 +-
|
|
arch/s390/kernel/process.c | 36 -
|
|
arch/s390/mm/mmap.c | 24 +
|
|
arch/score/include/asm/exec.h | 2 +-
|
|
arch/score/kernel/process.c | 5 -
|
|
arch/sh/mm/mmap.c | 24 +-
|
|
arch/sparc/include/asm/atomic_64.h | 106 +-
|
|
arch/sparc/include/asm/cache.h | 2 +-
|
|
arch/sparc/include/asm/elf_32.h | 7 +
|
|
arch/sparc/include/asm/elf_64.h | 7 +
|
|
arch/sparc/include/asm/pgalloc_32.h | 1 +
|
|
arch/sparc/include/asm/pgalloc_64.h | 1 +
|
|
arch/sparc/include/asm/pgtable_32.h | 17 +
|
|
arch/sparc/include/asm/pgtsrmmu.h | 7 +
|
|
arch/sparc/include/asm/spinlock_64.h | 35 +-
|
|
arch/sparc/include/asm/thread_info_32.h | 2 +
|
|
arch/sparc/include/asm/thread_info_64.h | 2 +
|
|
arch/sparc/include/asm/uaccess.h | 8 +
|
|
arch/sparc/include/asm/uaccess_32.h | 27 +-
|
|
arch/sparc/include/asm/uaccess_64.h | 19 +-
|
|
arch/sparc/kernel/Makefile | 2 +-
|
|
arch/sparc/kernel/sys_sparc_32.c | 4 +-
|
|
arch/sparc/kernel/sys_sparc_64.c | 52 +-
|
|
arch/sparc/kernel/traps_64.c | 13 +-
|
|
arch/sparc/lib/Makefile | 2 +-
|
|
arch/sparc/lib/atomic_64.S | 148 +-
|
|
arch/sparc/lib/ksyms.c | 6 +
|
|
arch/sparc/mm/Makefile | 2 +-
|
|
arch/sparc/mm/fault_32.c | 292 ++
|
|
arch/sparc/mm/fault_64.c | 486 ++++
|
|
arch/sparc/mm/hugetlbpage.c | 16 +-
|
|
arch/sparc/mm/init_32.c | 15 +-
|
|
arch/sparc/mm/srmmu.c | 7 +
|
|
arch/tile/include/asm/atomic_64.h | 10 +
|
|
arch/tile/include/asm/uaccess.h | 4 +-
|
|
arch/um/Makefile | 4 +
|
|
arch/um/include/asm/kmap_types.h | 1 +
|
|
arch/um/include/asm/page.h | 3 +
|
|
arch/um/include/asm/pgtable-3level.h | 1 +
|
|
arch/um/kernel/process.c | 16 -
|
|
arch/x86/Kconfig | 9 +-
|
|
arch/x86/Kconfig.cpu | 6 +-
|
|
arch/x86/Kconfig.debug | 6 +-
|
|
arch/x86/Makefile | 10 +
|
|
arch/x86/boot/Makefile.rej | 11 +
|
|
arch/x86/boot/bitops.h | 4 +-
|
|
arch/x86/boot/boot.h | 4 +-
|
|
arch/x86/boot/compressed/Makefile | 3 +
|
|
arch/x86/boot/compressed/eboot.c | 2 -
|
|
arch/x86/boot/compressed/head_32.S | 7 +-
|
|
arch/x86/boot/compressed/head_64.S | 4 +-
|
|
arch/x86/boot/compressed/misc.c | 4 +-
|
|
arch/x86/boot/cpucheck.c | 28 +-
|
|
arch/x86/boot/header.S | 6 +-
|
|
arch/x86/boot/memory.c | 2 +-
|
|
arch/x86/boot/video-vesa.c | 1 +
|
|
arch/x86/boot/video.c | 2 +-
|
|
arch/x86/crypto/aes-x86_64-asm_64.S | 4 +
|
|
arch/x86/crypto/aesni-intel_asm.S | 31 +
|
|
arch/x86/crypto/blowfish-x86_64-asm_64.S | 8 +
|
|
arch/x86/crypto/camellia-x86_64-asm_64.S | 8 +
|
|
arch/x86/crypto/salsa20-x86_64-asm_64.S | 5 +
|
|
arch/x86/crypto/serpent-sse2-x86_64-asm_64.S | 5 +
|
|
arch/x86/crypto/sha1_ssse3_asm.S | 3 +
|
|
arch/x86/crypto/twofish-x86_64-asm_64-3way.S | 5 +
|
|
arch/x86/crypto/twofish-x86_64-asm_64.S | 3 +
|
|
arch/x86/ia32/ia32_signal.c | 20 +-
|
|
arch/x86/ia32/ia32entry.S | 123 +-
|
|
arch/x86/ia32/ia32entry.S.rej | 16 +
|
|
arch/x86/ia32/sys_ia32.c | 14 +-
|
|
arch/x86/include/asm/alternative-asm.h | 39 +
|
|
arch/x86/include/asm/alternative.h | 2 +-
|
|
arch/x86/include/asm/apic.h | 2 +-
|
|
arch/x86/include/asm/apm.h | 4 +-
|
|
arch/x86/include/asm/atomic.h | 324 ++-
|
|
arch/x86/include/asm/atomic64_32.h | 100 +
|
|
arch/x86/include/asm/atomic64_64.h | 202 +-
|
|
arch/x86/include/asm/bitops.h | 2 +-
|
|
arch/x86/include/asm/boot.h | 7 +-
|
|
arch/x86/include/asm/cache.h | 5 +-
|
|
arch/x86/include/asm/cacheflush.h | 2 +-
|
|
arch/x86/include/asm/checksum_32.h | 12 +-
|
|
arch/x86/include/asm/cmpxchg.h | 35 +
|
|
arch/x86/include/asm/cpufeature.h | 2 +-
|
|
arch/x86/include/asm/desc.h | 65 +-
|
|
arch/x86/include/asm/desc_defs.h | 6 +
|
|
arch/x86/include/asm/e820.h | 2 +-
|
|
arch/x86/include/asm/elf.h | 31 +-
|
|
arch/x86/include/asm/emergency-restart.h | 2 +-
|
|
arch/x86/include/asm/fpu-internal.h | 10 +
|
|
arch/x86/include/asm/fpu-internal.h.rej | 11 +
|
|
arch/x86/include/asm/futex.h | 14 +-
|
|
arch/x86/include/asm/hw_irq.h | 4 +-
|
|
arch/x86/include/asm/io.h | 11 +
|
|
arch/x86/include/asm/irqflags.h | 5 +
|
|
arch/x86/include/asm/kprobes.h | 9 +-
|
|
arch/x86/include/asm/kvm_host.h | 2 +-
|
|
arch/x86/include/asm/local.h | 94 +-
|
|
arch/x86/include/asm/mman.h | 10 +
|
|
arch/x86/include/asm/mmu.h | 16 +-
|
|
arch/x86/include/asm/mmu_context.h | 76 +-
|
|
arch/x86/include/asm/module.h | 17 +-
|
|
arch/x86/include/asm/page_64_types.h | 2 +-
|
|
arch/x86/include/asm/paravirt.h | 44 +-
|
|
arch/x86/include/asm/paravirt_types.h | 13 +-
|
|
arch/x86/include/asm/paravirt_types.h.rej | 26 +
|
|
arch/x86/include/asm/pgalloc.h | 23 +
|
|
arch/x86/include/asm/pgtable-2level.h | 2 +
|
|
arch/x86/include/asm/pgtable-3level.h | 4 +
|
|
arch/x86/include/asm/pgtable.h | 110 +-
|
|
arch/x86/include/asm/pgtable_32.h | 14 +-
|
|
arch/x86/include/asm/pgtable_32_types.h | 15 +-
|
|
arch/x86/include/asm/pgtable_64.h | 19 +-
|
|
arch/x86/include/asm/pgtable_64_types.h.rej | 13 +
|
|
arch/x86/include/asm/pgtable_types.h | 36 +-
|
|
arch/x86/include/asm/processor.h | 39 +-
|
|
arch/x86/include/asm/ptrace.h | 18 +-
|
|
arch/x86/include/asm/reboot.h | 12 +-
|
|
arch/x86/include/asm/rwsem.h | 60 +-
|
|
arch/x86/include/asm/segment.h | 24 +-
|
|
arch/x86/include/asm/smp.h | 14 +-
|
|
arch/x86/include/asm/spinlock.h | 36 +-
|
|
arch/x86/include/asm/stackprotector.h | 4 +-
|
|
arch/x86/include/asm/stacktrace.h | 34 +-
|
|
arch/x86/include/asm/switch_to.h | 4 +-
|
|
arch/x86/include/asm/sys_ia32.h | 2 +-
|
|
arch/x86/include/asm/thread_info.h | 87 +-
|
|
arch/x86/include/asm/uaccess.h | 93 +-
|
|
arch/x86/include/asm/uaccess_32.h | 111 +-
|
|
arch/x86/include/asm/uaccess_64.h | 290 +-
|
|
arch/x86/include/asm/vdso.h | 2 +-
|
|
arch/x86/include/asm/x86_init.h | 26 +-
|
|
arch/x86/include/asm/xsave.h | 12 +-
|
|
arch/x86/kernel/acpi/realmode/Makefile | 3 +
|
|
arch/x86/kernel/acpi/sleep.c | 4 +
|
|
arch/x86/kernel/acpi/wakeup_32.S | 6 +-
|
|
arch/x86/kernel/alternative.c | 65 +-
|
|
arch/x86/kernel/apic/apic.c | 4 +-
|
|
arch/x86/kernel/apic/io_apic.c | 12 +-
|
|
arch/x86/kernel/apm_32.c | 19 +-
|
|
arch/x86/kernel/asm-offsets.c | 20 +
|
|
arch/x86/kernel/asm-offsets_64.c | 1 +
|
|
arch/x86/kernel/cpu/Makefile | 4 -
|
|
arch/x86/kernel/cpu/amd.c | 2 +-
|
|
arch/x86/kernel/cpu/common.c | 77 +-
|
|
arch/x86/kernel/cpu/intel.c | 2 +-
|
|
arch/x86/kernel/cpu/mcheck/mce.c | 27 +-
|
|
arch/x86/kernel/cpu/mcheck/p5.c | 3 +
|
|
arch/x86/kernel/cpu/mcheck/winchip.c | 3 +
|
|
arch/x86/kernel/cpu/mtrr/main.c | 2 +-
|
|
arch/x86/kernel/cpu/mtrr/mtrr.h | 2 +-
|
|
arch/x86/kernel/cpu/perf_event.c | 2 +-
|
|
arch/x86/kernel/crash.c | 4 +-
|
|
arch/x86/kernel/doublefault_32.c | 8 +-
|
|
arch/x86/kernel/dumpstack.c | 30 +-
|
|
arch/x86/kernel/dumpstack_32.c | 34 +-
|
|
arch/x86/kernel/dumpstack_64.c | 61 +-
|
|
arch/x86/kernel/early_printk.c | 1 +
|
|
arch/x86/kernel/entry_32.S | 363 ++-
|
|
arch/x86/kernel/entry_32.S.rej | 47 +
|
|
arch/x86/kernel/entry_64.S | 506 +++-
|
|
arch/x86/kernel/entry_64.S.rej | 11 +
|
|
arch/x86/kernel/ftrace.c | 14 +-
|
|
arch/x86/kernel/head32.c | 4 +-
|
|
arch/x86/kernel/head_32.S | 244 +-
|
|
arch/x86/kernel/head_64.S | 158 +-
|
|
arch/x86/kernel/i386_ksyms_32.c | 8 +
|
|
arch/x86/kernel/i387.c | 2 +-
|
|
arch/x86/kernel/i8259.c | 2 +-
|
|
arch/x86/kernel/init_task.c | 7 +-
|
|
arch/x86/kernel/ioport.c | 2 +-
|
|
arch/x86/kernel/irq.c | 6 +-
|
|
arch/x86/kernel/irq.c.rej | 15 +
|
|
arch/x86/kernel/irq_32.c | 66 +-
|
|
arch/x86/kernel/irq_64.c | 2 +-
|
|
arch/x86/kernel/kdebugfs.c | 2 +-
|
|
arch/x86/kernel/kgdb.c | 10 +-
|
|
arch/x86/kernel/kprobes-opt.c | 8 +-
|
|
arch/x86/kernel/kprobes.c | 22 +-
|
|
arch/x86/kernel/ldt.c | 31 +-
|
|
arch/x86/kernel/machine_kexec_32.c | 6 +-
|
|
arch/x86/kernel/microcode_intel.c | 4 +-
|
|
arch/x86/kernel/module.c | 76 +-
|
|
arch/x86/kernel/nmi.c | 11 +
|
|
arch/x86/kernel/paravirt-spinlocks.c | 2 +-
|
|
arch/x86/kernel/paravirt.c | 43 +-
|
|
arch/x86/kernel/pci-iommu_table.c | 2 +-
|
|
arch/x86/kernel/process.c | 83 +-
|
|
arch/x86/kernel/process_32.c | 21 +-
|
|
arch/x86/kernel/process_64.c | 16 +-
|
|
arch/x86/kernel/ptrace.c | 8 +-
|
|
arch/x86/kernel/pvclock.c | 8 +-
|
|
arch/x86/kernel/reboot.c | 51 +-
|
|
arch/x86/kernel/relocate_kernel_64.S | 1 +
|
|
arch/x86/kernel/relocate_kernel_64.S.rej | 18 +
|
|
arch/x86/kernel/setup.c | 12 +-
|
|
arch/x86/kernel/setup.c.rej | 10 +
|
|
arch/x86/kernel/setup_percpu.c | 27 +-
|
|
arch/x86/kernel/signal.c | 21 +-
|
|
arch/x86/kernel/smpboot.c | 15 +-
|
|
arch/x86/kernel/step.c | 10 +-
|
|
arch/x86/kernel/sys_i386_32.c | 231 +-
|
|
arch/x86/kernel/sys_x86_64.c | 48 +-
|
|
arch/x86/kernel/sys_x86_64.c.rej | 11 +
|
|
arch/x86/kernel/tboot.c | 12 +-
|
|
arch/x86/kernel/time.c | 10 +-
|
|
arch/x86/kernel/tls.c | 5 +
|
|
arch/x86/kernel/trampoline_32.S | 8 +-
|
|
arch/x86/kernel/trampoline_64.S | 4 +-
|
|
arch/x86/kernel/traps.c | 59 +-
|
|
arch/x86/kernel/vm86_32.c | 6 +-
|
|
arch/x86/kernel/vmlinux.lds.S | 148 +-
|
|
arch/x86/kernel/vsyscall_64.c | 14 +-
|
|
arch/x86/kernel/x8664_ksyms_64.c | 2 -
|
|
arch/x86/kernel/xsave.c | 6 +-
|
|
arch/x86/kvm/cpuid.c | 21 +-
|
|
arch/x86/kvm/emulate.c | 4 +-
|
|
arch/x86/kvm/lapic.c | 2 +-
|
|
arch/x86/kvm/paging_tmpl.h | 2 +-
|
|
arch/x86/kvm/svm.c | 8 +
|
|
arch/x86/kvm/vmx.c | 35 +-
|
|
arch/x86/kvm/x86.c | 10 +-
|
|
arch/x86/lguest/boot.c | 3 +-
|
|
arch/x86/lib/atomic64_386_32.S | 164 ++
|
|
arch/x86/lib/atomic64_cx8_32.S | 103 +-
|
|
arch/x86/lib/checksum_32.S | 100 +-
|
|
arch/x86/lib/clear_page_64.S | 5 +-
|
|
arch/x86/lib/cmpxchg16b_emu.S | 2 +
|
|
arch/x86/lib/copy_page_64.S | 24 +-
|
|
arch/x86/lib/copy_user_64.S | 47 +-
|
|
arch/x86/lib/copy_user_nocache_64.S | 20 +-
|
|
arch/x86/lib/csum-copy_64.S | 2 +
|
|
arch/x86/lib/csum-wrappers_64.c | 16 +-
|
|
arch/x86/lib/getuser.S | 68 +-
|
|
arch/x86/lib/insn.c | 9 +-
|
|
arch/x86/lib/iomap_copy_64.S | 2 +
|
|
arch/x86/lib/memcpy_64.S | 18 +-
|
|
arch/x86/lib/memmove_64.S | 34 +-
|
|
arch/x86/lib/memset_64.S | 7 +-
|
|
arch/x86/lib/mmx_32.c | 243 +-
|
|
arch/x86/lib/msr-reg.S | 18 +-
|
|
arch/x86/lib/putuser.S | 87 +-
|
|
arch/x86/lib/rwlock.S | 42 +
|
|
arch/x86/lib/rwsem.S | 6 +-
|
|
arch/x86/lib/thunk_64.S | 2 +
|
|
arch/x86/lib/usercopy_32.c | 381 ++-
|
|
arch/x86/lib/usercopy_64.c | 38 +-
|
|
arch/x86/mm/extable.c | 2 +-
|
|
arch/x86/mm/fault.c | 537 +++-
|
|
arch/x86/mm/fault.c.rej | 23 +
|
|
arch/x86/mm/gup.c | 2 +-
|
|
arch/x86/mm/highmem_32.c | 4 +
|
|
arch/x86/mm/hugetlbpage.c | 90 +-
|
|
arch/x86/mm/init.c | 89 +-
|
|
arch/x86/mm/init.c.rej | 11 +
|
|
arch/x86/mm/init_32.c | 122 +-
|
|
arch/x86/mm/init_64.c | 48 +-
|
|
arch/x86/mm/iomap_32.c | 4 +
|
|
arch/x86/mm/ioremap.c | 8 +-
|
|
arch/x86/mm/ioremap.c.rej | 11 +
|
|
arch/x86/mm/kmemcheck/kmemcheck.c | 4 +-
|
|
arch/x86/mm/mmap.c | 25 +-
|
|
arch/x86/mm/mmap.c.rej | 28 +
|
|
arch/x86/mm/mmio-mod.c | 6 +-
|
|
arch/x86/mm/pageattr-test.c | 2 +-
|
|
arch/x86/mm/pageattr.c | 33 +-
|
|
arch/x86/mm/pat.c | 12 +-
|
|
arch/x86/mm/pf_in.c | 10 +-
|
|
arch/x86/mm/pgtable.c | 137 +-
|
|
arch/x86/mm/pgtable_32.c | 3 +
|
|
arch/x86/mm/setup_nx.c | 7 +
|
|
arch/x86/mm/tlb.c | 4 +
|
|
arch/x86/net/bpf_jit.S | 14 +
|
|
arch/x86/net/bpf_jit_comp.c | 37 +-
|
|
arch/x86/oprofile/backtrace.c | 8 +-
|
|
arch/x86/pci/i386.c | 2 +-
|
|
arch/x86/pci/mrst.c | 4 +-
|
|
arch/x86/pci/pcbios.c | 146 +-
|
|
arch/x86/platform/efi/efi_32.c | 19 +
|
|
arch/x86/platform/efi/efi_stub_32.S | 64 +-
|
|
arch/x86/platform/efi/efi_stub_64.S | 8 +
|
|
arch/x86/platform/mrst/mrst.c | 6 +-
|
|
arch/x86/power/cpu.c | 4 +-
|
|
arch/x86/tools/relocs.c | 89 +-
|
|
arch/x86/vdso/Makefile | 2 +-
|
|
arch/x86/vdso/vdso32-setup.c | 23 +-
|
|
arch/x86/vdso/vma.c | 29 +-
|
|
arch/x86/xen/enlighten.c | 35 +-
|
|
arch/x86/xen/mmu.c | 9 +
|
|
arch/x86/xen/smp.c | 16 +-
|
|
arch/x86/xen/xen-asm_32.S.rej | 23 +
|
|
arch/x86/xen/xen-head.S | 11 +
|
|
arch/x86/xen/xen-ops.h | 2 -
|
|
block/blk-iopoll.c | 2 +-
|
|
block/blk-map.c | 2 +-
|
|
block/blk-softirq.c | 2 +-
|
|
block/bsg.c | 12 +-
|
|
block/compat_ioctl.c | 2 +-
|
|
block/partitions/efi.c | 8 +-
|
|
block/scsi_ioctl.c | 27 +-
|
|
crypto/cryptd.c | 4 +-
|
|
drivers/acpi/apei/cper.c | 8 +-
|
|
drivers/acpi/ec_sys.c | 12 +-
|
|
drivers/acpi/proc.c | 18 +-
|
|
drivers/acpi/processor_driver.c | 2 +-
|
|
drivers/ata/libata-core.c | 8 +-
|
|
drivers/ata/pata_arasan_cf.c | 4 +-
|
|
drivers/atm/adummy.c | 2 +-
|
|
drivers/atm/ambassador.c | 8 +-
|
|
drivers/atm/atmtcp.c | 14 +-
|
|
drivers/atm/eni.c | 12 +-
|
|
drivers/atm/firestream.c | 8 +-
|
|
drivers/atm/fore200e.c | 14 +-
|
|
drivers/atm/he.c | 18 +-
|
|
drivers/atm/horizon.c | 4 +-
|
|
drivers/atm/idt77252.c | 36 +-
|
|
drivers/atm/iphase.c | 34 +-
|
|
drivers/atm/lanai.c | 12 +-
|
|
drivers/atm/nicstar.c | 46 +-
|
|
drivers/atm/solos-pci.c | 4 +-
|
|
drivers/atm/suni.c | 4 +-
|
|
drivers/atm/uPD98402.c | 16 +-
|
|
drivers/atm/zatm.c | 6 +-
|
|
drivers/base/devtmpfs.c | 2 +-
|
|
drivers/base/power/wakeup.c | 4 +-
|
|
drivers/base/power/wakeup.c.rej | 11 +
|
|
drivers/block/cciss.c | 28 +-
|
|
drivers/block/cciss.h | 2 +-
|
|
drivers/block/cpqarray.c | 28 +-
|
|
drivers/block/cpqarray.h | 2 +-
|
|
drivers/block/drbd/drbd_int.h | 20 +-
|
|
drivers/block/drbd/drbd_main.c | 10 +-
|
|
drivers/block/drbd/drbd_nl.c | 10 +-
|
|
drivers/block/drbd/drbd_receiver.c | 20 +-
|
|
drivers/block/loop.c | 2 +-
|
|
drivers/char/agp/frontend.c | 2 +-
|
|
drivers/char/hpet.c | 2 +-
|
|
drivers/char/ipmi/ipmi_msghandler.c | 8 +-
|
|
drivers/char/ipmi/ipmi_si_intf.c | 8 +-
|
|
drivers/char/mbcs.c | 2 +-
|
|
drivers/char/mem.c | 41 +-
|
|
drivers/char/nvram.c | 2 +-
|
|
drivers/char/random.c | 15 +-
|
|
drivers/char/sonypi.c | 9 +-
|
|
drivers/char/tpm/tpm.c | 2 +-
|
|
drivers/char/tpm/tpm_bios.c | 14 +-
|
|
drivers/char/virtio_console.c | 4 +-
|
|
drivers/edac/edac_pci_sysfs.c | 20 +-
|
|
drivers/edac/mce_amd.h | 2 +-
|
|
drivers/firewire/core-card.c | 2 +-
|
|
drivers/firewire/core-cdev.c | 3 +-
|
|
drivers/firewire/core-transaction.c | 1 +
|
|
drivers/firewire/core.h | 1 +
|
|
drivers/firmware/dmi_scan.c | 7 +-
|
|
drivers/gpio/gpio-vr41xx.c | 2 +-
|
|
drivers/gpu/drm/drm_crtc_helper.c | 2 +-
|
|
drivers/gpu/drm/drm_drv.c | 4 +-
|
|
drivers/gpu/drm/drm_fops.c | 12 +-
|
|
drivers/gpu/drm/drm_fops.c.rej | 13 +
|
|
drivers/gpu/drm/drm_global.c | 14 +-
|
|
drivers/gpu/drm/drm_info.c | 14 +-
|
|
drivers/gpu/drm/drm_ioc32.c | 4 +-
|
|
drivers/gpu/drm/drm_ioctl.c | 2 +-
|
|
drivers/gpu/drm/drm_lock.c | 4 +-
|
|
drivers/gpu/drm/drm_stub.c | 2 +-
|
|
drivers/gpu/drm/i810/i810_dma.c | 8 +-
|
|
drivers/gpu/drm/i810/i810_drv.h | 4 +-
|
|
drivers/gpu/drm/i915/i915_debugfs.c | 4 +-
|
|
drivers/gpu/drm/i915/i915_dma.c | 2 +-
|
|
drivers/gpu/drm/i915/i915_drv.h | 8 +-
|
|
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +-
|
|
drivers/gpu/drm/i915/i915_gem_execbuffer.c.rej | 14 +
|
|
drivers/gpu/drm/i915/i915_irq.c | 10 +-
|
|
drivers/gpu/drm/i915/intel_display.c | 4 +-
|
|
drivers/gpu/drm/i915/intel_display.c.rej | 32 +
|
|
drivers/gpu/drm/mga/mga_drv.h | 4 +-
|
|
drivers/gpu/drm/mga/mga_irq.c | 8 +-
|
|
drivers/gpu/drm/nouveau/nouveau_bios.c | 2 +-
|
|
drivers/gpu/drm/nouveau/nouveau_drv.h | 12 +-
|
|
drivers/gpu/drm/nouveau/nouveau_fence.c | 4 +-
|
|
drivers/gpu/drm/nouveau/nouveau_gem.c | 2 +-
|
|
drivers/gpu/drm/nouveau/nouveau_state.c | 2 +-
|
|
drivers/gpu/drm/nouveau/nv04_graph.c | 2 +-
|
|
drivers/gpu/drm/nouveau/nv50_sor.c | 2 +-
|
|
drivers/gpu/drm/nouveau/nvd0_display.c | 2 +-
|
|
drivers/gpu/drm/r128/r128_cce.c | 2 +-
|
|
drivers/gpu/drm/r128/r128_drv.h | 4 +-
|
|
drivers/gpu/drm/r128/r128_irq.c | 4 +-
|
|
drivers/gpu/drm/r128/r128_state.c | 4 +-
|
|
drivers/gpu/drm/radeon/mkregtable.c | 4 +-
|
|
drivers/gpu/drm/radeon/radeon.h | 6 +-
|
|
drivers/gpu/drm/radeon/radeon_device.c | 2 +-
|
|
drivers/gpu/drm/radeon/radeon_drv.h | 2 +-
|
|
drivers/gpu/drm/radeon/radeon_fence.c | 6 +-
|
|
drivers/gpu/drm/radeon/radeon_ioc32.c | 2 +-
|
|
drivers/gpu/drm/radeon/radeon_irq.c | 6 +-
|
|
drivers/gpu/drm/radeon/radeon_state.c | 4 +-
|
|
drivers/gpu/drm/radeon/radeon_ttm.c | 6 +-
|
|
drivers/gpu/drm/radeon/rs690.c | 4 +-
|
|
drivers/gpu/drm/ttm/ttm_page_alloc.c | 4 +-
|
|
drivers/gpu/drm/via/via_drv.h | 4 +-
|
|
drivers/gpu/drm/via/via_irq.c | 18 +-
|
|
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 2 +-
|
|
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 8 +-
|
|
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 4 +-
|
|
drivers/gpu/drm/vmwgfx/vmwgfx_marker.c | 2 +-
|
|
drivers/hid/hid-core.c | 4 +-
|
|
drivers/hid/usbhid/hiddev.c | 2 +-
|
|
drivers/hv/channel.c | 4 +-
|
|
drivers/hv/hv.c | 2 +-
|
|
drivers/hv/hyperv_vmbus.h | 2 +-
|
|
drivers/hv/vmbus_drv.c | 4 +-
|
|
drivers/hwmon/acpi_power_meter.c | 2 -
|
|
drivers/hwmon/sht15.c | 12 +-
|
|
drivers/i2c/busses/i2c-amd756-s4882.c | 2 +-
|
|
drivers/i2c/busses/i2c-nforce2-s4985.c | 2 +-
|
|
drivers/i2c/i2c-mux.c | 2 +-
|
|
drivers/ide/aec62xx.c | 2 +-
|
|
drivers/ide/alim15x3.c | 2 +-
|
|
drivers/ide/amd74xx.c | 2 +-
|
|
drivers/ide/atiixp.c | 2 +-
|
|
drivers/ide/cmd64x.c | 2 +-
|
|
drivers/ide/cs5520.c | 2 +-
|
|
drivers/ide/cs5530.c | 2 +-
|
|
drivers/ide/cs5535.c | 2 +-
|
|
drivers/ide/cy82c693.c | 2 +-
|
|
drivers/ide/hpt366.c | 24 +-
|
|
drivers/ide/ide-cd.c | 2 +-
|
|
drivers/ide/ide-pci-generic.c | 2 +-
|
|
drivers/ide/it8172.c | 2 +-
|
|
drivers/ide/it8213.c | 2 +-
|
|
drivers/ide/it821x.c | 2 +-
|
|
drivers/ide/jmicron.c | 2 +-
|
|
drivers/ide/ns87415.c | 2 +-
|
|
drivers/ide/opti621.c | 2 +-
|
|
drivers/ide/pdc202xx_new.c | 2 +-
|
|
drivers/ide/pdc202xx_old.c | 2 +-
|
|
drivers/ide/piix.c | 2 +-
|
|
drivers/ide/rz1000.c | 2 +-
|
|
drivers/ide/sc1200.c | 2 +-
|
|
drivers/ide/scc_pata.c | 2 +-
|
|
drivers/ide/serverworks.c | 2 +-
|
|
drivers/ide/siimage.c | 2 +-
|
|
drivers/ide/sis5513.c | 2 +-
|
|
drivers/ide/sl82c105.c | 2 +-
|
|
drivers/ide/slc90e66.c | 2 +-
|
|
drivers/ide/tc86c001.c | 2 +-
|
|
drivers/ide/triflex.c | 2 +-
|
|
drivers/ide/trm290.c | 2 +-
|
|
drivers/ide/via82cxxx.c | 2 +-
|
|
drivers/ieee802154/fakehard.c | 2 +-
|
|
drivers/infiniband/core/cm.c | 32 +-
|
|
drivers/infiniband/core/fmr_pool.c | 20 +-
|
|
drivers/infiniband/hw/cxgb4/mem.c | 4 +-
|
|
drivers/infiniband/hw/ipath/ipath_rc.c | 6 +-
|
|
drivers/infiniband/hw/ipath/ipath_ruc.c | 6 +-
|
|
drivers/infiniband/hw/nes/nes.c | 4 +-
|
|
drivers/infiniband/hw/nes/nes.h | 40 +-
|
|
drivers/infiniband/hw/nes/nes_cm.c | 62 +-
|
|
drivers/infiniband/hw/nes/nes_mgt.c | 8 +-
|
|
drivers/infiniband/hw/nes/nes_nic.c | 40 +-
|
|
drivers/infiniband/hw/nes/nes_verbs.c | 10 +-
|
|
drivers/infiniband/hw/qib/qib.h | 1 +
|
|
drivers/input/gameport/gameport.c | 4 +-
|
|
drivers/input/input.c | 4 +-
|
|
drivers/input/joystick/sidewinder.c | 1 +
|
|
drivers/input/joystick/xpad.c | 4 +-
|
|
drivers/input/mousedev.c | 2 +-
|
|
drivers/input/serio/serio.c | 4 +-
|
|
drivers/isdn/capi/capi.c | 10 +-
|
|
drivers/isdn/hardware/avm/b1.c | 4 +-
|
|
drivers/isdn/hardware/eicon/divasync.h | 2 +-
|
|
drivers/isdn/hardware/eicon/xdi_adapter.h | 2 +-
|
|
drivers/isdn/icn/icn.c | 2 +-
|
|
drivers/lguest/core.c | 10 +-
|
|
drivers/lguest/x86/core.c | 12 +-
|
|
drivers/lguest/x86/switcher_32.S | 27 +-
|
|
drivers/macintosh/macio_asic.c | 2 +-
|
|
drivers/md/bitmap.c | 2 +-
|
|
drivers/md/dm-ioctl.c | 2 +-
|
|
drivers/md/dm-raid1.c | 16 +-
|
|
drivers/md/dm-stripe.c | 10 +-
|
|
drivers/md/dm-table.c | 2 +-
|
|
drivers/md/dm-thin-metadata.c | 4 +-
|
|
drivers/md/dm.c | 16 +-
|
|
drivers/md/md.c | 26 +-
|
|
drivers/md/md.h | 6 +-
|
|
drivers/md/persistent-data/dm-space-map-checker.c | 2 +-
|
|
drivers/md/persistent-data/dm-space-map-disk.c | 2 +-
|
|
drivers/md/persistent-data/dm-space-map-metadata.c | 2 +-
|
|
drivers/md/persistent-data/dm-space-map.h | 1 +
|
|
drivers/md/raid1.c | 4 +-
|
|
drivers/md/raid10.c | 16 +-
|
|
drivers/md/raid5.c | 10 +-
|
|
drivers/media/dvb/ddbridge/ddbridge-core.c | 2 +-
|
|
drivers/media/dvb/dvb-core/dvb_demux.h | 2 +-
|
|
drivers/media/dvb/dvb-core/dvbdev.c | 2 +-
|
|
drivers/media/dvb/dvb-usb/cxusb.c | 2 +-
|
|
drivers/media/dvb/dvb-usb/dw2102.c | 2 +-
|
|
drivers/media/dvb/frontends/dib3000.h | 2 +-
|
|
drivers/media/dvb/ngene/ngene-cards.c | 2 +-
|
|
drivers/media/radio/radio-cadet.c | 2 +
|
|
drivers/media/video/au0828/au0828.h | 2 +-
|
|
drivers/media/video/cx88/cx88-alsa.c | 2 +-
|
|
drivers/media/video/omap/omap_vout.c | 11 +-
|
|
drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h | 2 +-
|
|
drivers/media/video/timblogiw.c | 4 +-
|
|
drivers/message/fusion/mptsas.c | 34 +-
|
|
drivers/message/fusion/mptscsih.c | 17 +-
|
|
drivers/message/i2o/i2o_proc.c | 44 +-
|
|
drivers/message/i2o/iop.c | 8 +-
|
|
drivers/mfd/abx500-core.c | 2 +-
|
|
drivers/mfd/janz-cmodio.c | 1 +
|
|
drivers/misc/lis3lv02d/lis3lv02d.c | 8 +-
|
|
drivers/misc/lis3lv02d/lis3lv02d.h | 2 +-
|
|
drivers/misc/sgi-gru/gruhandles.c | 4 +-
|
|
drivers/misc/sgi-gru/gruprocfs.c | 8 +-
|
|
drivers/misc/sgi-gru/grutables.h | 158 +-
|
|
drivers/misc/sgi-xp/xp.h | 2 +-
|
|
drivers/misc/sgi-xp/xpc.h | 3 +-
|
|
drivers/misc/sgi-xp/xpc_main.c | 2 +-
|
|
drivers/mmc/host/sdhci-pci.c | 2 +-
|
|
drivers/mtd/devices/doc2000.c | 2 +-
|
|
drivers/mtd/nand/denali.c | 1 +
|
|
drivers/mtd/nftlmount.c | 1 +
|
|
drivers/net/ethernet/atheros/atlx/atl2.c | 2 +-
|
|
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 2 +-
|
|
drivers/net/ethernet/broadcom/tg3.h | 1 +
|
|
drivers/net/ethernet/chelsio/cxgb3/l2t.h | 2 +-
|
|
drivers/net/ethernet/dec/tulip/de4x5.c | 4 +-
|
|
drivers/net/ethernet/dec/tulip/eeprom.c | 2 +-
|
|
drivers/net/ethernet/dec/tulip/winbond-840.c | 2 +-
|
|
drivers/net/ethernet/dlink/sundance.c | 2 +-
|
|
drivers/net/ethernet/emulex/benet/be_main.c | 2 +-
|
|
drivers/net/ethernet/faraday/ftgmac100.c | 2 +
|
|
drivers/net/ethernet/faraday/ftmac100.c | 2 +
|
|
drivers/net/ethernet/fealnx.c | 2 +-
|
|
drivers/net/ethernet/intel/e1000e/hw.h | 9 +-
|
|
drivers/net/ethernet/intel/igb/e1000_hw.h | 12 +-
|
|
drivers/net/ethernet/intel/igbvf/vf.h | 6 +-
|
|
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 12 +-
|
|
drivers/net/ethernet/intel/ixgbevf/vf.h | 6 +-
|
|
drivers/net/ethernet/mellanox/mlx4/main.c | 1 +
|
|
drivers/net/ethernet/neterion/vxge/vxge-config.h | 2 +-
|
|
drivers/net/ethernet/neterion/vxge/vxge-traffic.h | 2 +-
|
|
drivers/net/ethernet/realtek/r8169.c | 6 +-
|
|
drivers/net/ethernet/sis/sis190.c | 2 +-
|
|
drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 4 +-
|
|
drivers/net/hyperv/hyperv_net.h | 2 +-
|
|
drivers/net/hyperv/rndis_filter.c | 4 +-
|
|
drivers/net/ppp/ppp_generic.c | 4 +-
|
|
drivers/net/tokenring/abyss.c | 8 +-
|
|
drivers/net/tokenring/madgemc.c | 8 +-
|
|
drivers/net/tokenring/proteon.c | 8 +-
|
|
drivers/net/tokenring/skisa.c | 8 +-
|
|
drivers/net/usb/hso.c | 25 +-
|
|
drivers/net/wireless/ath/ath.h | 1 +
|
|
drivers/net/wireless/ath/ath9k/ar9002_mac.c | 30 +-
|
|
drivers/net/wireless/ath/ath9k/ar9003_mac.c | 58 +-
|
|
drivers/net/wireless/ath/ath9k/hw.h | 6 +-
|
|
.../net/wireless/brcm80211/brcmsmac/phy/phy_int.h | 2 +-
|
|
drivers/net/wireless/iwlegacy/3945-mac.c | 4 +-
|
|
drivers/net/wireless/mac80211_hwsim.c | 8 +-
|
|
drivers/net/wireless/mwifiex/main.h | 2 +-
|
|
drivers/net/wireless/rndis_wlan.c | 2 +-
|
|
drivers/net/wireless/rt2x00/rt2x00.h | 2 +-
|
|
drivers/net/wireless/rt2x00/rt2x00queue.c | 4 +-
|
|
drivers/net/wireless/wl1251/wl1251.h | 2 +-
|
|
drivers/oprofile/buffer_sync.c | 8 +-
|
|
drivers/oprofile/event_buffer.c | 2 +-
|
|
drivers/oprofile/oprof.c | 2 +-
|
|
drivers/oprofile/oprofile_stats.c | 10 +-
|
|
drivers/oprofile/oprofile_stats.h | 10 +-
|
|
drivers/oprofile/oprofilefs.c | 2 +-
|
|
drivers/parport/procfs.c | 4 +-
|
|
drivers/pci/hotplug/cpci_hotplug.h | 2 +-
|
|
drivers/pci/hotplug/cpqphp_nvram.c | 4 +
|
|
drivers/pci/pcie/aspm.c | 6 +-
|
|
drivers/pci/probe.c | 2 +-
|
|
drivers/platform/x86/thinkpad_acpi.c | 70 +-
|
|
drivers/pnp/pnpbios/bioscalls.c | 14 +-
|
|
drivers/pnp/resource.c | 4 +-
|
|
drivers/power/bq27x00_battery.c | 2 +-
|
|
drivers/regulator/max8660.c | 6 +-
|
|
drivers/regulator/mc13892-regulator.c | 6 +-
|
|
drivers/scsi/aacraid/aacraid.h | 2 +-
|
|
drivers/scsi/aacraid/linit.c | 2 +-
|
|
drivers/scsi/aic94xx/aic94xx_init.c | 2 +-
|
|
drivers/scsi/bfa/bfa.h | 2 +-
|
|
drivers/scsi/bfa/bfa_fcpim.c | 4 +-
|
|
drivers/scsi/bfa/bfa_fcpim.h | 3 +-
|
|
drivers/scsi/bfa/bfa_ioc.h | 4 +-
|
|
drivers/scsi/hosts.c | 4 +-
|
|
drivers/scsi/hpsa.c | 30 +-
|
|
drivers/scsi/hpsa.h | 2 +-
|
|
drivers/scsi/ips.h | 2 +-
|
|
drivers/scsi/libfc/fc_exch.c | 38 +-
|
|
drivers/scsi/libsas/sas_ata.c | 2 +-
|
|
drivers/scsi/lpfc/lpfc.h | 8 +-
|
|
drivers/scsi/lpfc/lpfc_debugfs.c | 18 +-
|
|
drivers/scsi/lpfc/lpfc_init.c | 6 +-
|
|
drivers/scsi/lpfc/lpfc_scsi.c | 16 +-
|
|
drivers/scsi/pmcraid.c | 20 +-
|
|
drivers/scsi/pmcraid.h | 8 +-
|
|
drivers/scsi/qla2xxx/qla_def.h | 2 +-
|
|
drivers/scsi/qla4xxx/ql4_def.h | 2 +-
|
|
drivers/scsi/qla4xxx/ql4_os.c | 6 +-
|
|
drivers/scsi/scsi.c | 2 +-
|
|
drivers/scsi/scsi_lib.c | 6 +-
|
|
drivers/scsi/scsi_sysfs.c | 2 +-
|
|
drivers/scsi/scsi_tgt_lib.c | 2 +-
|
|
drivers/scsi/scsi_transport_fc.c | 8 +-
|
|
drivers/scsi/scsi_transport_iscsi.c | 6 +-
|
|
drivers/scsi/scsi_transport_srp.c | 6 +-
|
|
drivers/scsi/sg.c | 6 +-
|
|
drivers/spi/spi.c | 2 +-
|
|
drivers/staging/octeon/ethernet-rx.c | 12 +-
|
|
drivers/staging/octeon/ethernet.c | 8 +-
|
|
drivers/staging/rtl8712/rtl871x_io.h | 2 +-
|
|
drivers/staging/sbe-2t3e3/netdev.c | 2 +-
|
|
drivers/staging/speakup/speakup_soft.c.rej | 15 +
|
|
drivers/staging/usbip/usbip_common.h | 2 +-
|
|
drivers/staging/usbip/vhci.h | 2 +-
|
|
drivers/staging/usbip/vhci_hcd.c | 6 +-
|
|
drivers/staging/usbip/vhci_rx.c | 2 +-
|
|
drivers/staging/vt6655/hostap.c | 7 +-
|
|
drivers/staging/vt6656/hostap.c | 7 +-
|
|
drivers/staging/wlan-ng/hfa384x_usb.c | 2 +-
|
|
drivers/staging/zcache/tmem.c | 4 +-
|
|
drivers/staging/zcache/tmem.h | 2 +
|
|
drivers/target/target_core_tmr.c | 2 +-
|
|
drivers/target/target_core_transport.c | 10 +-
|
|
drivers/tty/hvc/hvcs.c | 23 +-
|
|
drivers/tty/ipwireless/tty.c | 29 +-
|
|
drivers/tty/n_gsm.c | 2 +-
|
|
drivers/tty/n_tty.c | 3 +-
|
|
drivers/tty/pty.c | 4 +-
|
|
drivers/tty/serial/kgdboc.c | 32 +-
|
|
drivers/tty/tty_io.c | 2 +-
|
|
drivers/tty/tty_ldisc.c | 10 +-
|
|
drivers/uio/uio.c | 21 +-
|
|
drivers/usb/atm/cxacru.c | 2 +-
|
|
drivers/usb/atm/usbatm.c | 24 +-
|
|
drivers/usb/core/devices.c | 6 +-
|
|
drivers/usb/early/ehci-dbgp.c | 16 +-
|
|
drivers/usb/wusbcore/wa-hc.h | 4 +-
|
|
drivers/usb/wusbcore/wa-xfer.c | 2 +-
|
|
drivers/vhost/vhost.c | 2 +-
|
|
drivers/video/aty/aty128fb.c | 2 +-
|
|
drivers/video/fbcmap.c | 3 +-
|
|
drivers/video/fbmem.c | 6 +-
|
|
drivers/video/geode/gx1fb_core.c | 2 +-
|
|
drivers/video/gxt4500.c | 4 +-
|
|
drivers/video/i810/i810_accel.c | 1 +
|
|
drivers/video/i810/i810_main.c | 2 +-
|
|
drivers/video/jz4740_fb.c | 2 +-
|
|
drivers/video/udlfb.c | 32 +-
|
|
drivers/video/uvesafb.c | 39 +-
|
|
drivers/video/vesafb.c | 51 +-
|
|
drivers/video/via/via_clock.h | 2 +-
|
|
drivers/xen/xen-pciback/conf_space.h | 6 +-
|
|
fs/9p/vfs_inode.c | 2 +-
|
|
fs/Kconfig.binfmt | 2 +-
|
|
fs/aio.c | 11 +-
|
|
fs/autofs4/waitq.c | 2 +-
|
|
fs/befs/linuxvfs.c | 2 +-
|
|
fs/binfmt_aout.c | 23 +-
|
|
fs/binfmt_elf.c | 577 +++-
|
|
fs/binfmt_elf.c.rej | 22 +
|
|
fs/binfmt_flat.c | 6 +
|
|
fs/bio.c | 6 +-
|
|
fs/block_dev.c | 2 +-
|
|
fs/btrfs/check-integrity.c | 2 +-
|
|
fs/btrfs/ctree.c | 9 +-
|
|
fs/btrfs/ioctl.c | 2 +-
|
|
fs/btrfs/relocation.c | 2 +-
|
|
fs/cachefiles/bind.c | 6 +-
|
|
fs/cachefiles/daemon.c | 8 +-
|
|
fs/cachefiles/internal.h | 12 +-
|
|
fs/cachefiles/namei.c | 2 +-
|
|
fs/cachefiles/proc.c | 12 +-
|
|
fs/cachefiles/rdwr.c | 2 +-
|
|
fs/ceph/dir.c | 2 +-
|
|
fs/cifs/cifs_debug.c | 86 +-
|
|
fs/cifs/cifsfs.c | 8 +-
|
|
fs/cifs/cifsglob.h | 50 +-
|
|
fs/cifs/link.c | 2 +-
|
|
fs/cifs/misc.c | 4 +-
|
|
fs/coda/cache.c | 10 +-
|
|
fs/compat.c | 6 +-
|
|
fs/compat_binfmt_elf.c | 2 +
|
|
fs/compat_ioctl.c.rej | 40 +
|
|
fs/configfs/dir.c | 10 +-
|
|
fs/dcache.c | 2 +-
|
|
fs/ecryptfs/inode.c | 6 +-
|
|
fs/ecryptfs/miscdev.c | 2 +-
|
|
fs/ecryptfs/read_write.c | 4 +-
|
|
fs/exec.c | 352 ++-
|
|
fs/ext4/ext4.h | 20 +-
|
|
fs/ext4/mballoc.c | 44 +-
|
|
fs/fcntl.c | 4 +-
|
|
fs/fifo.c | 22 +-
|
|
fs/fs_struct.c | 12 +-
|
|
fs/fscache/cookie.c | 34 +-
|
|
fs/fscache/internal.h | 182 +-
|
|
fs/fscache/object.c | 26 +-
|
|
fs/fscache/operation.c | 28 +-
|
|
fs/fscache/page.c | 106 +-
|
|
fs/fscache/stats.c | 330 +--
|
|
fs/fuse/cuse.c | 10 +-
|
|
fs/fuse/dev.c | 2 +-
|
|
fs/fuse/dir.c | 2 +-
|
|
fs/gfs2/inode.c | 2 +-
|
|
fs/inode.c | 4 +-
|
|
fs/jffs2/erase.c | 3 +-
|
|
fs/jffs2/wbuf.c | 3 +-
|
|
fs/jfs/super.c | 2 +-
|
|
fs/libfs.c | 7 +-
|
|
fs/libfs.c.rej | 12 +
|
|
fs/lockd/clntproc.c | 4 +-
|
|
fs/locks.c | 8 +-
|
|
fs/namei.c | 13 +-
|
|
fs/nfs/inode.c | 6 +-
|
|
fs/nfs/inode.c.rej | 11 +
|
|
fs/nfsd/vfs.c | 6 +-
|
|
fs/notify/fanotify/fanotify_user.c | 3 +-
|
|
fs/notify/notification.c | 4 +-
|
|
fs/ntfs/dir.c | 2 +-
|
|
fs/ntfs/file.c | 4 +-
|
|
fs/ocfs2/localalloc.c | 2 +-
|
|
fs/ocfs2/ocfs2.h | 10 +-
|
|
fs/ocfs2/suballoc.c | 12 +-
|
|
fs/ocfs2/super.c | 20 +-
|
|
fs/ocfs2/symlink.c | 2 +-
|
|
fs/pipe.c | 33 +-
|
|
fs/proc/array.c | 20 +
|
|
fs/proc/base.c | 2 +-
|
|
fs/proc/kcore.c | 32 +-
|
|
fs/proc/meminfo.c | 2 +-
|
|
fs/proc/nommu.c.rej | 11 +
|
|
fs/proc/task_mmu.c | 24 +-
|
|
fs/proc/task_mmu.c.rej | 39 +
|
|
fs/proc/task_nommu.c | 2 +-
|
|
fs/proc/task_nommu.c.rej | 10 +
|
|
fs/quota/netlink.c | 4 +-
|
|
fs/readdir.c | 2 +-
|
|
fs/reiserfs/do_balan.c | 2 +-
|
|
fs/reiserfs/procfs.c | 2 +-
|
|
fs/reiserfs/reiserfs.h | 4 +-
|
|
fs/seq_file.c | 2 +-
|
|
fs/splice.c | 36 +-
|
|
fs/sysfs/file.c | 10 +-
|
|
fs/sysfs/symlink.c | 2 +-
|
|
fs/udf/misc.c | 2 +-
|
|
fs/xattr_acl.c | 4 +-
|
|
fs/xfs/xfs_bmap.c | 2 +-
|
|
fs/xfs/xfs_dir2_sf.c | 10 +-
|
|
fs/xfs/xfs_ioctl.c | 2 +-
|
|
fs/xfs/xfs_iops.c | 2 +-
|
|
include/acpi/acpi_bus.h | 2 +-
|
|
include/asm-generic/atomic-long.h | 189 ++
|
|
include/asm-generic/atomic.h | 2 +-
|
|
include/asm-generic/atomic64.h | 12 +
|
|
include/asm-generic/cache.h | 4 +-
|
|
include/asm-generic/emergency-restart.h | 2 +-
|
|
include/asm-generic/kmap_types.h | 3 +-
|
|
include/asm-generic/local.h | 1 +
|
|
include/asm-generic/pgtable-nopmd.h | 18 +-
|
|
include/asm-generic/pgtable-nopud.h | 15 +-
|
|
include/asm-generic/pgtable.h | 8 +
|
|
include/asm-generic/vmlinux.lds.h | 10 +-
|
|
include/drm/drmP.h | 5 +-
|
|
include/drm/drm_crtc_helper.h | 4 +-
|
|
include/drm/ttm/ttm_memory.h | 2 +-
|
|
include/linux/a.out.h | 8 +
|
|
include/linux/atmdev.h | 2 +-
|
|
include/linux/binfmts.h | 1 +
|
|
include/linux/blkdev.h | 2 +-
|
|
include/linux/blktrace_api.h | 2 +-
|
|
include/linux/byteorder/little_endian.h | 24 +-
|
|
include/linux/cache.h | 4 +
|
|
include/linux/cleancache.h | 2 +-
|
|
include/linux/compiler-gcc4.h | 19 +
|
|
include/linux/compiler.h | 68 +-
|
|
include/linux/crypto.h | 6 +-
|
|
include/linux/decompress/mm.h | 2 +-
|
|
include/linux/dma-mapping.h | 2 +-
|
|
include/linux/efi.h.rej | 11 +
|
|
include/linux/elf.h | 30 +
|
|
include/linux/filter.h | 4 +
|
|
include/linux/firewire.h | 2 +-
|
|
include/linux/fs.h | 3 +-
|
|
include/linux/fs_struct.h | 2 +-
|
|
include/linux/fscache-cache.h | 4 +-
|
|
include/linux/fsnotify.h | 2 +-
|
|
include/linux/fsnotify_backend.h | 1 +
|
|
include/linux/ftrace_event.h | 4 +-
|
|
include/linux/genhd.h | 2 +-
|
|
include/linux/gfp.h | 9 +
|
|
include/linux/gfp.h.rej | 10 +
|
|
include/linux/hid.h | 2 +-
|
|
include/linux/highmem.h | 12 +
|
|
include/linux/i2c.h | 1 +
|
|
include/linux/i2o.h | 2 +-
|
|
include/linux/if_team.h | 1 +
|
|
include/linux/if_team.h.rej | 10 +
|
|
include/linux/init.h | 18 +-
|
|
include/linux/init_task.h | 7 +
|
|
include/linux/intel-iommu.h | 2 +-
|
|
include/linux/interrupt.h | 6 +-
|
|
include/linux/kgdb.h | 6 +-
|
|
include/linux/kref.h | 2 +-
|
|
include/linux/kvm_host.h | 4 +-
|
|
include/linux/libata.h | 2 +-
|
|
include/linux/mca.h | 2 +-
|
|
include/linux/memory.h | 2 +-
|
|
include/linux/mfd/abx500.h | 1 +
|
|
include/linux/mfd/abx500/ux500_chargalg.h | 2 +-
|
|
include/linux/mm.h | 92 +-
|
|
include/linux/mm_types.h | 22 +-
|
|
include/linux/mmu_notifier.h | 6 +-
|
|
include/linux/mmzone.h | 2 +-
|
|
include/linux/mod_devicetable.h | 4 +-
|
|
include/linux/module.h | 54 +-
|
|
include/linux/moduleloader.h | 14 +-
|
|
include/linux/moduleparam.h | 4 +-
|
|
include/linux/namei.h | 6 +-
|
|
include/linux/netdevice.h | 3 +-
|
|
include/linux/of_pdt.h | 2 +-
|
|
include/linux/oprofile.h | 4 +-
|
|
include/linux/perf_event.h | 8 +-
|
|
include/linux/pipe_fs_i.h | 6 +-
|
|
include/linux/pm_runtime.h | 2 +-
|
|
include/linux/poison.h | 4 +-
|
|
include/linux/preempt.h | 2 +-
|
|
include/linux/proc_fs.h | 2 +-
|
|
include/linux/random.h | 11 +-
|
|
include/linux/reboot.h | 14 +-
|
|
include/linux/relay.h | 2 +-
|
|
include/linux/rfkill.h | 1 +
|
|
include/linux/rio.h | 2 +-
|
|
include/linux/rmap.h | 4 +-
|
|
include/linux/sched.h | 65 +-
|
|
include/linux/screen_info.h | 3 +-
|
|
include/linux/seq_file.h | 1 +
|
|
include/linux/skbuff.h | 8 +-
|
|
include/linux/slab.h | 23 +-
|
|
include/linux/slab_def.h | 31 +-
|
|
include/linux/slob_def.h | 4 +-
|
|
include/linux/slub_def.h | 10 +-
|
|
include/linux/sonet.h | 2 +-
|
|
include/linux/sunrpc/clnt.h | 8 +-
|
|
include/linux/sunrpc/sched.h | 1 +
|
|
include/linux/sunrpc/svc_rdma.h | 18 +-
|
|
include/linux/sysctl.h.rej | 14 +
|
|
include/linux/tty_ldisc.h | 2 +-
|
|
include/linux/types.h | 16 +
|
|
include/linux/uaccess.h | 6 +-
|
|
include/linux/unaligned/access_ok.h | 12 +-
|
|
include/linux/usb/renesas_usbhs.h | 4 +-
|
|
include/linux/vermagic.h | 21 +-
|
|
include/linux/vmalloc.h | 4 +-
|
|
include/linux/vmalloc.h.rej | 23 +
|
|
include/linux/vmstat.h | 20 +-
|
|
include/linux/xattr.h | 5 +
|
|
include/media/saa7146_vv.h | 2 +-
|
|
include/media/v4l2-dev.h | 3 +-
|
|
include/media/v4l2-ioctl.h | 2 +-
|
|
include/net/caif/caif_hsi.h | 2 +-
|
|
include/net/caif/cfctrl.h | 6 +-
|
|
include/net/flow.h | 2 +-
|
|
include/net/inetpeer.h.rej | 26 +
|
|
include/net/ip_fib.h | 2 +-
|
|
include/net/ip_vs.h | 4 +-
|
|
include/net/irda/ircomm_core.h | 2 +-
|
|
include/net/irda/ircomm_tty.h | 5 +-
|
|
include/net/iucv/af_iucv.h | 2 +-
|
|
include/net/neighbour.h | 2 +-
|
|
include/net/netlink.h | 2 +-
|
|
include/net/netns/ipv4.h | 4 +-
|
|
include/net/sctp/sctp.h | 6 +-
|
|
include/net/sock.h | 4 +-
|
|
include/net/tcp.h | 2 +-
|
|
include/net/udp.h | 2 +-
|
|
include/net/xfrm.h | 2 +-
|
|
include/rdma/iw_cm.h | 2 +-
|
|
include/scsi/libfc.h | 3 +-
|
|
include/scsi/scsi_device.h | 6 +-
|
|
include/scsi/scsi_transport_fc.h | 2 +-
|
|
include/sound/ak4xxx-adda.h | 2 +-
|
|
include/sound/hwdep.h | 2 +-
|
|
include/sound/info.h | 2 +-
|
|
include/sound/pcm.h | 1 +
|
|
include/sound/sb16_csp.h | 2 +-
|
|
include/sound/soc.h.rej | 11 +
|
|
include/sound/ymfpci.h | 2 +-
|
|
include/target/target_core_base.h | 6 +-
|
|
include/trace/events/irq.h | 4 +-
|
|
include/video/udlfb.h | 8 +-
|
|
include/video/uvesafb.h | 1 +
|
|
init/Kconfig | 2 +-
|
|
init/do_mounts.c | 14 +-
|
|
init/do_mounts.h | 8 +-
|
|
init/do_mounts_initrd.c | 28 +-
|
|
init/do_mounts_md.c | 6 +-
|
|
init/initramfs.c | 40 +-
|
|
init/main.c | 72 +-
|
|
ipc/msg.c | 11 +-
|
|
ipc/sem.c | 11 +-
|
|
ipc/shm.c | 17 +-
|
|
kernel/acct.c | 2 +-
|
|
kernel/audit.c | 6 +-
|
|
kernel/audit.c.rej | 11 +
|
|
kernel/auditsc.c | 4 +-
|
|
kernel/capability.c | 3 +
|
|
kernel/compat.c | 40 +-
|
|
kernel/debug/debug_core.c | 16 +-
|
|
kernel/debug/kdb/kdb_main.c | 4 +-
|
|
kernel/events/core.c | 28 +-
|
|
kernel/exit.c | 4 +-
|
|
kernel/fork.c | 167 +-
|
|
kernel/futex.c | 9 +
|
|
kernel/gcov/base.c | 7 +-
|
|
kernel/hrtimer.c | 2 +-
|
|
kernel/jump_label.c | 5 +
|
|
kernel/kallsyms.c | 39 +-
|
|
kernel/kexec.c | 3 +-
|
|
kernel/kmod.c | 2 +-
|
|
kernel/kprobes.c | 8 +-
|
|
kernel/lockdep.c | 7 +-
|
|
kernel/lockdep_proc.c | 2 +-
|
|
kernel/module.c | 310 +-
|
|
kernel/module.c.rej | 26 +
|
|
kernel/mutex-debug.c | 12 +-
|
|
kernel/mutex-debug.h | 4 +-
|
|
kernel/mutex.c | 7 +-
|
|
kernel/panic.c | 3 +-
|
|
kernel/pid.c | 2 +-
|
|
kernel/posix-cpu-timers.c | 4 +-
|
|
kernel/posix-timers.c | 20 +-
|
|
kernel/power/poweroff.c | 2 +-
|
|
kernel/power/process.c | 10 +-
|
|
kernel/power/process.c.rej | 19 +
|
|
kernel/profile.c | 14 +-
|
|
kernel/ptrace.c | 6 +-
|
|
kernel/rcutiny.c | 4 +-
|
|
kernel/rcutiny_plugin.h | 2 +-
|
|
kernel/rcutorture.c | 56 +-
|
|
kernel/rcutree.c | 50 +-
|
|
kernel/rcutree.h.rej | 11 +
|
|
kernel/rcutree_plugin.h | 2 +-
|
|
kernel/rcutree_trace.c | 4 +-
|
|
kernel/rtmutex-tester.c | 24 +-
|
|
kernel/sched/auto_group.c | 4 +-
|
|
kernel/sched/fair.c | 2 +-
|
|
kernel/signal.c | 8 +-
|
|
kernel/smp.c.rej | 28 +
|
|
kernel/softirq.c | 14 +-
|
|
kernel/sys.c | 12 +-
|
|
kernel/sysctl.c | 37 +-
|
|
kernel/sysctl_binary.c | 14 +-
|
|
kernel/time/alarmtimer.c | 2 +-
|
|
kernel/time/tick-broadcast.c | 2 +-
|
|
kernel/time/timer_stats.c | 10 +-
|
|
kernel/timer.c | 2 +-
|
|
kernel/trace/blktrace.c | 6 +-
|
|
kernel/trace/ftrace.c | 11 +-
|
|
kernel/trace/trace.c | 6 +-
|
|
kernel/trace/trace_events.c | 25 +-
|
|
kernel/trace/trace_kprobe.c | 8 +-
|
|
kernel/trace/trace_mmiotrace.c | 8 +-
|
|
kernel/trace/trace_output.c | 2 +-
|
|
kernel/trace/trace_stack.c.rej | 11 +
|
|
kernel/trace/trace_workqueue.c | 6 +-
|
|
lib/bitmap.c | 6 +-
|
|
lib/bitmap.c.rej | 10 +
|
|
lib/bug.c | 2 +
|
|
lib/debugobjects.c | 2 +-
|
|
lib/devres.c | 4 +-
|
|
lib/dma-debug.c | 2 +-
|
|
lib/extable.c | 3 +
|
|
lib/inflate.c | 2 +-
|
|
lib/ioremap.c | 4 +-
|
|
lib/radix-tree.c | 2 +-
|
|
lib/vsprintf.c | 12 +-
|
|
mm/Kconfig | 6 +-
|
|
mm/filemap.c | 2 +-
|
|
mm/fremap.c | 5 +
|
|
mm/highmem.c | 7 +-
|
|
mm/huge_memory.c | 2 +-
|
|
mm/hugetlb.c | 50 +
|
|
mm/hugetlb.c.rej | 12 +
|
|
mm/internal.h | 1 +
|
|
mm/maccess.c | 4 +-
|
|
mm/madvise.c | 41 +
|
|
mm/memory-failure.c | 14 +-
|
|
mm/memory-failure.c.rej | 20 +
|
|
mm/memory.c | 314 +-
|
|
mm/memory.c.rej | 149 +
|
|
mm/mempolicy.c | 25 +
|
|
mm/mlock.c | 22 +-
|
|
mm/mmap.c | 585 +++-
|
|
mm/mmap.c.rej | 103 +
|
|
mm/mprotect.c | 138 +-
|
|
mm/mremap.c | 45 +-
|
|
mm/nommu.c | 11 +-
|
|
mm/page_alloc.c | 14 +-
|
|
mm/percpu.c | 2 +-
|
|
mm/process_vm_access.c | 14 +-
|
|
mm/rmap.c | 38 +-
|
|
mm/shmem.c | 5 +-
|
|
mm/slab.c | 106 +-
|
|
mm/slob.c | 171 +-
|
|
mm/slub.c | 102 +-
|
|
mm/sparse-vmemmap.c | 4 +-
|
|
mm/swap.c.rej | 18 +
|
|
mm/swapfile.c | 12 +-
|
|
mm/util.c | 6 +
|
|
mm/vmalloc.c | 89 +-
|
|
mm/vmstat.c | 6 +-
|
|
net/8021q/vlan.c | 3 +-
|
|
net/9p/trans_fd.c | 2 +-
|
|
net/atm/atm_misc.c | 8 +-
|
|
net/atm/lec.h | 2 +-
|
|
net/atm/mpc.h | 2 +-
|
|
net/atm/proc.c | 6 +-
|
|
net/atm/resources.c | 4 +-
|
|
net/batman-adv/bat_iv_ogm.c | 6 +-
|
|
net/batman-adv/hard-interface.c | 4 +-
|
|
net/batman-adv/soft-interface.c | 4 +-
|
|
net/batman-adv/types.h | 6 +-
|
|
net/batman-adv/unicast.c | 2 +-
|
|
net/bluetooth/hci_conn.c.rej | 10 +
|
|
net/bluetooth/l2cap_core.c | 6 +-
|
|
net/bluetooth/l2cap_core.c.rej | 15 +
|
|
net/bridge/netfilter/ebtables.c | 2 +-
|
|
net/caif/cfctrl.c | 11 +-
|
|
net/can/gw.c | 2 +-
|
|
net/compat.c | 26 +-
|
|
net/compat.c.rej | 14 +
|
|
net/core/datagram.c | 2 +-
|
|
net/core/dev.c | 16 +-
|
|
net/core/flow.c | 8 +-
|
|
net/core/iovec.c | 4 +-
|
|
net/core/rtnetlink.c | 2 +-
|
|
net/core/scm.c | 8 +-
|
|
net/core/sock.c | 16 +-
|
|
net/decnet/sysctl_net_decnet.c | 4 +-
|
|
net/ipv4/fib_frontend.c | 6 +-
|
|
net/ipv4/fib_semantics.c | 2 +-
|
|
net/ipv4/inetpeer.c.rej | 13 +
|
|
net/ipv4/ip_fragment.c | 2 +-
|
|
net/ipv4/ip_sockglue.c | 2 +-
|
|
net/ipv4/ipconfig.c | 6 +-
|
|
net/ipv4/ping.c.rej | 11 +
|
|
net/ipv4/raw.c | 14 +-
|
|
net/ipv4/route.c | 4 +-
|
|
net/ipv4/route.c.rej | 11 +
|
|
net/ipv4/tcp_probe.c | 2 +-
|
|
net/ipv4/udp.c | 6 +-
|
|
net/ipv4/udp.c.rej | 11 +
|
|
net/ipv6/addrconf.c | 2 +-
|
|
net/ipv6/inet6_connection_sock.c | 4 +-
|
|
net/ipv6/ipv6_sockglue.c | 2 +-
|
|
net/ipv6/raw.c | 19 +-
|
|
net/ipv6/udp.c | 8 +-
|
|
net/irda/ircomm/ircomm_tty.c | 38 +-
|
|
net/iucv/af_iucv.c | 4 +-
|
|
net/key/af_key.c | 4 +-
|
|
net/mac80211/ieee80211_i.h | 3 +-
|
|
net/mac80211/iface.c | 12 +-
|
|
net/mac80211/main.c | 2 +-
|
|
net/mac80211/pm.c | 6 +-
|
|
net/mac80211/rate.c | 2 +-
|
|
net/mac80211/rc80211_pid_debugfs.c | 2 +-
|
|
net/mac80211/util.c | 2 +-
|
|
net/netfilter/ipvs/ip_vs_conn.c | 6 +-
|
|
net/netfilter/ipvs/ip_vs_core.c | 4 +-
|
|
net/netfilter/ipvs/ip_vs_ctl.c | 10 +-
|
|
net/netfilter/ipvs/ip_vs_sync.c | 4 +-
|
|
net/netfilter/ipvs/ip_vs_xmit.c | 4 +-
|
|
net/netfilter/nfnetlink_log.c | 4 +-
|
|
net/netfilter/xt_statistic.c | 8 +-
|
|
net/netlink/af_netlink.c | 4 +-
|
|
net/packet/af_packet.c | 12 +-
|
|
net/phonet/pep.c | 6 +-
|
|
net/phonet/socket.c.rej | 11 +
|
|
net/rds/cong.c | 6 +-
|
|
net/rds/ib.h | 2 +-
|
|
net/rds/ib_cm.c | 2 +-
|
|
net/rds/ib_recv.c | 4 +-
|
|
net/rds/iw.h | 2 +-
|
|
net/rds/iw_cm.c | 2 +-
|
|
net/rds/iw_recv.c | 4 +-
|
|
net/rds/tcp.c | 2 +-
|
|
net/rds/tcp_send.c | 2 +-
|
|
net/rxrpc/af_rxrpc.c | 2 +-
|
|
net/rxrpc/ar-ack.c | 14 +-
|
|
net/rxrpc/ar-call.c | 2 +-
|
|
net/rxrpc/ar-connection.c | 2 +-
|
|
net/rxrpc/ar-connevent.c | 2 +-
|
|
net/rxrpc/ar-input.c | 4 +-
|
|
net/rxrpc/ar-internal.h | 8 +-
|
|
net/rxrpc/ar-local.c | 2 +-
|
|
net/rxrpc/ar-output.c | 4 +-
|
|
net/rxrpc/ar-peer.c | 2 +-
|
|
net/rxrpc/ar-proc.c | 4 +-
|
|
net/rxrpc/ar-transport.c | 2 +-
|
|
net/rxrpc/rxkad.c | 4 +-
|
|
net/sctp/socket.c | 2 +-
|
|
net/socket.c | 32 +-
|
|
net/socket.c.rej | 11 +
|
|
net/sunrpc/sched.c | 4 +-
|
|
net/sunrpc/xprtrdma/svc_rdma.c | 38 +-
|
|
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 6 +-
|
|
net/sunrpc/xprtrdma/svc_rdma_sendto.c | 2 +-
|
|
net/sunrpc/xprtrdma/svc_rdma_transport.c | 10 +-
|
|
net/tipc/link.c | 6 +-
|
|
net/tipc/msg.c | 2 +-
|
|
net/tipc/subscr.c | 2 +-
|
|
net/wireless/core.h | 2 +-
|
|
net/wireless/wext-core.c | 19 +-
|
|
net/xfrm/xfrm_policy.c | 16 +-
|
|
scripts/Makefile.build | 4 +-
|
|
scripts/Makefile.clean | 3 +-
|
|
scripts/Makefile.host | 28 +-
|
|
scripts/basic/fixdep.c | 12 +-
|
|
scripts/gcc-plugin.sh | 17 +
|
|
scripts/mod/file2alias.c | 14 +-
|
|
scripts/mod/modpost.c | 25 +-
|
|
scripts/mod/modpost.h | 6 +-
|
|
scripts/mod/sumversion.c | 2 +-
|
|
scripts/pnmtologo.c | 6 +-
|
|
security/Kconfig | 645 ++++-
|
|
security/integrity/ima/ima.h | 4 +-
|
|
security/integrity/ima/ima_api.c | 2 +-
|
|
security/integrity/ima/ima_fs.c | 4 +-
|
|
security/integrity/ima/ima_queue.c | 2 +-
|
|
security/keys/compat.c.rej | 11 +
|
|
security/keys/keyctl.c | 6 +-
|
|
security/keys/keyctl.c.rej | 11 +
|
|
security/keys/keyring.c | 6 +-
|
|
security/security.c | 9 +-
|
|
security/selinux/hooks.c.rej | 10 +
|
|
security/selinux/include/xfrm.h | 2 +-
|
|
security/smack/smack_lsm.c | 2 +-
|
|
security/tomoyo/tomoyo.c | 2 +-
|
|
sound/aoa/codecs/onyx.c | 7 +-
|
|
sound/aoa/codecs/onyx.h | 1 +
|
|
sound/core/oss/pcm_oss.c | 18 +-
|
|
sound/core/pcm_compat.c | 2 +-
|
|
sound/core/pcm_native.c | 4 +-
|
|
sound/core/seq/seq_device.c | 8 +-
|
|
sound/drivers/mts64.c | 14 +-
|
|
sound/drivers/opl4/opl4_lib.c | 2 +-
|
|
sound/drivers/portman2x4.c | 3 +-
|
|
sound/firewire/amdtp.c | 4 +-
|
|
sound/firewire/amdtp.h | 2 +-
|
|
sound/firewire/isight.c | 10 +-
|
|
sound/isa/cmi8330.c | 2 +-
|
|
sound/oss/sb_audio.c | 2 +-
|
|
sound/oss/swarm_cs4297a.c | 6 +-
|
|
sound/pci/hda/hda_codec.h | 7 +-
|
|
sound/pci/ice1712/ice1712.h | 4 +-
|
|
sound/pci/ymfpci/ymfpci_main.c | 12 +-
|
|
sound/soc/soc-pcm.c.rej | 11 +
|
|
sound/usb/card.h.rej | 18 +
|
|
tools/gcc/.gitignore | 1 +
|
|
tools/gcc/Makefile | 43 +
|
|
tools/gcc/checker_plugin.c | 171 ++
|
|
tools/gcc/colorize_plugin.c | 148 +
|
|
tools/gcc/constify_plugin.c | 328 +++
|
|
tools/gcc/generate_size_overflow_hash.sh | 94 +
|
|
tools/gcc/kallocstat_plugin.c | 167 ++
|
|
tools/gcc/kernexec_plugin.c | 427 +++
|
|
tools/gcc/latent_entropy_plugin.c | 295 ++
|
|
tools/gcc/size_overflow_hash.data | 3028 ++++++++++++++++++++
|
|
tools/gcc/size_overflow_plugin.c | 1558 ++++++++++
|
|
tools/gcc/stackleak_plugin.c | 313 ++
|
|
tools/perf/util/include/asm/alternative-asm.h | 3 +
|
|
usr/gen_init_cpio.c.rej | 24 +
|
|
virt/kvm/kvm_main.c | 20 +-
|
|
1283 files changed, 25194 insertions(+), 5798 deletions(-)
|
|
create mode 100644 arch/arm/include/asm/atomic.h.rej
|
|
create mode 100644 arch/arm/include/asm/elf.h.rej
|
|
create mode 100644 arch/arm/kernel/process.c.rej
|
|
create mode 100644 arch/x86/boot/Makefile.rej
|
|
create mode 100644 arch/x86/ia32/ia32entry.S.rej
|
|
create mode 100644 arch/x86/include/asm/fpu-internal.h.rej
|
|
create mode 100644 arch/x86/include/asm/paravirt_types.h.rej
|
|
create mode 100644 arch/x86/include/asm/pgtable_64_types.h.rej
|
|
create mode 100644 arch/x86/kernel/entry_32.S.rej
|
|
create mode 100644 arch/x86/kernel/entry_64.S.rej
|
|
create mode 100644 arch/x86/kernel/irq.c.rej
|
|
create mode 100644 arch/x86/kernel/relocate_kernel_64.S.rej
|
|
create mode 100644 arch/x86/kernel/setup.c.rej
|
|
create mode 100644 arch/x86/kernel/sys_x86_64.c.rej
|
|
create mode 100644 arch/x86/mm/fault.c.rej
|
|
create mode 100644 arch/x86/mm/init.c.rej
|
|
create mode 100644 arch/x86/mm/ioremap.c.rej
|
|
create mode 100644 arch/x86/mm/mmap.c.rej
|
|
create mode 100644 arch/x86/xen/xen-asm_32.S.rej
|
|
create mode 100644 drivers/base/power/wakeup.c.rej
|
|
create mode 100644 drivers/gpu/drm/drm_fops.c.rej
|
|
create mode 100644 drivers/gpu/drm/i915/i915_gem_execbuffer.c.rej
|
|
create mode 100644 drivers/gpu/drm/i915/intel_display.c.rej
|
|
create mode 100644 drivers/staging/speakup/speakup_soft.c.rej
|
|
create mode 100644 fs/binfmt_elf.c.rej
|
|
create mode 100644 fs/compat_ioctl.c.rej
|
|
create mode 100644 fs/libfs.c.rej
|
|
create mode 100644 fs/nfs/inode.c.rej
|
|
create mode 100644 fs/proc/nommu.c.rej
|
|
create mode 100644 fs/proc/task_mmu.c.rej
|
|
create mode 100644 fs/proc/task_nommu.c.rej
|
|
create mode 100644 include/linux/efi.h.rej
|
|
create mode 100644 include/linux/gfp.h.rej
|
|
create mode 100644 include/linux/if_team.h.rej
|
|
create mode 100644 include/linux/sysctl.h.rej
|
|
create mode 100644 include/linux/vmalloc.h.rej
|
|
create mode 100644 include/net/inetpeer.h.rej
|
|
create mode 100644 include/sound/soc.h.rej
|
|
create mode 100644 kernel/audit.c.rej
|
|
create mode 100644 kernel/module.c.rej
|
|
create mode 100644 kernel/power/process.c.rej
|
|
create mode 100644 kernel/rcutree.h.rej
|
|
create mode 100644 kernel/smp.c.rej
|
|
create mode 100644 kernel/trace/trace_stack.c.rej
|
|
create mode 100644 lib/bitmap.c.rej
|
|
create mode 100644 mm/hugetlb.c.rej
|
|
create mode 100644 mm/memory-failure.c.rej
|
|
create mode 100644 mm/memory.c.rej
|
|
create mode 100644 mm/mmap.c.rej
|
|
create mode 100644 mm/swap.c.rej
|
|
create mode 100644 net/bluetooth/hci_conn.c.rej
|
|
create mode 100644 net/bluetooth/l2cap_core.c.rej
|
|
create mode 100644 net/compat.c.rej
|
|
create mode 100644 net/ipv4/inetpeer.c.rej
|
|
create mode 100644 net/ipv4/ping.c.rej
|
|
create mode 100644 net/ipv4/route.c.rej
|
|
create mode 100644 net/ipv4/udp.c.rej
|
|
create mode 100644 net/phonet/socket.c.rej
|
|
create mode 100644 net/socket.c.rej
|
|
create mode 100644 scripts/gcc-plugin.sh
|
|
create mode 100644 security/keys/compat.c.rej
|
|
create mode 100644 security/keys/keyctl.c.rej
|
|
create mode 100644 security/selinux/hooks.c.rej
|
|
create mode 100644 sound/soc/soc-pcm.c.rej
|
|
create mode 100644 sound/usb/card.h.rej
|
|
create mode 100644 tools/gcc/.gitignore
|
|
create mode 100644 tools/gcc/Makefile
|
|
create mode 100644 tools/gcc/checker_plugin.c
|
|
create mode 100644 tools/gcc/colorize_plugin.c
|
|
create mode 100644 tools/gcc/constify_plugin.c
|
|
create mode 100644 tools/gcc/generate_size_overflow_hash.sh
|
|
create mode 100644 tools/gcc/kallocstat_plugin.c
|
|
create mode 100644 tools/gcc/kernexec_plugin.c
|
|
create mode 100644 tools/gcc/latent_entropy_plugin.c
|
|
create mode 100644 tools/gcc/size_overflow_hash.data
|
|
create mode 100644 tools/gcc/size_overflow_plugin.c
|
|
create mode 100644 tools/gcc/stackleak_plugin.c
|
|
create mode 100644 usr/gen_init_cpio.c.rej
|
|
|
|
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
|
|
index b4a898f..781c7ad 100644
|
|
--- a/Documentation/dontdiff
|
|
+++ b/Documentation/dontdiff
|
|
@@ -2,9 +2,11 @@
|
|
*.aux
|
|
*.bin
|
|
*.bz2
|
|
+*.c.[012]*.*
|
|
*.cis
|
|
*.cpio
|
|
*.csp
|
|
+*.dbg
|
|
*.dsp
|
|
*.dvi
|
|
*.elf
|
|
@@ -14,6 +16,7 @@
|
|
*.gcov
|
|
*.gen.S
|
|
*.gif
|
|
+*.gmo
|
|
*.grep
|
|
*.grp
|
|
*.gz
|
|
@@ -48,14 +51,17 @@
|
|
*.tab.h
|
|
*.tex
|
|
*.ver
|
|
+*.vim
|
|
*.xml
|
|
*.xz
|
|
*_MODULES
|
|
+*_reg_safe.h
|
|
*_vga16.c
|
|
*~
|
|
\#*#
|
|
*.9
|
|
-.*
|
|
+.[^g]*
|
|
+.gen*
|
|
.*.d
|
|
.mm
|
|
53c700_d.h
|
|
@@ -69,6 +75,7 @@ Image
|
|
Module.markers
|
|
Module.symvers
|
|
PENDING
|
|
+PERF*
|
|
SCCS
|
|
System.map*
|
|
TAGS
|
|
@@ -80,6 +87,7 @@ aic7*seq.h*
|
|
aicasm
|
|
aicdb.h*
|
|
altivec*.c
|
|
+ashldi3.S
|
|
asm-offsets.h
|
|
asm_offsets.h
|
|
autoconf.h*
|
|
@@ -92,19 +100,24 @@ bounds.h
|
|
bsetup
|
|
btfixupprep
|
|
build
|
|
+builtin-policy.h
|
|
bvmlinux
|
|
bzImage*
|
|
capability_names.h
|
|
capflags.c
|
|
classlist.h*
|
|
+clut_vga16.c
|
|
+common-cmds.h
|
|
comp*.log
|
|
compile.h*
|
|
conf
|
|
config
|
|
config-*
|
|
config_data.h*
|
|
+config.c
|
|
config.mak
|
|
config.mak.autogen
|
|
+config.tmp
|
|
conmakehash
|
|
consolemap_deftbl.c*
|
|
cpustr.h
|
|
@@ -115,9 +128,11 @@ devlist.h*
|
|
dnotify_test
|
|
docproc
|
|
dslm
|
|
+dtc-lexer.lex.c
|
|
elf2ecoff
|
|
elfconfig.h*
|
|
evergreen_reg_safe.h
|
|
+exception_policy.conf
|
|
fixdep
|
|
flask.h
|
|
fore200e_mkfirm
|
|
@@ -125,12 +140,15 @@ fore200e_pca_fw.c*
|
|
gconf
|
|
gconf.glade.h
|
|
gen-devlist
|
|
+gen-kdb_cmds.c
|
|
gen_crc32table
|
|
gen_init_cpio
|
|
generated
|
|
genheaders
|
|
genksyms
|
|
*_gray256.c
|
|
+hash
|
|
+hid-example
|
|
hpet_example
|
|
hugepage-mmap
|
|
hugepage-shm
|
|
@@ -145,7 +163,7 @@ int32.c
|
|
int4.c
|
|
int8.c
|
|
kallsyms
|
|
-kconfig
|
|
+kern_constants.h
|
|
keywords.c
|
|
ksym.c*
|
|
ksym.h*
|
|
@@ -153,7 +171,7 @@ kxgettext
|
|
lkc_defs.h
|
|
lex.c
|
|
lex.*.c
|
|
-linux
|
|
+lib1funcs.S
|
|
logo_*.c
|
|
logo_*_clut224.c
|
|
logo_*_mono.c
|
|
@@ -164,14 +182,15 @@ machtypes.h
|
|
map
|
|
map_hugetlb
|
|
maui_boot.h
|
|
-media
|
|
mconf
|
|
+mdp
|
|
miboot*
|
|
mk_elfconfig
|
|
mkboot
|
|
mkbugboot
|
|
mkcpustr
|
|
mkdep
|
|
+mkpiggy
|
|
mkprep
|
|
mkregtable
|
|
mktables
|
|
@@ -188,6 +207,7 @@ oui.c*
|
|
page-types
|
|
parse.c
|
|
parse.h
|
|
+parse-events*
|
|
patches*
|
|
pca200e.bin
|
|
pca200e_ecd.bin2
|
|
@@ -197,6 +217,7 @@ perf-archive
|
|
piggyback
|
|
piggy.gzip
|
|
piggy.S
|
|
+pmu-*
|
|
pnmtologo
|
|
ppc_defs.h*
|
|
pss_boot.h
|
|
@@ -207,6 +228,7 @@ r300_reg_safe.h
|
|
r420_reg_safe.h
|
|
r600_reg_safe.h
|
|
recordmcount
|
|
+regdb.c
|
|
relocs
|
|
rlim_names.h
|
|
rn50_reg_safe.h
|
|
@@ -216,7 +238,9 @@ series
|
|
setup
|
|
setup.bin
|
|
setup.elf
|
|
+size_overflow_hash.h
|
|
sImage
|
|
+slabinfo
|
|
sm_tbl*
|
|
split-include
|
|
syscalltab.h
|
|
@@ -227,6 +251,7 @@ tftpboot.img
|
|
timeconst.h
|
|
times.h*
|
|
trix_boot.h
|
|
+user_constants.h
|
|
utsrelease.h*
|
|
vdso-syms.lds
|
|
vdso.lds
|
|
@@ -238,13 +263,17 @@ vdso32.lds
|
|
vdso32.so.dbg
|
|
vdso64.lds
|
|
vdso64.so.dbg
|
|
+vdsox32.lds
|
|
+vdsox32-syms.lds
|
|
version.h*
|
|
vmImage
|
|
vmlinux
|
|
vmlinux-*
|
|
vmlinux.aout
|
|
vmlinux.bin.all
|
|
+vmlinux.bin.bz2
|
|
vmlinux.lds
|
|
+vmlinux.relocs
|
|
vmlinuz
|
|
voffset.h
|
|
vsyscall.lds
|
|
@@ -252,9 +281,11 @@ vsyscall_32.lds
|
|
wanxlfw.inc
|
|
uImage
|
|
unifdef
|
|
+utsrelease.h
|
|
wakeup.bin
|
|
wakeup.elf
|
|
wakeup.lds
|
|
zImage*
|
|
zconf.hash.c
|
|
+zconf.lex.c
|
|
zoffset.h
|
|
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
|
|
index 7b6fa39..9d56aae 100644
|
|
--- a/Documentation/kernel-parameters.txt
|
|
+++ b/Documentation/kernel-parameters.txt
|
|
@@ -2056,6 +2056,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
|
|
the specified number of seconds. This is to be used if
|
|
your oopses keep scrolling off the screen.
|
|
|
|
+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
|
|
+ virtualization environments that don't cope well with the
|
|
+ expand down segment used by UDEREF on X86-32 or the frequent
|
|
+ page table updates on X86-64.
|
|
+
|
|
+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
|
|
+
|
|
pcbit= [HW,ISDN]
|
|
|
|
pcd. [PARIDE]
|
|
diff --git a/Makefile b/Makefile
|
|
index 45ff15e..0ed1032 100644
|
|
--- a/Makefile
|
|
+++ b/Makefile
|
|
@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
|
|
|
|
HOSTCC = gcc
|
|
HOSTCXX = g++
|
|
-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
|
|
-HOSTCXXFLAGS = -O2
|
|
+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
|
|
+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
|
|
+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
|
|
|
|
# Decide whether to build built-in, modular, or both.
|
|
# Normally, just do built-in.
|
|
@@ -411,8 +412,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
|
|
# Rules shared between *config targets and build targets
|
|
|
|
# Basic helpers built in scripts/
|
|
-PHONY += scripts_basic
|
|
-scripts_basic:
|
|
+PHONY += scripts_basic gcc-plugins
|
|
+scripts_basic: gcc-plugins
|
|
$(Q)$(MAKE) $(build)=scripts/basic
|
|
$(Q)rm -f .tmp_quiet_recordmcount
|
|
|
|
@@ -568,6 +569,56 @@ else
|
|
KBUILD_CFLAGS += -O2
|
|
endif
|
|
|
|
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
|
|
+ifneq ($(PLUGINCC),)
|
|
+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
|
|
+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
|
|
+endif
|
|
+ifdef CONFIG_PAX_MEMORY_STACKLEAK
|
|
+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
|
|
+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
|
|
+endif
|
|
+ifdef CONFIG_KALLOCSTAT_PLUGIN
|
|
+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
|
|
+endif
|
|
+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
|
|
+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
|
|
+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
|
|
+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
|
|
+endif
|
|
+ifdef CONFIG_CHECKER_PLUGIN
|
|
+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
|
|
+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
|
|
+endif
|
|
+endif
|
|
+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
|
|
+ifdef CONFIG_PAX_SIZE_OVERFLOW
|
|
+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
|
|
+endif
|
|
+ifdef CONFIG_PAX_LATENT_ENTROPY
|
|
+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
|
|
+endif
|
|
+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
|
|
+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
|
|
+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
|
|
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
|
|
+export PLUGINCC CONSTIFY_PLUGIN
|
|
+ifeq ($(KBUILD_EXTMOD),)
|
|
+gcc-plugins:
|
|
+ $(Q)$(MAKE) $(build)=tools/gcc
|
|
+else
|
|
+gcc-plugins: ;
|
|
+endif
|
|
+else
|
|
+gcc-plugins:
|
|
+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
|
|
+ $(Q)echo "warning, your gcc installation does not support plugins, perhaps the necessary headers are missing?"
|
|
+else
|
|
+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
|
|
+endif
|
|
+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
|
|
+endif
|
|
+
|
|
include $(srctree)/arch/$(SRCARCH)/Makefile
|
|
|
|
ifneq ($(CONFIG_FRAME_WARN),0)
|
|
@@ -937,6 +988,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
|
|
|
|
# The actual objects are generated when descending,
|
|
# make sure no implicit rule kicks in
|
|
+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
|
|
+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
|
|
$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
|
|
|
|
# Handle descending into subdirectories listed in $(vmlinux-dirs)
|
|
@@ -946,7 +999,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
|
|
# Error messages still appears in the original language
|
|
|
|
PHONY += $(vmlinux-dirs)
|
|
-$(vmlinux-dirs): prepare scripts
|
|
+$(vmlinux-dirs): gcc-plugins prepare scripts
|
|
$(Q)$(MAKE) $(build)=$@
|
|
|
|
# Store (new) KERNELRELASE string in include/config/kernel.release
|
|
@@ -990,6 +1043,7 @@ prepare0: archprepare FORCE
|
|
$(Q)$(MAKE) $(build)=.
|
|
|
|
# All the preparing..
|
|
+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
|
|
prepare: prepare0
|
|
|
|
# Generate some files
|
|
@@ -1097,6 +1151,8 @@ all: modules
|
|
# using awk while concatenating to the final file.
|
|
|
|
PHONY += modules
|
|
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
|
|
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
|
|
modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
|
|
$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
|
|
@$(kecho) ' Building modules, stage 2.';
|
|
@@ -1112,7 +1168,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
|
|
|
|
# Target to prepare building external modules
|
|
PHONY += modules_prepare
|
|
-modules_prepare: prepare scripts
|
|
+modules_prepare: gcc-plugins prepare scripts
|
|
|
|
# Target to install modules
|
|
PHONY += modules_install
|
|
@@ -1171,7 +1227,7 @@ CLEAN_FILES += vmlinux System.map \
|
|
MRPROPER_DIRS += include/config usr/include include/generated \
|
|
arch/*/include/generated
|
|
MRPROPER_FILES += .config .config.old .version .old_version \
|
|
- include/linux/version.h \
|
|
+ include/linux/version.h tools/gcc/size_overflow_hash.h\
|
|
Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
|
|
|
|
# clean - Delete most, but leave enough to build external modules
|
|
@@ -1209,6 +1265,7 @@ distclean: mrproper
|
|
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
|
|
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
|
|
-o -name '.*.rej' \
|
|
+ -o -name '.*.rej' -o -name '*.so' \
|
|
-o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
|
|
-type f -print | xargs rm -f
|
|
|
|
@@ -1369,6 +1426,8 @@ PHONY += $(module-dirs) modules
|
|
$(module-dirs): crmodverdir $(objtree)/Module.symvers
|
|
$(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
|
|
|
|
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
|
|
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
|
|
modules: $(module-dirs)
|
|
@$(kecho) ' Building modules, stage 2.';
|
|
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
|
|
@@ -1495,17 +1554,21 @@ else
|
|
target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
|
|
endif
|
|
|
|
-%.s: %.c prepare scripts FORCE
|
|
+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
|
|
+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
|
|
+%.s: %.c gcc-plugins prepare scripts FORCE
|
|
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
|
|
%.i: %.c prepare scripts FORCE
|
|
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
|
|
-%.o: %.c prepare scripts FORCE
|
|
+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
|
|
+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
|
|
+%.o: %.c gcc-plugins prepare scripts FORCE
|
|
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
|
|
%.lst: %.c prepare scripts FORCE
|
|
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
|
|
-%.s: %.S prepare scripts FORCE
|
|
+%.s: %.S gcc-plugins prepare scripts FORCE
|
|
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
|
|
-%.o: %.S prepare scripts FORCE
|
|
+%.o: %.S gcc-plugins prepare scripts FORCE
|
|
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
|
|
%.symtypes: %.c prepare scripts FORCE
|
|
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
|
|
@@ -1515,11 +1578,15 @@ endif
|
|
$(cmd_crmodverdir)
|
|
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
|
|
$(build)=$(build-dir)
|
|
-%/: prepare scripts FORCE
|
|
+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
|
|
+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
|
|
+%/: gcc-plugins prepare scripts FORCE
|
|
$(cmd_crmodverdir)
|
|
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
|
|
$(build)=$(build-dir)
|
|
-%.ko: prepare scripts FORCE
|
|
+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
|
|
+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
|
|
+%.ko: gcc-plugins prepare scripts FORCE
|
|
$(cmd_crmodverdir)
|
|
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
|
|
$(build)=$(build-dir) $(@:.ko=.o)
|
|
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
|
|
index c2cbe4f..f7264b4 100644
|
|
--- a/arch/alpha/include/asm/atomic.h
|
|
+++ b/arch/alpha/include/asm/atomic.h
|
|
@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
|
|
#define atomic_dec(v) atomic_sub(1,(v))
|
|
#define atomic64_dec(v) atomic64_sub(1,(v))
|
|
|
|
+#define atomic64_read_unchecked(v) atomic64_read(v)
|
|
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
|
|
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
|
|
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
|
|
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
|
|
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
|
|
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
|
|
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
|
|
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
|
|
+
|
|
#define smp_mb__before_atomic_dec() smp_mb()
|
|
#define smp_mb__after_atomic_dec() smp_mb()
|
|
#define smp_mb__before_atomic_inc() smp_mb()
|
|
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
|
|
index 968d999..d36b2df 100644
|
|
--- a/arch/alpha/include/asm/elf.h
|
|
+++ b/arch/alpha/include/asm/elf.h
|
|
@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
|
|
|
|
#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
|
|
|
|
+#ifdef CONFIG_PAX_ASLR
|
|
+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
|
|
+
|
|
+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
|
|
+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
|
|
+#endif
|
|
+
|
|
/* $0 is set by ld.so to a pointer to a function which might be
|
|
registered using atexit. This provides a mean for the dynamic
|
|
linker to call DT_FINI functions for shared libraries that have
|
|
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
|
|
index bc2a0da..8ad11ee 100644
|
|
--- a/arch/alpha/include/asm/pgalloc.h
|
|
+++ b/arch/alpha/include/asm/pgalloc.h
|
|
@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
|
|
pgd_set(pgd, pmd);
|
|
}
|
|
|
|
+static inline void
|
|
+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
|
|
+{
|
|
+ pgd_populate(mm, pgd, pmd);
|
|
+}
|
|
+
|
|
extern pgd_t *pgd_alloc(struct mm_struct *mm);
|
|
|
|
static inline void
|
|
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
|
|
index 81a4342..348b927 100644
|
|
--- a/arch/alpha/include/asm/pgtable.h
|
|
+++ b/arch/alpha/include/asm/pgtable.h
|
|
@@ -102,6 +102,17 @@ struct vm_area_struct;
|
|
#define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
|
|
#define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
|
|
#define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
|
|
+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
|
|
+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
|
|
+#else
|
|
+# define PAGE_SHARED_NOEXEC PAGE_SHARED
|
|
+# define PAGE_COPY_NOEXEC PAGE_COPY
|
|
+# define PAGE_READONLY_NOEXEC PAGE_READONLY
|
|
+#endif
|
|
+
|
|
#define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
|
|
|
|
#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
|
|
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
|
|
index 2fd00b7..cfd5069 100644
|
|
--- a/arch/alpha/kernel/module.c
|
|
+++ b/arch/alpha/kernel/module.c
|
|
@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
|
|
|
|
/* The small sections were sorted to the end of the segment.
|
|
The following should definitely cover them. */
|
|
- gp = (u64)me->module_core + me->core_size - 0x8000;
|
|
+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
|
|
got = sechdrs[me->arch.gotsecindex].sh_addr;
|
|
|
|
for (i = 0; i < n; i++) {
|
|
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
|
|
index 49ee319..9ee7d14 100644
|
|
--- a/arch/alpha/kernel/osf_sys.c
|
|
+++ b/arch/alpha/kernel/osf_sys.c
|
|
@@ -1146,7 +1146,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
|
|
/* At this point: (!vma || addr < vma->vm_end). */
|
|
if (limit - len < addr)
|
|
return -ENOMEM;
|
|
- if (!vma || addr + len <= vma->vm_start)
|
|
+ if (check_heap_stack_gap(vma, addr, len))
|
|
return addr;
|
|
addr = vma->vm_end;
|
|
vma = vma->vm_next;
|
|
@@ -1182,6 +1182,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|
merely specific addresses, but regions of memory -- perhaps
|
|
this feature should be incorporated into all ports? */
|
|
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
|
|
+#endif
|
|
+
|
|
if (addr) {
|
|
addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
|
|
if (addr != (unsigned long) -ENOMEM)
|
|
@@ -1189,8 +1193,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|
}
|
|
|
|
/* Next, try allocating at TASK_UNMAPPED_BASE. */
|
|
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
|
|
- len, limit);
|
|
+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
|
|
+
|
|
if (addr != (unsigned long) -ENOMEM)
|
|
return addr;
|
|
|
|
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
|
|
index f153733..f1ac251 100644
|
|
--- a/arch/alpha/mm/fault.c
|
|
+++ b/arch/alpha/mm/fault.c
|
|
@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
|
|
__reload_thread(pcb);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+/*
|
|
+ * PaX: decide what to do with offenders (regs->pc = fault address)
|
|
+ *
|
|
+ * returns 1 when task should be killed
|
|
+ * 2 when patched PLT trampoline was detected
|
|
+ * 3 when unpatched PLT trampoline was detected
|
|
+ */
|
|
+static int pax_handle_fetch_fault(struct pt_regs *regs)
|
|
+{
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUPLT
|
|
+ int err;
|
|
+
|
|
+ do { /* PaX: patched PLT emulation #1 */
|
|
+ unsigned int ldah, ldq, jmp;
|
|
+
|
|
+ err = get_user(ldah, (unsigned int *)regs->pc);
|
|
+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
|
|
+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
|
|
+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
|
|
+ jmp == 0x6BFB0000U)
|
|
+ {
|
|
+ unsigned long r27, addr;
|
|
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
|
|
+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
|
|
+
|
|
+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
|
|
+ err = get_user(r27, (unsigned long *)addr);
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ regs->r27 = r27;
|
|
+ regs->pc = r27;
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+ do { /* PaX: patched PLT emulation #2 */
|
|
+ unsigned int ldah, lda, br;
|
|
+
|
|
+ err = get_user(ldah, (unsigned int *)regs->pc);
|
|
+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
|
|
+ err |= get_user(br, (unsigned int *)(regs->pc+8));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
|
|
+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
|
|
+ (br & 0xFFE00000U) == 0xC3E00000U)
|
|
+ {
|
|
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
|
|
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
|
|
+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
|
|
+
|
|
+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
|
|
+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+ do { /* PaX: unpatched PLT emulation */
|
|
+ unsigned int br;
|
|
+
|
|
+ err = get_user(br, (unsigned int *)regs->pc);
|
|
+
|
|
+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
|
|
+ unsigned int br2, ldq, nop, jmp;
|
|
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
|
|
+
|
|
+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
|
|
+ err = get_user(br2, (unsigned int *)addr);
|
|
+ err |= get_user(ldq, (unsigned int *)(addr+4));
|
|
+ err |= get_user(nop, (unsigned int *)(addr+8));
|
|
+ err |= get_user(jmp, (unsigned int *)(addr+12));
|
|
+ err |= get_user(resolver, (unsigned long *)(addr+16));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if (br2 == 0xC3600000U &&
|
|
+ ldq == 0xA77B000CU &&
|
|
+ nop == 0x47FF041FU &&
|
|
+ jmp == 0x6B7B0000U)
|
|
+ {
|
|
+ regs->r28 = regs->pc+4;
|
|
+ regs->r27 = addr+16;
|
|
+ regs->pc = resolver;
|
|
+ return 3;
|
|
+ }
|
|
+ }
|
|
+ } while (0);
|
|
+#endif
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
|
|
+{
|
|
+ unsigned long i;
|
|
+
|
|
+ printk(KERN_ERR "PAX: bytes at PC: ");
|
|
+ for (i = 0; i < 5; i++) {
|
|
+ unsigned int c;
|
|
+ if (get_user(c, (unsigned int *)pc+i))
|
|
+ printk(KERN_CONT "???????? ");
|
|
+ else
|
|
+ printk(KERN_CONT "%08x ", c);
|
|
+ }
|
|
+ printk("\n");
|
|
+}
|
|
+#endif
|
|
|
|
/*
|
|
* This routine handles page faults. It determines the address,
|
|
@@ -130,8 +248,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
|
|
good_area:
|
|
si_code = SEGV_ACCERR;
|
|
if (cause < 0) {
|
|
- if (!(vma->vm_flags & VM_EXEC))
|
|
+ if (!(vma->vm_flags & VM_EXEC)) {
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
|
|
+ goto bad_area;
|
|
+
|
|
+ up_read(&mm->mmap_sem);
|
|
+ switch (pax_handle_fetch_fault(regs)) {
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUPLT
|
|
+ case 2:
|
|
+ case 3:
|
|
+ return;
|
|
+#endif
|
|
+
|
|
+ }
|
|
+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
|
|
+ do_group_exit(SIGKILL);
|
|
+#else
|
|
goto bad_area;
|
|
+#endif
|
|
+
|
|
+ }
|
|
} else if (!cause) {
|
|
/* Allow reads even for write-only mappings */
|
|
if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
|
|
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
|
|
index f0dc53b..621d737 100644
|
|
--- a/arch/arm/include/asm/atomic.h
|
|
+++ b/arch/arm/include/asm/atomic.h
|
|
@@ -17,17 +17,35 @@
|
|
#include <asm/barrier.h>
|
|
#include <asm/cmpxchg.h>
|
|
|
|
+#ifdef CONFIG_GENERIC_ATOMIC64
|
|
+#include <asm-generic/atomic64.h>
|
|
+#endif
|
|
+
|
|
#define ATOMIC_INIT(i) { (i) }
|
|
|
|
#ifdef __KERNEL__
|
|
|
|
+#define _ASM_EXTABLE(from, to) \
|
|
+" .pushsection __ex_table,\"a\"\n"\
|
|
+" .align 3\n" \
|
|
+" .long " #from ", " #to"\n" \
|
|
+" .popsection"
|
|
+
|
|
/*
|
|
* On ARM, ordinary assignment (str instruction) doesn't clear the local
|
|
* strex/ldrex monitor on some implementations. The reason we can use it for
|
|
* atomic_set() is the clrex or dummy strex done on every exception return.
|
|
*/
|
|
#define atomic_read(v) (*(volatile int *)&(v)->counter)
|
|
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
|
|
+{
|
|
+ return v->counter;
|
|
+}
|
|
#define atomic_set(v,i) (((v)->counter) = (i))
|
|
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
|
|
+{
|
|
+ v->counter = i;
|
|
+}
|
|
|
|
#if __LINUX_ARM_ARCH__ >= 6
|
|
|
|
@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
|
|
int result;
|
|
|
|
__asm__ __volatile__("@ atomic_add\n"
|
|
+"1: ldrex %1, [%3]\n"
|
|
+" adds %0, %1, %4\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+" bvc 3f\n"
|
|
+"2: bkpt 0xf103\n"
|
|
+"3:\n"
|
|
+#endif
|
|
+
|
|
+" strex %1, %0, [%3]\n"
|
|
+" teq %1, #0\n"
|
|
+" bne 1b"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+"\n4:\n"
|
|
+ _ASM_EXTABLE(2b, 4b)
|
|
+#endif
|
|
+
|
|
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
|
|
+ : "r" (&v->counter), "Ir" (i)
|
|
+ : "cc");
|
|
+}
|
|
+
|
|
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
|
|
+{
|
|
+ unsigned long tmp;
|
|
+ int result;
|
|
+
|
|
+ __asm__ __volatile__("@ atomic_add_unchecked\n"
|
|
"1: ldrex %0, [%3]\n"
|
|
" add %0, %0, %4\n"
|
|
" strex %1, %0, [%3]\n"
|
|
@@ -81,6 +128,35 @@ static inline void atomic_sub(int i, atomic_t *v)
|
|
int result;
|
|
|
|
__asm__ __volatile__("@ atomic_sub\n"
|
|
+"1: ldrex %1, [%3]\n"
|
|
+" subs %0, %1, %4\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+" bvc 3f\n"
|
|
+"2: bkpt 0xf103\n"
|
|
+"3:\n"
|
|
+#endif
|
|
+
|
|
+" strex %1, %0, [%3]\n"
|
|
+" teq %1, #0\n"
|
|
+" bne 1b"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+"\n4:\n"
|
|
+ _ASM_EXTABLE(2b, 4b)
|
|
+#endif
|
|
+
|
|
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
|
|
+ : "r" (&v->counter), "Ir" (i)
|
|
+ : "cc");
|
|
+}
|
|
+
|
|
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
|
|
+{
|
|
+ unsigned long tmp;
|
|
+ int result;
|
|
+
|
|
+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
|
|
"1: ldrex %0, [%3]\n"
|
|
" sub %0, %0, %4\n"
|
|
" strex %1, %0, [%3]\n"
|
|
@@ -182,7 +258,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
|
|
|
|
return val;
|
|
}
|
|
+
|
|
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
|
|
+{
|
|
+ return atomic_add_return(i, v);
|
|
+}
|
|
+
|
|
#define atomic_add(i, v) (void) atomic_add_return(i, v)
|
|
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
|
|
+{
|
|
+ (void) atomic_add_return(i, v);
|
|
+}
|
|
|
|
static inline int atomic_sub_return(int i, atomic_t *v)
|
|
{
|
|
@@ -197,6 +283,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
|
|
return val;
|
|
}
|
|
#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
|
|
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
|
|
+{
|
|
+ (void) atomic_sub_return(i, v);
|
|
+}
|
|
|
|
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
|
|
{
|
|
@@ -285,6 +375,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
|
|
return result;
|
|
}
|
|
|
|
+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
|
|
+{
|
|
+ u64 result;
|
|
+
|
|
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
|
|
+" ldrexd %0, %H0, [%1]"
|
|
+ : "=&r" (result)
|
|
+ : "r" (&v->counter), "Qo" (v->counter)
|
|
+ );
|
|
+
|
|
+ return result;
|
|
+}
|
|
+
|
|
static inline void atomic64_set(atomic64_t *v, u64 i)
|
|
{
|
|
u64 tmp;
|
|
@@ -308,6 +411,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
|
|
__asm__ __volatile__("@ atomic64_add\n"
|
|
"1: ldrexd %0, %H0, [%3]\n"
|
|
" adds %0, %0, %4\n"
|
|
+" adcs %H0, %H0, %H4\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+" bvc 3f\n"
|
|
+"2: bkpt 0xf103\n"
|
|
+"3:\n"
|
|
+#endif
|
|
+
|
|
+" strexd %1, %0, %H0, [%3]\n"
|
|
+" teq %1, #0\n"
|
|
+" bne 1b"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+"\n4:\n"
|
|
+ _ASM_EXTABLE(2b, 4b)
|
|
+#endif
|
|
+
|
|
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
|
|
+ : "r" (&v->counter), "r" (i)
|
|
+ : "cc");
|
|
+}
|
|
+
|
|
+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
|
|
+{
|
|
+ u64 result;
|
|
+ unsigned long tmp;
|
|
+
|
|
+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
|
|
+"1: ldrexd %0, %H0, [%3]\n"
|
|
+" adds %0, %0, %4\n"
|
|
" adc %H0, %H0, %H4\n"
|
|
" strexd %1, %0, %H0, [%3]\n"
|
|
" teq %1, #0\n"
|
|
@@ -376,6 +509,39 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
|
|
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
|
|
: "r" (&v->counter), "r" (i)
|
|
: "cc");
|
|
+}
|
|
+
|
|
+static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
|
|
+{
|
|
+ u64 result, tmp;
|
|
+
|
|
+ smp_mb();
|
|
+
|
|
+ __asm__ __volatile__("@ atomic64_sub_return\n"
|
|
+"1: ldrexd %1, %H1, [%3]\n"
|
|
+" subs %0, %1, %4\n"
|
|
+" sbcs %H0, %H1, %H4\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+" bvc 3f\n"
|
|
+" mov %0, %1\n"
|
|
+" mov %H0, %H1\n"
|
|
+"2: bkpt 0xf103\n"
|
|
+"3:\n"
|
|
+#endif
|
|
+
|
|
+" strexd %1, %0, %H0, [%3]\n"
|
|
+" teq %1, #0\n"
|
|
+" bne 1b"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+"\n4:\n"
|
|
+ _ASM_EXTABLE(2b, 4b)
|
|
+#endif
|
|
+
|
|
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
|
|
+ : "r" (&v->counter), "r" (i)
|
|
+ : "cc");
|
|
|
|
smp_mb();
|
|
|
|
@@ -407,6 +573,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
|
|
return oldval;
|
|
}
|
|
|
|
+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
|
|
+{
|
|
+ u64 oldval;
|
|
+ unsigned long res;
|
|
+
|
|
+ smp_mb();
|
|
+
|
|
+ do {
|
|
+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
|
|
+ "ldrexd %1, %H1, [%3]\n"
|
|
+ "mov %0, #0\n"
|
|
+ "teq %1, %4\n"
|
|
+ "teqeq %H1, %H4\n"
|
|
+ "strexdeq %0, %5, %H5, [%3]"
|
|
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
|
|
+ : "r" (&ptr->counter), "r" (old), "r" (new)
|
|
+ : "cc");
|
|
+ } while (res);
|
|
+
|
|
+ smp_mb();
|
|
+
|
|
+ return oldval;
|
|
+}
|
|
+
|
|
static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
|
|
{
|
|
u64 result;
|
|
@@ -470,13 +660,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
|
|
" teq %0, %5\n"
|
|
" teqeq %H0, %H5\n"
|
|
" moveq %1, #0\n"
|
|
-" beq 2f\n"
|
|
+" beq 4f\n"
|
|
" adds %0, %0, %6\n"
|
|
-" adc %H0, %H0, %H6\n"
|
|
+" adcs %H0, %H0, %H6\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+" bvc 3f\n"
|
|
+"2: bkpt 0xf103\n"
|
|
+"3:\n"
|
|
+#endif
|
|
+
|
|
" strexd %2, %0, %H0, [%4]\n"
|
|
" teq %2, #0\n"
|
|
" bne 1b\n"
|
|
-"2:"
|
|
+"4:\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ _ASM_EXTABLE(2b, 4b)
|
|
+#endif
|
|
+
|
|
: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
|
|
: "r" (&v->counter), "r" (u), "r" (a)
|
|
: "cc");
|
|
@@ -489,10 +691,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
|
|
|
|
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
|
|
#define atomic64_inc(v) atomic64_add(1LL, (v))
|
|
+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
|
|
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
|
|
+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
|
|
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
|
|
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
|
|
#define atomic64_dec(v) atomic64_sub(1LL, (v))
|
|
+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
|
|
#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
|
|
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
|
|
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
|
|
diff --git a/arch/arm/include/asm/atomic.h.rej b/arch/arm/include/asm/atomic.h.rej
|
|
new file mode 100644
|
|
index 0000000..635cf3d
|
|
--- /dev/null
|
|
+++ b/arch/arm/include/asm/atomic.h.rej
|
|
@@ -0,0 +1,321 @@
|
|
+diff a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h (rejected hunks)
|
|
+@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
|
|
+ smp_mb();
|
|
+
|
|
+ __asm__ __volatile__("@ atomic_add_return\n"
|
|
++"1: ldrex %1, [%3]\n"
|
|
++" adds %0, %1, %4\n"
|
|
++
|
|
++#ifdef CONFIG_PAX_REFCOUNT
|
|
++" bvc 3f\n"
|
|
++" mov %0, %1\n"
|
|
++"2: bkpt 0xf103\n"
|
|
++"3:\n"
|
|
++#endif
|
|
++
|
|
++" strex %1, %0, [%3]\n"
|
|
++" teq %1, #0\n"
|
|
++" bne 1b"
|
|
++
|
|
++#ifdef CONFIG_PAX_REFCOUNT
|
|
++"\n4:\n"
|
|
++ _ASM_EXTABLE(2b, 4b)
|
|
++#endif
|
|
++
|
|
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
|
|
++ : "r" (&v->counter), "Ir" (i)
|
|
++ : "cc");
|
|
++
|
|
++ smp_mb();
|
|
++
|
|
++ return result;
|
|
++}
|
|
++
|
|
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
|
|
++{
|
|
++ unsigned long tmp;
|
|
++ int result;
|
|
++
|
|
++ smp_mb();
|
|
++
|
|
++ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
|
|
+ "1: ldrex %0, [%3]\n"
|
|
+ " add %0, %0, %4\n"
|
|
+ " strex %1, %0, [%3]\n"
|
|
+@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
|
|
+ smp_mb();
|
|
+
|
|
+ __asm__ __volatile__("@ atomic_sub_return\n"
|
|
+-"1: ldrex %0, [%3]\n"
|
|
+-" sub %0, %0, %4\n"
|
|
++"1: ldrex %1, [%3]\n"
|
|
++" subs %0, %1, %4\n"
|
|
++
|
|
++#ifdef CONFIG_PAX_REFCOUNT
|
|
++" bvc 3f\n"
|
|
++" mov %0, %1\n"
|
|
++"2: bkpt 0xf103\n"
|
|
++"3:\n"
|
|
++#endif
|
|
++
|
|
+ " strex %1, %0, [%3]\n"
|
|
+ " teq %1, #0\n"
|
|
+ " bne 1b"
|
|
++
|
|
++#ifdef CONFIG_PAX_REFCOUNT
|
|
++"\n4:\n"
|
|
++ _ASM_EXTABLE(2b, 4b)
|
|
++#endif
|
|
++
|
|
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
|
|
+ : "r" (&v->counter), "Ir" (i)
|
|
+ : "cc");
|
|
+@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
|
|
+ return oldval;
|
|
+ }
|
|
+
|
|
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
|
|
++{
|
|
++ unsigned long oldval, res;
|
|
++
|
|
++ smp_mb();
|
|
++
|
|
++ do {
|
|
++ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
|
|
++ "ldrex %1, [%3]\n"
|
|
++ "mov %0, #0\n"
|
|
++ "teq %1, %4\n"
|
|
++ "strexeq %0, %5, [%3]\n"
|
|
++ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
|
|
++ : "r" (&ptr->counter), "Ir" (old), "r" (new)
|
|
++ : "cc");
|
|
++ } while (res);
|
|
++
|
|
++ smp_mb();
|
|
++
|
|
++ return oldval;
|
|
++}
|
|
++
|
|
+ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
|
|
+ {
|
|
+ unsigned long tmp, tmp2;
|
|
+@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
|
|
++{
|
|
++ return atomic_cmpxchg(v, old, new);
|
|
++}
|
|
++
|
|
+ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
|
|
+ {
|
|
+ unsigned long flags;
|
|
+@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
|
|
+ #endif /* __LINUX_ARM_ARCH__ */
|
|
+
|
|
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
|
|
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
|
|
++{
|
|
++ return xchg(&v->counter, new);
|
|
++}
|
|
+
|
|
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
|
|
+ {
|
|
+@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
|
|
+ }
|
|
+
|
|
+ #define atomic_inc(v) atomic_add(1, v)
|
|
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
|
|
++{
|
|
++ atomic_add_unchecked(1, v);
|
|
++}
|
|
+ #define atomic_dec(v) atomic_sub(1, v)
|
|
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
|
|
++{
|
|
++ atomic_sub_unchecked(1, v);
|
|
++}
|
|
+
|
|
+ #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
|
|
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
|
|
++{
|
|
++ return atomic_add_return_unchecked(1, v) == 0;
|
|
++}
|
|
+ #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
|
|
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
|
|
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
|
|
++{
|
|
++ return atomic_add_return_unchecked(1, v);
|
|
++}
|
|
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
|
|
+ #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
|
|
+
|
|
+@@ -241,6 +428,14 @@ typedef struct {
|
|
+ u64 __aligned(8) counter;
|
|
+ } atomic64_t;
|
|
+
|
|
++#ifdef CONFIG_PAX_REFCOUNT
|
|
++typedef struct {
|
|
++ u64 __aligned(8) counter;
|
|
++} atomic64_unchecked_t;
|
|
++#else
|
|
++typedef atomic64_t atomic64_unchecked_t;
|
|
++#endif
|
|
++
|
|
+ #define ATOMIC64_INIT(i) { (i) }
|
|
+
|
|
+ static inline u64 atomic64_read(atomic64_t *v)
|
|
+@@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
|
|
+ : "cc");
|
|
+ }
|
|
+
|
|
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
|
|
++{
|
|
++ u64 tmp;
|
|
++
|
|
++ __asm__ __volatile__("@ atomic64_set_unchecked\n"
|
|
++"1: ldrexd %0, %H0, [%2]\n"
|
|
++" strexd %0, %3, %H3, [%2]\n"
|
|
++" teq %0, #0\n"
|
|
++" bne 1b"
|
|
++ : "=&r" (tmp), "=Qo" (v->counter)
|
|
++ : "r" (&v->counter), "r" (i)
|
|
++ : "cc");
|
|
++}
|
|
++
|
|
+ static inline void atomic64_add(u64 i, atomic64_t *v)
|
|
+ {
|
|
+ u64 result;
|
|
+@@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
|
|
+
|
|
+ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
|
|
+ {
|
|
++ u64 result, tmp;
|
|
++
|
|
++ smp_mb();
|
|
++
|
|
++ __asm__ __volatile__("@ atomic64_add_return\n"
|
|
++"1: ldrexd %1, %H1, [%3]\n"
|
|
++" adds %0, %1, %4\n"
|
|
++" adcs %H0, %H1, %H4\n"
|
|
++
|
|
++#ifdef CONFIG_PAX_REFCOUNT
|
|
++" bvc 3f\n"
|
|
++" mov %0, %1\n"
|
|
++" mov %H0, %H1\n"
|
|
++"2: bkpt 0xf103\n"
|
|
++"3:\n"
|
|
++#endif
|
|
++
|
|
++" strexd %1, %0, %H0, [%3]\n"
|
|
++" teq %1, #0\n"
|
|
++" bne 1b"
|
|
++
|
|
++#ifdef CONFIG_PAX_REFCOUNT
|
|
++"\n4:\n"
|
|
++ _ASM_EXTABLE(2b, 4b)
|
|
++#endif
|
|
++
|
|
++ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
|
|
++ : "r" (&v->counter), "r" (i)
|
|
++ : "cc");
|
|
++
|
|
++ smp_mb();
|
|
++
|
|
++ return result;
|
|
++}
|
|
++
|
|
++static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
|
|
++{
|
|
+ u64 result;
|
|
+ unsigned long tmp;
|
|
+
|
|
+ smp_mb();
|
|
+
|
|
+- __asm__ __volatile__("@ atomic64_add_return\n"
|
|
++ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
|
|
+ "1: ldrexd %0, %H0, [%3]\n"
|
|
+ " adds %0, %0, %4\n"
|
|
+ " adc %H0, %H0, %H4\n"
|
|
+@@ -318,23 +607,34 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
|
|
+ __asm__ __volatile__("@ atomic64_sub\n"
|
|
+ "1: ldrexd %0, %H0, [%3]\n"
|
|
+ " subs %0, %0, %4\n"
|
|
+-" sbc %H0, %H0, %H4\n"
|
|
++" sbcs %H0, %H0, %H4\n"
|
|
++
|
|
++#ifdef CONFIG_PAX_REFCOUNT
|
|
++" bvc 3f\n"
|
|
++"2: bkpt 0xf103\n"
|
|
++"3:\n"
|
|
++#endif
|
|
++
|
|
+ " strexd %1, %0, %H0, [%3]\n"
|
|
+ " teq %1, #0\n"
|
|
+ " bne 1b"
|
|
++
|
|
++#ifdef CONFIG_PAX_REFCOUNT
|
|
++"\n4:\n"
|
|
++ _ASM_EXTABLE(2b, 4b)
|
|
++#endif
|
|
++
|
|
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
|
|
+ : "r" (&v->counter), "r" (i)
|
|
+ : "cc");
|
|
+ }
|
|
+
|
|
+-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
|
|
++static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
|
|
+ {
|
|
+ u64 result;
|
|
+ unsigned long tmp;
|
|
+
|
|
+- smp_mb();
|
|
+-
|
|
+- __asm__ __volatile__("@ atomic64_sub_return\n"
|
|
++ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
|
|
+ "1: ldrexd %0, %H0, [%3]\n"
|
|
+ " subs %0, %0, %4\n"
|
|
+ " sbc %H0, %H0, %H4\n"
|
|
+@@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
|
|
+
|
|
+ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
|
|
+ {
|
|
+- u64 result;
|
|
+- unsigned long tmp;
|
|
++ u64 result, tmp;
|
|
+
|
|
+ smp_mb();
|
|
+
|
|
+ __asm__ __volatile__("@ atomic64_dec_if_positive\n"
|
|
+-"1: ldrexd %0, %H0, [%3]\n"
|
|
+-" subs %0, %0, #1\n"
|
|
+-" sbc %H0, %H0, #0\n"
|
|
++"1: ldrexd %1, %H1, [%3]\n"
|
|
++" subs %0, %1, #1\n"
|
|
++" sbcs %H0, %H1, #0\n"
|
|
++
|
|
++#ifdef CONFIG_PAX_REFCOUNT
|
|
++" bvc 3f\n"
|
|
++" mov %0, %1\n"
|
|
++" mov %H0, %H1\n"
|
|
++"2: bkpt 0xf103\n"
|
|
++"3:\n"
|
|
++#endif
|
|
++
|
|
+ " teq %H0, #0\n"
|
|
+-" bmi 2f\n"
|
|
++" bmi 4f\n"
|
|
+ " strexd %1, %0, %H0, [%3]\n"
|
|
+ " teq %1, #0\n"
|
|
+ " bne 1b\n"
|
|
+-"2:"
|
|
++"4:\n"
|
|
++
|
|
++#ifdef CONFIG_PAX_REFCOUNT
|
|
++ _ASM_EXTABLE(2b, 4b)
|
|
++#endif
|
|
++
|
|
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
|
|
+ : "r" (&v->counter)
|
|
+ : "cc");
|
|
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
|
|
index 75fe66b..73dab99 100644
|
|
--- a/arch/arm/include/asm/cache.h
|
|
+++ b/arch/arm/include/asm/cache.h
|
|
@@ -5,7 +5,7 @@
|
|
#define __ASMARM_CACHE_H
|
|
|
|
#define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
|
|
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
|
|
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
|
|
|
|
/*
|
|
* Memory returned by kmalloc() may be used for DMA, so we must make
|
|
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
|
|
index 1a417b3..c71240c 100644
|
|
--- a/arch/arm/include/asm/cacheflush.h
|
|
+++ b/arch/arm/include/asm/cacheflush.h
|
|
@@ -133,7 +133,7 @@ struct cpu_cache_fns {
|
|
void (*dma_inv_range)(const void *, const void *);
|
|
void (*dma_clean_range)(const void *, const void *);
|
|
void (*dma_flush_range)(const void *, const void *);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/*
|
|
* Select the calling method
|
|
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
|
|
index abb2c37..96db950 100644
|
|
--- a/arch/arm/include/asm/cmpxchg.h
|
|
+++ b/arch/arm/include/asm/cmpxchg.h
|
|
@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
|
|
|
|
#define xchg(ptr,x) \
|
|
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
|
|
+#define xchg_unchecked(ptr,x) \
|
|
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
|
|
|
|
#include <asm-generic/cmpxchg-local.h>
|
|
|
|
diff --git a/arch/arm/include/asm/elf.h.rej b/arch/arm/include/asm/elf.h.rej
|
|
new file mode 100644
|
|
index 0000000..732816df6
|
|
--- /dev/null
|
|
+++ b/arch/arm/include/asm/elf.h.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- arch/arm/include/asm/elf.h 2012-05-21 11:32:31.723926261 +0200
|
|
++++ arch/arm/include/asm/elf.h 2012-05-21 12:10:08.424048829 +0200
|
|
+@@ -133,8 +140,4 @@ int dump_task_regs(struct task_struct *t
|
|
+ extern void elf_set_personality(const struct elf32_hdr *);
|
|
+ #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
|
|
+
|
|
+-struct mm_struct;
|
|
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
|
|
+-#define arch_randomize_brk arch_randomize_brk
|
|
+-
|
|
+ #endif
|
|
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
|
|
index e51b1e8..32a3113 100644
|
|
--- a/arch/arm/include/asm/kmap_types.h
|
|
+++ b/arch/arm/include/asm/kmap_types.h
|
|
@@ -21,6 +21,7 @@ enum km_type {
|
|
KM_L1_CACHE,
|
|
KM_L2_CACHE,
|
|
KM_KDB,
|
|
+ KM_CLEARPAGE,
|
|
KM_TYPE_NR
|
|
};
|
|
|
|
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
|
|
index f94784f..9a09a4a 100644
|
|
--- a/arch/arm/include/asm/outercache.h
|
|
+++ b/arch/arm/include/asm/outercache.h
|
|
@@ -35,7 +35,7 @@ struct outer_cache_fns {
|
|
#endif
|
|
void (*set_debug)(unsigned long);
|
|
void (*resume)(void);
|
|
-};
|
|
+} __no_const;
|
|
|
|
extern struct outer_cache_fns outer_cache;
|
|
|
|
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
|
|
index 673205e..b848020 100644
|
|
--- a/arch/arm/include/asm/page.h
|
|
+++ b/arch/arm/include/asm/page.h
|
|
@@ -123,7 +123,7 @@ struct cpu_user_fns {
|
|
void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
|
|
void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
|
|
unsigned long vaddr, struct vm_area_struct *vma);
|
|
-};
|
|
+} __no_const;
|
|
|
|
#ifdef MULTI_USER
|
|
extern struct cpu_user_fns cpu_user;
|
|
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
|
|
index 943504f..bf8d667 100644
|
|
--- a/arch/arm/include/asm/pgalloc.h
|
|
+++ b/arch/arm/include/asm/pgalloc.h
|
|
@@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
|
|
set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
|
|
}
|
|
|
|
+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
|
|
+{
|
|
+ pud_populate(mm, pud, pmd);
|
|
+}
|
|
+
|
|
#else /* !CONFIG_ARM_LPAE */
|
|
|
|
/*
|
|
@@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
|
|
#define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
|
|
#define pmd_free(mm, pmd) do { } while (0)
|
|
#define pud_populate(mm,pmd,pte) BUG()
|
|
+#define pud_populate_kernel(mm,pmd,pte) BUG()
|
|
|
|
#endif /* CONFIG_ARM_LPAE */
|
|
|
|
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
|
|
index 508ad5d..e8158b5 100644
|
|
--- a/arch/arm/include/asm/uaccess.h
|
|
+++ b/arch/arm/include/asm/uaccess.h
|
|
@@ -22,6 +22,8 @@
|
|
#define VERIFY_READ 0
|
|
#define VERIFY_WRITE 1
|
|
|
|
+extern void check_object_size(const void *ptr, unsigned long n, bool to);
|
|
+
|
|
/*
|
|
* The exception table consists of pairs of addresses: the first is the
|
|
* address of an instruction that is allowed to fault, and the second is
|
|
@@ -405,8 +407,23 @@ do { \
|
|
|
|
|
|
#ifdef CONFIG_MMU
|
|
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
|
|
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
|
|
+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
|
|
+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
|
|
+
|
|
+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
|
|
+{
|
|
+ if (!__builtin_constant_p(n))
|
|
+ check_object_size(to, n, false);
|
|
+ return ___copy_from_user(to, from, n);
|
|
+}
|
|
+
|
|
+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
+{
|
|
+ if (!__builtin_constant_p(n))
|
|
+ check_object_size(from, n, true);
|
|
+ return ___copy_to_user(to, from, n);
|
|
+}
|
|
+
|
|
extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
|
|
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
|
|
extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
|
|
@@ -418,6 +435,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
|
|
|
|
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
|
|
{
|
|
+ if ((long)n < 0)
|
|
+ return n;
|
|
+
|
|
if (access_ok(VERIFY_READ, from, n))
|
|
n = __copy_from_user(to, from, n);
|
|
else /* security hole - plug it */
|
|
@@ -427,6 +447,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
|
|
|
|
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
{
|
|
+ if ((long)n < 0)
|
|
+ return n;
|
|
+
|
|
if (access_ok(VERIFY_WRITE, to, n))
|
|
n = __copy_to_user(to, from, n);
|
|
return n;
|
|
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
|
|
index 60d3b73..9168db0 100644
|
|
--- a/arch/arm/kernel/armksyms.c
|
|
+++ b/arch/arm/kernel/armksyms.c
|
|
@@ -89,8 +89,8 @@ EXPORT_SYMBOL(__memzero);
|
|
#ifdef CONFIG_MMU
|
|
EXPORT_SYMBOL(copy_page);
|
|
|
|
-EXPORT_SYMBOL(__copy_from_user);
|
|
-EXPORT_SYMBOL(__copy_to_user);
|
|
+EXPORT_SYMBOL(___copy_from_user);
|
|
+EXPORT_SYMBOL(___copy_to_user);
|
|
EXPORT_SYMBOL(__clear_user);
|
|
|
|
EXPORT_SYMBOL(__get_user_1);
|
|
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
|
|
index ddc9618..c422585 100644
|
|
--- a/arch/arm/kernel/process.c
|
|
+++ b/arch/arm/kernel/process.c
|
|
@@ -29,7 +29,6 @@
|
|
#include <linux/tick.h>
|
|
#include <linux/utsname.h>
|
|
#include <linux/uaccess.h>
|
|
-#include <linux/random.h>
|
|
#include <linux/hw_breakpoint.h>
|
|
#include <linux/cpuidle.h>
|
|
#include <linux/console.h>
|
|
@@ -697,12 +696,6 @@ unsigned long get_wchan(struct task_struct *p)
|
|
return 0;
|
|
}
|
|
|
|
-unsigned long arch_randomize_brk(struct mm_struct *mm)
|
|
-{
|
|
- unsigned long range_end = mm->brk + 0x02000000;
|
|
- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
|
|
-}
|
|
-
|
|
#ifdef CONFIG_MMU
|
|
#ifdef CONFIG_KUSER_HELPERS
|
|
/*
|
|
diff --git a/arch/arm/kernel/process.c.rej b/arch/arm/kernel/process.c.rej
|
|
new file mode 100644
|
|
index 0000000..0b8d034
|
|
--- /dev/null
|
|
+++ b/arch/arm/kernel/process.c.rej
|
|
@@ -0,0 +1,13 @@
|
|
+diff a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c (rejected hunks)
|
|
+@@ -332,9 +331,10 @@ void machine_power_off(void)
|
|
+ machine_shutdown();
|
|
+ if (pm_power_off)
|
|
+ pm_power_off();
|
|
++ BUG();
|
|
+ }
|
|
+
|
|
+-void machine_restart(char *cmd)
|
|
++__noreturn void machine_restart(char *cmd)
|
|
+ {
|
|
+ machine_shutdown();
|
|
+
|
|
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
|
|
index 8c16abc..db0145b 100644
|
|
--- a/arch/arm/kernel/setup.c
|
|
+++ b/arch/arm/kernel/setup.c
|
|
@@ -120,13 +120,13 @@ struct processor processor __read_mostly;
|
|
struct cpu_tlb_fns cpu_tlb __read_mostly;
|
|
#endif
|
|
#ifdef MULTI_USER
|
|
-struct cpu_user_fns cpu_user __read_mostly;
|
|
+struct cpu_user_fns cpu_user __read_only;
|
|
#endif
|
|
#ifdef MULTI_CACHE
|
|
-struct cpu_cache_fns cpu_cache __read_mostly;
|
|
+struct cpu_cache_fns cpu_cache __read_only;
|
|
#endif
|
|
#ifdef CONFIG_OUTER_CACHE
|
|
-struct outer_cache_fns outer_cache __read_mostly;
|
|
+struct outer_cache_fns outer_cache __read_only;
|
|
EXPORT_SYMBOL(outer_cache);
|
|
#endif
|
|
|
|
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
|
|
index 66a477a..bee61d36 100644
|
|
--- a/arch/arm/lib/copy_from_user.S
|
|
+++ b/arch/arm/lib/copy_from_user.S
|
|
@@ -16,7 +16,7 @@
|
|
/*
|
|
* Prototype:
|
|
*
|
|
- * size_t __copy_from_user(void *to, const void *from, size_t n)
|
|
+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
|
|
*
|
|
* Purpose:
|
|
*
|
|
@@ -84,11 +84,11 @@
|
|
|
|
.text
|
|
|
|
-ENTRY(__copy_from_user)
|
|
+ENTRY(___copy_from_user)
|
|
|
|
#include "copy_template.S"
|
|
|
|
-ENDPROC(__copy_from_user)
|
|
+ENDPROC(___copy_from_user)
|
|
|
|
.pushsection .fixup,"ax"
|
|
.align 0
|
|
diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
|
|
index 6ee2f67..d1cce76 100644
|
|
--- a/arch/arm/lib/copy_page.S
|
|
+++ b/arch/arm/lib/copy_page.S
|
|
@@ -10,6 +10,7 @@
|
|
* ASM optimised string functions
|
|
*/
|
|
#include <linux/linkage.h>
|
|
+#include <linux/const.h>
|
|
#include <asm/assembler.h>
|
|
#include <asm/asm-offsets.h>
|
|
#include <asm/cache.h>
|
|
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
|
|
index d066df6..df28194 100644
|
|
--- a/arch/arm/lib/copy_to_user.S
|
|
+++ b/arch/arm/lib/copy_to_user.S
|
|
@@ -16,7 +16,7 @@
|
|
/*
|
|
* Prototype:
|
|
*
|
|
- * size_t __copy_to_user(void *to, const void *from, size_t n)
|
|
+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
|
|
*
|
|
* Purpose:
|
|
*
|
|
@@ -88,11 +88,11 @@
|
|
.text
|
|
|
|
ENTRY(__copy_to_user_std)
|
|
-WEAK(__copy_to_user)
|
|
+WEAK(___copy_to_user)
|
|
|
|
#include "copy_template.S"
|
|
|
|
-ENDPROC(__copy_to_user)
|
|
+ENDPROC(___copy_to_user)
|
|
ENDPROC(__copy_to_user_std)
|
|
|
|
.pushsection .fixup,"ax"
|
|
diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
|
|
index 5c908b1..e712687 100644
|
|
--- a/arch/arm/lib/uaccess.S
|
|
+++ b/arch/arm/lib/uaccess.S
|
|
@@ -20,7 +20,7 @@
|
|
|
|
#define PAGE_SHIFT 12
|
|
|
|
-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
|
|
+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
|
|
* Purpose : copy a block to user memory from kernel memory
|
|
* Params : to - user memory
|
|
* : from - kernel memory
|
|
@@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
|
|
sub r2, r2, ip
|
|
b .Lc2u_dest_aligned
|
|
|
|
-ENTRY(__copy_to_user)
|
|
+ENTRY(___copy_to_user)
|
|
stmfd sp!, {r2, r4 - r7, lr}
|
|
cmp r2, #4
|
|
blt .Lc2u_not_enough
|
|
@@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
|
|
ldrgtb r3, [r1], #0
|
|
USER( TUSER( strgtb) r3, [r0], #1) @ May fault
|
|
b .Lc2u_finished
|
|
-ENDPROC(__copy_to_user)
|
|
+ENDPROC(___copy_to_user)
|
|
|
|
.pushsection .fixup,"ax"
|
|
.align 0
|
|
9001: ldmfd sp!, {r0, r4 - r7, pc}
|
|
.popsection
|
|
|
|
-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
|
|
+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
|
|
* Purpose : copy a block from user memory to kernel memory
|
|
* Params : to - kernel memory
|
|
* : from - user memory
|
|
@@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
|
|
sub r2, r2, ip
|
|
b .Lcfu_dest_aligned
|
|
|
|
-ENTRY(__copy_from_user)
|
|
+ENTRY(___copy_from_user)
|
|
stmfd sp!, {r0, r2, r4 - r7, lr}
|
|
cmp r2, #4
|
|
blt .Lcfu_not_enough
|
|
@@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
|
|
USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
|
|
strgtb r3, [r0], #1
|
|
b .Lcfu_finished
|
|
-ENDPROC(__copy_from_user)
|
|
+ENDPROC(___copy_from_user)
|
|
|
|
.pushsection .fixup,"ax"
|
|
.align 0
|
|
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
|
|
index 025f742..8432b08 100644
|
|
--- a/arch/arm/lib/uaccess_with_memcpy.c
|
|
+++ b/arch/arm/lib/uaccess_with_memcpy.c
|
|
@@ -104,7 +104,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
|
|
}
|
|
|
|
unsigned long
|
|
-__copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
+___copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
{
|
|
/*
|
|
* This test is stubbed out of the main function above to keep
|
|
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
|
|
index 518091c..eae9a76 100644
|
|
--- a/arch/arm/mach-omap2/board-n8x0.c
|
|
+++ b/arch/arm/mach-omap2/board-n8x0.c
|
|
@@ -596,7 +596,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
|
|
}
|
|
#endif
|
|
|
|
-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
|
|
+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
|
|
.late_init = n8x0_menelaus_late_init,
|
|
};
|
|
|
|
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
|
|
index be99f57..387ba32 100644
|
|
--- a/arch/arm/mm/fault.c
|
|
+++ b/arch/arm/mm/fault.c
|
|
@@ -184,6 +184,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
|
|
}
|
|
#endif
|
|
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ if (fsr & FSR_LNX_PF) {
|
|
+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
|
|
+ do_group_exit(SIGKILL);
|
|
+ }
|
|
+#endif
|
|
+
|
|
tsk->thread.address = addr;
|
|
tsk->thread.error_code = fsr;
|
|
tsk->thread.trap_no = 14;
|
|
@@ -407,6 +414,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
|
|
}
|
|
#endif /* CONFIG_MMU */
|
|
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
|
|
+{
|
|
+ long i;
|
|
+
|
|
+ printk(KERN_ERR "PAX: bytes at PC: ");
|
|
+ for (i = 0; i < 20; i++) {
|
|
+ unsigned char c;
|
|
+ if (get_user(c, (__force unsigned char __user *)pc+i))
|
|
+ printk(KERN_CONT "?? ");
|
|
+ else
|
|
+ printk(KERN_CONT "%02x ", c);
|
|
+ }
|
|
+ printk("\n");
|
|
+
|
|
+ printk(KERN_ERR "PAX: bytes at SP-4: ");
|
|
+ for (i = -1; i < 20; i++) {
|
|
+ unsigned long c;
|
|
+ if (get_user(c, (__force unsigned long __user *)sp+i))
|
|
+ printk(KERN_CONT "???????? ");
|
|
+ else
|
|
+ printk(KERN_CONT "%08lx ", c);
|
|
+ }
|
|
+ printk("\n");
|
|
+}
|
|
+#endif
|
|
+
|
|
/*
|
|
* First Level Translation Fault Handler
|
|
*
|
|
@@ -704,6 +738,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
|
|
const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
|
|
struct siginfo info;
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ if (fsr_fs(ifsr) == 2) {
|
|
+ unsigned int bkpt;
|
|
+
|
|
+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
|
|
+ current->thread.error_code = ifsr;
|
|
+ current->thread.trap_no = 0;
|
|
+ pax_report_refcount_overflow(regs);
|
|
+ fixup_exception(regs);
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
|
|
return;
|
|
|
|
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
|
|
index ce8cb19..3ec539d 100644
|
|
--- a/arch/arm/mm/mmap.c
|
|
+++ b/arch/arm/mm/mmap.c
|
|
@@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|
if (len > TASK_SIZE)
|
|
return -ENOMEM;
|
|
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
|
|
+#endif
|
|
+
|
|
if (addr) {
|
|
if (do_align)
|
|
addr = COLOUR_ALIGN(addr, pgoff);
|
|
@@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|
addr = PAGE_ALIGN(addr);
|
|
|
|
vma = find_vma(mm, addr);
|
|
- if (TASK_SIZE - len >= addr &&
|
|
- (!vma || addr + len <= vma->vm_start))
|
|
+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
|
|
return addr;
|
|
}
|
|
if (len > mm->cached_hole_size) {
|
|
- start_addr = addr = mm->free_area_cache;
|
|
+ start_addr = addr = mm->free_area_cache;
|
|
} else {
|
|
- start_addr = addr = mm->mmap_base;
|
|
- mm->cached_hole_size = 0;
|
|
+ start_addr = addr = mm->mmap_base;
|
|
+ mm->cached_hole_size = 0;
|
|
}
|
|
|
|
full_search:
|
|
@@ -124,14 +127,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|
* Start a new search - just in case we missed
|
|
* some holes.
|
|
*/
|
|
- if (start_addr != TASK_UNMAPPED_BASE) {
|
|
- start_addr = addr = TASK_UNMAPPED_BASE;
|
|
+ if (start_addr != mm->mmap_base) {
|
|
+ start_addr = addr = mm->mmap_base;
|
|
mm->cached_hole_size = 0;
|
|
goto full_search;
|
|
}
|
|
return -ENOMEM;
|
|
}
|
|
- if (!vma || addr + len <= vma->vm_start) {
|
|
+ if (check_heap_stack_gap(vma, addr, len)) {
|
|
/*
|
|
* Remember the place where we stopped the search:
|
|
*/
|
|
@@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
|
|
|
|
if (mmap_is_legacy()) {
|
|
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
+ mm->mmap_base += mm->delta_mmap;
|
|
+#endif
|
|
+
|
|
mm->get_unmapped_area = arch_get_unmapped_area;
|
|
mm->unmap_area = arch_unmap_area;
|
|
} else {
|
|
mm->mmap_base = mmap_base(random_factor);
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
|
|
+#endif
|
|
+
|
|
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
|
|
mm->unmap_area = arch_unmap_area_topdown;
|
|
}
|
|
diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
|
|
index fd556f7..af2e7d2 100644
|
|
--- a/arch/arm/plat-orion/include/plat/addr-map.h
|
|
+++ b/arch/arm/plat-orion/include/plat/addr-map.h
|
|
@@ -26,7 +26,7 @@ struct orion_addr_map_cfg {
|
|
value in bridge_virt_base */
|
|
void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
|
|
const int win);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/*
|
|
* Information needed to setup one address mapping.
|
|
diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
|
|
index 71a6827..e7fbc23 100644
|
|
--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
|
|
+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
|
|
@@ -43,7 +43,7 @@ struct samsung_dma_ops {
|
|
int (*started)(unsigned ch);
|
|
int (*flush)(unsigned ch);
|
|
int (*stop)(unsigned ch);
|
|
-};
|
|
+} __no_const;
|
|
|
|
extern void *samsung_dmadev_get_ops(void);
|
|
extern void *s3c_dma_get_ops(void);
|
|
diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
|
|
index 5f28cae..3d23723 100644
|
|
--- a/arch/arm/plat-samsung/include/plat/ehci.h
|
|
+++ b/arch/arm/plat-samsung/include/plat/ehci.h
|
|
@@ -14,7 +14,7 @@
|
|
struct s5p_ehci_platdata {
|
|
int (*phy_init)(struct platform_device *pdev, int type);
|
|
int (*phy_exit)(struct platform_device *pdev, int type);
|
|
-};
|
|
+} __no_const;
|
|
|
|
extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
|
|
|
|
diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
|
|
index 3b3159b..425ea94d 100644
|
|
--- a/arch/avr32/include/asm/elf.h
|
|
+++ b/arch/avr32/include/asm/elf.h
|
|
@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
|
|
the loader. We need to make sure that it is out of the way of the program
|
|
that it will "exec", and that there is sufficient room for the brk. */
|
|
|
|
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
|
|
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
|
|
|
|
+#ifdef CONFIG_PAX_ASLR
|
|
+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
|
|
+
|
|
+#define PAX_DELTA_MMAP_LEN 15
|
|
+#define PAX_DELTA_STACK_LEN 15
|
|
+#endif
|
|
|
|
/* This yields a mask that user programs can use to figure out what
|
|
instruction set this CPU supports. This could be done in user space,
|
|
diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
|
|
index b7f5c68..556135c 100644
|
|
--- a/arch/avr32/include/asm/kmap_types.h
|
|
+++ b/arch/avr32/include/asm/kmap_types.h
|
|
@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
|
|
D(11) KM_IRQ1,
|
|
D(12) KM_SOFTIRQ0,
|
|
D(13) KM_SOFTIRQ1,
|
|
-D(14) KM_TYPE_NR
|
|
+D(14) KM_CLEARPAGE,
|
|
+D(15) KM_TYPE_NR
|
|
};
|
|
|
|
#undef D
|
|
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
|
|
index 632b649..043ddd2 100644
|
|
--- a/arch/avr32/mm/fault.c
|
|
+++ b/arch/avr32/mm/fault.c
|
|
@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
|
|
|
|
int exception_trace = 1;
|
|
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
|
|
+{
|
|
+ unsigned long i;
|
|
+
|
|
+ printk(KERN_ERR "PAX: bytes at PC: ");
|
|
+ for (i = 0; i < 20; i++) {
|
|
+ unsigned char c;
|
|
+ if (get_user(c, (unsigned char *)pc+i))
|
|
+ printk(KERN_CONT "???????? ");
|
|
+ else
|
|
+ printk(KERN_CONT "%02x ", c);
|
|
+ }
|
|
+ printk("\n");
|
|
+}
|
|
+#endif
|
|
+
|
|
/*
|
|
* This routine handles page faults. It determines the address and the
|
|
* problem, and then passes it off to one of the appropriate routines.
|
|
@@ -158,6 +175,16 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
|
|
up_read(&mm->mmap_sem);
|
|
|
|
if (user_mode(regs)) {
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
|
|
+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
|
|
+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
|
|
+ do_group_exit(SIGKILL);
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
if (exception_trace && printk_ratelimit())
|
|
printk("%s%s[%d]: segfault at %08lx pc %08lx "
|
|
"sp %08lx ecr %lu\n",
|
|
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
|
|
index b86329d..6709906 100644
|
|
--- a/arch/frv/include/asm/atomic.h
|
|
+++ b/arch/frv/include/asm/atomic.h
|
|
@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
|
|
#define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
|
|
#define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
|
|
|
|
+#define atomic64_read_unchecked(v) atomic64_read(v)
|
|
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
|
|
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
|
|
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
|
|
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
|
|
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
|
|
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
|
|
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
|
|
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
|
|
+
|
|
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
|
|
{
|
|
int c, old;
|
|
diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
|
|
index f8e16b2..c73ff79 100644
|
|
--- a/arch/frv/include/asm/kmap_types.h
|
|
+++ b/arch/frv/include/asm/kmap_types.h
|
|
@@ -23,6 +23,7 @@ enum km_type {
|
|
KM_IRQ1,
|
|
KM_SOFTIRQ0,
|
|
KM_SOFTIRQ1,
|
|
+ KM_CLEARPAGE,
|
|
KM_TYPE_NR
|
|
};
|
|
|
|
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
|
|
index 385fd30..6c3d97e 100644
|
|
--- a/arch/frv/mm/elf-fdpic.c
|
|
+++ b/arch/frv/mm/elf-fdpic.c
|
|
@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
|
|
if (addr) {
|
|
addr = PAGE_ALIGN(addr);
|
|
vma = find_vma(current->mm, addr);
|
|
- if (TASK_SIZE - len >= addr &&
|
|
- (!vma || addr + len <= vma->vm_start))
|
|
+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
|
|
goto success;
|
|
}
|
|
|
|
@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
|
|
for (; vma; vma = vma->vm_next) {
|
|
if (addr > limit)
|
|
break;
|
|
- if (addr + len <= vma->vm_start)
|
|
+ if (check_heap_stack_gap(vma, addr, len))
|
|
goto success;
|
|
addr = vma->vm_end;
|
|
}
|
|
@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
|
|
for (; vma; vma = vma->vm_next) {
|
|
if (addr > limit)
|
|
break;
|
|
- if (addr + len <= vma->vm_start)
|
|
+ if (check_heap_stack_gap(vma, addr, len))
|
|
goto success;
|
|
addr = vma->vm_end;
|
|
}
|
|
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
|
|
index 6e6fe18..a6ae668 100644
|
|
--- a/arch/ia64/include/asm/atomic.h
|
|
+++ b/arch/ia64/include/asm/atomic.h
|
|
@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
|
|
#define atomic64_inc(v) atomic64_add(1, (v))
|
|
#define atomic64_dec(v) atomic64_sub(1, (v))
|
|
|
|
+#define atomic64_read_unchecked(v) atomic64_read(v)
|
|
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
|
|
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
|
|
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
|
|
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
|
|
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
|
|
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
|
|
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
|
|
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
|
|
+
|
|
/* Atomic operations are already serializing */
|
|
#define smp_mb__before_atomic_dec() barrier()
|
|
#define smp_mb__after_atomic_dec() barrier()
|
|
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
|
|
index b5298eb..67c6e62 100644
|
|
--- a/arch/ia64/include/asm/elf.h
|
|
+++ b/arch/ia64/include/asm/elf.h
|
|
@@ -42,6 +42,13 @@
|
|
*/
|
|
#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
|
|
|
|
+#ifdef CONFIG_PAX_ASLR
|
|
+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
|
|
+
|
|
+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
|
|
+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
|
|
+#endif
|
|
+
|
|
#define PT_IA_64_UNWIND 0x70000001
|
|
|
|
/* IA-64 relocations: */
|
|
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
|
|
index 96a8d92..617a1cf 100644
|
|
--- a/arch/ia64/include/asm/pgalloc.h
|
|
+++ b/arch/ia64/include/asm/pgalloc.h
|
|
@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
|
|
pgd_val(*pgd_entry) = __pa(pud);
|
|
}
|
|
|
|
+static inline void
|
|
+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
|
|
+{
|
|
+ pgd_populate(mm, pgd_entry, pud);
|
|
+}
|
|
+
|
|
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
|
|
{
|
|
return quicklist_alloc(0, GFP_KERNEL, NULL);
|
|
@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
|
|
pud_val(*pud_entry) = __pa(pmd);
|
|
}
|
|
|
|
+static inline void
|
|
+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
|
|
+{
|
|
+ pud_populate(mm, pud_entry, pmd);
|
|
+}
|
|
+
|
|
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
|
|
{
|
|
return quicklist_alloc(0, GFP_KERNEL, NULL);
|
|
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
|
|
index 815810c..d60bd4c 100644
|
|
--- a/arch/ia64/include/asm/pgtable.h
|
|
+++ b/arch/ia64/include/asm/pgtable.h
|
|
@@ -12,7 +12,7 @@
|
|
* David Mosberger-Tang <davidm@hpl.hp.com>
|
|
*/
|
|
|
|
-
|
|
+#include <linux/const.h>
|
|
#include <asm/mman.h>
|
|
#include <asm/page.h>
|
|
#include <asm/processor.h>
|
|
@@ -142,6 +142,17 @@
|
|
#define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
|
|
#define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
|
|
#define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
|
|
+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
|
|
+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
|
|
+#else
|
|
+# define PAGE_SHARED_NOEXEC PAGE_SHARED
|
|
+# define PAGE_READONLY_NOEXEC PAGE_READONLY
|
|
+# define PAGE_COPY_NOEXEC PAGE_COPY
|
|
+#endif
|
|
+
|
|
#define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
|
|
#define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
|
|
#define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
|
|
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
|
|
index 54ff557..70c88b7 100644
|
|
--- a/arch/ia64/include/asm/spinlock.h
|
|
+++ b/arch/ia64/include/asm/spinlock.h
|
|
@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
|
|
unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
|
|
|
|
asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
|
|
- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
|
|
+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
|
|
}
|
|
|
|
static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
|
|
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
|
|
index 449c8c0..432a3d2 100644
|
|
--- a/arch/ia64/include/asm/uaccess.h
|
|
+++ b/arch/ia64/include/asm/uaccess.h
|
|
@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
|
|
const void *__cu_from = (from); \
|
|
long __cu_len = (n); \
|
|
\
|
|
- if (__access_ok(__cu_to, __cu_len, get_fs())) \
|
|
+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
|
|
__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
|
|
__cu_len; \
|
|
})
|
|
@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
|
|
long __cu_len = (n); \
|
|
\
|
|
__chk_user_ptr(__cu_from); \
|
|
- if (__access_ok(__cu_from, __cu_len, get_fs())) \
|
|
+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
|
|
__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
|
|
__cu_len; \
|
|
})
|
|
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
|
|
index 24603be..948052d 100644
|
|
--- a/arch/ia64/kernel/module.c
|
|
+++ b/arch/ia64/kernel/module.c
|
|
@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
|
|
void
|
|
module_free (struct module *mod, void *module_region)
|
|
{
|
|
- if (mod && mod->arch.init_unw_table &&
|
|
- module_region == mod->module_init) {
|
|
+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
|
|
unw_remove_unwind_table(mod->arch.init_unw_table);
|
|
mod->arch.init_unw_table = NULL;
|
|
}
|
|
@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
|
|
}
|
|
|
|
static inline int
|
|
+in_init_rx (const struct module *mod, uint64_t addr)
|
|
+{
|
|
+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
|
|
+}
|
|
+
|
|
+static inline int
|
|
+in_init_rw (const struct module *mod, uint64_t addr)
|
|
+{
|
|
+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
|
|
+}
|
|
+
|
|
+static inline int
|
|
in_init (const struct module *mod, uint64_t addr)
|
|
{
|
|
- return addr - (uint64_t) mod->module_init < mod->init_size;
|
|
+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
|
|
+}
|
|
+
|
|
+static inline int
|
|
+in_core_rx (const struct module *mod, uint64_t addr)
|
|
+{
|
|
+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
|
|
+}
|
|
+
|
|
+static inline int
|
|
+in_core_rw (const struct module *mod, uint64_t addr)
|
|
+{
|
|
+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
|
|
}
|
|
|
|
static inline int
|
|
in_core (const struct module *mod, uint64_t addr)
|
|
{
|
|
- return addr - (uint64_t) mod->module_core < mod->core_size;
|
|
+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
|
|
}
|
|
|
|
static inline int
|
|
@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
|
|
break;
|
|
|
|
case RV_BDREL:
|
|
- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
|
|
+ if (in_init_rx(mod, val))
|
|
+ val -= (uint64_t) mod->module_init_rx;
|
|
+ else if (in_init_rw(mod, val))
|
|
+ val -= (uint64_t) mod->module_init_rw;
|
|
+ else if (in_core_rx(mod, val))
|
|
+ val -= (uint64_t) mod->module_core_rx;
|
|
+ else if (in_core_rw(mod, val))
|
|
+ val -= (uint64_t) mod->module_core_rw;
|
|
break;
|
|
|
|
case RV_LTV:
|
|
@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
|
|
* addresses have been selected...
|
|
*/
|
|
uint64_t gp;
|
|
- if (mod->core_size > MAX_LTOFF)
|
|
+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
|
|
/*
|
|
* This takes advantage of fact that SHF_ARCH_SMALL gets allocated
|
|
* at the end of the module.
|
|
*/
|
|
- gp = mod->core_size - MAX_LTOFF / 2;
|
|
+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
|
|
else
|
|
- gp = mod->core_size / 2;
|
|
- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
|
|
+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
|
|
+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
|
|
mod->arch.gp = gp;
|
|
DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
|
|
}
|
|
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
|
|
index 609d500..7dde2a8 100644
|
|
--- a/arch/ia64/kernel/sys_ia64.c
|
|
+++ b/arch/ia64/kernel/sys_ia64.c
|
|
@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
|
|
if (REGION_NUMBER(addr) == RGN_HPAGE)
|
|
addr = 0;
|
|
#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
+ addr = mm->free_area_cache;
|
|
+ else
|
|
+#endif
|
|
+
|
|
if (!addr)
|
|
addr = mm->free_area_cache;
|
|
|
|
@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
|
|
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
|
|
/* At this point: (!vma || addr < vma->vm_end). */
|
|
if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
|
|
- if (start_addr != TASK_UNMAPPED_BASE) {
|
|
+ if (start_addr != mm->mmap_base) {
|
|
/* Start a new search --- just in case we missed some holes. */
|
|
- addr = TASK_UNMAPPED_BASE;
|
|
+ addr = mm->mmap_base;
|
|
goto full_search;
|
|
}
|
|
return -ENOMEM;
|
|
}
|
|
- if (!vma || addr + len <= vma->vm_start) {
|
|
+ if (check_heap_stack_gap(vma, addr, len)) {
|
|
/* Remember the address where we stopped this search: */
|
|
mm->free_area_cache = addr + len;
|
|
return addr;
|
|
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
|
|
index 0ccb28f..8992469 100644
|
|
--- a/arch/ia64/kernel/vmlinux.lds.S
|
|
+++ b/arch/ia64/kernel/vmlinux.lds.S
|
|
@@ -198,7 +198,7 @@ SECTIONS {
|
|
/* Per-cpu data: */
|
|
. = ALIGN(PERCPU_PAGE_SIZE);
|
|
PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
|
|
- __phys_per_cpu_start = __per_cpu_load;
|
|
+ __phys_per_cpu_start = per_cpu_load;
|
|
/*
|
|
* ensure percpu data fits
|
|
* into percpu page size
|
|
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
|
|
index 3c820ea..5008b6c 100644
|
|
--- a/arch/ia64/mm/fault.c
|
|
+++ b/arch/ia64/mm/fault.c
|
|
@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
|
|
return pte_present(pte);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
|
|
+{
|
|
+ unsigned long i;
|
|
+
|
|
+ printk(KERN_ERR "PAX: bytes at PC: ");
|
|
+ for (i = 0; i < 8; i++) {
|
|
+ unsigned int c;
|
|
+ if (get_user(c, (unsigned int *)pc+i))
|
|
+ printk(KERN_CONT "???????? ");
|
|
+ else
|
|
+ printk(KERN_CONT "%08x ", c);
|
|
+ }
|
|
+ printk("\n");
|
|
+}
|
|
+#endif
|
|
+
|
|
void __kprobes
|
|
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
|
|
{
|
|
@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
|
|
mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
|
|
| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
|
|
|
|
- if ((vma->vm_flags & mask) != mask)
|
|
+ if ((vma->vm_flags & mask) != mask) {
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
|
|
+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
|
|
+ goto bad_area;
|
|
+
|
|
+ up_read(&mm->mmap_sem);
|
|
+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
|
|
+ do_group_exit(SIGKILL);
|
|
+ }
|
|
+#endif
|
|
+
|
|
goto bad_area;
|
|
|
|
+ }
|
|
+
|
|
/*
|
|
* If for any reason at all we couldn't handle the fault, make
|
|
* sure we exit gracefully rather than endlessly redo the
|
|
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
|
|
index 5ca674b..e0e1b70 100644
|
|
--- a/arch/ia64/mm/hugetlbpage.c
|
|
+++ b/arch/ia64/mm/hugetlbpage.c
|
|
@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
|
|
/* At this point: (!vmm || addr < vmm->vm_end). */
|
|
if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
|
|
return -ENOMEM;
|
|
- if (!vmm || (addr + len) <= vmm->vm_start)
|
|
+ if (check_heap_stack_gap(vmm, addr, len))
|
|
return addr;
|
|
addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
|
|
}
|
|
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
|
|
index 0eab454..bd794f2 100644
|
|
--- a/arch/ia64/mm/init.c
|
|
+++ b/arch/ia64/mm/init.c
|
|
@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
|
|
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
|
|
vma->vm_end = vma->vm_start + PAGE_SIZE;
|
|
vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
|
|
+ vma->vm_flags &= ~VM_EXEC;
|
|
+
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
|
|
+ vma->vm_flags &= ~VM_MAYEXEC;
|
|
+#endif
|
|
+
|
|
+ }
|
|
+#endif
|
|
+
|
|
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
|
|
down_write(¤t->mm->mmap_sem);
|
|
if (insert_vm_struct(current->mm, vma)) {
|
|
diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
|
|
index 82abd15..d95ae5d 100644
|
|
--- a/arch/m32r/lib/usercopy.c
|
|
+++ b/arch/m32r/lib/usercopy.c
|
|
@@ -14,6 +14,9 @@
|
|
unsigned long
|
|
__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
{
|
|
+ if ((long)n < 0)
|
|
+ return n;
|
|
+
|
|
prefetch(from);
|
|
if (access_ok(VERIFY_WRITE, to, n))
|
|
__copy_user(to,from,n);
|
|
@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
unsigned long
|
|
__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
|
|
{
|
|
+ if ((long)n < 0)
|
|
+ return n;
|
|
+
|
|
prefetchw(to);
|
|
if (access_ok(VERIFY_READ, from, n))
|
|
__copy_user_zeroing(to,from,n);
|
|
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
|
|
index 3f4c5cb..3439c6e 100644
|
|
--- a/arch/mips/include/asm/atomic.h
|
|
+++ b/arch/mips/include/asm/atomic.h
|
|
@@ -21,6 +21,10 @@
|
|
#include <asm/cmpxchg.h>
|
|
#include <asm/war.h>
|
|
|
|
+#ifdef CONFIG_GENERIC_ATOMIC64
|
|
+#include <asm-generic/atomic64.h>
|
|
+#endif
|
|
+
|
|
#define ATOMIC_INIT(i) { (i) }
|
|
|
|
/*
|
|
@@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
|
|
*/
|
|
#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
|
|
|
|
+#define atomic64_read_unchecked(v) atomic64_read(v)
|
|
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
|
|
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
|
|
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
|
|
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
|
|
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
|
|
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
|
|
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
|
|
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
|
|
+
|
|
#endif /* CONFIG_64BIT */
|
|
|
|
/*
|
|
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
|
|
index 455c0ac..ad65fbe 100644
|
|
--- a/arch/mips/include/asm/elf.h
|
|
+++ b/arch/mips/include/asm/elf.h
|
|
@@ -372,13 +372,16 @@ extern const char *__elf_platform;
|
|
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
|
|
#endif
|
|
|
|
+#ifdef CONFIG_PAX_ASLR
|
|
+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
|
|
+
|
|
+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
|
|
+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
|
|
+#endif
|
|
+
|
|
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
|
|
struct linux_binprm;
|
|
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
|
|
int uses_interp);
|
|
|
|
-struct mm_struct;
|
|
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
|
|
-#define arch_randomize_brk arch_randomize_brk
|
|
-
|
|
#endif /* _ASM_ELF_H */
|
|
diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
|
|
index c1f6afa..38cc6e9a 100644
|
|
--- a/arch/mips/include/asm/exec.h
|
|
+++ b/arch/mips/include/asm/exec.h
|
|
@@ -12,6 +12,6 @@
|
|
#ifndef _ASM_EXEC_H
|
|
#define _ASM_EXEC_H
|
|
|
|
-extern unsigned long arch_align_stack(unsigned long sp);
|
|
+#define arch_align_stack(x) ((x) & ~0xfUL)
|
|
|
|
#endif /* _ASM_EXEC_H */
|
|
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
|
|
index 0913b4f..b68754e 100644
|
|
--- a/arch/mips/include/asm/page.h
|
|
+++ b/arch/mips/include/asm/page.h
|
|
@@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
|
|
#ifdef CONFIG_CPU_MIPS32
|
|
typedef struct { unsigned long pte_low, pte_high; } pte_t;
|
|
#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
|
|
- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
|
|
+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
|
|
#else
|
|
typedef struct { unsigned long long pte; } pte_t;
|
|
#define pte_val(x) ((x).pte)
|
|
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
|
|
index 881d18b4..cea38bc 100644
|
|
--- a/arch/mips/include/asm/pgalloc.h
|
|
+++ b/arch/mips/include/asm/pgalloc.h
|
|
@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
|
|
{
|
|
set_pud(pud, __pud((unsigned long)pmd));
|
|
}
|
|
+
|
|
+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
|
|
+{
|
|
+ pud_populate(mm, pud, pmd);
|
|
+}
|
|
#endif
|
|
|
|
/*
|
|
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
|
|
index 9fdd8bc..4bd7f1a 100644
|
|
--- a/arch/mips/kernel/binfmt_elfn32.c
|
|
+++ b/arch/mips/kernel/binfmt_elfn32.c
|
|
@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
|
|
#undef ELF_ET_DYN_BASE
|
|
#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
|
|
|
|
+#ifdef CONFIG_PAX_ASLR
|
|
+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
|
|
+
|
|
+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
|
|
+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
|
|
+#endif
|
|
+
|
|
#include <asm/processor.h>
|
|
#include <linux/module.h>
|
|
#include <linux/elfcore.h>
|
|
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
|
|
index ff44823..97f8906e 100644
|
|
--- a/arch/mips/kernel/binfmt_elfo32.c
|
|
+++ b/arch/mips/kernel/binfmt_elfo32.c
|
|
@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
|
|
#undef ELF_ET_DYN_BASE
|
|
#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
|
|
|
|
+#ifdef CONFIG_PAX_ASLR
|
|
+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
|
|
+
|
|
+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
|
|
+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
|
|
+#endif
|
|
+
|
|
#include <asm/processor.h>
|
|
|
|
/*
|
|
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
|
|
index 69b17a9..9db82f9 100644
|
|
--- a/arch/mips/kernel/process.c
|
|
+++ b/arch/mips/kernel/process.c
|
|
@@ -478,15 +478,3 @@ unsigned long get_wchan(struct task_struct *task)
|
|
out:
|
|
return pc;
|
|
}
|
|
-
|
|
-/*
|
|
- * Don't forget that the stack pointer must be aligned on a 8 bytes
|
|
- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
|
|
- */
|
|
-unsigned long arch_align_stack(unsigned long sp)
|
|
-{
|
|
- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
|
|
- sp -= get_random_int() & ~PAGE_MASK;
|
|
-
|
|
- return sp & ALMASK;
|
|
-}
|
|
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
|
|
index 5c9ba6a..4566d4d 100644
|
|
--- a/arch/mips/mm/fault.c
|
|
+++ b/arch/mips/mm/fault.c
|
|
@@ -27,6 +27,23 @@
|
|
#include <asm/highmem.h> /* For VMALLOC_END */
|
|
#include <linux/kdebug.h>
|
|
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
|
|
+{
|
|
+ unsigned long i;
|
|
+
|
|
+ printk(KERN_ERR "PAX: bytes at PC: ");
|
|
+ for (i = 0; i < 5; i++) {
|
|
+ unsigned int c;
|
|
+ if (get_user(c, (unsigned int *)pc+i))
|
|
+ printk(KERN_CONT "???????? ");
|
|
+ else
|
|
+ printk(KERN_CONT "%08x ", c);
|
|
+ }
|
|
+ printk("\n");
|
|
+}
|
|
+#endif
|
|
+
|
|
/*
|
|
* This routine handles page faults. It determines the address,
|
|
* and the problem, and then passes it off to one of the appropriate
|
|
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
|
|
index 302d779..7d35bf8 100644
|
|
--- a/arch/mips/mm/mmap.c
|
|
+++ b/arch/mips/mm/mmap.c
|
|
@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
|
|
do_color_align = 1;
|
|
|
|
/* requesting a specific address */
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
|
|
+#endif
|
|
+
|
|
if (addr) {
|
|
if (do_color_align)
|
|
addr = COLOUR_ALIGN(addr, pgoff);
|
|
@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
|
|
addr = PAGE_ALIGN(addr);
|
|
|
|
vma = find_vma(mm, addr);
|
|
- if (TASK_SIZE - len >= addr &&
|
|
- (!vma || addr + len <= vma->vm_start))
|
|
+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
|
|
return addr;
|
|
}
|
|
|
|
@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
|
|
/* At this point: (!vma || addr < vma->vm_end). */
|
|
if (TASK_SIZE - len < addr)
|
|
return -ENOMEM;
|
|
- if (!vma || addr + len <= vma->vm_start)
|
|
+		if (check_heap_stack_gap(vma, addr, len))
|
|
return addr;
|
|
addr = vma->vm_end;
|
|
if (do_color_align)
|
|
@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
|
|
/* make sure it can fit in the remaining address space */
|
|
if (likely(addr > len)) {
|
|
vma = find_vma(mm, addr - len);
|
|
- if (!vma || addr <= vma->vm_start) {
|
|
+		if (check_heap_stack_gap(vma, addr - len, len)) {
|
|
/* cache the address as a hint for next time */
|
|
return mm->free_area_cache = addr - len;
|
|
}
|
|
@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
|
|
* return with success:
|
|
*/
|
|
vma = find_vma(mm, addr);
|
|
- if (likely(!vma || addr + len <= vma->vm_start)) {
|
|
+		if (check_heap_stack_gap(vma, addr, len)) {
|
|
/* cache the address as a hint for next time */
|
|
return mm->free_area_cache = addr;
|
|
}
|
|
@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
|
|
mm->unmap_area = arch_unmap_area_topdown;
|
|
}
|
|
}
|
|
-
|
|
-static inline unsigned long brk_rnd(void)
|
|
-{
|
|
- unsigned long rnd = get_random_int();
|
|
-
|
|
- rnd = rnd << PAGE_SHIFT;
|
|
- /* 8MB for 32bit, 256MB for 64bit */
|
|
- if (TASK_IS_32BIT_ADDR)
|
|
- rnd = rnd & 0x7ffffful;
|
|
- else
|
|
- rnd = rnd & 0xffffffful;
|
|
-
|
|
- return rnd;
|
|
-}
|
|
-
|
|
-unsigned long arch_randomize_brk(struct mm_struct *mm)
|
|
-{
|
|
- unsigned long base = mm->brk;
|
|
- unsigned long ret;
|
|
-
|
|
- ret = PAGE_ALIGN(base + brk_rnd());
|
|
-
|
|
- if (ret < mm->brk)
|
|
- return mm->brk;
|
|
-
|
|
- return ret;
|
|
-}
|
|
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
|
|
index af9cf30..2aae9b2 100644
|
|
--- a/arch/parisc/include/asm/atomic.h
|
|
+++ b/arch/parisc/include/asm/atomic.h
|
|
@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
|
|
|
|
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
|
|
|
|
+#define atomic64_read_unchecked(v) atomic64_read(v)
|
|
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
|
|
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
|
|
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
|
|
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
|
|
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
|
|
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
|
|
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
|
|
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
|
|
+
|
|
#endif /* !CONFIG_64BIT */
|
|
|
|
|
|
diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
|
|
index 19f6cb1..6c78cf2 100644
|
|
--- a/arch/parisc/include/asm/elf.h
|
|
+++ b/arch/parisc/include/asm/elf.h
|
|
@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
|
|
|
|
#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
|
|
|
|
+#ifdef CONFIG_PAX_ASLR
|
|
+#define PAX_ELF_ET_DYN_BASE 0x10000UL
|
|
+
|
|
+#define PAX_DELTA_MMAP_LEN 16
|
|
+#define PAX_DELTA_STACK_LEN 16
|
|
+#endif
|
|
+
|
|
/* This yields a mask that user programs can use to figure out what
|
|
instruction set this CPU supports. This could be done in user space,
|
|
but it's not easy, and we've already done it here. */
|
|
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
|
|
index fc987a1..6e068ef 100644
|
|
--- a/arch/parisc/include/asm/pgalloc.h
|
|
+++ b/arch/parisc/include/asm/pgalloc.h
|
|
@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
|
|
(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
|
|
}
|
|
|
|
+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
|
|
+{
|
|
+ pgd_populate(mm, pgd, pmd);
|
|
+}
|
|
+
|
|
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
|
|
{
|
|
pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
|
|
@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
|
|
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
|
|
#define pmd_free(mm, x) do { } while (0)
|
|
#define pgd_populate(mm, pmd, pte) BUG()
|
|
+#define pgd_populate_kernel(mm, pmd, pte) BUG()
|
|
|
|
#endif
|
|
|
|
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
|
|
index 7df49fa..38b62bf 100644
|
|
--- a/arch/parisc/include/asm/pgtable.h
|
|
+++ b/arch/parisc/include/asm/pgtable.h
|
|
@@ -218,6 +218,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
|
|
#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
|
|
#define PAGE_COPY PAGE_EXECREAD
|
|
#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
|
|
+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
|
|
+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
|
|
+#else
|
|
+# define PAGE_SHARED_NOEXEC PAGE_SHARED
|
|
+# define PAGE_COPY_NOEXEC PAGE_COPY
|
|
+# define PAGE_READONLY_NOEXEC PAGE_READONLY
|
|
+#endif
|
|
+
|
|
#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
|
|
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
|
|
#define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
|
|
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
|
|
index 9ac0660..6ed15c4 100644
|
|
--- a/arch/parisc/include/asm/uaccess.h
|
|
+++ b/arch/parisc/include/asm/uaccess.h
|
|
@@ -252,10 +252,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
|
|
const void __user *from,
|
|
unsigned long n)
|
|
{
|
|
- int sz = __compiletime_object_size(to);
|
|
+ size_t sz = __compiletime_object_size(to);
|
|
int ret = -EFAULT;
|
|
|
|
- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
|
|
+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
|
|
ret = __copy_from_user(to, from, n);
|
|
else
|
|
copy_from_user_overflow();
|
|
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
|
|
index 5e34ccf..672bc9c 100644
|
|
--- a/arch/parisc/kernel/module.c
|
|
+++ b/arch/parisc/kernel/module.c
|
|
@@ -98,16 +98,38 @@
|
|
|
|
/* three functions to determine where in the module core
|
|
* or init pieces the location is */
|
|
+static inline int in_init_rx(struct module *me, void *loc)
|
|
+{
|
|
+ return (loc >= me->module_init_rx &&
|
|
+ loc < (me->module_init_rx + me->init_size_rx));
|
|
+}
|
|
+
|
|
+static inline int in_init_rw(struct module *me, void *loc)
|
|
+{
|
|
+ return (loc >= me->module_init_rw &&
|
|
+ loc < (me->module_init_rw + me->init_size_rw));
|
|
+}
|
|
+
|
|
static inline int in_init(struct module *me, void *loc)
|
|
{
|
|
- return (loc >= me->module_init &&
|
|
- loc <= (me->module_init + me->init_size));
|
|
+ return in_init_rx(me, loc) || in_init_rw(me, loc);
|
|
+}
|
|
+
|
|
+static inline int in_core_rx(struct module *me, void *loc)
|
|
+{
|
|
+ return (loc >= me->module_core_rx &&
|
|
+ loc < (me->module_core_rx + me->core_size_rx));
|
|
+}
|
|
+
|
|
+static inline int in_core_rw(struct module *me, void *loc)
|
|
+{
|
|
+ return (loc >= me->module_core_rw &&
|
|
+ loc < (me->module_core_rw + me->core_size_rw));
|
|
}
|
|
|
|
static inline int in_core(struct module *me, void *loc)
|
|
{
|
|
- return (loc >= me->module_core &&
|
|
- loc <= (me->module_core + me->core_size));
|
|
+ return in_core_rx(me, loc) || in_core_rw(me, loc);
|
|
}
|
|
|
|
static inline int in_local(struct module *me, void *loc)
|
|
@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
|
|
}
|
|
|
|
/* align things a bit */
|
|
- me->core_size = ALIGN(me->core_size, 16);
|
|
- me->arch.got_offset = me->core_size;
|
|
- me->core_size += gots * sizeof(struct got_entry);
|
|
+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
|
|
+ me->arch.got_offset = me->core_size_rw;
|
|
+ me->core_size_rw += gots * sizeof(struct got_entry);
|
|
|
|
- me->core_size = ALIGN(me->core_size, 16);
|
|
- me->arch.fdesc_offset = me->core_size;
|
|
- me->core_size += fdescs * sizeof(Elf_Fdesc);
|
|
+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
|
|
+ me->arch.fdesc_offset = me->core_size_rw;
|
|
+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
|
|
|
|
me->arch.got_max = gots;
|
|
me->arch.fdesc_max = fdescs;
|
|
@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
|
|
|
|
BUG_ON(value == 0);
|
|
|
|
- got = me->module_core + me->arch.got_offset;
|
|
+ got = me->module_core_rw + me->arch.got_offset;
|
|
for (i = 0; got[i].addr; i++)
|
|
if (got[i].addr == value)
|
|
goto out;
|
|
@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
|
|
#ifdef CONFIG_64BIT
|
|
static Elf_Addr get_fdesc(struct module *me, unsigned long value)
|
|
{
|
|
- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
|
|
+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
|
|
|
|
if (!value) {
|
|
printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
|
|
@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
|
|
|
|
/* Create new one */
|
|
fdesc->addr = value;
|
|
- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
|
|
+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
|
|
return (Elf_Addr)fdesc;
|
|
}
|
|
#endif /* CONFIG_64BIT */
|
|
@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
|
|
|
|
table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
|
|
end = table + sechdrs[me->arch.unwind_section].sh_size;
|
|
- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
|
|
+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
|
|
|
|
DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
|
|
me->arch.unwind_section, table, end, gp);
|
|
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
|
|
index 7ea75d1..7b64ef5 100644
|
|
--- a/arch/parisc/kernel/sys_parisc.c
|
|
+++ b/arch/parisc/kernel/sys_parisc.c
|
|
@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
|
|
/* At this point: (!vma || addr < vma->vm_end). */
|
|
if (TASK_SIZE - len < addr)
|
|
return -ENOMEM;
|
|
- if (!vma || addr + len <= vma->vm_start)
|
|
+ if (check_heap_stack_gap(vma, addr, len))
|
|
return addr;
|
|
addr = vma->vm_end;
|
|
}
|
|
@@ -81,7 +81,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
|
|
/* At this point: (!vma || addr < vma->vm_end). */
|
|
if (TASK_SIZE - len < addr)
|
|
return -ENOMEM;
|
|
- if (!vma || addr + len <= vma->vm_start)
|
|
+ if (check_heap_stack_gap(vma, addr, len))
|
|
return addr;
|
|
addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
|
|
if (addr < vma->vm_end) /* handle wraparound */
|
|
@@ -100,7 +100,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|
if (flags & MAP_FIXED)
|
|
return addr;
|
|
if (!addr)
|
|
- addr = TASK_UNMAPPED_BASE;
|
|
+ addr = current->mm->mmap_base;
|
|
|
|
if (filp) {
|
|
addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
|
|
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
|
|
index 71d7d72..78ed4e9 100644
|
|
--- a/arch/parisc/kernel/traps.c
|
|
+++ b/arch/parisc/kernel/traps.c
|
|
@@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
|
|
|
|
down_read(¤t->mm->mmap_sem);
|
|
vma = find_vma(current->mm,regs->iaoq[0]);
|
|
- if (vma && (regs->iaoq[0] >= vma->vm_start)
|
|
- && (vma->vm_flags & VM_EXEC)) {
|
|
-
|
|
+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
|
|
fault_address = regs->iaoq[0];
|
|
fault_space = regs->iasq[0];
|
|
|
|
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
|
|
index a9b765a..e78ae8e 100644
|
|
--- a/arch/parisc/mm/fault.c
|
|
+++ b/arch/parisc/mm/fault.c
|
|
@@ -15,6 +15,7 @@
|
|
#include <linux/sched.h>
|
|
#include <linux/interrupt.h>
|
|
#include <linux/module.h>
|
|
+#include <linux/unistd.h>
|
|
|
|
#include <asm/uaccess.h>
|
|
#include <asm/traps.h>
|
|
@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
|
|
static unsigned long
|
|
parisc_acctyp(unsigned long code, unsigned int inst)
|
|
{
|
|
- if (code == 6 || code == 16)
|
|
+ if (code == 6 || code == 7 || code == 16)
|
|
return VM_EXEC;
|
|
|
|
switch (inst & 0xf0000000) {
|
|
@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
|
|
}
|
|
#endif
|
|
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+/*
|
|
+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
|
|
+ *
|
|
+ * returns 1 when task should be killed
|
|
+ * 2 when rt_sigreturn trampoline was detected
|
|
+ * 3 when unpatched PLT trampoline was detected
|
|
+ */
|
|
+static int pax_handle_fetch_fault(struct pt_regs *regs)
|
|
+{
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUPLT
|
|
+ int err;
|
|
+
|
|
+ do { /* PaX: unpatched PLT emulation */
|
|
+ unsigned int bl, depwi;
|
|
+
|
|
+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
|
|
+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
|
|
+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
|
|
+
|
|
+ err = get_user(ldw, (unsigned int *)addr);
|
|
+ err |= get_user(bv, (unsigned int *)(addr+4));
|
|
+ err |= get_user(ldw2, (unsigned int *)(addr+8));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if (ldw == 0x0E801096U &&
|
|
+ bv == 0xEAC0C000U &&
|
|
+ ldw2 == 0x0E881095U)
|
|
+ {
|
|
+ unsigned int resolver, map;
|
|
+
|
|
+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
|
|
+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ regs->gr[20] = instruction_pointer(regs)+8;
|
|
+ regs->gr[21] = map;
|
|
+ regs->gr[22] = resolver;
|
|
+ regs->iaoq[0] = resolver | 3UL;
|
|
+ regs->iaoq[1] = regs->iaoq[0] + 4;
|
|
+ return 3;
|
|
+ }
|
|
+ }
|
|
+ } while (0);
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUTRAMP
|
|
+
|
|
+#ifndef CONFIG_PAX_EMUSIGRT
|
|
+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
|
|
+ return 1;
|
|
+#endif
|
|
+
|
|
+ do { /* PaX: rt_sigreturn emulation */
|
|
+ unsigned int ldi1, ldi2, bel, nop;
|
|
+
|
|
+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
|
|
+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
|
|
+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
|
|
+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
|
|
+ ldi2 == 0x3414015AU &&
|
|
+ bel == 0xE4008200U &&
|
|
+ nop == 0x08000240U)
|
|
+ {
|
|
+ regs->gr[25] = (ldi1 & 2) >> 1;
|
|
+ regs->gr[20] = __NR_rt_sigreturn;
|
|
+ regs->gr[31] = regs->iaoq[1] + 16;
|
|
+ regs->sr[0] = regs->iasq[1];
|
|
+ regs->iaoq[0] = 0x100UL;
|
|
+ regs->iaoq[1] = regs->iaoq[0] + 4;
|
|
+ regs->iasq[0] = regs->sr[2];
|
|
+ regs->iasq[1] = regs->sr[2];
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+#endif
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
|
|
+{
|
|
+ unsigned long i;
|
|
+
|
|
+ printk(KERN_ERR "PAX: bytes at PC: ");
|
|
+ for (i = 0; i < 5; i++) {
|
|
+ unsigned int c;
|
|
+ if (get_user(c, (unsigned int *)pc+i))
|
|
+ printk(KERN_CONT "???????? ");
|
|
+ else
|
|
+ printk(KERN_CONT "%08x ", c);
|
|
+ }
|
|
+ printk("\n");
|
|
+}
|
|
+#endif
|
|
+
|
|
int fixup_exception(struct pt_regs *regs)
|
|
{
|
|
const struct exception_table_entry *fix;
|
|
@@ -192,8 +303,33 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
|
|
|
|
acc_type = parisc_acctyp(code,regs->iir);
|
|
|
|
- if ((vma->vm_flags & acc_type) != acc_type)
|
|
+ if ((vma->vm_flags & acc_type) != acc_type) {
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
|
|
+ (address & ~3UL) == instruction_pointer(regs))
|
|
+ {
|
|
+ up_read(&mm->mmap_sem);
|
|
+ switch (pax_handle_fetch_fault(regs)) {
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUPLT
|
|
+ case 3:
|
|
+ return;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUTRAMP
|
|
+ case 2:
|
|
+ return;
|
|
+#endif
|
|
+
|
|
+ }
|
|
+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
|
|
+ do_group_exit(SIGKILL);
|
|
+ }
|
|
+#endif
|
|
+
|
|
goto bad_area;
|
|
+ }
|
|
|
|
/*
|
|
* If for any reason at all we couldn't handle the fault, make
|
|
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
|
|
index da29032..f76c24c 100644
|
|
--- a/arch/powerpc/include/asm/atomic.h
|
|
+++ b/arch/powerpc/include/asm/atomic.h
|
|
@@ -522,6 +522,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
|
|
return t1;
|
|
}
|
|
|
|
+#define atomic64_read_unchecked(v) atomic64_read(v)
|
|
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
|
|
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
|
|
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
|
|
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
|
|
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
|
|
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
|
|
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
|
|
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
|
|
+
|
|
#endif /* __powerpc64__ */
|
|
|
|
#endif /* __KERNEL__ */
|
|
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
|
|
index 3bf9cca..e7457d0 100644
|
|
--- a/arch/powerpc/include/asm/elf.h
|
|
+++ b/arch/powerpc/include/asm/elf.h
|
|
@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
|
|
the loader. We need to make sure that it is out of the way of the program
|
|
that it will "exec", and that there is sufficient room for the brk. */
|
|
|
|
-extern unsigned long randomize_et_dyn(unsigned long base);
|
|
-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
|
|
+#define ELF_ET_DYN_BASE (0x20000000)
|
|
+
|
|
+#ifdef CONFIG_PAX_ASLR
|
|
+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
|
|
+
|
|
+#ifdef __powerpc64__
|
|
+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
|
|
+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
|
|
+#else
|
|
+#define PAX_DELTA_MMAP_LEN 15
|
|
+#define PAX_DELTA_STACK_LEN 15
|
|
+#endif
|
|
+#endif
|
|
|
|
/*
|
|
* Our registers are always unsigned longs, whether we're a 32 bit
|
|
@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
|
|
(0x7ff >> (PAGE_SHIFT - 12)) : \
|
|
(0x3ffff >> (PAGE_SHIFT - 12)))
|
|
|
|
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
|
|
-#define arch_randomize_brk arch_randomize_brk
|
|
-
|
|
#endif /* __KERNEL__ */
|
|
|
|
/*
|
|
diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
|
|
index 8196e9c..d83a9f3 100644
|
|
--- a/arch/powerpc/include/asm/exec.h
|
|
+++ b/arch/powerpc/include/asm/exec.h
|
|
@@ -4,6 +4,6 @@
|
|
#ifndef _ASM_POWERPC_EXEC_H
|
|
#define _ASM_POWERPC_EXEC_H
|
|
|
|
-extern unsigned long arch_align_stack(unsigned long sp);
|
|
+#define arch_align_stack(x) ((x) & ~0xfUL)
|
|
|
|
#endif /* _ASM_POWERPC_EXEC_H */
|
|
diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
|
|
index bca8fdc..61e9580 100644
|
|
--- a/arch/powerpc/include/asm/kmap_types.h
|
|
+++ b/arch/powerpc/include/asm/kmap_types.h
|
|
@@ -27,6 +27,7 @@ enum km_type {
|
|
KM_PPC_SYNC_PAGE,
|
|
KM_PPC_SYNC_ICACHE,
|
|
KM_KDB,
|
|
+ KM_CLEARPAGE,
|
|
KM_TYPE_NR
|
|
};
|
|
|
|
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
|
|
index d4a7f64..451de1c 100644
|
|
--- a/arch/powerpc/include/asm/mman.h
|
|
+++ b/arch/powerpc/include/asm/mman.h
|
|
@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
|
|
}
|
|
#define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
|
|
|
|
-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
|
|
+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
|
|
{
|
|
return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
|
|
}
|
|
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
|
|
index 2e6c4e5a..d20a07d 100644
|
|
--- a/arch/powerpc/include/asm/page.h
|
|
+++ b/arch/powerpc/include/asm/page.h
|
|
@@ -230,8 +230,9 @@ extern long long virt_phys_offset;
|
|
* and needs to be executable. This means the whole heap ends
|
|
* up being executable.
|
|
*/
|
|
-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
|
|
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
|
|
+#define VM_DATA_DEFAULT_FLAGS32 \
|
|
+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
|
|
+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
|
|
|
|
#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
|
|
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
|
|
@@ -259,6 +260,9 @@ extern long long virt_phys_offset;
|
|
#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
|
|
#endif
|
|
|
|
+#define ktla_ktva(addr) (addr)
|
|
+#define ktva_ktla(addr) (addr)
|
|
+
|
|
/*
|
|
* Use the top bit of the higher-level page table entries to indicate whether
|
|
* the entries we point to contain hugepages. This works because we know that
|
|
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
|
|
index fed85e6..da5c71b 100644
|
|
--- a/arch/powerpc/include/asm/page_64.h
|
|
+++ b/arch/powerpc/include/asm/page_64.h
|
|
@@ -146,15 +146,18 @@ do { \
|
|
* stack by default, so in the absence of a PT_GNU_STACK program header
|
|
* we turn execute permission off.
|
|
*/
|
|
-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
|
|
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
|
|
+#define VM_STACK_DEFAULT_FLAGS32 \
|
|
+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
|
|
+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
|
|
|
|
#define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
|
|
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
|
|
|
|
+#ifndef CONFIG_PAX_PAGEEXEC
|
|
#define VM_STACK_DEFAULT_FLAGS \
|
|
(is_32bit_task() ? \
|
|
VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
|
|
+#endif
|
|
|
|
#include <asm-generic/getorder.h>
|
|
|
|
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
|
|
index 292725c..f87ae14 100644
|
|
--- a/arch/powerpc/include/asm/pgalloc-64.h
|
|
+++ b/arch/powerpc/include/asm/pgalloc-64.h
|
|
@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
|
|
#ifndef CONFIG_PPC_64K_PAGES
|
|
|
|
#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
|
|
+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
|
|
|
|
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
|
|
{
|
|
@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
|
|
pud_set(pud, (unsigned long)pmd);
|
|
}
|
|
|
|
+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
|
|
+{
|
|
+ pud_populate(mm, pud, pmd);
|
|
+}
|
|
+
|
|
#define pmd_populate(mm, pmd, pte_page) \
|
|
pmd_populate_kernel(mm, pmd, page_address(pte_page))
|
|
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
|
|
@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
|
|
#else /* CONFIG_PPC_64K_PAGES */
|
|
|
|
#define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
|
|
+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
|
|
|
|
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
|
|
pte_t *pte)
|
|
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
|
|
index 2e0e411..7899c68 100644
|
|
--- a/arch/powerpc/include/asm/pgtable.h
|
|
+++ b/arch/powerpc/include/asm/pgtable.h
|
|
@@ -2,6 +2,7 @@
|
|
#define _ASM_POWERPC_PGTABLE_H
|
|
#ifdef __KERNEL__
|
|
|
|
+#include <linux/const.h>
|
|
#ifndef __ASSEMBLY__
|
|
#include <asm/processor.h> /* For TASK_SIZE */
|
|
#include <asm/mmu.h>
|
|
diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
|
|
index 4aad413..85d86bf 100644
|
|
--- a/arch/powerpc/include/asm/pte-hash32.h
|
|
+++ b/arch/powerpc/include/asm/pte-hash32.h
|
|
@@ -21,6 +21,7 @@
|
|
#define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
|
|
#define _PAGE_USER 0x004 /* usermode access allowed */
|
|
#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
|
|
+#define _PAGE_EXEC _PAGE_GUARDED
|
|
#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
|
|
#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
|
|
#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
|
|
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
|
|
index cae0ed7..da44a51 100644
|
|
--- a/arch/powerpc/include/asm/reg.h
|
|
+++ b/arch/powerpc/include/asm/reg.h
|
|
@@ -212,6 +212,7 @@
|
|
#define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
|
|
#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
|
|
#define DSISR_NOHPTE 0x40000000 /* no translation found */
|
|
+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
|
|
#define DSISR_PROTFAULT 0x08000000 /* protection fault */
|
|
#define DSISR_ISSTORE 0x02000000 /* access was a store */
|
|
#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
|
|
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
|
|
index bd0fb84..a42a14b 100644
|
|
--- a/arch/powerpc/include/asm/uaccess.h
|
|
+++ b/arch/powerpc/include/asm/uaccess.h
|
|
@@ -13,6 +13,8 @@
|
|
#define VERIFY_READ 0
|
|
#define VERIFY_WRITE 1
|
|
|
|
+extern void check_object_size(const void *ptr, unsigned long n, bool to);
|
|
+
|
|
/*
|
|
* The fs value determines whether argument validity checking should be
|
|
* performed or not. If get_fs() == USER_DS, checking is performed, with
|
|
@@ -327,52 +329,6 @@ do { \
|
|
extern unsigned long __copy_tofrom_user(void __user *to,
|
|
const void __user *from, unsigned long size);
|
|
|
|
-#ifndef __powerpc64__
|
|
-
|
|
-static inline unsigned long copy_from_user(void *to,
|
|
- const void __user *from, unsigned long n)
|
|
-{
|
|
- unsigned long over;
|
|
-
|
|
- if (access_ok(VERIFY_READ, from, n))
|
|
- return __copy_tofrom_user((__force void __user *)to, from, n);
|
|
- if ((unsigned long)from < TASK_SIZE) {
|
|
- over = (unsigned long)from + n - TASK_SIZE;
|
|
- return __copy_tofrom_user((__force void __user *)to, from,
|
|
- n - over) + over;
|
|
- }
|
|
- return n;
|
|
-}
|
|
-
|
|
-static inline unsigned long copy_to_user(void __user *to,
|
|
- const void *from, unsigned long n)
|
|
-{
|
|
- unsigned long over;
|
|
-
|
|
- if (access_ok(VERIFY_WRITE, to, n))
|
|
- return __copy_tofrom_user(to, (__force void __user *)from, n);
|
|
- if ((unsigned long)to < TASK_SIZE) {
|
|
- over = (unsigned long)to + n - TASK_SIZE;
|
|
- return __copy_tofrom_user(to, (__force void __user *)from,
|
|
- n - over) + over;
|
|
- }
|
|
- return n;
|
|
-}
|
|
-
|
|
-#else /* __powerpc64__ */
|
|
-
|
|
-#define __copy_in_user(to, from, size) \
|
|
- __copy_tofrom_user((to), (from), (size))
|
|
-
|
|
-extern unsigned long copy_from_user(void *to, const void __user *from,
|
|
- unsigned long n);
|
|
-extern unsigned long copy_to_user(void __user *to, const void *from,
|
|
- unsigned long n);
|
|
-extern unsigned long copy_in_user(void __user *to, const void __user *from,
|
|
- unsigned long n);
|
|
-
|
|
-#endif /* __powerpc64__ */
|
|
-
|
|
static inline unsigned long __copy_from_user_inatomic(void *to,
|
|
const void __user *from, unsigned long n)
|
|
{
|
|
@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
|
|
if (ret == 0)
|
|
return 0;
|
|
}
|
|
+
|
|
+ if (!__builtin_constant_p(n))
|
|
+ check_object_size(to, n, false);
|
|
+
|
|
return __copy_tofrom_user((__force void __user *)to, from, n);
|
|
}
|
|
|
|
@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
|
|
if (ret == 0)
|
|
return 0;
|
|
}
|
|
+
|
|
+ if (!__builtin_constant_p(n))
|
|
+ check_object_size(from, n, true);
|
|
+
|
|
return __copy_tofrom_user(to, (__force const void __user *)from, n);
|
|
}
|
|
|
|
@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
|
|
return __copy_to_user_inatomic(to, from, size);
|
|
}
|
|
|
|
+#ifndef __powerpc64__
|
|
+
|
|
+static inline unsigned long __must_check copy_from_user(void *to,
|
|
+ const void __user *from, unsigned long n)
|
|
+{
|
|
+ unsigned long over;
|
|
+
|
|
+ if ((long)n < 0)
|
|
+ return n;
|
|
+
|
|
+ if (access_ok(VERIFY_READ, from, n)) {
|
|
+ if (!__builtin_constant_p(n))
|
|
+ check_object_size(to, n, false);
|
|
+ return __copy_tofrom_user((__force void __user *)to, from, n);
|
|
+ }
|
|
+ if ((unsigned long)from < TASK_SIZE) {
|
|
+ over = (unsigned long)from + n - TASK_SIZE;
|
|
+ if (!__builtin_constant_p(n - over))
|
|
+ check_object_size(to, n - over, false);
|
|
+ return __copy_tofrom_user((__force void __user *)to, from,
|
|
+ n - over) + over;
|
|
+ }
|
|
+ return n;
|
|
+}
|
|
+
|
|
+static inline unsigned long __must_check copy_to_user(void __user *to,
|
|
+ const void *from, unsigned long n)
|
|
+{
|
|
+ unsigned long over;
|
|
+
|
|
+ if ((long)n < 0)
|
|
+ return n;
|
|
+
|
|
+ if (access_ok(VERIFY_WRITE, to, n)) {
|
|
+ if (!__builtin_constant_p(n))
|
|
+ check_object_size(from, n, true);
|
|
+ return __copy_tofrom_user(to, (__force void __user *)from, n);
|
|
+ }
|
|
+ if ((unsigned long)to < TASK_SIZE) {
|
|
+ over = (unsigned long)to + n - TASK_SIZE;
|
|
+ if (!__builtin_constant_p(n))
|
|
+ check_object_size(from, n - over, true);
|
|
+ return __copy_tofrom_user(to, (__force void __user *)from,
|
|
+ n - over) + over;
|
|
+ }
|
|
+ return n;
|
|
+}
|
|
+
|
|
+#else /* __powerpc64__ */
|
|
+
|
|
+#define __copy_in_user(to, from, size) \
|
|
+ __copy_tofrom_user((to), (from), (size))
|
|
+
|
|
+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
|
|
+{
|
|
+ if ((long)n < 0 || n > INT_MAX)
|
|
+ return n;
|
|
+
|
|
+ if (!__builtin_constant_p(n))
|
|
+ check_object_size(to, n, false);
|
|
+
|
|
+ if (likely(access_ok(VERIFY_READ, from, n)))
|
|
+ n = __copy_from_user(to, from, n);
|
|
+ else
|
|
+ memset(to, 0, n);
|
|
+ return n;
|
|
+}
|
|
+
|
|
+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
+{
|
|
+ if ((long)n < 0 || n > INT_MAX)
|
|
+ return n;
|
|
+
|
|
+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
|
|
+ if (!__builtin_constant_p(n))
|
|
+ check_object_size(from, n, true);
|
|
+ n = __copy_to_user(to, from, n);
|
|
+ }
|
|
+ return n;
|
|
+}
|
|
+
|
|
+extern unsigned long copy_in_user(void __user *to, const void __user *from,
|
|
+ unsigned long n);
|
|
+
|
|
+#endif /* __powerpc64__ */
|
|
+
|
|
extern unsigned long __clear_user(void __user *addr, unsigned long size);
|
|
|
|
static inline unsigned long clear_user(void __user *addr, unsigned long size)
|
|
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
|
|
index 7215cc2..a9730c1 100644
|
|
--- a/arch/powerpc/kernel/exceptions-64e.S
|
|
+++ b/arch/powerpc/kernel/exceptions-64e.S
|
|
@@ -661,6 +661,7 @@ storage_fault_common:
|
|
std r14,_DAR(r1)
|
|
std r15,_DSISR(r1)
|
|
addi r3,r1,STACK_FRAME_OVERHEAD
|
|
+ bl .save_nvgprs
|
|
mr r4,r14
|
|
mr r5,r15
|
|
ld r14,PACA_EXGEN+EX_R14(r13)
|
|
@@ -669,8 +670,7 @@ storage_fault_common:
|
|
cmpdi r3,0
|
|
bne- 1f
|
|
b .ret_from_except_lite
|
|
-1: bl .save_nvgprs
|
|
- mr r5,r3
|
|
+1: mr r5,r3
|
|
addi r3,r1,STACK_FRAME_OVERHEAD
|
|
ld r4,_DAR(r1)
|
|
bl .bad_page_fault
|
|
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
|
|
index 846a163..cee606a 100644
|
|
--- a/arch/powerpc/kernel/exceptions-64s.S
|
|
+++ b/arch/powerpc/kernel/exceptions-64s.S
|
|
@@ -890,10 +890,10 @@ handle_page_fault:
|
|
11: ld r4,_DAR(r1)
|
|
ld r5,_DSISR(r1)
|
|
addi r3,r1,STACK_FRAME_OVERHEAD
|
|
+ bl .save_nvgprs
|
|
bl .do_page_fault
|
|
cmpdi r3,0
|
|
beq+ 12f
|
|
- bl .save_nvgprs
|
|
mr r5,r3
|
|
addi r3,r1,STACK_FRAME_OVERHEAD
|
|
lwz r4,_DAR(r1)
|
|
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
|
|
index 2e3200c..72095ce 100644
|
|
--- a/arch/powerpc/kernel/module_32.c
|
|
+++ b/arch/powerpc/kernel/module_32.c
|
|
@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
|
|
me->arch.core_plt_section = i;
|
|
}
|
|
if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
|
|
- printk("Module doesn't contain .plt or .init.plt sections.\n");
|
|
+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
|
|
return -ENOEXEC;
|
|
}
|
|
|
|
@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
|
|
|
|
DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
|
|
/* Init, or core PLT? */
|
|
- if (location >= mod->module_core
|
|
- && location < mod->module_core + mod->core_size)
|
|
+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
|
|
+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
|
|
entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
|
|
- else
|
|
+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
|
|
+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
|
|
entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
|
|
+ else {
|
|
+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
|
|
+ return ~0UL;
|
|
+ }
|
|
|
|
/* Find this entry, or if that fails, the next avail. entry */
|
|
while (entry->jump[0]) {
|
|
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
|
|
index c1aef40..373be39 100644
|
|
--- a/arch/powerpc/kernel/process.c
|
|
+++ b/arch/powerpc/kernel/process.c
|
|
@@ -1274,58 +1274,3 @@ void thread_info_cache_init(void)
|
|
}
|
|
|
|
#endif /* THREAD_SHIFT < PAGE_SHIFT */
|
|
-
|
|
-unsigned long arch_align_stack(unsigned long sp)
|
|
-{
|
|
- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
|
|
- sp -= get_random_int() & ~PAGE_MASK;
|
|
- return sp & ~0xf;
|
|
-}
|
|
-
|
|
-static inline unsigned long brk_rnd(void)
|
|
-{
|
|
- unsigned long rnd = 0;
|
|
-
|
|
- /* 8MB for 32bit, 1GB for 64bit */
|
|
- if (is_32bit_task())
|
|
- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
|
|
- else
|
|
- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
|
|
-
|
|
- return rnd << PAGE_SHIFT;
|
|
-}
|
|
-
|
|
-unsigned long arch_randomize_brk(struct mm_struct *mm)
|
|
-{
|
|
- unsigned long base = mm->brk;
|
|
- unsigned long ret;
|
|
-
|
|
-#ifdef CONFIG_PPC_STD_MMU_64
|
|
- /*
|
|
- * If we are using 1TB segments and we are allowed to randomise
|
|
- * the heap, we can put it above 1TB so it is backed by a 1TB
|
|
- * segment. Otherwise the heap will be in the bottom 1TB
|
|
- * which always uses 256MB segments and this may result in a
|
|
- * performance penalty.
|
|
- */
|
|
- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
|
|
- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
|
|
-#endif
|
|
-
|
|
- ret = PAGE_ALIGN(base + brk_rnd());
|
|
-
|
|
- if (ret < mm->brk)
|
|
- return mm->brk;
|
|
-
|
|
- return ret;
|
|
-}
|
|
-
|
|
-unsigned long randomize_et_dyn(unsigned long base)
|
|
-{
|
|
- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
|
|
-
|
|
- if (ret < base)
|
|
- return base;
|
|
-
|
|
- return ret;
|
|
-}
|
|
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
|
|
index 32e2c81..4bdce51 100644
|
|
--- a/arch/powerpc/kernel/signal_32.c
|
|
+++ b/arch/powerpc/kernel/signal_32.c
|
|
@@ -867,7 +867,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
|
|
/* Save user registers on the stack */
|
|
frame = &rt_sf->uc.uc_mcontext;
|
|
addr = frame;
|
|
- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
|
|
+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
|
|
if (save_user_regs(regs, frame, 0, 1))
|
|
goto badframe;
|
|
regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
|
|
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
|
|
index 3ad1b50..c7aafad 100644
|
|
--- a/arch/powerpc/kernel/signal_64.c
|
|
+++ b/arch/powerpc/kernel/signal_64.c
|
|
@@ -436,7 +436,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
|
|
current->thread.fpscr.val = 0;
|
|
|
|
/* Set up to return from userspace. */
|
|
- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
|
|
+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
|
|
regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
|
|
} else {
|
|
err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
|
|
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
|
|
index 9eb5b9b..e45498a 100644
|
|
--- a/arch/powerpc/kernel/vdso.c
|
|
+++ b/arch/powerpc/kernel/vdso.c
|
|
@@ -34,6 +34,7 @@
|
|
#include <asm/firmware.h>
|
|
#include <asm/vdso.h>
|
|
#include <asm/vdso_datapage.h>
|
|
+#include <asm/mman.h>
|
|
|
|
#include "setup.h"
|
|
|
|
@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
|
vdso_base = VDSO32_MBASE;
|
|
#endif
|
|
|
|
- current->mm->context.vdso_base = 0;
|
|
+ current->mm->context.vdso_base = ~0UL;
|
|
|
|
/* vDSO has a problem and was disabled, just don't "enable" it for the
|
|
* process
|
|
@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
|
vdso_base = get_unmapped_area(NULL, vdso_base,
|
|
(vdso_pages << PAGE_SHIFT) +
|
|
((VDSO_ALIGNMENT - 1) & PAGE_MASK),
|
|
- 0, 0);
|
|
+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
|
|
if (IS_ERR_VALUE(vdso_base)) {
|
|
rc = vdso_base;
|
|
goto fail_mmapsem;
|
|
diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
|
|
index 5eea6f3..5d10396 100644
|
|
--- a/arch/powerpc/lib/usercopy_64.c
|
|
+++ b/arch/powerpc/lib/usercopy_64.c
|
|
@@ -9,22 +9,6 @@
|
|
#include <linux/module.h>
|
|
#include <asm/uaccess.h>
|
|
|
|
-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
|
|
-{
|
|
- if (likely(access_ok(VERIFY_READ, from, n)))
|
|
- n = __copy_from_user(to, from, n);
|
|
- else
|
|
- memset(to, 0, n);
|
|
- return n;
|
|
-}
|
|
-
|
|
-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
-{
|
|
- if (likely(access_ok(VERIFY_WRITE, to, n)))
|
|
- n = __copy_to_user(to, from, n);
|
|
- return n;
|
|
-}
|
|
-
|
|
unsigned long copy_in_user(void __user *to, const void __user *from,
|
|
unsigned long n)
|
|
{
|
|
@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
|
|
return n;
|
|
}
|
|
|
|
-EXPORT_SYMBOL(copy_from_user);
|
|
-EXPORT_SYMBOL(copy_to_user);
|
|
EXPORT_SYMBOL(copy_in_user);
|
|
|
|
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
|
|
index 3d30a4a..248c6f6 100644
|
|
--- a/arch/powerpc/mm/fault.c
|
|
+++ b/arch/powerpc/mm/fault.c
|
|
@@ -32,6 +32,10 @@
|
|
#include <linux/perf_event.h>
|
|
#include <linux/magic.h>
|
|
#include <linux/ratelimit.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/pagemap.h>
|
|
+#include <linux/compiler.h>
|
|
+#include <linux/unistd.h>
|
|
|
|
#include <asm/firmware.h>
|
|
#include <asm/page.h>
|
|
@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
|
|
}
|
|
#endif
|
|
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+/*
|
|
+ * PaX: decide what to do with offenders (regs->nip = fault address)
|
|
+ *
|
|
+ * returns 1 when task should be killed
|
|
+ */
|
|
+static int pax_handle_fetch_fault(struct pt_regs *regs)
|
|
+{
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
|
|
+{
|
|
+ unsigned long i;
|
|
+
|
|
+ printk(KERN_ERR "PAX: bytes at PC: ");
|
|
+ for (i = 0; i < 5; i++) {
|
|
+ unsigned int c;
|
|
+ if (get_user(c, (unsigned int __user *)pc+i))
|
|
+ printk(KERN_CONT "???????? ");
|
|
+ else
|
|
+ printk(KERN_CONT "%08x ", c);
|
|
+ }
|
|
+ printk("\n");
|
|
+}
|
|
+#endif
|
|
+
|
|
/*
|
|
* Check whether the instruction at regs->nip is a store using
|
|
* an update addressing form which will update r1.
|
|
@@ -215,7 +246,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
|
|
* indicate errors in DSISR but can validly be set in SRR1.
|
|
*/
|
|
if (trap == 0x400)
|
|
- error_code &= 0x48200000;
|
|
+ error_code &= 0x58200000;
|
|
else
|
|
is_write = error_code & DSISR_ISSTORE;
|
|
#else
|
|
@@ -366,7 +397,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
|
|
* "undefined". Of those that can be set, this is the only
|
|
* one which seems bad.
|
|
*/
|
|
- if (error_code & 0x10000000)
|
|
+ if (error_code & DSISR_GUARDED)
|
|
/* Guarded storage error. */
|
|
goto bad_area;
|
|
#endif /* CONFIG_8xx */
|
|
@@ -381,7 +412,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
|
|
* processors use the same I/D cache coherency mechanism
|
|
* as embedded.
|
|
*/
|
|
- if (error_code & DSISR_PROTFAULT)
|
|
+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
|
|
goto bad_area;
|
|
#endif /* CONFIG_PPC_STD_MMU */
|
|
|
|
@@ -467,6 +498,23 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
|
|
bad_area_nosemaphore:
|
|
/* User mode accesses cause a SIGSEGV */
|
|
if (user_mode(regs)) {
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
|
|
+#ifdef CONFIG_PPC_STD_MMU
|
|
+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
|
|
+#else
|
|
+ if (is_exec && regs->nip == address) {
|
|
+#endif
|
|
+ switch (pax_handle_fetch_fault(regs)) {
|
|
+ }
|
|
+
|
|
+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
|
|
+ do_group_exit(SIGKILL);
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
_exception(SIGSEGV, regs, code, address);
|
|
return 0;
|
|
}
|
|
diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
|
|
index 67a42ed..1c7210c 100644
|
|
--- a/arch/powerpc/mm/mmap_64.c
|
|
+++ b/arch/powerpc/mm/mmap_64.c
|
|
@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
|
|
*/
|
|
if (mmap_is_legacy()) {
|
|
mm->mmap_base = TASK_UNMAPPED_BASE;
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
+ mm->mmap_base += mm->delta_mmap;
|
|
+#endif
|
|
+
|
|
mm->get_unmapped_area = arch_get_unmapped_area;
|
|
mm->unmap_area = arch_unmap_area;
|
|
} else {
|
|
mm->mmap_base = mmap_base();
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
|
|
+#endif
|
|
+
|
|
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
|
|
mm->unmap_area = arch_unmap_area_topdown;
|
|
}
|
|
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
|
|
index 73709f7..6b90313 100644
|
|
--- a/arch/powerpc/mm/slice.c
|
|
+++ b/arch/powerpc/mm/slice.c
|
|
@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
|
|
if ((mm->task_size - len) < addr)
|
|
return 0;
|
|
vma = find_vma(mm, addr);
|
|
- return (!vma || (addr + len) <= vma->vm_start);
|
|
+ return check_heap_stack_gap(vma, addr, len);
|
|
}
|
|
|
|
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
|
|
@@ -256,7 +256,7 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
|
|
addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
|
|
continue;
|
|
}
|
|
- if (!vma || addr + len <= vma->vm_start) {
|
|
+ if (check_heap_stack_gap(vma, addr, len)) {
|
|
/*
|
|
* Remember the place where we stopped the search:
|
|
*/
|
|
@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
|
|
}
|
|
}
|
|
|
|
- addr = mm->mmap_base;
|
|
- while (addr > len) {
|
|
+ if (mm->mmap_base < len)
|
|
+ addr = -ENOMEM;
|
|
+ else
|
|
+ addr = mm->mmap_base - len;
|
|
+
|
|
+ while (!IS_ERR_VALUE(addr)) {
|
|
/* Go down by chunk size */
|
|
- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
|
|
+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
|
|
|
|
/* Check for hit with different page size */
|
|
mask = slice_range_to_mask(addr, len);
|
|
@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
|
|
* return with success:
|
|
*/
|
|
vma = find_vma(mm, addr);
|
|
- if (!vma || (addr + len) <= vma->vm_start) {
|
|
+ if (check_heap_stack_gap(vma, addr, len)) {
|
|
/* remember the address as a hint for next time */
|
|
if (use_cache)
|
|
mm->free_area_cache = addr;
|
|
@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
|
|
mm->cached_hole_size = vma->vm_start - addr;
|
|
|
|
/* try just below the current vma->vm_start */
|
|
- addr = vma->vm_start;
|
|
+ addr = skip_heap_stack_gap(vma, len);
|
|
}
|
|
|
|
/*
|
|
@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
|
|
if (fixed && addr > (mm->task_size - len))
|
|
return -EINVAL;
|
|
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
|
|
+ addr = 0;
|
|
+#endif
|
|
+
|
|
/* If hint, make sure it matches our alignment restrictions */
|
|
if (!fixed && addr) {
|
|
addr = _ALIGN_UP(addr, 1ul << pshift);
|
|
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
|
|
index 748347b..81bc6c7 100644
|
|
--- a/arch/s390/include/asm/atomic.h
|
|
+++ b/arch/s390/include/asm/atomic.h
|
|
@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
|
|
#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
|
|
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
|
|
|
|
+#define atomic64_read_unchecked(v) atomic64_read(v)
|
|
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
|
|
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
|
|
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
|
|
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
|
|
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
|
|
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
|
|
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
|
|
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
|
|
+
|
|
#define smp_mb__before_atomic_dec() smp_mb()
|
|
#define smp_mb__after_atomic_dec() smp_mb()
|
|
#define smp_mb__before_atomic_inc() smp_mb()
|
|
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
|
|
index c4ee39f..352881b 100644
|
|
--- a/arch/s390/include/asm/elf.h
|
|
+++ b/arch/s390/include/asm/elf.h
|
|
@@ -161,8 +161,14 @@ extern unsigned int vdso_enabled;
|
|
the loader. We need to make sure that it is out of the way of the program
|
|
that it will "exec", and that there is sufficient room for the brk. */
|
|
|
|
-extern unsigned long randomize_et_dyn(unsigned long base);
|
|
-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
|
|
+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
|
|
+
|
|
+#ifdef CONFIG_PAX_ASLR
|
|
+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
|
|
+
|
|
+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
|
|
+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
|
|
+#endif
|
|
|
|
/* This yields a mask that user programs can use to figure out what
|
|
instruction set this CPU supports. */
|
|
@@ -210,7 +216,4 @@ struct linux_binprm;
|
|
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
|
|
int arch_setup_additional_pages(struct linux_binprm *, int);
|
|
|
|
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
|
|
-#define arch_randomize_brk arch_randomize_brk
|
|
-
|
|
#endif
|
|
diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
|
|
index c4a93d6..4d2a9b4 100644
|
|
--- a/arch/s390/include/asm/exec.h
|
|
+++ b/arch/s390/include/asm/exec.h
|
|
@@ -7,6 +7,6 @@
|
|
#ifndef __ASM_EXEC_H
|
|
#define __ASM_EXEC_H
|
|
|
|
-extern unsigned long arch_align_stack(unsigned long sp);
|
|
+#define arch_align_stack(x) ((x) & ~0xfUL)
|
|
|
|
#endif /* __ASM_EXEC_H */
|
|
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
|
|
index 8f2cada..43072c1 100644
|
|
--- a/arch/s390/include/asm/uaccess.h
|
|
+++ b/arch/s390/include/asm/uaccess.h
|
|
@@ -236,6 +236,10 @@ static inline unsigned long __must_check
|
|
copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
{
|
|
might_fault();
|
|
+
|
|
+ if ((long)n < 0)
|
|
+ return n;
|
|
+
|
|
if (access_ok(VERIFY_WRITE, to, n))
|
|
n = __copy_to_user(to, from, n);
|
|
return n;
|
|
@@ -261,6 +265,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
static inline unsigned long __must_check
|
|
__copy_from_user(void *to, const void __user *from, unsigned long n)
|
|
{
|
|
+ if ((long)n < 0)
|
|
+ return n;
|
|
+
|
|
if (__builtin_constant_p(n) && (n <= 256))
|
|
return uaccess.copy_from_user_small(n, from, to);
|
|
else
|
|
@@ -292,10 +299,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
|
|
static inline unsigned long __must_check
|
|
copy_from_user(void *to, const void __user *from, unsigned long n)
|
|
{
|
|
- unsigned int sz = __compiletime_object_size(to);
|
|
+ size_t sz = __compiletime_object_size(to);
|
|
|
|
might_fault();
|
|
- if (unlikely(sz != -1 && sz < n)) {
|
|
+
|
|
+ if ((long)n < 0)
|
|
+ return n;
|
|
+
|
|
+ if (unlikely(sz != (size_t)-1 && sz < n)) {
|
|
copy_from_user_overflow();
|
|
return n;
|
|
}
|
|
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
|
|
index dfcb343..eda788a 100644
|
|
--- a/arch/s390/kernel/module.c
|
|
+++ b/arch/s390/kernel/module.c
|
|
@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
|
|
|
|
/* Increase core size by size of got & plt and set start
|
|
offsets for got and plt. */
|
|
- me->core_size = ALIGN(me->core_size, 4);
|
|
- me->arch.got_offset = me->core_size;
|
|
- me->core_size += me->arch.got_size;
|
|
- me->arch.plt_offset = me->core_size;
|
|
- me->core_size += me->arch.plt_size;
|
|
+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
|
|
+ me->arch.got_offset = me->core_size_rw;
|
|
+ me->core_size_rw += me->arch.got_size;
|
|
+ me->arch.plt_offset = me->core_size_rx;
|
|
+ me->core_size_rx += me->arch.plt_size;
|
|
return 0;
|
|
}
|
|
|
|
@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
|
|
if (info->got_initialized == 0) {
|
|
Elf_Addr *gotent;
|
|
|
|
- gotent = me->module_core + me->arch.got_offset +
|
|
+ gotent = me->module_core_rw + me->arch.got_offset +
|
|
info->got_offset;
|
|
*gotent = val;
|
|
info->got_initialized = 1;
|
|
@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
|
|
else if (r_type == R_390_GOTENT ||
|
|
r_type == R_390_GOTPLTENT)
|
|
*(unsigned int *) loc =
|
|
- (val + (Elf_Addr) me->module_core - loc) >> 1;
|
|
+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
|
|
else if (r_type == R_390_GOT64 ||
|
|
r_type == R_390_GOTPLT64)
|
|
*(unsigned long *) loc = val;
|
|
@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
|
|
case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
|
|
if (info->plt_initialized == 0) {
|
|
unsigned int *ip;
|
|
- ip = me->module_core + me->arch.plt_offset +
|
|
+ ip = me->module_core_rx + me->arch.plt_offset +
|
|
info->plt_offset;
|
|
#ifndef CONFIG_64BIT
|
|
ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
|
|
@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
|
|
val - loc + 0xffffUL < 0x1ffffeUL) ||
|
|
(r_type == R_390_PLT32DBL &&
|
|
val - loc + 0xffffffffULL < 0x1fffffffeULL)))
|
|
- val = (Elf_Addr) me->module_core +
|
|
+ val = (Elf_Addr) me->module_core_rx +
|
|
me->arch.plt_offset +
|
|
info->plt_offset;
|
|
val += rela->r_addend - loc;
|
|
@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
|
|
case R_390_GOTOFF32: /* 32 bit offset to GOT. */
|
|
case R_390_GOTOFF64: /* 64 bit offset to GOT. */
|
|
val = val + rela->r_addend -
|
|
- ((Elf_Addr) me->module_core + me->arch.got_offset);
|
|
+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
|
|
if (r_type == R_390_GOTOFF16)
|
|
*(unsigned short *) loc = val;
|
|
else if (r_type == R_390_GOTOFF32)
|
|
@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
|
|
break;
|
|
case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
|
|
case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
|
|
- val = (Elf_Addr) me->module_core + me->arch.got_offset +
|
|
+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
|
|
rela->r_addend - loc;
|
|
if (r_type == R_390_GOTPC)
|
|
*(unsigned int *) loc = val;
|
|
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
|
|
index 60055ce..ee4b252 100644
|
|
--- a/arch/s390/kernel/process.c
|
|
+++ b/arch/s390/kernel/process.c
|
|
@@ -316,39 +316,3 @@ unsigned long get_wchan(struct task_struct *p)
|
|
}
|
|
return 0;
|
|
}
|
|
-
|
|
-unsigned long arch_align_stack(unsigned long sp)
|
|
-{
|
|
- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
|
|
- sp -= get_random_int() & ~PAGE_MASK;
|
|
- return sp & ~0xf;
|
|
-}
|
|
-
|
|
-static inline unsigned long brk_rnd(void)
|
|
-{
|
|
- /* 8MB for 32bit, 1GB for 64bit */
|
|
- if (is_32bit_task())
|
|
- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
|
|
- else
|
|
- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
|
|
-}
|
|
-
|
|
-unsigned long arch_randomize_brk(struct mm_struct *mm)
|
|
-{
|
|
- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
|
|
-
|
|
- if (ret < mm->brk)
|
|
- return mm->brk;
|
|
- return ret;
|
|
-}
|
|
-
|
|
-unsigned long randomize_et_dyn(unsigned long base)
|
|
-{
|
|
- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
|
|
-
|
|
- if (!(current->flags & PF_RANDOMIZE))
|
|
- return base;
|
|
- if (ret < base)
|
|
- return base;
|
|
- return ret;
|
|
-}
|
|
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
|
|
index a64fe53..5c66963 100644
|
|
--- a/arch/s390/mm/mmap.c
|
|
+++ b/arch/s390/mm/mmap.c
|
|
@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
|
|
*/
|
|
if (mmap_is_legacy()) {
|
|
mm->mmap_base = TASK_UNMAPPED_BASE;
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
+ mm->mmap_base += mm->delta_mmap;
|
|
+#endif
|
|
+
|
|
mm->get_unmapped_area = arch_get_unmapped_area;
|
|
mm->unmap_area = arch_unmap_area;
|
|
} else {
|
|
mm->mmap_base = mmap_base();
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
|
|
+#endif
|
|
+
|
|
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
|
|
mm->unmap_area = arch_unmap_area_topdown;
|
|
}
|
|
@@ -174,10 +186,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
|
|
*/
|
|
if (mmap_is_legacy()) {
|
|
mm->mmap_base = TASK_UNMAPPED_BASE;
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
+ mm->mmap_base += mm->delta_mmap;
|
|
+#endif
|
|
+
|
|
mm->get_unmapped_area = s390_get_unmapped_area;
|
|
mm->unmap_area = arch_unmap_area;
|
|
} else {
|
|
mm->mmap_base = mmap_base();
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
|
|
+#endif
|
|
+
|
|
mm->get_unmapped_area = s390_get_unmapped_area_topdown;
|
|
mm->unmap_area = arch_unmap_area_topdown;
|
|
}
|
|
diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
|
|
index f9f3cd5..58ff438 100644
|
|
--- a/arch/score/include/asm/exec.h
|
|
+++ b/arch/score/include/asm/exec.h
|
|
@@ -1,6 +1,6 @@
|
|
#ifndef _ASM_SCORE_EXEC_H
|
|
#define _ASM_SCORE_EXEC_H
|
|
|
|
-extern unsigned long arch_align_stack(unsigned long sp);
|
|
+#define arch_align_stack(x) (x)
|
|
|
|
#endif /* _ASM_SCORE_EXEC_H */
|
|
diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
|
|
index 637970c..0b6556b 100644
|
|
--- a/arch/score/kernel/process.c
|
|
+++ b/arch/score/kernel/process.c
|
|
@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
|
|
|
|
return task_pt_regs(task)->cp0_epc;
|
|
}
|
|
-
|
|
-unsigned long arch_align_stack(unsigned long sp)
|
|
-{
|
|
- return sp;
|
|
-}
|
|
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
|
|
index afeb710..d1d1289 100644
|
|
--- a/arch/sh/mm/mmap.c
|
|
+++ b/arch/sh/mm/mmap.c
|
|
@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|
addr = PAGE_ALIGN(addr);
|
|
|
|
vma = find_vma(mm, addr);
|
|
- if (TASK_SIZE - len >= addr &&
|
|
- (!vma || addr + len <= vma->vm_start))
|
|
+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
|
|
return addr;
|
|
}
|
|
|
|
@@ -106,7 +105,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|
}
|
|
return -ENOMEM;
|
|
}
|
|
- if (likely(!vma || addr + len <= vma->vm_start)) {
|
|
+ if (likely(check_heap_stack_gap(vma, addr, len))) {
|
|
/*
|
|
* Remember the place where we stopped the search:
|
|
*/
|
|
@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
addr = PAGE_ALIGN(addr);
|
|
|
|
vma = find_vma(mm, addr);
|
|
- if (TASK_SIZE - len >= addr &&
|
|
- (!vma || addr + len <= vma->vm_start))
|
|
+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
|
|
return addr;
|
|
}
|
|
|
|
@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
/* make sure it can fit in the remaining address space */
|
|
if (likely(addr > len)) {
|
|
vma = find_vma(mm, addr-len);
|
|
- if (!vma || addr <= vma->vm_start) {
|
|
+ if (check_heap_stack_gap(vma, addr - len, len)) {
|
|
/* remember the address as a hint for next time */
|
|
return (mm->free_area_cache = addr-len);
|
|
}
|
|
@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
if (unlikely(mm->mmap_base < len))
|
|
goto bottomup;
|
|
|
|
- addr = mm->mmap_base-len;
|
|
- if (do_colour_align)
|
|
- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
|
|
+ addr = mm->mmap_base - len;
|
|
|
|
do {
|
|
+ if (do_colour_align)
|
|
+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
|
|
/*
|
|
* Lookup failure means no vma is above this address,
|
|
* else if new region fits below vma->vm_start,
|
|
* return with success:
|
|
*/
|
|
vma = find_vma(mm, addr);
|
|
- if (likely(!vma || addr+len <= vma->vm_start)) {
|
|
+ if (likely(check_heap_stack_gap(vma, addr, len))) {
|
|
/* remember the address as a hint for next time */
|
|
return (mm->free_area_cache = addr);
|
|
}
|
|
@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
mm->cached_hole_size = vma->vm_start - addr;
|
|
|
|
/* try just below the current vma->vm_start */
|
|
- addr = vma->vm_start-len;
|
|
- if (do_colour_align)
|
|
- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
|
|
- } while (likely(len < vma->vm_start));
|
|
+ addr = skip_heap_stack_gap(vma, len);
|
|
+ } while (!IS_ERR_VALUE(addr));
|
|
|
|
bottomup:
|
|
/*
|
|
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
|
|
index ce35a1c..2e7b8f9 100644
|
|
--- a/arch/sparc/include/asm/atomic_64.h
|
|
+++ b/arch/sparc/include/asm/atomic_64.h
|
|
@@ -14,18 +14,40 @@
|
|
#define ATOMIC64_INIT(i) { (i) }
|
|
|
|
#define atomic_read(v) (*(volatile int *)&(v)->counter)
|
|
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
|
|
+{
|
|
+ return v->counter;
|
|
+}
|
|
#define atomic64_read(v) (*(volatile long *)&(v)->counter)
|
|
+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
|
|
+{
|
|
+ return v->counter;
|
|
+}
|
|
|
|
#define atomic_set(v, i) (((v)->counter) = i)
|
|
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
|
|
+{
|
|
+ v->counter = i;
|
|
+}
|
|
#define atomic64_set(v, i) (((v)->counter) = i)
|
|
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
|
|
+{
|
|
+ v->counter = i;
|
|
+}
|
|
|
|
extern void atomic_add(int, atomic_t *);
|
|
+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
|
|
extern void atomic64_add(long, atomic64_t *);
|
|
+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
|
|
extern void atomic_sub(int, atomic_t *);
|
|
+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
|
|
extern void atomic64_sub(long, atomic64_t *);
|
|
+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
|
|
|
|
extern int atomic_add_ret(int, atomic_t *);
|
|
+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
|
|
extern long atomic64_add_ret(long, atomic64_t *);
|
|
+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
|
|
extern int atomic_sub_ret(int, atomic_t *);
|
|
extern long atomic64_sub_ret(long, atomic64_t *);
|
|
|
|
@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
|
|
#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
|
|
|
|
#define atomic_inc_return(v) atomic_add_ret(1, v)
|
|
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
|
|
+{
|
|
+ return atomic_add_ret_unchecked(1, v);
|
|
+}
|
|
#define atomic64_inc_return(v) atomic64_add_ret(1, v)
|
|
+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
|
|
+{
|
|
+ return atomic64_add_ret_unchecked(1, v);
|
|
+}
|
|
|
|
#define atomic_sub_return(i, v) atomic_sub_ret(i, v)
|
|
#define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
|
|
|
|
#define atomic_add_return(i, v) atomic_add_ret(i, v)
|
|
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
|
|
+{
|
|
+ return atomic_add_ret_unchecked(i, v);
|
|
+}
|
|
#define atomic64_add_return(i, v) atomic64_add_ret(i, v)
|
|
+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
|
|
+{
|
|
+ return atomic64_add_ret_unchecked(i, v);
|
|
+}
|
|
|
|
/*
|
|
* atomic_inc_and_test - increment and test
|
|
@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
|
|
* other cases.
|
|
*/
|
|
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
|
|
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
|
|
+{
|
|
+ return atomic_inc_return_unchecked(v) == 0;
|
|
+}
|
|
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
|
|
|
|
#define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
|
|
@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
|
|
#define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
|
|
|
|
#define atomic_inc(v) atomic_add(1, v)
|
|
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
|
|
+{
|
|
+ atomic_add_unchecked(1, v);
|
|
+}
|
|
#define atomic64_inc(v) atomic64_add(1, v)
|
|
+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
|
|
+{
|
|
+ atomic64_add_unchecked(1, v);
|
|
+}
|
|
|
|
#define atomic_dec(v) atomic_sub(1, v)
|
|
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
|
|
+{
|
|
+ atomic_sub_unchecked(1, v);
|
|
+}
|
|
#define atomic64_dec(v) atomic64_sub(1, v)
|
|
+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
|
|
+{
|
|
+ atomic64_sub_unchecked(1, v);
|
|
+}
|
|
|
|
#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
|
|
#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
|
|
|
|
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
|
|
+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
|
|
+{
|
|
+ return cmpxchg(&v->counter, old, new);
|
|
+}
|
|
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
|
|
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
|
|
+{
|
|
+ return xchg(&v->counter, new);
|
|
+}
|
|
|
|
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
|
|
{
|
|
- int c, old;
|
|
+ int c, old, new;
|
|
c = atomic_read(v);
|
|
for (;;) {
|
|
- if (unlikely(c == (u)))
|
|
+ if (unlikely(c == u))
|
|
break;
|
|
- old = atomic_cmpxchg((v), c, c + (a));
|
|
+
|
|
+ asm volatile("addcc %2, %0, %0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "tvs %%icc, 6\n"
|
|
+#endif
|
|
+
|
|
+ : "=r" (new)
|
|
+ : "0" (c), "ir" (a)
|
|
+ : "cc");
|
|
+
|
|
+ old = atomic_cmpxchg(v, c, new);
|
|
if (likely(old == c))
|
|
break;
|
|
c = old;
|
|
@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
|
|
#define atomic64_cmpxchg(v, o, n) \
|
|
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
|
|
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
|
|
+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
|
|
+{
|
|
+ return xchg(&v->counter, new);
|
|
+}
|
|
|
|
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
|
|
{
|
|
- long c, old;
|
|
+ long c, old, new;
|
|
c = atomic64_read(v);
|
|
for (;;) {
|
|
- if (unlikely(c == (u)))
|
|
+ if (unlikely(c == u))
|
|
break;
|
|
- old = atomic64_cmpxchg((v), c, c + (a));
|
|
+
|
|
+ asm volatile("addcc %2, %0, %0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "tvs %%xcc, 6\n"
|
|
+#endif
|
|
+
|
|
+ : "=r" (new)
|
|
+ : "0" (c), "ir" (a)
|
|
+ : "cc");
|
|
+
|
|
+ old = atomic64_cmpxchg(v, c, new);
|
|
if (likely(old == c))
|
|
break;
|
|
c = old;
|
|
}
|
|
- return c != (u);
|
|
+ return c != u;
|
|
}
|
|
|
|
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
|
|
diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
|
|
index 69358b5..17b4745 100644
|
|
--- a/arch/sparc/include/asm/cache.h
|
|
+++ b/arch/sparc/include/asm/cache.h
|
|
@@ -10,7 +10,7 @@
|
|
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
|
|
|
|
#define L1_CACHE_SHIFT 5
|
|
-#define L1_CACHE_BYTES 32
|
|
+#define L1_CACHE_BYTES 32UL
|
|
|
|
#ifdef CONFIG_SPARC32
|
|
#define SMP_CACHE_BYTES_SHIFT 5
|
|
diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
|
|
index 4269ca6..e3da77f 100644
|
|
--- a/arch/sparc/include/asm/elf_32.h
|
|
+++ b/arch/sparc/include/asm/elf_32.h
|
|
@@ -114,6 +114,13 @@ typedef struct {
|
|
|
|
#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
|
|
|
|
+#ifdef CONFIG_PAX_ASLR
|
|
+#define PAX_ELF_ET_DYN_BASE 0x10000UL
|
|
+
|
|
+#define PAX_DELTA_MMAP_LEN 16
|
|
+#define PAX_DELTA_STACK_LEN 16
|
|
+#endif
|
|
+
|
|
/* This yields a mask that user programs can use to figure out what
|
|
instruction set this cpu supports. This can NOT be done in userspace
|
|
on Sparc. */
|
|
diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
|
|
index 7df8b7f..4946269 100644
|
|
--- a/arch/sparc/include/asm/elf_64.h
|
|
+++ b/arch/sparc/include/asm/elf_64.h
|
|
@@ -180,6 +180,13 @@ typedef struct {
|
|
#define ELF_ET_DYN_BASE 0x0000010000000000UL
|
|
#define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
|
|
|
|
+#ifdef CONFIG_PAX_ASLR
|
|
+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
|
|
+
|
|
+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
|
|
+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
|
|
+#endif
|
|
+
|
|
extern unsigned long sparc64_elf_hwcap;
|
|
#define ELF_HWCAP sparc64_elf_hwcap
|
|
|
|
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
|
|
index ca2b344..c6084f89 100644
|
|
--- a/arch/sparc/include/asm/pgalloc_32.h
|
|
+++ b/arch/sparc/include/asm/pgalloc_32.h
|
|
@@ -37,6 +37,7 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
|
|
BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
|
|
#define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
|
|
#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
|
|
+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
|
|
|
|
BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
|
|
#define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
|
|
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
|
|
index 40b2d7a..22a665b 100644
|
|
--- a/arch/sparc/include/asm/pgalloc_64.h
|
|
+++ b/arch/sparc/include/asm/pgalloc_64.h
|
|
@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
|
|
}
|
|
|
|
#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
|
|
+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
|
|
|
|
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
|
|
{
|
|
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
|
|
index 3d71018..48a11c5 100644
|
|
--- a/arch/sparc/include/asm/pgtable_32.h
|
|
+++ b/arch/sparc/include/asm/pgtable_32.h
|
|
@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
|
|
BTFIXUPDEF_INT(page_none)
|
|
BTFIXUPDEF_INT(page_copy)
|
|
BTFIXUPDEF_INT(page_readonly)
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+BTFIXUPDEF_INT(page_shared_noexec)
|
|
+BTFIXUPDEF_INT(page_copy_noexec)
|
|
+BTFIXUPDEF_INT(page_readonly_noexec)
|
|
+#endif
|
|
+
|
|
BTFIXUPDEF_INT(page_kernel)
|
|
|
|
#define PMD_SHIFT SUN4C_PMD_SHIFT
|
|
@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
|
|
#define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
|
|
#define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
|
|
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+extern pgprot_t PAGE_SHARED_NOEXEC;
|
|
+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
|
|
+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
|
|
+#else
|
|
+# define PAGE_SHARED_NOEXEC PAGE_SHARED
|
|
+# define PAGE_COPY_NOEXEC PAGE_COPY
|
|
+# define PAGE_READONLY_NOEXEC PAGE_READONLY
|
|
+#endif
|
|
+
|
|
extern unsigned long page_kernel;
|
|
|
|
#ifdef MODULE
|
|
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
|
|
index f6ae2b2..b03ffc7 100644
|
|
--- a/arch/sparc/include/asm/pgtsrmmu.h
|
|
+++ b/arch/sparc/include/asm/pgtsrmmu.h
|
|
@@ -115,6 +115,13 @@
|
|
SRMMU_EXEC | SRMMU_REF)
|
|
#define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
|
|
SRMMU_EXEC | SRMMU_REF)
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
|
|
+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
|
|
+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
|
|
+#endif
|
|
+
|
|
#define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
|
|
SRMMU_DIRTY | SRMMU_REF)
|
|
|
|
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
|
|
index 9689176..63c18ea 100644
|
|
--- a/arch/sparc/include/asm/spinlock_64.h
|
|
+++ b/arch/sparc/include/asm/spinlock_64.h
|
|
@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
|
|
|
|
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
|
|
|
|
-static void inline arch_read_lock(arch_rwlock_t *lock)
|
|
+static inline void arch_read_lock(arch_rwlock_t *lock)
|
|
{
|
|
unsigned long tmp1, tmp2;
|
|
|
|
__asm__ __volatile__ (
|
|
"1: ldsw [%2], %0\n"
|
|
" brlz,pn %0, 2f\n"
|
|
-"4: add %0, 1, %1\n"
|
|
+"4: addcc %0, 1, %1\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+" tvs %%icc, 6\n"
|
|
+#endif
|
|
+
|
|
" cas [%2], %0, %1\n"
|
|
" cmp %0, %1\n"
|
|
" bne,pn %%icc, 1b\n"
|
|
@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
|
|
" .previous"
|
|
: "=&r" (tmp1), "=&r" (tmp2)
|
|
: "r" (lock)
|
|
- : "memory");
|
|
+ : "memory", "cc");
|
|
}
|
|
|
|
-static int inline arch_read_trylock(arch_rwlock_t *lock)
|
|
+static inline int arch_read_trylock(arch_rwlock_t *lock)
|
|
{
|
|
int tmp1, tmp2;
|
|
|
|
@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
|
|
"1: ldsw [%2], %0\n"
|
|
" brlz,a,pn %0, 2f\n"
|
|
" mov 0, %0\n"
|
|
-" add %0, 1, %1\n"
|
|
+" addcc %0, 1, %1\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+" tvs %%icc, 6\n"
|
|
+#endif
|
|
+
|
|
" cas [%2], %0, %1\n"
|
|
" cmp %0, %1\n"
|
|
" bne,pn %%icc, 1b\n"
|
|
@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
|
|
return tmp1;
|
|
}
|
|
|
|
-static void inline arch_read_unlock(arch_rwlock_t *lock)
|
|
+static inline void arch_read_unlock(arch_rwlock_t *lock)
|
|
{
|
|
unsigned long tmp1, tmp2;
|
|
|
|
__asm__ __volatile__(
|
|
"1: lduw [%2], %0\n"
|
|
-" sub %0, 1, %1\n"
|
|
+" subcc %0, 1, %1\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+" tvs %%icc, 6\n"
|
|
+#endif
|
|
+
|
|
" cas [%2], %0, %1\n"
|
|
" cmp %0, %1\n"
|
|
" bne,pn %%xcc, 1b\n"
|
|
@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
|
|
: "memory");
|
|
}
|
|
|
|
-static void inline arch_write_lock(arch_rwlock_t *lock)
|
|
+static inline void arch_write_lock(arch_rwlock_t *lock)
|
|
{
|
|
unsigned long mask, tmp1, tmp2;
|
|
|
|
@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
|
|
: "memory");
|
|
}
|
|
|
|
-static void inline arch_write_unlock(arch_rwlock_t *lock)
|
|
+static inline void arch_write_unlock(arch_rwlock_t *lock)
|
|
{
|
|
__asm__ __volatile__(
|
|
" stw %%g0, [%0]"
|
|
@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
|
|
: "memory");
|
|
}
|
|
|
|
-static int inline arch_write_trylock(arch_rwlock_t *lock)
|
|
+static inline int arch_write_trylock(arch_rwlock_t *lock)
|
|
{
|
|
unsigned long mask, tmp1, tmp2, result;
|
|
|
|
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
|
|
index c2a1080..21ed218 100644
|
|
--- a/arch/sparc/include/asm/thread_info_32.h
|
|
+++ b/arch/sparc/include/asm/thread_info_32.h
|
|
@@ -50,6 +50,8 @@ struct thread_info {
|
|
unsigned long w_saved;
|
|
|
|
struct restart_block restart_block;
|
|
+
|
|
+ unsigned long lowest_stack;
|
|
};
|
|
|
|
/*
|
|
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
|
|
index 01d057f..0a02f7e 100644
|
|
--- a/arch/sparc/include/asm/thread_info_64.h
|
|
+++ b/arch/sparc/include/asm/thread_info_64.h
|
|
@@ -63,6 +63,8 @@ struct thread_info {
|
|
struct pt_regs *kern_una_regs;
|
|
unsigned int kern_una_insn;
|
|
|
|
+ unsigned long lowest_stack;
|
|
+
|
|
unsigned long fpregs[0] __attribute__ ((aligned(64)));
|
|
};
|
|
|
|
diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
|
|
index e88fbe5..96b0ce5 100644
|
|
--- a/arch/sparc/include/asm/uaccess.h
|
|
+++ b/arch/sparc/include/asm/uaccess.h
|
|
@@ -1,5 +1,13 @@
|
|
#ifndef ___ASM_SPARC_UACCESS_H
|
|
#define ___ASM_SPARC_UACCESS_H
|
|
+
|
|
+#ifdef __KERNEL__
|
|
+#ifndef __ASSEMBLY__
|
|
+#include <linux/types.h>
|
|
+extern void check_object_size(const void *ptr, unsigned long n, bool to);
|
|
+#endif
|
|
+#endif
|
|
+
|
|
#if defined(__sparc__) && defined(__arch64__)
|
|
#include <asm/uaccess_64.h>
|
|
#else
|
|
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
|
|
index 8303ac4..07f333d 100644
|
|
--- a/arch/sparc/include/asm/uaccess_32.h
|
|
+++ b/arch/sparc/include/asm/uaccess_32.h
|
|
@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
|
|
|
|
static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
{
|
|
- if (n && __access_ok((unsigned long) to, n))
|
|
+ if ((long)n < 0)
|
|
+ return n;
|
|
+
|
|
+ if (n && __access_ok((unsigned long) to, n)) {
|
|
+ if (!__builtin_constant_p(n))
|
|
+ check_object_size(from, n, true);
|
|
return __copy_user(to, (__force void __user *) from, n);
|
|
- else
|
|
+ } else
|
|
return n;
|
|
}
|
|
|
|
static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
{
|
|
+ if ((long)n < 0)
|
|
+ return n;
|
|
+
|
|
+ if (!__builtin_constant_p(n))
|
|
+ check_object_size(from, n, true);
|
|
+
|
|
return __copy_user(to, (__force void __user *) from, n);
|
|
}
|
|
|
|
static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
|
|
{
|
|
- if (n && __access_ok((unsigned long) from, n))
|
|
+ if ((long)n < 0)
|
|
+ return n;
|
|
+
|
|
+ if (n && __access_ok((unsigned long) from, n)) {
|
|
+ if (!__builtin_constant_p(n))
|
|
+ check_object_size(to, n, false);
|
|
return __copy_user((__force void __user *) to, from, n);
|
|
- else
|
|
+ } else
|
|
return n;
|
|
}
|
|
|
|
static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
|
|
{
|
|
+ if ((long)n < 0)
|
|
+ return n;
|
|
+
|
|
return __copy_user((__force void __user *) to, from, n);
|
|
}
|
|
|
|
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
|
|
index 8bc9afd..35f4a52 100644
|
|
--- a/arch/sparc/include/asm/uaccess_64.h
|
|
+++ b/arch/sparc/include/asm/uaccess_64.h
|
|
@@ -10,6 +10,7 @@
|
|
#include <linux/compiler.h>
|
|
#include <linux/string.h>
|
|
#include <linux/thread_info.h>
|
|
+#include <linux/kernel.h>
|
|
#include <asm/asi.h>
|
|
#include <asm/spitfire.h>
|
|
#include <asm-generic/uaccess-unaligned.h>
|
|
@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
|
|
static inline unsigned long __must_check
|
|
copy_from_user(void *to, const void __user *from, unsigned long size)
|
|
{
|
|
- unsigned long ret = ___copy_from_user(to, from, size);
|
|
+ unsigned long ret;
|
|
|
|
+ if ((long)size < 0 || size > INT_MAX)
|
|
+ return size;
|
|
+
|
|
+ if (!__builtin_constant_p(size))
|
|
+ check_object_size(to, size, false);
|
|
+
|
|
+ ret = ___copy_from_user(to, from, size);
|
|
if (unlikely(ret))
|
|
ret = copy_from_user_fixup(to, from, size);
|
|
|
|
@@ -229,8 +237,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
|
|
static inline unsigned long __must_check
|
|
copy_to_user(void __user *to, const void *from, unsigned long size)
|
|
{
|
|
- unsigned long ret = ___copy_to_user(to, from, size);
|
|
+ unsigned long ret;
|
|
+
|
|
+ if ((long)size < 0 || size > INT_MAX)
|
|
+ return size;
|
|
+
|
|
+ if (!__builtin_constant_p(size))
|
|
+ check_object_size(from, size, true);
|
|
|
|
+ ret = ___copy_to_user(to, from, size);
|
|
if (unlikely(ret))
|
|
ret = copy_to_user_fixup(to, from, size);
|
|
return ret;
|
|
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
|
|
index cb85458..e063f17 100644
|
|
--- a/arch/sparc/kernel/Makefile
|
|
+++ b/arch/sparc/kernel/Makefile
|
|
@@ -3,7 +3,7 @@
|
|
#
|
|
|
|
asflags-y := -ansi
|
|
-ccflags-y := -Werror
|
|
+#ccflags-y := -Werror
|
|
|
|
extra-y := head_$(BITS).o
|
|
extra-y += init_task.o
|
|
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
|
|
index 42b282f..28ce9f2 100644
|
|
--- a/arch/sparc/kernel/sys_sparc_32.c
|
|
+++ b/arch/sparc/kernel/sys_sparc_32.c
|
|
@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
|
|
if (ARCH_SUN4C && len > 0x20000000)
|
|
return -ENOMEM;
|
|
if (!addr)
|
|
- addr = TASK_UNMAPPED_BASE;
|
|
+ addr = current->mm->mmap_base;
|
|
|
|
if (flags & MAP_SHARED)
|
|
addr = COLOUR_ALIGN(addr);
|
|
@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
|
|
}
|
|
if (TASK_SIZE - PAGE_SIZE - len < addr)
|
|
return -ENOMEM;
|
|
- if (!vmm || addr + len <= vmm->vm_start)
|
|
+ if (check_heap_stack_gap(vmm, addr, len))
|
|
return addr;
|
|
addr = vmm->vm_end;
|
|
if (flags & MAP_SHARED)
|
|
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
|
|
index 57b7cab..b2f7857 100644
|
|
--- a/arch/sparc/kernel/sys_sparc_64.c
|
|
+++ b/arch/sparc/kernel/sys_sparc_64.c
|
|
@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
|
|
/* We do not accept a shared mapping if it would violate
|
|
* cache aliasing constraints.
|
|
*/
|
|
- if ((flags & MAP_SHARED) &&
|
|
+ if ((filp || (flags & MAP_SHARED)) &&
|
|
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
|
|
return -EINVAL;
|
|
return addr;
|
|
@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
|
|
if (filp || (flags & MAP_SHARED))
|
|
do_color_align = 1;
|
|
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
|
|
+#endif
|
|
+
|
|
if (addr) {
|
|
if (do_color_align)
|
|
addr = COLOUR_ALIGN(addr, pgoff);
|
|
@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
|
|
addr = PAGE_ALIGN(addr);
|
|
|
|
vma = find_vma(mm, addr);
|
|
- if (task_size - len >= addr &&
|
|
- (!vma || addr + len <= vma->vm_start))
|
|
+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
|
|
return addr;
|
|
}
|
|
|
|
if (len > mm->cached_hole_size) {
|
|
- start_addr = addr = mm->free_area_cache;
|
|
+ start_addr = addr = mm->free_area_cache;
|
|
} else {
|
|
- start_addr = addr = TASK_UNMAPPED_BASE;
|
|
+ start_addr = addr = mm->mmap_base;
|
|
mm->cached_hole_size = 0;
|
|
}
|
|
|
|
@@ -174,14 +177,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
|
|
vma = find_vma(mm, VA_EXCLUDE_END);
|
|
}
|
|
if (unlikely(task_size < addr)) {
|
|
- if (start_addr != TASK_UNMAPPED_BASE) {
|
|
- start_addr = addr = TASK_UNMAPPED_BASE;
|
|
+ if (start_addr != mm->mmap_base) {
|
|
+ start_addr = addr = mm->mmap_base;
|
|
mm->cached_hole_size = 0;
|
|
goto full_search;
|
|
}
|
|
return -ENOMEM;
|
|
}
|
|
- if (likely(!vma || addr + len <= vma->vm_start)) {
|
|
+ if (likely(check_heap_stack_gap(vma, addr, len))) {
|
|
/*
|
|
* Remember the place where we stopped the search:
|
|
*/
|
|
@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
/* We do not accept a shared mapping if it would violate
|
|
* cache aliasing constraints.
|
|
*/
|
|
- if ((flags & MAP_SHARED) &&
|
|
+ if ((filp || (flags & MAP_SHARED)) &&
|
|
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
|
|
return -EINVAL;
|
|
return addr;
|
|
@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
addr = PAGE_ALIGN(addr);
|
|
|
|
vma = find_vma(mm, addr);
|
|
- if (task_size - len >= addr &&
|
|
- (!vma || addr + len <= vma->vm_start))
|
|
+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
|
|
return addr;
|
|
}
|
|
|
|
@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
/* make sure it can fit in the remaining address space */
|
|
if (likely(addr > len)) {
|
|
vma = find_vma(mm, addr-len);
|
|
- if (!vma || addr <= vma->vm_start) {
|
|
+ if (check_heap_stack_gap(vma, addr - len, len)) {
|
|
/* remember the address as a hint for next time */
|
|
return (mm->free_area_cache = addr-len);
|
|
}
|
|
@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
if (unlikely(mm->mmap_base < len))
|
|
goto bottomup;
|
|
|
|
- addr = mm->mmap_base-len;
|
|
- if (do_color_align)
|
|
- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
|
|
+ addr = mm->mmap_base - len;
|
|
|
|
do {
|
|
+ if (do_color_align)
|
|
+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
|
|
/*
|
|
* Lookup failure means no vma is above this address,
|
|
* else if new region fits below vma->vm_start,
|
|
* return with success:
|
|
*/
|
|
vma = find_vma(mm, addr);
|
|
- if (likely(!vma || addr+len <= vma->vm_start)) {
|
|
+ if (likely(check_heap_stack_gap(vma, addr, len))) {
|
|
/* remember the address as a hint for next time */
|
|
return (mm->free_area_cache = addr);
|
|
}
|
|
@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
mm->cached_hole_size = vma->vm_start - addr;
|
|
|
|
/* try just below the current vma->vm_start */
|
|
- addr = vma->vm_start-len;
|
|
- if (do_color_align)
|
|
- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
|
|
- } while (likely(len < vma->vm_start));
|
|
+ addr = skip_heap_stack_gap(vma, len);
|
|
+ } while (!IS_ERR_VALUE(addr));
|
|
|
|
bottomup:
|
|
/*
|
|
@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
|
|
gap == RLIM_INFINITY ||
|
|
sysctl_legacy_va_layout) {
|
|
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
+ mm->mmap_base += mm->delta_mmap;
|
|
+#endif
|
|
+
|
|
mm->get_unmapped_area = arch_get_unmapped_area;
|
|
mm->unmap_area = arch_unmap_area;
|
|
} else {
|
|
@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
|
|
gap = (task_size / 6 * 5);
|
|
|
|
mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
|
|
+#endif
|
|
+
|
|
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
|
|
mm->unmap_area = arch_unmap_area_topdown;
|
|
}
|
|
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
|
|
index c72fdf5..4add6d8 100644
|
|
--- a/arch/sparc/kernel/traps_64.c
|
|
+++ b/arch/sparc/kernel/traps_64.c
|
|
@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
|
|
|
|
lvl -= 0x100;
|
|
if (regs->tstate & TSTATE_PRIV) {
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ if (lvl == 6)
|
|
+ pax_report_refcount_overflow(regs);
|
|
+#endif
|
|
+
|
|
sprintf(buffer, "Kernel bad sw trap %lx", lvl);
|
|
die_if_kernel(buffer, regs);
|
|
}
|
|
@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
|
|
void bad_trap_tl1(struct pt_regs *regs, long lvl)
|
|
{
|
|
char buffer[32];
|
|
-
|
|
+
|
|
if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
|
|
0, lvl, SIGTRAP) == NOTIFY_STOP)
|
|
return;
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ if (lvl == 6)
|
|
+ pax_report_refcount_overflow(regs);
|
|
+#endif
|
|
+
|
|
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
|
|
|
|
sprintf (buffer, "Bad trap %lx at tl>0", lvl);
|
|
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
|
|
index 4961516..f82ff86 100644
|
|
--- a/arch/sparc/lib/Makefile
|
|
+++ b/arch/sparc/lib/Makefile
|
|
@@ -2,7 +2,7 @@
|
|
#
|
|
|
|
asflags-y := -ansi -DST_DIV0=0x02
|
|
-ccflags-y := -Werror
|
|
+#ccflags-y := -Werror
|
|
|
|
lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
|
|
lib-$(CONFIG_SPARC32) += memcpy.o memset.o
|
|
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
|
|
index 59186e0..f747d7af 100644
|
|
--- a/arch/sparc/lib/atomic_64.S
|
|
+++ b/arch/sparc/lib/atomic_64.S
|
|
@@ -18,7 +18,12 @@
|
|
atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
|
|
BACKOFF_SETUP(%o2)
|
|
1: lduw [%o1], %g1
|
|
- add %g1, %o0, %g7
|
|
+ addcc %g1, %o0, %g7
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ tvs %icc, 6
|
|
+#endif
|
|
+
|
|
cas [%o1], %g1, %g7
|
|
cmp %g1, %g7
|
|
bne,pn %icc, BACKOFF_LABEL(2f, 1b)
|
|
@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
|
|
2: BACKOFF_SPIN(%o2, %o3, 1b)
|
|
.size atomic_add, .-atomic_add
|
|
|
|
+ .globl atomic_add_unchecked
|
|
+ .type atomic_add_unchecked,#function
|
|
+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
|
|
+ BACKOFF_SETUP(%o2)
|
|
+1: lduw [%o1], %g1
|
|
+ add %g1, %o0, %g7
|
|
+ cas [%o1], %g1, %g7
|
|
+ cmp %g1, %g7
|
|
+ bne,pn %icc, 2f
|
|
+ nop
|
|
+ retl
|
|
+ nop
|
|
+2: BACKOFF_SPIN(%o2, %o3, 1b)
|
|
+ .size atomic_add_unchecked, .-atomic_add_unchecked
|
|
+
|
|
.globl atomic_sub
|
|
.type atomic_sub,#function
|
|
atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
|
|
BACKOFF_SETUP(%o2)
|
|
1: lduw [%o1], %g1
|
|
- sub %g1, %o0, %g7
|
|
+ subcc %g1, %o0, %g7
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ tvs %icc, 6
|
|
+#endif
|
|
+
|
|
cas [%o1], %g1, %g7
|
|
cmp %g1, %g7
|
|
bne,pn %icc, BACKOFF_LABEL(2f, 1b)
|
|
@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
|
|
2: BACKOFF_SPIN(%o2, %o3, 1b)
|
|
.size atomic_sub, .-atomic_sub
|
|
|
|
+ .globl atomic_sub_unchecked
|
|
+ .type atomic_sub_unchecked,#function
|
|
+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
|
|
+ BACKOFF_SETUP(%o2)
|
|
+1: lduw [%o1], %g1
|
|
+ sub %g1, %o0, %g7
|
|
+ cas [%o1], %g1, %g7
|
|
+ cmp %g1, %g7
|
|
+ bne,pn %icc, 2f
|
|
+ nop
|
|
+ retl
|
|
+ nop
|
|
+2: BACKOFF_SPIN(%o2, %o3, 1b)
|
|
+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
|
|
+
|
|
.globl atomic_add_ret
|
|
.type atomic_add_ret,#function
|
|
atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
|
|
BACKOFF_SETUP(%o2)
|
|
1: lduw [%o1], %g1
|
|
- add %g1, %o0, %g7
|
|
+ addcc %g1, %o0, %g7
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ tvs %icc, 6
|
|
+#endif
|
|
+
|
|
cas [%o1], %g1, %g7
|
|
cmp %g1, %g7
|
|
bne,pn %icc, BACKOFF_LABEL(2f, 1b)
|
|
@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
|
|
2: BACKOFF_SPIN(%o2, %o3, 1b)
|
|
.size atomic_add_ret, .-atomic_add_ret
|
|
|
|
+ .globl atomic_add_ret_unchecked
|
|
+ .type atomic_add_ret_unchecked,#function
|
|
+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
|
|
+ BACKOFF_SETUP(%o2)
|
|
+1: lduw [%o1], %g1
|
|
+ addcc %g1, %o0, %g7
|
|
+ cas [%o1], %g1, %g7
|
|
+ cmp %g1, %g7
|
|
+ bne,pn %icc, 2f
|
|
+ add %g7, %o0, %g7
|
|
+ sra %g7, 0, %o0
|
|
+ retl
|
|
+ nop
|
|
+2: BACKOFF_SPIN(%o2, %o3, 1b)
|
|
+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
|
|
+
|
|
.globl atomic_sub_ret
|
|
.type atomic_sub_ret,#function
|
|
atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
|
|
BACKOFF_SETUP(%o2)
|
|
1: lduw [%o1], %g1
|
|
- sub %g1, %o0, %g7
|
|
+ subcc %g1, %o0, %g7
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ tvs %icc, 6
|
|
+#endif
|
|
+
|
|
cas [%o1], %g1, %g7
|
|
cmp %g1, %g7
|
|
bne,pn %icc, BACKOFF_LABEL(2f, 1b)
|
|
@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
|
|
atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
|
|
BACKOFF_SETUP(%o2)
|
|
1: ldx [%o1], %g1
|
|
- add %g1, %o0, %g7
|
|
+ addcc %g1, %o0, %g7
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ tvs %xcc, 6
|
|
+#endif
|
|
+
|
|
casx [%o1], %g1, %g7
|
|
cmp %g1, %g7
|
|
bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
|
|
@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
|
|
2: BACKOFF_SPIN(%o2, %o3, 1b)
|
|
.size atomic64_add, .-atomic64_add
|
|
|
|
+ .globl atomic64_add_unchecked
|
|
+ .type atomic64_add_unchecked,#function
|
|
+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
|
|
+ BACKOFF_SETUP(%o2)
|
|
+1: ldx [%o1], %g1
|
|
+ addcc %g1, %o0, %g7
|
|
+ casx [%o1], %g1, %g7
|
|
+ cmp %g1, %g7
|
|
+ bne,pn %xcc, 2f
|
|
+ nop
|
|
+ retl
|
|
+ nop
|
|
+2: BACKOFF_SPIN(%o2, %o3, 1b)
|
|
+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
|
|
+
|
|
.globl atomic64_sub
|
|
.type atomic64_sub,#function
|
|
atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
|
|
BACKOFF_SETUP(%o2)
|
|
1: ldx [%o1], %g1
|
|
- sub %g1, %o0, %g7
|
|
+ subcc %g1, %o0, %g7
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ tvs %xcc, 6
|
|
+#endif
|
|
+
|
|
casx [%o1], %g1, %g7
|
|
cmp %g1, %g7
|
|
bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
|
|
@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
|
|
2: BACKOFF_SPIN(%o2, %o3, 1b)
|
|
.size atomic64_sub, .-atomic64_sub
|
|
|
|
+ .globl atomic64_sub_unchecked
|
|
+ .type atomic64_sub_unchecked,#function
|
|
+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
|
|
+ BACKOFF_SETUP(%o2)
|
|
+1: ldx [%o1], %g1
|
|
+ subcc %g1, %o0, %g7
|
|
+ casx [%o1], %g1, %g7
|
|
+ cmp %g1, %g7
|
|
+ bne,pn %xcc, 2f
|
|
+ nop
|
|
+ retl
|
|
+ nop
|
|
+2: BACKOFF_SPIN(%o2, %o3, 1b)
|
|
+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
|
|
+
|
|
.globl atomic64_add_ret
|
|
.type atomic64_add_ret,#function
|
|
atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
|
|
BACKOFF_SETUP(%o2)
|
|
1: ldx [%o1], %g1
|
|
- add %g1, %o0, %g7
|
|
+ addcc %g1, %o0, %g7
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ tvs %xcc, 6
|
|
+#endif
|
|
+
|
|
casx [%o1], %g1, %g7
|
|
cmp %g1, %g7
|
|
bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
|
|
@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
|
|
2: BACKOFF_SPIN(%o2, %o3, 1b)
|
|
.size atomic64_add_ret, .-atomic64_add_ret
|
|
|
|
+ .globl atomic64_add_ret_unchecked
|
|
+ .type atomic64_add_ret_unchecked,#function
|
|
+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
|
|
+ BACKOFF_SETUP(%o2)
|
|
+1: ldx [%o1], %g1
|
|
+ addcc %g1, %o0, %g7
|
|
+ casx [%o1], %g1, %g7
|
|
+ cmp %g1, %g7
|
|
+ bne,pn %xcc, 2f
|
|
+ add %g7, %o0, %g7
|
|
+ mov %g7, %o0
|
|
+ retl
|
|
+ nop
|
|
+2: BACKOFF_SPIN(%o2, %o3, 1b)
|
|
+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
|
|
+
|
|
.globl atomic64_sub_ret
|
|
.type atomic64_sub_ret,#function
|
|
atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
|
|
BACKOFF_SETUP(%o2)
|
|
1: ldx [%o1], %g1
|
|
- sub %g1, %o0, %g7
|
|
+ subcc %g1, %o0, %g7
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ tvs %xcc, 6
|
|
+#endif
|
|
+
|
|
casx [%o1], %g1, %g7
|
|
cmp %g1, %g7
|
|
bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
|
|
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
|
|
index 0b59bd3..dbd0d44 100644
|
|
--- a/arch/sparc/lib/ksyms.c
|
|
+++ b/arch/sparc/lib/ksyms.c
|
|
@@ -127,12 +127,18 @@ EXPORT_SYMBOL(__clear_user);
|
|
|
|
/* Atomic counter implementation. */
|
|
EXPORT_SYMBOL(atomic_add);
|
|
+EXPORT_SYMBOL(atomic_add_unchecked);
|
|
EXPORT_SYMBOL(atomic_add_ret);
|
|
+EXPORT_SYMBOL(atomic_add_ret_unchecked);
|
|
EXPORT_SYMBOL(atomic_sub);
|
|
+EXPORT_SYMBOL(atomic_sub_unchecked);
|
|
EXPORT_SYMBOL(atomic_sub_ret);
|
|
EXPORT_SYMBOL(atomic64_add);
|
|
+EXPORT_SYMBOL(atomic64_add_unchecked);
|
|
EXPORT_SYMBOL(atomic64_add_ret);
|
|
+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
|
|
EXPORT_SYMBOL(atomic64_sub);
|
|
+EXPORT_SYMBOL(atomic64_sub_unchecked);
|
|
EXPORT_SYMBOL(atomic64_sub_ret);
|
|
|
|
/* Atomic bit operations. */
|
|
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
|
|
index 301421c..e2535d1 100644
|
|
--- a/arch/sparc/mm/Makefile
|
|
+++ b/arch/sparc/mm/Makefile
|
|
@@ -2,7 +2,7 @@
|
|
#
|
|
|
|
asflags-y := -ansi
|
|
-ccflags-y := -Werror
|
|
+#ccflags-y := -Werror
|
|
|
|
obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
|
|
obj-y += fault_$(BITS).o
|
|
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
|
|
index 5c6238d..d597636 100644
|
|
--- a/arch/sparc/mm/fault_32.c
|
|
+++ b/arch/sparc/mm/fault_32.c
|
|
@@ -21,6 +21,9 @@
|
|
#include <linux/perf_event.h>
|
|
#include <linux/interrupt.h>
|
|
#include <linux/kdebug.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/pagemap.h>
|
|
+#include <linux/compiler.h>
|
|
|
|
#include <asm/page.h>
|
|
#include <asm/pgtable.h>
|
|
@@ -207,6 +210,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
|
|
return safe_compute_effective_address(regs, insn);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+#ifdef CONFIG_PAX_DLRESOLVE
|
|
+static void pax_emuplt_close(struct vm_area_struct *vma)
|
|
+{
|
|
+ vma->vm_mm->call_dl_resolve = 0UL;
|
|
+}
|
|
+
|
|
+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
|
+{
|
|
+ unsigned int *kaddr;
|
|
+
|
|
+ vmf->page = alloc_page(GFP_HIGHUSER);
|
|
+ if (!vmf->page)
|
|
+ return VM_FAULT_OOM;
|
|
+
|
|
+ kaddr = kmap(vmf->page);
|
|
+ memset(kaddr, 0, PAGE_SIZE);
|
|
+ kaddr[0] = 0x9DE3BFA8U; /* save */
|
|
+ flush_dcache_page(vmf->page);
|
|
+ kunmap(vmf->page);
|
|
+ return VM_FAULT_MAJOR;
|
|
+}
|
|
+
|
|
+static const struct vm_operations_struct pax_vm_ops = {
|
|
+ .close = pax_emuplt_close,
|
|
+ .fault = pax_emuplt_fault
|
|
+};
|
|
+
|
|
+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ INIT_LIST_HEAD(&vma->anon_vma_chain);
|
|
+ vma->vm_mm = current->mm;
|
|
+ vma->vm_start = addr;
|
|
+ vma->vm_end = addr + PAGE_SIZE;
|
|
+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
|
|
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
|
|
+ vma->vm_ops = &pax_vm_ops;
|
|
+
|
|
+ ret = insert_vm_struct(current->mm, vma);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ++current->mm->total_vm;
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+/*
|
|
+ * PaX: decide what to do with offenders (regs->pc = fault address)
|
|
+ *
|
|
+ * returns 1 when task should be killed
|
|
+ * 2 when patched PLT trampoline was detected
|
|
+ * 3 when unpatched PLT trampoline was detected
|
|
+ */
|
|
+static int pax_handle_fetch_fault(struct pt_regs *regs)
|
|
+{
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUPLT
|
|
+ int err;
|
|
+
|
|
+ do { /* PaX: patched PLT emulation #1 */
|
|
+ unsigned int sethi1, sethi2, jmpl;
|
|
+
|
|
+ err = get_user(sethi1, (unsigned int *)regs->pc);
|
|
+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
|
|
+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
|
|
+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
|
|
+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
|
|
+ {
|
|
+ unsigned int addr;
|
|
+
|
|
+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
|
|
+ addr = regs->u_regs[UREG_G1];
|
|
+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
|
|
+ regs->pc = addr;
|
|
+ regs->npc = addr+4;
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+ do { /* PaX: patched PLT emulation #2 */
|
|
+ unsigned int ba;
|
|
+
|
|
+ err = get_user(ba, (unsigned int *)regs->pc);
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
|
|
+ unsigned int addr;
|
|
+
|
|
+ if ((ba & 0xFFC00000U) == 0x30800000U)
|
|
+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
|
|
+ else
|
|
+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
|
|
+ regs->pc = addr;
|
|
+ regs->npc = addr+4;
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+ do { /* PaX: patched PLT emulation #3 */
|
|
+ unsigned int sethi, bajmpl, nop;
|
|
+
|
|
+ err = get_user(sethi, (unsigned int *)regs->pc);
|
|
+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
|
|
+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
|
|
+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
|
|
+ nop == 0x01000000U)
|
|
+ {
|
|
+ unsigned int addr;
|
|
+
|
|
+ addr = (sethi & 0x003FFFFFU) << 10;
|
|
+ regs->u_regs[UREG_G1] = addr;
|
|
+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
|
|
+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
|
|
+ else
|
|
+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
|
|
+ regs->pc = addr;
|
|
+ regs->npc = addr+4;
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+ do { /* PaX: unpatched PLT emulation step 1 */
|
|
+ unsigned int sethi, ba, nop;
|
|
+
|
|
+ err = get_user(sethi, (unsigned int *)regs->pc);
|
|
+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
|
|
+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
|
|
+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
|
|
+ nop == 0x01000000U)
|
|
+ {
|
|
+ unsigned int addr, save, call;
|
|
+
|
|
+ if ((ba & 0xFFC00000U) == 0x30800000U)
|
|
+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
|
|
+ else
|
|
+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
|
|
+
|
|
+ err = get_user(save, (unsigned int *)addr);
|
|
+ err |= get_user(call, (unsigned int *)(addr+4));
|
|
+ err |= get_user(nop, (unsigned int *)(addr+8));
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+#ifdef CONFIG_PAX_DLRESOLVE
|
|
+ if (save == 0x9DE3BFA8U &&
|
|
+ (call & 0xC0000000U) == 0x40000000U &&
|
|
+ nop == 0x01000000U)
|
|
+ {
|
|
+ struct vm_area_struct *vma;
|
|
+ unsigned long call_dl_resolve;
|
|
+
|
|
+ down_read(¤t->mm->mmap_sem);
|
|
+ call_dl_resolve = current->mm->call_dl_resolve;
|
|
+ up_read(¤t->mm->mmap_sem);
|
|
+ if (likely(call_dl_resolve))
|
|
+ goto emulate;
|
|
+
|
|
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
|
|
+
|
|
+ down_write(¤t->mm->mmap_sem);
|
|
+ if (current->mm->call_dl_resolve) {
|
|
+ call_dl_resolve = current->mm->call_dl_resolve;
|
|
+ up_write(¤t->mm->mmap_sem);
|
|
+ if (vma)
|
|
+ kmem_cache_free(vm_area_cachep, vma);
|
|
+ goto emulate;
|
|
+ }
|
|
+
|
|
+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
|
|
+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
|
|
+ up_write(¤t->mm->mmap_sem);
|
|
+ if (vma)
|
|
+ kmem_cache_free(vm_area_cachep, vma);
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ if (pax_insert_vma(vma, call_dl_resolve)) {
|
|
+ up_write(¤t->mm->mmap_sem);
|
|
+ kmem_cache_free(vm_area_cachep, vma);
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ current->mm->call_dl_resolve = call_dl_resolve;
|
|
+ up_write(¤t->mm->mmap_sem);
|
|
+
|
|
+emulate:
|
|
+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
|
|
+ regs->pc = call_dl_resolve;
|
|
+ regs->npc = addr+4;
|
|
+ return 3;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
|
|
+ if ((save & 0xFFC00000U) == 0x05000000U &&
|
|
+ (call & 0xFFFFE000U) == 0x85C0A000U &&
|
|
+ nop == 0x01000000U)
|
|
+ {
|
|
+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
|
|
+ regs->u_regs[UREG_G2] = addr + 4;
|
|
+ addr = (save & 0x003FFFFFU) << 10;
|
|
+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
|
|
+ regs->pc = addr;
|
|
+ regs->npc = addr+4;
|
|
+ return 3;
|
|
+ }
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+ do { /* PaX: unpatched PLT emulation step 2 */
|
|
+ unsigned int save, call, nop;
|
|
+
|
|
+ err = get_user(save, (unsigned int *)(regs->pc-4));
|
|
+ err |= get_user(call, (unsigned int *)regs->pc);
|
|
+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if (save == 0x9DE3BFA8U &&
|
|
+ (call & 0xC0000000U) == 0x40000000U &&
|
|
+ nop == 0x01000000U)
|
|
+ {
|
|
+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
|
|
+
|
|
+ regs->u_regs[UREG_RETPC] = regs->pc;
|
|
+ regs->pc = dl_resolve;
|
|
+ regs->npc = dl_resolve+4;
|
|
+ return 3;
|
|
+ }
|
|
+ } while (0);
|
|
+#endif
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
|
|
+{
|
|
+ unsigned long i;
|
|
+
|
|
+ printk(KERN_ERR "PAX: bytes at PC: ");
|
|
+ for (i = 0; i < 8; i++) {
|
|
+ unsigned int c;
|
|
+ if (get_user(c, (unsigned int *)pc+i))
|
|
+ printk(KERN_CONT "???????? ");
|
|
+ else
|
|
+ printk(KERN_CONT "%08x ", c);
|
|
+ }
|
|
+ printk("\n");
|
|
+}
|
|
+#endif
|
|
+
|
|
static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
|
|
int text_fault)
|
|
{
|
|
@@ -282,6 +556,24 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
|
|
if(!(vma->vm_flags & VM_WRITE))
|
|
goto bad_area;
|
|
} else {
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
|
|
+ up_read(&mm->mmap_sem);
|
|
+ switch (pax_handle_fetch_fault(regs)) {
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUPLT
|
|
+ case 2:
|
|
+ case 3:
|
|
+ return;
|
|
+#endif
|
|
+
|
|
+ }
|
|
+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
|
|
+ do_group_exit(SIGKILL);
|
|
+ }
|
|
+#endif
|
|
+
|
|
/* Allow reads even for write-only mappings */
|
|
if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
|
|
goto bad_area;
|
|
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
|
|
index 0dc7b90..c984253 100644
|
|
--- a/arch/sparc/mm/fault_64.c
|
|
+++ b/arch/sparc/mm/fault_64.c
|
|
@@ -21,6 +21,9 @@
|
|
#include <linux/kprobes.h>
|
|
#include <linux/kdebug.h>
|
|
#include <linux/percpu.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/pagemap.h>
|
|
+#include <linux/compiler.h>
|
|
|
|
#include <asm/page.h>
|
|
#include <asm/pgtable.h>
|
|
@@ -282,6 +285,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
|
|
show_regs(regs);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+#ifdef CONFIG_PAX_DLRESOLVE
|
|
+static void pax_emuplt_close(struct vm_area_struct *vma)
|
|
+{
|
|
+ vma->vm_mm->call_dl_resolve = 0UL;
|
|
+}
|
|
+
|
|
+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
|
+{
|
|
+ unsigned int *kaddr;
|
|
+
|
|
+ vmf->page = alloc_page(GFP_HIGHUSER);
|
|
+ if (!vmf->page)
|
|
+ return VM_FAULT_OOM;
|
|
+
|
|
+ kaddr = kmap(vmf->page);
|
|
+ memset(kaddr, 0, PAGE_SIZE);
|
|
+ kaddr[0] = 0x9DE3BFA8U; /* save */
|
|
+ flush_dcache_page(vmf->page);
|
|
+ kunmap(vmf->page);
|
|
+ return VM_FAULT_MAJOR;
|
|
+}
|
|
+
|
|
+static const struct vm_operations_struct pax_vm_ops = {
|
|
+ .close = pax_emuplt_close,
|
|
+ .fault = pax_emuplt_fault
|
|
+};
|
|
+
|
|
+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ INIT_LIST_HEAD(&vma->anon_vma_chain);
|
|
+ vma->vm_mm = current->mm;
|
|
+ vma->vm_start = addr;
|
|
+ vma->vm_end = addr + PAGE_SIZE;
|
|
+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
|
|
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
|
|
+ vma->vm_ops = &pax_vm_ops;
|
|
+
|
|
+ ret = insert_vm_struct(current->mm, vma);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ++current->mm->total_vm;
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+/*
|
|
+ * PaX: decide what to do with offenders (regs->tpc = fault address)
|
|
+ *
|
|
+ * returns 1 when task should be killed
|
|
+ * 2 when patched PLT trampoline was detected
|
|
+ * 3 when unpatched PLT trampoline was detected
|
|
+ */
|
|
+static int pax_handle_fetch_fault(struct pt_regs *regs)
|
|
+{
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUPLT
|
|
+ int err;
|
|
+
|
|
+ do { /* PaX: patched PLT emulation #1 */
|
|
+ unsigned int sethi1, sethi2, jmpl;
|
|
+
|
|
+ err = get_user(sethi1, (unsigned int *)regs->tpc);
|
|
+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
|
|
+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
|
|
+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
|
|
+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
|
|
+ {
|
|
+ unsigned long addr;
|
|
+
|
|
+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
|
|
+ addr = regs->u_regs[UREG_G1];
|
|
+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
|
|
+
|
|
+ if (test_thread_flag(TIF_32BIT))
|
|
+ addr &= 0xFFFFFFFFUL;
|
|
+
|
|
+ regs->tpc = addr;
|
|
+ regs->tnpc = addr+4;
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+ do { /* PaX: patched PLT emulation #2 */
|
|
+ unsigned int ba;
|
|
+
|
|
+ err = get_user(ba, (unsigned int *)regs->tpc);
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
|
|
+ unsigned long addr;
|
|
+
|
|
+ if ((ba & 0xFFC00000U) == 0x30800000U)
|
|
+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
|
|
+ else
|
|
+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
|
|
+
|
|
+ if (test_thread_flag(TIF_32BIT))
|
|
+ addr &= 0xFFFFFFFFUL;
|
|
+
|
|
+ regs->tpc = addr;
|
|
+ regs->tnpc = addr+4;
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+ do { /* PaX: patched PLT emulation #3 */
|
|
+ unsigned int sethi, bajmpl, nop;
|
|
+
|
|
+ err = get_user(sethi, (unsigned int *)regs->tpc);
|
|
+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
|
|
+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
|
|
+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
|
|
+ nop == 0x01000000U)
|
|
+ {
|
|
+ unsigned long addr;
|
|
+
|
|
+ addr = (sethi & 0x003FFFFFU) << 10;
|
|
+ regs->u_regs[UREG_G1] = addr;
|
|
+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
|
|
+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
|
|
+ else
|
|
+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
|
|
+
|
|
+ if (test_thread_flag(TIF_32BIT))
|
|
+ addr &= 0xFFFFFFFFUL;
|
|
+
|
|
+ regs->tpc = addr;
|
|
+ regs->tnpc = addr+4;
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+ do { /* PaX: patched PLT emulation #4 */
|
|
+ unsigned int sethi, mov1, call, mov2;
|
|
+
|
|
+ err = get_user(sethi, (unsigned int *)regs->tpc);
|
|
+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
|
|
+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
|
|
+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
|
|
+ mov1 == 0x8210000FU &&
|
|
+ (call & 0xC0000000U) == 0x40000000U &&
|
|
+ mov2 == 0x9E100001U)
|
|
+ {
|
|
+ unsigned long addr;
|
|
+
|
|
+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
|
|
+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
|
|
+
|
|
+ if (test_thread_flag(TIF_32BIT))
|
|
+ addr &= 0xFFFFFFFFUL;
|
|
+
|
|
+ regs->tpc = addr;
|
|
+ regs->tnpc = addr+4;
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+ do { /* PaX: patched PLT emulation #5 */
|
|
+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
|
|
+
|
|
+ err = get_user(sethi, (unsigned int *)regs->tpc);
|
|
+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
|
|
+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
|
|
+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
|
|
+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
|
|
+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
|
|
+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
|
|
+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
|
|
+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
|
|
+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
|
|
+ (or1 & 0xFFFFE000U) == 0x82106000U &&
|
|
+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
|
|
+ sllx == 0x83287020U &&
|
|
+ jmpl == 0x81C04005U &&
|
|
+ nop == 0x01000000U)
|
|
+ {
|
|
+ unsigned long addr;
|
|
+
|
|
+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
|
|
+ regs->u_regs[UREG_G1] <<= 32;
|
|
+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
|
|
+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
|
|
+ regs->tpc = addr;
|
|
+ regs->tnpc = addr+4;
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+ do { /* PaX: patched PLT emulation #6 */
|
|
+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
|
|
+
|
|
+ err = get_user(sethi, (unsigned int *)regs->tpc);
|
|
+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
|
|
+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
|
|
+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
|
|
+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
|
|
+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
|
|
+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
|
|
+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
|
|
+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
|
|
+ sllx == 0x83287020U &&
|
|
+ (or & 0xFFFFE000U) == 0x8A116000U &&
|
|
+ jmpl == 0x81C04005U &&
|
|
+ nop == 0x01000000U)
|
|
+ {
|
|
+ unsigned long addr;
|
|
+
|
|
+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
|
|
+ regs->u_regs[UREG_G1] <<= 32;
|
|
+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
|
|
+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
|
|
+ regs->tpc = addr;
|
|
+ regs->tnpc = addr+4;
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+ do { /* PaX: unpatched PLT emulation step 1 */
|
|
+ unsigned int sethi, ba, nop;
|
|
+
|
|
+ err = get_user(sethi, (unsigned int *)regs->tpc);
|
|
+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
|
|
+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
|
|
+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
|
|
+ nop == 0x01000000U)
|
|
+ {
|
|
+ unsigned long addr;
|
|
+ unsigned int save, call;
|
|
+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
|
|
+
|
|
+ if ((ba & 0xFFC00000U) == 0x30800000U)
|
|
+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
|
|
+ else
|
|
+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
|
|
+
|
|
+ if (test_thread_flag(TIF_32BIT))
|
|
+ addr &= 0xFFFFFFFFUL;
|
|
+
|
|
+ err = get_user(save, (unsigned int *)addr);
|
|
+ err |= get_user(call, (unsigned int *)(addr+4));
|
|
+ err |= get_user(nop, (unsigned int *)(addr+8));
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+#ifdef CONFIG_PAX_DLRESOLVE
|
|
+ if (save == 0x9DE3BFA8U &&
|
|
+ (call & 0xC0000000U) == 0x40000000U &&
|
|
+ nop == 0x01000000U)
|
|
+ {
|
|
+ struct vm_area_struct *vma;
|
|
+ unsigned long call_dl_resolve;
|
|
+
|
|
+ down_read(¤t->mm->mmap_sem);
|
|
+ call_dl_resolve = current->mm->call_dl_resolve;
|
|
+ up_read(¤t->mm->mmap_sem);
|
|
+ if (likely(call_dl_resolve))
|
|
+ goto emulate;
|
|
+
|
|
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
|
|
+
|
|
+ down_write(¤t->mm->mmap_sem);
|
|
+ if (current->mm->call_dl_resolve) {
|
|
+ call_dl_resolve = current->mm->call_dl_resolve;
|
|
+ up_write(¤t->mm->mmap_sem);
|
|
+ if (vma)
|
|
+ kmem_cache_free(vm_area_cachep, vma);
|
|
+ goto emulate;
|
|
+ }
|
|
+
|
|
+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
|
|
+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
|
|
+ up_write(¤t->mm->mmap_sem);
|
|
+ if (vma)
|
|
+ kmem_cache_free(vm_area_cachep, vma);
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ if (pax_insert_vma(vma, call_dl_resolve)) {
|
|
+ up_write(¤t->mm->mmap_sem);
|
|
+ kmem_cache_free(vm_area_cachep, vma);
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ current->mm->call_dl_resolve = call_dl_resolve;
|
|
+ up_write(¤t->mm->mmap_sem);
|
|
+
|
|
+emulate:
|
|
+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
|
|
+ regs->tpc = call_dl_resolve;
|
|
+ regs->tnpc = addr+4;
|
|
+ return 3;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
|
|
+ if ((save & 0xFFC00000U) == 0x05000000U &&
|
|
+ (call & 0xFFFFE000U) == 0x85C0A000U &&
|
|
+ nop == 0x01000000U)
|
|
+ {
|
|
+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
|
|
+ regs->u_regs[UREG_G2] = addr + 4;
|
|
+ addr = (save & 0x003FFFFFU) << 10;
|
|
+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
|
|
+
|
|
+ if (test_thread_flag(TIF_32BIT))
|
|
+ addr &= 0xFFFFFFFFUL;
|
|
+
|
|
+ regs->tpc = addr;
|
|
+ regs->tnpc = addr+4;
|
|
+ return 3;
|
|
+ }
|
|
+
|
|
+ /* PaX: 64-bit PLT stub */
|
|
+ err = get_user(sethi1, (unsigned int *)addr);
|
|
+ err |= get_user(sethi2, (unsigned int *)(addr+4));
|
|
+ err |= get_user(or1, (unsigned int *)(addr+8));
|
|
+ err |= get_user(or2, (unsigned int *)(addr+12));
|
|
+ err |= get_user(sllx, (unsigned int *)(addr+16));
|
|
+ err |= get_user(add, (unsigned int *)(addr+20));
|
|
+ err |= get_user(jmpl, (unsigned int *)(addr+24));
|
|
+ err |= get_user(nop, (unsigned int *)(addr+28));
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
|
|
+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
|
|
+ (or1 & 0xFFFFE000U) == 0x88112000U &&
|
|
+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
|
|
+ sllx == 0x89293020U &&
|
|
+ add == 0x8A010005U &&
|
|
+ jmpl == 0x89C14000U &&
|
|
+ nop == 0x01000000U)
|
|
+ {
|
|
+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
|
|
+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
|
|
+ regs->u_regs[UREG_G4] <<= 32;
|
|
+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
|
|
+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
|
|
+ regs->u_regs[UREG_G4] = addr + 24;
|
|
+ addr = regs->u_regs[UREG_G5];
|
|
+ regs->tpc = addr;
|
|
+ regs->tnpc = addr+4;
|
|
+ return 3;
|
|
+ }
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+#ifdef CONFIG_PAX_DLRESOLVE
|
|
+ do { /* PaX: unpatched PLT emulation step 2 */
|
|
+ unsigned int save, call, nop;
|
|
+
|
|
+ err = get_user(save, (unsigned int *)(regs->tpc-4));
|
|
+ err |= get_user(call, (unsigned int *)regs->tpc);
|
|
+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if (save == 0x9DE3BFA8U &&
|
|
+ (call & 0xC0000000U) == 0x40000000U &&
|
|
+ nop == 0x01000000U)
|
|
+ {
|
|
+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
|
|
+
|
|
+ if (test_thread_flag(TIF_32BIT))
|
|
+ dl_resolve &= 0xFFFFFFFFUL;
|
|
+
|
|
+ regs->u_regs[UREG_RETPC] = regs->tpc;
|
|
+ regs->tpc = dl_resolve;
|
|
+ regs->tnpc = dl_resolve+4;
|
|
+ return 3;
|
|
+ }
|
|
+ } while (0);
|
|
+#endif
|
|
+
|
|
+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
|
|
+ unsigned int sethi, ba, nop;
|
|
+
|
|
+ err = get_user(sethi, (unsigned int *)regs->tpc);
|
|
+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
|
|
+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
|
|
+ (ba & 0xFFF00000U) == 0x30600000U &&
|
|
+ nop == 0x01000000U)
|
|
+ {
|
|
+ unsigned long addr;
|
|
+
|
|
+ addr = (sethi & 0x003FFFFFU) << 10;
|
|
+ regs->u_regs[UREG_G1] = addr;
|
|
+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
|
|
+
|
|
+ if (test_thread_flag(TIF_32BIT))
|
|
+ addr &= 0xFFFFFFFFUL;
|
|
+
|
|
+ regs->tpc = addr;
|
|
+ regs->tnpc = addr+4;
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+#endif
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
|
|
+{
|
|
+ unsigned long i;
|
|
+
|
|
+ printk(KERN_ERR "PAX: bytes at PC: ");
|
|
+ for (i = 0; i < 8; i++) {
|
|
+ unsigned int c;
|
|
+ if (get_user(c, (unsigned int *)pc+i))
|
|
+ printk(KERN_CONT "???????? ");
|
|
+ else
|
|
+ printk(KERN_CONT "%08x ", c);
|
|
+ }
|
|
+ printk("\n");
|
|
+}
|
|
+#endif
|
|
+
|
|
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
|
|
{
|
|
struct mm_struct *mm = current->mm;
|
|
@@ -351,6 +814,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
|
|
if (!vma)
|
|
goto bad_area;
|
|
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ /* PaX: detect ITLB misses on non-exec pages */
|
|
+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
|
|
+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
|
|
+ {
|
|
+ if (address != regs->tpc)
|
|
+ goto good_area;
|
|
+
|
|
+ up_read(&mm->mmap_sem);
|
|
+ switch (pax_handle_fetch_fault(regs)) {
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUPLT
|
|
+ case 2:
|
|
+ case 3:
|
|
+ return;
|
|
+#endif
|
|
+
|
|
+ }
|
|
+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
|
|
+ do_group_exit(SIGKILL);
|
|
+ }
|
|
+#endif
|
|
+
|
|
/* Pure DTLB misses do not tell us whether the fault causing
|
|
* load/store/atomic was a write or not, it only says that there
|
|
* was no match. So in such a case we (carefully) read the
|
|
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
|
|
index 07e1453..0a7d9e9 100644
|
|
--- a/arch/sparc/mm/hugetlbpage.c
|
|
+++ b/arch/sparc/mm/hugetlbpage.c
|
|
@@ -67,7 +67,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
|
|
}
|
|
return -ENOMEM;
|
|
}
|
|
- if (likely(!vma || addr + len <= vma->vm_start)) {
|
|
+ if (likely(check_heap_stack_gap(vma, addr, len))) {
|
|
/*
|
|
* Remember the place where we stopped the search:
|
|
*/
|
|
@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
/* make sure it can fit in the remaining address space */
|
|
if (likely(addr > len)) {
|
|
vma = find_vma(mm, addr-len);
|
|
- if (!vma || addr <= vma->vm_start) {
|
|
+ if (check_heap_stack_gap(vma, addr - len, len)) {
|
|
/* remember the address as a hint for next time */
|
|
return (mm->free_area_cache = addr-len);
|
|
}
|
|
@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
if (unlikely(mm->mmap_base < len))
|
|
goto bottomup;
|
|
|
|
- addr = (mm->mmap_base-len) & HPAGE_MASK;
|
|
+ addr = mm->mmap_base - len;
|
|
|
|
do {
|
|
+ addr &= HPAGE_MASK;
|
|
/*
|
|
* Lookup failure means no vma is above this address,
|
|
* else if new region fits below vma->vm_start,
|
|
* return with success:
|
|
*/
|
|
vma = find_vma(mm, addr);
|
|
- if (likely(!vma || addr+len <= vma->vm_start)) {
|
|
+ if (likely(check_heap_stack_gap(vma, addr, len))) {
|
|
/* remember the address as a hint for next time */
|
|
return (mm->free_area_cache = addr);
|
|
}
|
|
@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
mm->cached_hole_size = vma->vm_start - addr;
|
|
|
|
/* try just below the current vma->vm_start */
|
|
- addr = (vma->vm_start-len) & HPAGE_MASK;
|
|
- } while (likely(len < vma->vm_start));
|
|
+ addr = skip_heap_stack_gap(vma, len);
|
|
+ } while (!IS_ERR_VALUE(addr));
|
|
|
|
bottomup:
|
|
/*
|
|
@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
|
|
if (addr) {
|
|
addr = ALIGN(addr, HPAGE_SIZE);
|
|
vma = find_vma(mm, addr);
|
|
- if (task_size - len >= addr &&
|
|
- (!vma || addr + len <= vma->vm_start))
|
|
+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
|
|
return addr;
|
|
}
|
|
if (mm->get_unmapped_area == arch_get_unmapped_area)
|
|
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
|
|
index fbf4eeb..f42624b 100644
|
|
--- a/arch/sparc/mm/init_32.c
|
|
+++ b/arch/sparc/mm/init_32.c
|
|
@@ -315,6 +315,9 @@ extern void device_scan(void);
|
|
pgprot_t PAGE_SHARED __read_mostly;
|
|
EXPORT_SYMBOL(PAGE_SHARED);
|
|
|
|
+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
|
|
+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
|
|
+
|
|
void __init paging_init(void)
|
|
{
|
|
switch(sparc_cpu_model) {
|
|
@@ -343,17 +346,17 @@ void __init paging_init(void)
|
|
|
|
/* Initialize the protection map with non-constant, MMU dependent values. */
|
|
protection_map[0] = PAGE_NONE;
|
|
- protection_map[1] = PAGE_READONLY;
|
|
- protection_map[2] = PAGE_COPY;
|
|
- protection_map[3] = PAGE_COPY;
|
|
+ protection_map[1] = PAGE_READONLY_NOEXEC;
|
|
+ protection_map[2] = PAGE_COPY_NOEXEC;
|
|
+ protection_map[3] = PAGE_COPY_NOEXEC;
|
|
protection_map[4] = PAGE_READONLY;
|
|
protection_map[5] = PAGE_READONLY;
|
|
protection_map[6] = PAGE_COPY;
|
|
protection_map[7] = PAGE_COPY;
|
|
protection_map[8] = PAGE_NONE;
|
|
- protection_map[9] = PAGE_READONLY;
|
|
- protection_map[10] = PAGE_SHARED;
|
|
- protection_map[11] = PAGE_SHARED;
|
|
+ protection_map[9] = PAGE_READONLY_NOEXEC;
|
|
+ protection_map[10] = PAGE_SHARED_NOEXEC;
|
|
+ protection_map[11] = PAGE_SHARED_NOEXEC;
|
|
protection_map[12] = PAGE_READONLY;
|
|
protection_map[13] = PAGE_READONLY;
|
|
protection_map[14] = PAGE_SHARED;
|
|
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
|
|
index cbef74e..c38fead 100644
|
|
--- a/arch/sparc/mm/srmmu.c
|
|
+++ b/arch/sparc/mm/srmmu.c
|
|
@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
|
|
PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
|
|
BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
|
|
BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
|
|
+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
|
|
+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
|
|
+#endif
|
|
+
|
|
BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
|
|
page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
|
|
|
|
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
|
|
index f4500c6..889656c 100644
|
|
--- a/arch/tile/include/asm/atomic_64.h
|
|
+++ b/arch/tile/include/asm/atomic_64.h
|
|
@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
|
|
|
|
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
|
|
|
|
+#define atomic64_read_unchecked(v) atomic64_read(v)
|
|
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
|
|
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
|
|
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
|
|
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
|
|
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
|
|
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
|
|
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
|
|
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
|
|
+
|
|
/* Atomic dec and inc don't implement barrier, so provide them if needed. */
|
|
#define smp_mb__before_atomic_dec() smp_mb()
|
|
#define smp_mb__after_atomic_dec() smp_mb()
|
|
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
|
|
index ef34d2caa..d6ce60c 100644
|
|
--- a/arch/tile/include/asm/uaccess.h
|
|
+++ b/arch/tile/include/asm/uaccess.h
|
|
@@ -361,9 +361,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
|
|
const void __user *from,
|
|
unsigned long n)
|
|
{
|
|
- int sz = __compiletime_object_size(to);
|
|
+ size_t sz = __compiletime_object_size(to);
|
|
|
|
- if (likely(sz == -1 || sz >= n))
|
|
+ if (likely(sz == (size_t)-1 || sz >= n))
|
|
n = _copy_from_user(to, from, n);
|
|
else
|
|
copy_from_user_overflow();
|
|
diff --git a/arch/um/Makefile b/arch/um/Makefile
|
|
index 55c0661..86ad413 100644
|
|
--- a/arch/um/Makefile
|
|
+++ b/arch/um/Makefile
|
|
@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
|
|
$(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
|
|
$(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
|
|
|
|
+ifdef CONSTIFY_PLUGIN
|
|
+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
|
|
+endif
|
|
+
|
|
#This will adjust *FLAGS accordingly to the platform.
|
|
include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
|
|
|
|
diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
|
|
index 6c03acd..a5e0215 100644
|
|
--- a/arch/um/include/asm/kmap_types.h
|
|
+++ b/arch/um/include/asm/kmap_types.h
|
|
@@ -23,6 +23,7 @@ enum km_type {
|
|
KM_IRQ1,
|
|
KM_SOFTIRQ0,
|
|
KM_SOFTIRQ1,
|
|
+ KM_CLEARPAGE,
|
|
KM_TYPE_NR
|
|
};
|
|
|
|
diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
|
|
index 7cfc3ce..cbd1a58 100644
|
|
--- a/arch/um/include/asm/page.h
|
|
+++ b/arch/um/include/asm/page.h
|
|
@@ -14,6 +14,9 @@
|
|
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
|
|
#define PAGE_MASK (~(PAGE_SIZE-1))
|
|
|
|
+#define ktla_ktva(addr) (addr)
|
|
+#define ktva_ktla(addr) (addr)
|
|
+
|
|
#ifndef __ASSEMBLY__
|
|
|
|
struct page;
|
|
diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
|
|
index 0032f92..cd151e0 100644
|
|
--- a/arch/um/include/asm/pgtable-3level.h
|
|
+++ b/arch/um/include/asm/pgtable-3level.h
|
|
@@ -58,6 +58,7 @@
|
|
#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
|
|
#define pud_populate(mm, pud, pmd) \
|
|
set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
|
|
+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
|
|
|
|
#ifdef CONFIG_64BIT
|
|
#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
|
|
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
|
|
index 2b73ded..804f540 100644
|
|
--- a/arch/um/kernel/process.c
|
|
+++ b/arch/um/kernel/process.c
|
|
@@ -404,22 +404,6 @@ int singlestepping(void * t)
|
|
return 2;
|
|
}
|
|
|
|
-/*
|
|
- * Only x86 and x86_64 have an arch_align_stack().
|
|
- * All other arches have "#define arch_align_stack(x) (x)"
|
|
- * in their asm/system.h
|
|
- * As this is included in UML from asm-um/system-generic.h,
|
|
- * we can use it to behave as the subarch does.
|
|
- */
|
|
-#ifndef arch_align_stack
|
|
-unsigned long arch_align_stack(unsigned long sp)
|
|
-{
|
|
- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
|
|
- sp -= get_random_int() % 8192;
|
|
- return sp & ~0xf;
|
|
-}
|
|
-#endif
|
|
-
|
|
unsigned long get_wchan(struct task_struct *p)
|
|
{
|
|
unsigned long stack_page, sp, ip;
|
|
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
|
|
index efb991f..1787f5f 100644
|
|
--- a/arch/x86/Kconfig
|
|
+++ b/arch/x86/Kconfig
|
|
@@ -231,7 +231,7 @@ config X86_HT
|
|
|
|
config X86_32_LAZY_GS
|
|
def_bool y
|
|
- depends on X86_32 && !CC_STACKPROTECTOR
|
|
+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
|
|
|
|
config ARCH_HWEIGHT_CFLAGS
|
|
string
|
|
@@ -1152,7 +1152,7 @@ config PAGE_OFFSET
|
|
hex
|
|
default 0xB0000000 if VMSPLIT_3G_OPT
|
|
default 0x80000000 if VMSPLIT_2G
|
|
- default 0x78000000 if VMSPLIT_2G_OPT
|
|
+ default 0x70000000 if VMSPLIT_2G_OPT
|
|
default 0x40000000 if VMSPLIT_1G
|
|
default 0xC0000000
|
|
depends on X86_32
|
|
@@ -1538,6 +1538,7 @@ config SECCOMP
|
|
|
|
config CC_STACKPROTECTOR
|
|
bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
|
|
+ depends on X86_64 || !PAX_MEMORY_UDEREF
|
|
---help---
|
|
This option turns on the -fstack-protector GCC feature. This
|
|
feature puts, at the beginning of functions, a canary value on
|
|
@@ -1595,6 +1596,7 @@ config KEXEC_JUMP
|
|
config PHYSICAL_START
|
|
hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
|
|
default "0x1000000"
|
|
+ range 0x400000 0x40000000
|
|
---help---
|
|
This gives the physical address where the kernel is loaded.
|
|
|
|
@@ -1658,6 +1660,7 @@ config X86_NEED_RELOCS
|
|
config PHYSICAL_ALIGN
|
|
hex "Alignment value to which kernel should be aligned" if X86_32
|
|
default "0x1000000"
|
|
+ range 0x400000 0x1000000 if PAX_KERNEXEC
|
|
range 0x2000 0x1000000
|
|
---help---
|
|
This value puts the alignment restrictions on physical address
|
|
@@ -1689,7 +1692,7 @@ config HOTPLUG_CPU
|
|
Say N if you want to disable CPU hotplug.
|
|
|
|
config COMPAT_VDSO
|
|
- def_bool y
|
|
+ def_bool n
|
|
prompt "Compat VDSO support"
|
|
depends on X86_32 || IA32_EMULATION
|
|
---help---
|
|
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
|
|
index 706e12e..62e4feb 100644
|
|
--- a/arch/x86/Kconfig.cpu
|
|
+++ b/arch/x86/Kconfig.cpu
|
|
@@ -334,7 +334,7 @@ config X86_PPRO_FENCE
|
|
|
|
config X86_F00F_BUG
|
|
def_bool y
|
|
- depends on M586MMX || M586TSC || M586 || M486 || M386
|
|
+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
|
|
|
|
config X86_INVD_BUG
|
|
def_bool y
|
|
@@ -358,7 +358,7 @@ config X86_POPAD_OK
|
|
|
|
config X86_ALIGNMENT_16
|
|
def_bool y
|
|
- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
|
|
+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
|
|
|
|
config X86_INTEL_USERCOPY
|
|
def_bool y
|
|
@@ -404,7 +404,7 @@ config X86_CMPXCHG64
|
|
# generates cmov.
|
|
config X86_CMOV
|
|
def_bool y
|
|
- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
|
|
+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
|
|
|
|
config X86_MINIMUM_CPU_FAMILY
|
|
int
|
|
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
|
|
index e46c214..46d6e8c 100644
|
|
--- a/arch/x86/Kconfig.debug
|
|
+++ b/arch/x86/Kconfig.debug
|
|
@@ -84,7 +84,7 @@ config X86_PTDUMP
|
|
config DEBUG_RODATA
|
|
bool "Write protect kernel read-only data structures"
|
|
default y
|
|
- depends on DEBUG_KERNEL
|
|
+ depends on DEBUG_KERNEL && BROKEN
|
|
---help---
|
|
Mark the kernel read-only data as write-protected in the pagetables,
|
|
in order to catch accidental (and incorrect) writes to such const
|
|
@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
|
|
|
|
config DEBUG_SET_MODULE_RONX
|
|
bool "Set loadable kernel module data as NX and text as RO"
|
|
- depends on MODULES
|
|
+ depends on MODULES && BROKEN
|
|
---help---
|
|
This option helps catch unintended modifications to loadable
|
|
kernel module's text and read-only data. It also prevents execution
|
|
@@ -275,7 +275,7 @@ config OPTIMIZE_INLINING
|
|
|
|
config DEBUG_STRICT_USER_COPY_CHECKS
|
|
bool "Strict copy size checks"
|
|
- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
|
|
+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && BROKEN
|
|
---help---
|
|
Enabling this option turns a certain set of sanity checks for user
|
|
copy operations into compile time failures.
|
|
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
|
|
index f1276aa..8eed88a 100644
|
|
--- a/arch/x86/Makefile
|
|
+++ b/arch/x86/Makefile
|
|
@@ -46,6 +46,7 @@ else
|
|
UTS_MACHINE := x86_64
|
|
CHECKFLAGS += -D__x86_64__ -m64
|
|
|
|
+ biarch := $(call cc-option,-m64)
|
|
KBUILD_AFLAGS += -m64
|
|
KBUILD_CFLAGS += -m64
|
|
|
|
@@ -222,3 +223,12 @@ define archhelp
|
|
echo ' FDARGS="..." arguments for the booted kernel'
|
|
echo ' FDINITRD=file initrd for the booted kernel'
|
|
endef
|
|
+
|
|
+define OLD_LD
|
|
+
|
|
+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
|
|
+*** Please upgrade your binutils to 2.18 or newer
|
|
+endef
|
|
+
|
|
+archprepare:
|
|
+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
|
|
diff --git a/arch/x86/boot/Makefile.rej b/arch/x86/boot/Makefile.rej
|
|
new file mode 100644
|
|
index 0000000..f90340c
|
|
--- /dev/null
|
|
+++ b/arch/x86/boot/Makefile.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- arch/x86/boot/Makefile 2012-05-21 11:32:56.723927619 +0200
|
|
++++ arch/x86/boot/Makefile 2012-05-21 12:10:09.268048874 +0200
|
|
+@@ -64,6 +64,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
|
|
+ $(call cc-option, -fno-stack-protector) \
|
|
+ $(call cc-option, -mpreferred-stack-boundary=2)
|
|
+ KBUILD_CFLAGS += $(call cc-option, -m32)
|
|
++ifdef CONSTIFY_PLUGIN
|
|
++KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
|
|
++endif
|
|
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
|
|
+ GCOV_PROFILE := n
|
|
diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
|
|
index 878e4b9..20537ab 100644
|
|
--- a/arch/x86/boot/bitops.h
|
|
+++ b/arch/x86/boot/bitops.h
|
|
@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
|
|
u8 v;
|
|
const u32 *p = (const u32 *)addr;
|
|
|
|
- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
|
|
+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
|
|
return v;
|
|
}
|
|
|
|
@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
|
|
|
|
static inline void set_bit(int nr, void *addr)
|
|
{
|
|
- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
|
|
+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
|
|
}
|
|
|
|
#endif /* BOOT_BITOPS_H */
|
|
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
|
|
index 18997e5..83d9c67 100644
|
|
--- a/arch/x86/boot/boot.h
|
|
+++ b/arch/x86/boot/boot.h
|
|
@@ -85,7 +85,7 @@ static inline void io_delay(void)
|
|
static inline u16 ds(void)
|
|
{
|
|
u16 seg;
|
|
- asm("movw %%ds,%0" : "=rm" (seg));
|
|
+ asm volatile("movw %%ds,%0" : "=rm" (seg));
|
|
return seg;
|
|
}
|
|
|
|
@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
|
|
static inline int memcmp(const void *s1, const void *s2, size_t len)
|
|
{
|
|
u8 diff;
|
|
- asm("repe; cmpsb; setnz %0"
|
|
+ asm volatile("repe; cmpsb; setnz %0"
|
|
: "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
|
|
return diff;
|
|
}
|
|
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
|
|
index 7194d9f..52cdadb 100644
|
|
--- a/arch/x86/boot/compressed/Makefile
|
|
+++ b/arch/x86/boot/compressed/Makefile
|
|
@@ -15,6 +15,9 @@ KBUILD_CFLAGS += $(cflags-y)
|
|
KBUILD_CFLAGS += -mno-mmx -mno-sse
|
|
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
|
|
KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
|
|
+ifdef CONSTIFY_PLUGIN
|
|
+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
|
|
+endif
|
|
|
|
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
|
|
GCOV_PROFILE := n
|
|
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
|
|
index 8bb9070..87a6626 100644
|
|
--- a/arch/x86/boot/compressed/eboot.c
|
|
+++ b/arch/x86/boot/compressed/eboot.c
|
|
@@ -124,7 +124,6 @@ static efi_status_t high_alloc(unsigned long size, unsigned long align,
|
|
*addr = max_addr;
|
|
}
|
|
|
|
-free_pool:
|
|
efi_call_phys1(sys_table->boottime->free_pool, map);
|
|
|
|
fail:
|
|
@@ -188,7 +187,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
|
|
if (i == map_size / desc_size)
|
|
status = EFI_NOT_FOUND;
|
|
|
|
-free_pool:
|
|
efi_call_phys1(sys_table->boottime->free_pool, map);
|
|
fail:
|
|
return status;
|
|
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
|
|
index c85e3ac..6f5aa80 100644
|
|
--- a/arch/x86/boot/compressed/head_32.S
|
|
+++ b/arch/x86/boot/compressed/head_32.S
|
|
@@ -106,7 +106,7 @@ preferred_addr:
|
|
notl %eax
|
|
andl %eax, %ebx
|
|
#else
|
|
- movl $LOAD_PHYSICAL_ADDR, %ebx
|
|
+ movl $____LOAD_PHYSICAL_ADDR, %ebx
|
|
#endif
|
|
|
|
/* Target address to relocate to for decompression */
|
|
@@ -192,7 +192,7 @@ relocated:
|
|
* and where it was actually loaded.
|
|
*/
|
|
movl %ebp, %ebx
|
|
- subl $LOAD_PHYSICAL_ADDR, %ebx
|
|
+ subl $____LOAD_PHYSICAL_ADDR, %ebx
|
|
jz 2f /* Nothing to be done if loaded at compiled addr. */
|
|
/*
|
|
* Process relocations.
|
|
@@ -200,8 +200,7 @@ relocated:
|
|
|
|
1: subl $4, %edi
|
|
movl (%edi), %ecx
|
|
- testl %ecx, %ecx
|
|
- jz 2f
|
|
+ jecxz 2f
|
|
addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
|
|
jmp 1b
|
|
2:
|
|
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
|
|
index 87e03a1..0d94c76 100644
|
|
--- a/arch/x86/boot/compressed/head_64.S
|
|
+++ b/arch/x86/boot/compressed/head_64.S
|
|
@@ -91,7 +91,7 @@ ENTRY(startup_32)
|
|
notl %eax
|
|
andl %eax, %ebx
|
|
#else
|
|
- movl $LOAD_PHYSICAL_ADDR, %ebx
|
|
+ movl $____LOAD_PHYSICAL_ADDR, %ebx
|
|
#endif
|
|
|
|
/* Target address to relocate to for decompression */
|
|
@@ -263,7 +263,7 @@ preferred_addr:
|
|
notq %rax
|
|
andq %rax, %rbp
|
|
#else
|
|
- movq $LOAD_PHYSICAL_ADDR, %rbp
|
|
+ movq $____LOAD_PHYSICAL_ADDR, %rbp
|
|
#endif
|
|
|
|
/* Target address to relocate to for decompression */
|
|
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
|
|
index 7116dcb..d9ae1d7 100644
|
|
--- a/arch/x86/boot/compressed/misc.c
|
|
+++ b/arch/x86/boot/compressed/misc.c
|
|
@@ -310,7 +310,7 @@ static void parse_elf(void *output)
|
|
case PT_LOAD:
|
|
#ifdef CONFIG_RELOCATABLE
|
|
dest = output;
|
|
- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
|
|
+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
|
|
#else
|
|
dest = (void *)(phdr->p_paddr);
|
|
#endif
|
|
@@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
|
|
error("Destination address too large");
|
|
#endif
|
|
#ifndef CONFIG_RELOCATABLE
|
|
- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
|
|
+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
|
|
error("Wrong destination address");
|
|
#endif
|
|
|
|
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
|
|
index 4d3ff03..e4972ff 100644
|
|
--- a/arch/x86/boot/cpucheck.c
|
|
+++ b/arch/x86/boot/cpucheck.c
|
|
@@ -74,7 +74,7 @@ static int has_fpu(void)
|
|
u16 fcw = -1, fsw = -1;
|
|
u32 cr0;
|
|
|
|
- asm("movl %%cr0,%0" : "=r" (cr0));
|
|
+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
|
|
if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
|
|
cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
|
|
asm volatile("movl %0,%%cr0" : : "r" (cr0));
|
|
@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
|
|
{
|
|
u32 f0, f1;
|
|
|
|
- asm("pushfl ; "
|
|
+ asm volatile("pushfl ; "
|
|
"pushfl ; "
|
|
"popl %0 ; "
|
|
"movl %0,%1 ; "
|
|
@@ -115,7 +115,7 @@ static void get_flags(void)
|
|
set_bit(X86_FEATURE_FPU, cpu.flags);
|
|
|
|
if (has_eflag(X86_EFLAGS_ID)) {
|
|
- asm("cpuid"
|
|
+ asm volatile("cpuid"
|
|
: "=a" (max_intel_level),
|
|
"=b" (cpu_vendor[0]),
|
|
"=d" (cpu_vendor[1]),
|
|
@@ -124,7 +124,7 @@ static void get_flags(void)
|
|
|
|
if (max_intel_level >= 0x00000001 &&
|
|
max_intel_level <= 0x0000ffff) {
|
|
- asm("cpuid"
|
|
+ asm volatile("cpuid"
|
|
: "=a" (tfms),
|
|
"=c" (cpu.flags[4]),
|
|
"=d" (cpu.flags[0])
|
|
@@ -136,7 +136,7 @@ static void get_flags(void)
|
|
cpu.model += ((tfms >> 16) & 0xf) << 4;
|
|
}
|
|
|
|
- asm("cpuid"
|
|
+ asm volatile("cpuid"
|
|
: "=a" (max_amd_level)
|
|
: "a" (0x80000000)
|
|
: "ebx", "ecx", "edx");
|
|
@@ -144,7 +144,7 @@ static void get_flags(void)
|
|
if (max_amd_level >= 0x80000001 &&
|
|
max_amd_level <= 0x8000ffff) {
|
|
u32 eax = 0x80000001;
|
|
- asm("cpuid"
|
|
+ asm volatile("cpuid"
|
|
: "+a" (eax),
|
|
"=c" (cpu.flags[6]),
|
|
"=d" (cpu.flags[1])
|
|
@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
|
|
u32 ecx = MSR_K7_HWCR;
|
|
u32 eax, edx;
|
|
|
|
- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
|
|
+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
|
|
eax &= ~(1 << 15);
|
|
- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
|
|
+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
|
|
|
|
get_flags(); /* Make sure it really did something */
|
|
err = check_flags();
|
|
@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
|
|
u32 ecx = MSR_VIA_FCR;
|
|
u32 eax, edx;
|
|
|
|
- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
|
|
+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
|
|
eax |= (1<<1)|(1<<7);
|
|
- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
|
|
+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
|
|
|
|
set_bit(X86_FEATURE_CX8, cpu.flags);
|
|
err = check_flags();
|
|
@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
|
|
u32 eax, edx;
|
|
u32 level = 1;
|
|
|
|
- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
|
|
- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
|
|
- asm("cpuid"
|
|
+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
|
|
+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
|
|
+ asm volatile("cpuid"
|
|
: "+a" (level), "=d" (cpu.flags[0])
|
|
: : "ecx", "ebx");
|
|
- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
|
|
+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
|
|
|
|
err = check_flags();
|
|
}
|
|
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
|
|
index f1bbeeb..e58f183 100644
|
|
--- a/arch/x86/boot/header.S
|
|
+++ b/arch/x86/boot/header.S
|
|
@@ -372,10 +372,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
|
|
# single linked list of
|
|
# struct setup_data
|
|
|
|
-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
|
|
+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
|
|
|
|
#define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
|
|
+#else
|
|
#define VO_INIT_SIZE (VO__end - VO__text)
|
|
+#endif
|
|
#if ZO_INIT_SIZE > VO_INIT_SIZE
|
|
#define INIT_SIZE ZO_INIT_SIZE
|
|
#else
|
|
diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
|
|
index db75d07..8e6d0af 100644
|
|
--- a/arch/x86/boot/memory.c
|
|
+++ b/arch/x86/boot/memory.c
|
|
@@ -19,7 +19,7 @@
|
|
|
|
static int detect_memory_e820(void)
|
|
{
|
|
- int count = 0;
|
|
+ unsigned int count = 0;
|
|
struct biosregs ireg, oreg;
|
|
struct e820entry *desc = boot_params.e820_map;
|
|
static struct e820entry buf; /* static so it is zeroed */
|
|
diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
|
|
index 11e8c6e..fdbb1ed 100644
|
|
--- a/arch/x86/boot/video-vesa.c
|
|
+++ b/arch/x86/boot/video-vesa.c
|
|
@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
|
|
|
|
boot_params.screen_info.vesapm_seg = oreg.es;
|
|
boot_params.screen_info.vesapm_off = oreg.di;
|
|
+ boot_params.screen_info.vesapm_size = oreg.cx;
|
|
}
|
|
|
|
/*
|
|
diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
|
|
index 43eda28..5ab5fdb 100644
|
|
--- a/arch/x86/boot/video.c
|
|
+++ b/arch/x86/boot/video.c
|
|
@@ -96,7 +96,7 @@ static void store_mode_params(void)
|
|
static unsigned int get_entry(void)
|
|
{
|
|
char entry_buf[4];
|
|
- int i, len = 0;
|
|
+ unsigned int i, len = 0;
|
|
int key;
|
|
unsigned int v;
|
|
|
|
diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
|
|
index 5b577d5..3c1fed4 100644
|
|
--- a/arch/x86/crypto/aes-x86_64-asm_64.S
|
|
+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
|
|
@@ -8,6 +8,8 @@
|
|
* including this sentence is retained in full.
|
|
*/
|
|
|
|
+#include <asm/alternative-asm.h>
|
|
+
|
|
.extern crypto_ft_tab
|
|
.extern crypto_it_tab
|
|
.extern crypto_fl_tab
|
|
@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
|
|
je B192; \
|
|
leaq 32(r9),r9;
|
|
|
|
+#define ret pax_force_retaddr 0, 1; ret
|
|
+
|
|
#define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
|
|
movq r1,r2; \
|
|
movq r3,r4; \
|
|
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
|
|
index 3470624..201259d 100644
|
|
--- a/arch/x86/crypto/aesni-intel_asm.S
|
|
+++ b/arch/x86/crypto/aesni-intel_asm.S
|
|
@@ -31,6 +31,7 @@
|
|
|
|
#include <linux/linkage.h>
|
|
#include <asm/inst.h>
|
|
+#include <asm/alternative-asm.h>
|
|
|
|
#ifdef __x86_64__
|
|
.data
|
|
@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
|
|
pop %r14
|
|
pop %r13
|
|
pop %r12
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
+ENDPROC(aesni_gcm_dec)
|
|
|
|
|
|
/*****************************************************************************
|
|
@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
|
|
pop %r14
|
|
pop %r13
|
|
pop %r12
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
+ENDPROC(aesni_gcm_enc)
|
|
|
|
#endif
|
|
|
|
@@ -1714,6 +1719,7 @@ _key_expansion_256a:
|
|
pxor %xmm1, %xmm0
|
|
movaps %xmm0, (TKEYP)
|
|
add $0x10, TKEYP
|
|
+ pax_force_retaddr_bts
|
|
ret
|
|
|
|
.align 4
|
|
@@ -1738,6 +1744,7 @@ _key_expansion_192a:
|
|
shufps $0b01001110, %xmm2, %xmm1
|
|
movaps %xmm1, 0x10(TKEYP)
|
|
add $0x20, TKEYP
|
|
+ pax_force_retaddr_bts
|
|
ret
|
|
|
|
.align 4
|
|
@@ -1757,6 +1764,7 @@ _key_expansion_192b:
|
|
|
|
movaps %xmm0, (TKEYP)
|
|
add $0x10, TKEYP
|
|
+ pax_force_retaddr_bts
|
|
ret
|
|
|
|
.align 4
|
|
@@ -1769,6 +1777,7 @@ _key_expansion_256b:
|
|
pxor %xmm1, %xmm2
|
|
movaps %xmm2, (TKEYP)
|
|
add $0x10, TKEYP
|
|
+ pax_force_retaddr_bts
|
|
ret
|
|
|
|
/*
|
|
@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
|
|
#ifndef __x86_64__
|
|
popl KEYP
|
|
#endif
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
+ENDPROC(aesni_set_key)
|
|
|
|
/*
|
|
* void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
|
|
@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
|
|
popl KLEN
|
|
popl KEYP
|
|
#endif
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
+ENDPROC(aesni_enc)
|
|
|
|
/*
|
|
* _aesni_enc1: internal ABI
|
|
@@ -1959,6 +1972,7 @@ _aesni_enc1:
|
|
AESENC KEY STATE
|
|
movaps 0x70(TKEYP), KEY
|
|
AESENCLAST KEY STATE
|
|
+ pax_force_retaddr_bts
|
|
ret
|
|
|
|
/*
|
|
@@ -2067,6 +2081,7 @@ _aesni_enc4:
|
|
AESENCLAST KEY STATE2
|
|
AESENCLAST KEY STATE3
|
|
AESENCLAST KEY STATE4
|
|
+ pax_force_retaddr_bts
|
|
ret
|
|
|
|
/*
|
|
@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
|
|
popl KLEN
|
|
popl KEYP
|
|
#endif
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
+ENDPROC(aesni_dec)
|
|
|
|
/*
|
|
* _aesni_dec1: internal ABI
|
|
@@ -2146,6 +2163,7 @@ _aesni_dec1:
|
|
AESDEC KEY STATE
|
|
movaps 0x70(TKEYP), KEY
|
|
AESDECLAST KEY STATE
|
|
+ pax_force_retaddr_bts
|
|
ret
|
|
|
|
/*
|
|
@@ -2254,6 +2272,7 @@ _aesni_dec4:
|
|
AESDECLAST KEY STATE2
|
|
AESDECLAST KEY STATE3
|
|
AESDECLAST KEY STATE4
|
|
+ pax_force_retaddr_bts
|
|
ret
|
|
|
|
/*
|
|
@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
|
|
popl KEYP
|
|
popl LEN
|
|
#endif
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
+ENDPROC(aesni_ecb_enc)
|
|
|
|
/*
|
|
* void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
|
|
@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
|
|
popl KEYP
|
|
popl LEN
|
|
#endif
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
+ENDPROC(aesni_ecb_dec)
|
|
|
|
/*
|
|
* void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
|
|
@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
|
|
popl LEN
|
|
popl IVP
|
|
#endif
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
+ENDPROC(aesni_cbc_enc)
|
|
|
|
/*
|
|
* void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
|
|
@@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
|
|
popl LEN
|
|
popl IVP
|
|
#endif
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
+ENDPROC(aesni_cbc_dec)
|
|
|
|
#ifdef __x86_64__
|
|
.align 16
|
|
@@ -2526,6 +2553,7 @@ _aesni_inc_init:
|
|
mov $1, TCTR_LOW
|
|
MOVQ_R64_XMM TCTR_LOW INC
|
|
MOVQ_R64_XMM CTR TCTR_LOW
|
|
+ pax_force_retaddr_bts
|
|
ret
|
|
|
|
/*
|
|
@@ -2554,6 +2582,7 @@ _aesni_inc:
|
|
.Linc_low:
|
|
movaps CTR, IV
|
|
PSHUFB_XMM BSWAP_MASK IV
|
|
+ pax_force_retaddr_bts
|
|
ret
|
|
|
|
/*
|
|
@@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
|
|
.Lctr_enc_ret:
|
|
movups IV, (IVP)
|
|
.Lctr_enc_just_ret:
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
+ENDPROC(aesni_ctr_enc)
|
|
#endif
|
|
diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
|
|
index 391d245..67f35c2 100644
|
|
--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
|
|
+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
|
|
@@ -20,6 +20,8 @@
|
|
*
|
|
*/
|
|
|
|
+#include <asm/alternative-asm.h>
|
|
+
|
|
.file "blowfish-x86_64-asm.S"
|
|
.text
|
|
|
|
@@ -151,9 +153,11 @@ __blowfish_enc_blk:
|
|
jnz __enc_xor;
|
|
|
|
write_block();
|
|
+ pax_force_retaddr 0, 1
|
|
ret;
|
|
__enc_xor:
|
|
xor_block();
|
|
+ pax_force_retaddr 0, 1
|
|
ret;
|
|
|
|
.align 8
|
|
@@ -188,6 +192,7 @@ blowfish_dec_blk:
|
|
|
|
movq %r11, %rbp;
|
|
|
|
+ pax_force_retaddr 0, 1
|
|
ret;
|
|
|
|
/**********************************************************************
|
|
@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
|
|
|
|
popq %rbx;
|
|
popq %rbp;
|
|
+ pax_force_retaddr 0, 1
|
|
ret;
|
|
|
|
__enc_xor4:
|
|
@@ -349,6 +355,7 @@ __enc_xor4:
|
|
|
|
popq %rbx;
|
|
popq %rbp;
|
|
+ pax_force_retaddr 0, 1
|
|
ret;
|
|
|
|
.align 8
|
|
@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
|
|
popq %rbx;
|
|
popq %rbp;
|
|
|
|
+ pax_force_retaddr 0, 1
|
|
ret;
|
|
|
|
diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
|
|
index 0b33743..7a56206 100644
|
|
--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
|
|
+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
|
|
@@ -20,6 +20,8 @@
|
|
*
|
|
*/
|
|
|
|
+#include <asm/alternative-asm.h>
|
|
+
|
|
.file "camellia-x86_64-asm_64.S"
|
|
.text
|
|
|
|
@@ -229,12 +231,14 @@ __enc_done:
|
|
enc_outunpack(mov, RT1);
|
|
|
|
movq RRBP, %rbp;
|
|
+ pax_force_retaddr 0, 1
|
|
ret;
|
|
|
|
__enc_xor:
|
|
enc_outunpack(xor, RT1);
|
|
|
|
movq RRBP, %rbp;
|
|
+ pax_force_retaddr 0, 1
|
|
ret;
|
|
|
|
.global camellia_dec_blk;
|
|
@@ -275,6 +279,7 @@ __dec_rounds16:
|
|
dec_outunpack();
|
|
|
|
movq RRBP, %rbp;
|
|
+ pax_force_retaddr 0, 1
|
|
ret;
|
|
|
|
/**********************************************************************
|
|
@@ -468,6 +473,7 @@ __enc2_done:
|
|
|
|
movq RRBP, %rbp;
|
|
popq %rbx;
|
|
+ pax_force_retaddr 0, 1
|
|
ret;
|
|
|
|
__enc2_xor:
|
|
@@ -475,6 +481,7 @@ __enc2_xor:
|
|
|
|
movq RRBP, %rbp;
|
|
popq %rbx;
|
|
+ pax_force_retaddr 0, 1
|
|
ret;
|
|
|
|
.global camellia_dec_blk_2way;
|
|
@@ -517,4 +524,5 @@ __dec2_rounds16:
|
|
|
|
movq RRBP, %rbp;
|
|
movq RXOR, %rbx;
|
|
+ pax_force_retaddr 0, 1
|
|
ret;
|
|
diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
|
|
index 6214a9b..1f4fc9a 100644
|
|
--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
|
|
+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
|
|
@@ -1,3 +1,5 @@
|
|
+#include <asm/alternative-asm.h>
|
|
+
|
|
# enter ECRYPT_encrypt_bytes
|
|
.text
|
|
.p2align 5
|
|
@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
|
|
add %r11,%rsp
|
|
mov %rdi,%rax
|
|
mov %rsi,%rdx
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
# bytesatleast65:
|
|
._bytesatleast65:
|
|
@@ -891,6 +894,7 @@ ECRYPT_keysetup:
|
|
add %r11,%rsp
|
|
mov %rdi,%rax
|
|
mov %rsi,%rdx
|
|
+ pax_force_retaddr
|
|
ret
|
|
# enter ECRYPT_ivsetup
|
|
.text
|
|
@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
|
|
add %r11,%rsp
|
|
mov %rdi,%rax
|
|
mov %rsi,%rdx
|
|
+ pax_force_retaddr
|
|
ret
|
|
diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
|
|
index 3ee1ff0..cbc568b 100644
|
|
--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
|
|
+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
|
|
@@ -24,6 +24,8 @@
|
|
*
|
|
*/
|
|
|
|
+#include <asm/alternative-asm.h>
|
|
+
|
|
.file "serpent-sse2-x86_64-asm_64.S"
|
|
.text
|
|
|
|
@@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
|
|
write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
|
|
write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
|
|
|
|
+ pax_force_retaddr
|
|
ret;
|
|
|
|
__enc_xor8:
|
|
xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
|
|
xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
|
|
|
|
+ pax_force_retaddr
|
|
ret;
|
|
|
|
.align 8
|
|
@@ -755,4 +759,5 @@ serpent_dec_blk_8way:
|
|
write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
|
|
write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
|
|
|
|
+ pax_force_retaddr
|
|
ret;
|
|
diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
|
|
index b2c2f57..8470cab 100644
|
|
--- a/arch/x86/crypto/sha1_ssse3_asm.S
|
|
+++ b/arch/x86/crypto/sha1_ssse3_asm.S
|
|
@@ -28,6 +28,8 @@
|
|
* (at your option) any later version.
|
|
*/
|
|
|
|
+#include <asm/alternative-asm.h>
|
|
+
|
|
#define CTX %rdi // arg1
|
|
#define BUF %rsi // arg2
|
|
#define CNT %rdx // arg3
|
|
@@ -104,6 +106,7 @@
|
|
pop %r12
|
|
pop %rbp
|
|
pop %rbx
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
|
|
.size \name, .-\name
|
|
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
|
|
index 5b012a2..36d5364 100644
|
|
--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
|
|
+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
|
|
@@ -20,6 +20,8 @@
|
|
*
|
|
*/
|
|
|
|
+#include <asm/alternative-asm.h>
|
|
+
|
|
.file "twofish-x86_64-asm-3way.S"
|
|
.text
|
|
|
|
@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
|
|
popq %r13;
|
|
popq %r14;
|
|
popq %r15;
|
|
+ pax_force_retaddr 0, 1
|
|
ret;
|
|
|
|
__enc_xor3:
|
|
@@ -271,6 +274,7 @@ __enc_xor3:
|
|
popq %r13;
|
|
popq %r14;
|
|
popq %r15;
|
|
+ pax_force_retaddr 0, 1
|
|
ret;
|
|
|
|
.global twofish_dec_blk_3way
|
|
@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
|
|
popq %r13;
|
|
popq %r14;
|
|
popq %r15;
|
|
+ pax_force_retaddr 0, 1
|
|
ret;
|
|
|
|
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
|
|
index 7bcf3fc..f53832f 100644
|
|
--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
|
|
+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
|
|
@@ -21,6 +21,7 @@
|
|
.text
|
|
|
|
#include <asm/asm-offsets.h>
|
|
+#include <asm/alternative-asm.h>
|
|
|
|
#define a_offset 0
|
|
#define b_offset 4
|
|
@@ -268,6 +269,7 @@ twofish_enc_blk:
|
|
|
|
popq R1
|
|
movq $1,%rax
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
|
|
twofish_dec_blk:
|
|
@@ -319,4 +321,5 @@ twofish_dec_blk:
|
|
|
|
popq R1
|
|
movq $1,%rax
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
|
|
index b154661..9c7b359 100644
|
|
--- a/arch/x86/ia32/ia32_signal.c
|
|
+++ b/arch/x86/ia32/ia32_signal.c
|
|
@@ -172,7 +172,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
|
|
}
|
|
seg = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
|
|
+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
|
|
set_fs(seg);
|
|
if (ret >= 0 && uoss_ptr) {
|
|
if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
|
|
@@ -373,7 +373,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
|
|
*/
|
|
static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
|
|
size_t frame_size,
|
|
- void **fpstate)
|
|
+ void __user **fpstate)
|
|
{
|
|
unsigned long sp;
|
|
|
|
@@ -394,7 +394,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
|
|
|
|
if (used_math()) {
|
|
sp = sp - sig_xstate_ia32_size;
|
|
- *fpstate = (struct _fpstate_ia32 *) sp;
|
|
+ *fpstate = (struct _fpstate_ia32 __user *) sp;
|
|
if (save_i387_xstate_ia32(*fpstate) < 0)
|
|
return (void __user *) -1L;
|
|
}
|
|
@@ -402,7 +402,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
|
|
sp -= frame_size;
|
|
/* Align the stack pointer according to the i386 ABI,
|
|
* i.e. so that on function entry ((sp + 4) & 15) == 0. */
|
|
- sp = ((sp + 4) & -16ul) - 4;
|
|
+ sp = ((sp - 12) & -16ul) - 4;
|
|
return (void __user *) sp;
|
|
}
|
|
|
|
@@ -460,7 +460,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
|
|
* These are actually not used anymore, but left because some
|
|
* gdb versions depend on them as a marker.
|
|
*/
|
|
- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
|
|
+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
|
|
} put_user_catch(err);
|
|
|
|
if (err)
|
|
@@ -502,7 +502,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
|
0xb8,
|
|
__NR_ia32_rt_sigreturn,
|
|
0x80cd,
|
|
- 0,
|
|
+ 0
|
|
};
|
|
|
|
frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
|
|
@@ -532,16 +532,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
|
|
|
if (ka->sa.sa_flags & SA_RESTORER)
|
|
restorer = ka->sa.sa_restorer;
|
|
+ else if (current->mm->context.vdso)
|
|
+ /* Return stub is in 32bit vsyscall page */
|
|
+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
|
|
else
|
|
- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
|
|
- rt_sigreturn);
|
|
+ restorer = &frame->retcode;
|
|
put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
|
|
|
|
/*
|
|
* Not actually used anymore, but left because some gdb
|
|
* versions need it.
|
|
*/
|
|
- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
|
|
+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
|
|
} put_user_catch(err);
|
|
|
|
if (err)
|
|
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
|
|
index 66c4cae..6f3a5e3 100644
|
|
--- a/arch/x86/ia32/ia32entry.S
|
|
+++ b/arch/x86/ia32/ia32entry.S
|
|
@@ -13,8 +13,10 @@
|
|
#include <asm/thread_info.h>
|
|
#include <asm/segment.h>
|
|
#include <asm/irqflags.h>
|
|
+#include <asm/pgtable.h>
|
|
#include <linux/linkage.h>
|
|
#include <linux/err.h>
|
|
+#include <asm/alternative-asm.h>
|
|
|
|
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
|
|
#include <linux/elf-em.h>
|
|
@@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
|
|
ENDPROC(native_irq_enable_sysexit)
|
|
#endif
|
|
|
|
+ .macro pax_enter_kernel_user
|
|
+ pax_set_fptr_mask
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ call pax_enter_kernel_user
|
|
+#endif
|
|
+ .endm
|
|
+
|
|
+ .macro pax_exit_kernel_user
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ call pax_exit_kernel_user
|
|
+#endif
|
|
+#ifdef CONFIG_PAX_RANDKSTACK
|
|
+ pushq %rax
|
|
+ pushq %r11
|
|
+ call pax_randomize_kstack
|
|
+ popq %r11
|
|
+ popq %rax
|
|
+#endif
|
|
+ .endm
|
|
+
|
|
+.macro pax_erase_kstack
|
|
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
|
|
+ call pax_erase_kstack
|
|
+#endif
|
|
+.endm
|
|
+
|
|
/*
|
|
* 32bit SYSENTER instruction entry.
|
|
*
|
|
@@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
|
|
CFI_REGISTER rsp,rbp
|
|
SWAPGS_UNSAFE_STACK
|
|
movq PER_CPU_VAR(kernel_stack), %rsp
|
|
- addq $(KERNEL_STACK_OFFSET),%rsp
|
|
- /*
|
|
- * No need to follow this irqs on/off section: the syscall
|
|
- * disabled irqs, here we enable it straight after entry:
|
|
- */
|
|
- ENABLE_INTERRUPTS(CLBR_NONE)
|
|
movl %ebp,%ebp /* zero extension */
|
|
pushq_cfi $__USER32_DS
|
|
/*CFI_REL_OFFSET ss,0*/
|
|
@@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
|
|
CFI_REL_OFFSET rsp,0
|
|
pushfq_cfi
|
|
/*CFI_REL_OFFSET rflags,0*/
|
|
- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
|
|
- CFI_REGISTER rip,r10
|
|
+ orl $X86_EFLAGS_IF,(%rsp)
|
|
+ GET_THREAD_INFO(%r11)
|
|
+ movl TI_sysenter_return(%r11), %r11d
|
|
+ CFI_REGISTER rip,r11
|
|
pushq_cfi $__USER32_CS
|
|
/*CFI_REL_OFFSET cs,0*/
|
|
movl %eax, %eax
|
|
- pushq_cfi %r10
|
|
+ pushq_cfi %r11
|
|
CFI_REL_OFFSET rip,0
|
|
pushq_cfi %rax
|
|
cld
|
|
SAVE_ARGS 0,1,0
|
|
+ pax_enter_kernel_user
|
|
+ /*
|
|
+ * No need to follow this irqs on/off section: the syscall
|
|
+ * disabled irqs, here we enable it straight after entry:
|
|
+ */
|
|
+ ENABLE_INTERRUPTS(CLBR_NONE)
|
|
/* no need to do an access_ok check here because rbp has been
|
|
32bit zero extended */
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ mov $PAX_USER_SHADOW_BASE,%r11
|
|
+ add %r11,%rbp
|
|
+#endif
|
|
+
|
|
1: movl (%rbp),%ebp
|
|
.section __ex_table,"a"
|
|
.quad 1b,ia32_badarg
|
|
.previous
|
|
- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
+ GET_THREAD_INFO(%r11)
|
|
+ orl $TS_COMPAT,TI_status(%r11)
|
|
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
|
|
CFI_REMEMBER_STATE
|
|
jnz sysenter_tracesys
|
|
cmpq $(IA32_NR_syscalls-1),%rax
|
|
@@ -160,12 +197,15 @@ sysenter_do_call:
|
|
sysenter_dispatch:
|
|
call *ia32_sys_call_table(,%rax,8)
|
|
movq %rax,RAX-ARGOFFSET(%rsp)
|
|
+ GET_THREAD_INFO(%r11)
|
|
DISABLE_INTERRUPTS(CLBR_NONE)
|
|
TRACE_IRQS_OFF
|
|
- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
|
|
jnz sysexit_audit
|
|
sysexit_from_sys_call:
|
|
- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
+ pax_exit_kernel_user
|
|
+ pax_erase_kstack
|
|
+ andl $~TS_COMPAT,TI_status(%r11)
|
|
/* clear IF, that popfq doesn't enable interrupts early */
|
|
andl $~0x200,EFLAGS-R11(%rsp)
|
|
movl RIP-R11(%rsp),%edx /* User %eip */
|
|
@@ -191,6 +231,9 @@ sysexit_from_sys_call:
|
|
movl %eax,%esi /* 2nd arg: syscall number */
|
|
movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
|
|
call __audit_syscall_entry
|
|
+
|
|
+ pax_erase_kstack
|
|
+
|
|
movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
|
|
cmpq $(IA32_NR_syscalls-1),%rax
|
|
ja ia32_badsys
|
|
@@ -202,7 +245,7 @@ sysexit_from_sys_call:
|
|
.endm
|
|
|
|
.macro auditsys_exit exit
|
|
- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
|
|
jnz ia32_ret_from_sys_call
|
|
TRACE_IRQS_ON
|
|
ENABLE_INTERRUPTS(CLBR_NONE)
|
|
@@ -235,7 +278,7 @@ sysexit_audit:
|
|
|
|
sysenter_tracesys:
|
|
#ifdef CONFIG_AUDITSYSCALL
|
|
- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
|
|
jz sysenter_auditsys
|
|
#endif
|
|
SAVE_REST
|
|
@@ -243,6 +286,9 @@ sysenter_tracesys:
|
|
movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
|
|
movq %rsp,%rdi /* &pt_regs -> arg1 */
|
|
call syscall_trace_enter
|
|
+
|
|
+ pax_erase_kstack
|
|
+
|
|
LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
|
|
RESTORE_REST
|
|
cmpq $(IA32_NR_syscalls-1),%rax
|
|
@@ -274,19 +320,20 @@ ENDPROC(ia32_sysenter_target)
|
|
ENTRY(ia32_cstar_target)
|
|
CFI_STARTPROC32 simple
|
|
CFI_SIGNAL_FRAME
|
|
- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
|
|
+ CFI_DEF_CFA rsp,0
|
|
CFI_REGISTER rip,rcx
|
|
/*CFI_REGISTER rflags,r11*/
|
|
SWAPGS_UNSAFE_STACK
|
|
movl %esp,%r8d
|
|
CFI_REGISTER rsp,r8
|
|
movq PER_CPU_VAR(kernel_stack),%rsp
|
|
+ SAVE_ARGS 8*6,0,0
|
|
+ pax_enter_kernel_user
|
|
/*
|
|
* No need to follow this irqs on/off section: the syscall
|
|
* disabled irqs and here we enable it straight after entry:
|
|
*/
|
|
ENABLE_INTERRUPTS(CLBR_NONE)
|
|
- SAVE_ARGS 8,0,0
|
|
movl %eax,%eax /* zero extension */
|
|
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
|
|
movq %rcx,RIP-ARGOFFSET(%rsp)
|
|
@@ -302,12 +349,19 @@ ENTRY(ia32_cstar_target)
|
|
/* no need to do an access_ok check here because r8 has been
|
|
32bit zero extended */
|
|
/* hardware stack frame is complete now */
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ mov $PAX_USER_SHADOW_BASE,%r11
|
|
+ add %r11,%r8
|
|
+#endif
|
|
+
|
|
1: movl (%r8),%r9d
|
|
.section __ex_table,"a"
|
|
.quad 1b,ia32_badarg
|
|
.previous
|
|
- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
+ GET_THREAD_INFO(%r11)
|
|
+ orl $TS_COMPAT,TI_status(%r11)
|
|
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
|
|
CFI_REMEMBER_STATE
|
|
jnz cstar_tracesys
|
|
cmpq $IA32_NR_syscalls-1,%rax
|
|
@@ -317,12 +371,15 @@ cstar_do_call:
|
|
cstar_dispatch:
|
|
call *ia32_sys_call_table(,%rax,8)
|
|
movq %rax,RAX-ARGOFFSET(%rsp)
|
|
+ GET_THREAD_INFO(%r11)
|
|
DISABLE_INTERRUPTS(CLBR_NONE)
|
|
TRACE_IRQS_OFF
|
|
- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
|
|
jnz sysretl_audit
|
|
sysretl_from_sys_call:
|
|
- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
+ pax_exit_kernel_user
|
|
+ pax_erase_kstack
|
|
+ andl $~TS_COMPAT,TI_status(%r11)
|
|
RESTORE_ARGS 0,-ARG_SKIP,0,0,0
|
|
movl RIP-ARGOFFSET(%rsp),%ecx
|
|
CFI_REGISTER rip,rcx
|
|
@@ -350,7 +407,7 @@ sysretl_audit:
|
|
|
|
cstar_tracesys:
|
|
#ifdef CONFIG_AUDITSYSCALL
|
|
- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
|
|
jz cstar_auditsys
|
|
#endif
|
|
xchgl %r9d,%ebp
|
|
@@ -359,6 +416,9 @@ cstar_tracesys:
|
|
movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
|
|
movq %rsp,%rdi /* &pt_regs -> arg1 */
|
|
call syscall_trace_enter
|
|
+
|
|
+ pax_erase_kstack
|
|
+
|
|
LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
|
|
RESTORE_REST
|
|
xchgl %ebp,%r9d
|
|
@@ -404,19 +464,21 @@ ENTRY(ia32_syscall)
|
|
CFI_REL_OFFSET rip,RIP-RIP
|
|
PARAVIRT_ADJUST_EXCEPTION_FRAME
|
|
SWAPGS
|
|
- /*
|
|
- * No need to follow this irqs on/off section: the syscall
|
|
- * disabled irqs and here we enable it straight after entry:
|
|
- */
|
|
- ENABLE_INTERRUPTS(CLBR_NONE)
|
|
movl %eax,%eax
|
|
pushq_cfi %rax
|
|
cld
|
|
/* note the registers are not zero extended to the sf.
|
|
this could be a problem. */
|
|
SAVE_ARGS 0,1,0
|
|
- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
+ pax_enter_kernel_user
|
|
+ /*
|
|
+ * No need to follow this irqs on/off section: the syscall
|
|
+ * disabled irqs and here we enable it straight after entry:
|
|
+ */
|
|
+ ENABLE_INTERRUPTS(CLBR_NONE)
|
|
+ GET_THREAD_INFO(%r11)
|
|
+ orl $TS_COMPAT,TI_status(%r11)
|
|
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
|
|
jnz ia32_tracesys
|
|
cmpq $(IA32_NR_syscalls-1),%rax
|
|
ja ia32_badsys
|
|
@@ -435,6 +497,9 @@ ia32_tracesys:
|
|
movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
|
|
movq %rsp,%rdi /* &pt_regs -> arg1 */
|
|
call syscall_trace_enter
|
|
+
|
|
+ pax_erase_kstack
|
|
+
|
|
LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
|
|
RESTORE_REST
|
|
cmpq $(IA32_NR_syscalls-1),%rax
|
|
diff --git a/arch/x86/ia32/ia32entry.S.rej b/arch/x86/ia32/ia32entry.S.rej
|
|
new file mode 100644
|
|
index 0000000..164a807
|
|
--- /dev/null
|
|
+++ b/arch/x86/ia32/ia32entry.S.rej
|
|
@@ -0,0 +1,16 @@
|
|
+--- arch/x86/ia32/ia32entry.S 2012-03-19 10:38:56.404050007 +0100
|
|
++++ arch/x86/ia32/ia32entry.S 2012-05-21 12:10:09.300048875 +0200
|
|
+@@ -256,11 +299,12 @@ sysexit_from_sys_call:
|
|
+ 1: setbe %al /* 1 if error, 0 if not */
|
|
+ movzbl %al,%edi /* zero-extend that into %edi */
|
|
+ call __audit_syscall_exit
|
|
++ GET_THREAD_INFO(%r11)
|
|
+ movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
|
|
+ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
|
|
+ cli
|
|
+ TRACE_IRQS_OFF
|
|
+- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
++ testl %edi,TI_flags(%r11)
|
|
+ jz \exit
|
|
+ CLEAR_RREGS -ARGOFFSET
|
|
+ jmp int_with_check
|
|
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
|
|
index aec2202..f76174e 100644
|
|
--- a/arch/x86/ia32/sys_ia32.c
|
|
+++ b/arch/x86/ia32/sys_ia32.c
|
|
@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
|
|
*/
|
|
static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
|
|
{
|
|
- typeof(ubuf->st_uid) uid = 0;
|
|
- typeof(ubuf->st_gid) gid = 0;
|
|
+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
|
|
+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
|
|
SET_UID(uid, stat->uid);
|
|
SET_GID(gid, stat->gid);
|
|
if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
|
|
@@ -292,7 +292,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
|
|
return alarm_setitimer(seconds);
|
|
}
|
|
|
|
-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
|
|
+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
|
|
int options)
|
|
{
|
|
return compat_sys_wait4(pid, stat_addr, options, NULL);
|
|
@@ -313,7 +313,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
|
|
mm_segment_t old_fs = get_fs();
|
|
|
|
set_fs(KERNEL_DS);
|
|
- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
|
|
+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
|
|
set_fs(old_fs);
|
|
if (put_compat_timespec(&t, interval))
|
|
return -EFAULT;
|
|
@@ -329,7 +329,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
|
|
mm_segment_t old_fs = get_fs();
|
|
|
|
set_fs(KERNEL_DS);
|
|
- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
|
|
+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
|
|
set_fs(old_fs);
|
|
if (!ret) {
|
|
switch (_NSIG_WORDS) {
|
|
@@ -354,7 +354,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
|
|
if (copy_siginfo_from_user32(&info, uinfo))
|
|
return -EFAULT;
|
|
set_fs(KERNEL_DS);
|
|
- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
|
|
+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
|
|
set_fs(old_fs);
|
|
return ret;
|
|
}
|
|
@@ -399,7 +399,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
|
|
return -EFAULT;
|
|
|
|
set_fs(KERNEL_DS);
|
|
- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
|
|
+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
|
|
count);
|
|
set_fs(old_fs);
|
|
|
|
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
|
|
index 952bd01..7692c6f 100644
|
|
--- a/arch/x86/include/asm/alternative-asm.h
|
|
+++ b/arch/x86/include/asm/alternative-asm.h
|
|
@@ -15,6 +15,45 @@
|
|
.endm
|
|
#endif
|
|
|
|
+#ifdef KERNEXEC_PLUGIN
|
|
+ .macro pax_force_retaddr_bts rip=0
|
|
+ btsq $63,\rip(%rsp)
|
|
+ .endm
|
|
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
|
|
+ .macro pax_force_retaddr rip=0, reload=0
|
|
+ btsq $63,\rip(%rsp)
|
|
+ .endm
|
|
+ .macro pax_force_fptr ptr
|
|
+ btsq $63,\ptr
|
|
+ .endm
|
|
+ .macro pax_set_fptr_mask
|
|
+ .endm
|
|
+#endif
|
|
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
|
|
+ .macro pax_force_retaddr rip=0, reload=0
|
|
+ .if \reload
|
|
+ pax_set_fptr_mask
|
|
+ .endif
|
|
+ orq %r10,\rip(%rsp)
|
|
+ .endm
|
|
+ .macro pax_force_fptr ptr
|
|
+ orq %r10,\ptr
|
|
+ .endm
|
|
+ .macro pax_set_fptr_mask
|
|
+ movabs $0x8000000000000000,%r10
|
|
+ .endm
|
|
+#endif
|
|
+#else
|
|
+ .macro pax_force_retaddr rip=0, reload=0
|
|
+ .endm
|
|
+ .macro pax_force_fptr ptr
|
|
+ .endm
|
|
+ .macro pax_force_retaddr_bts rip=0
|
|
+ .endm
|
|
+ .macro pax_set_fptr_mask
|
|
+ .endm
|
|
+#endif
|
|
+
|
|
.macro altinstruction_entry orig alt feature orig_len alt_len
|
|
.long \orig - .
|
|
.long \alt - .
|
|
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
|
|
index 49331be..9706065 100644
|
|
--- a/arch/x86/include/asm/alternative.h
|
|
+++ b/arch/x86/include/asm/alternative.h
|
|
@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
|
|
".section .discard,\"aw\",@progbits\n" \
|
|
" .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
|
|
".previous\n" \
|
|
- ".section .altinstr_replacement, \"ax\"\n" \
|
|
+ ".section .altinstr_replacement, \"a\"\n" \
|
|
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
|
|
".previous"
|
|
|
|
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
|
|
index d854101..f6ea947 100644
|
|
--- a/arch/x86/include/asm/apic.h
|
|
+++ b/arch/x86/include/asm/apic.h
|
|
@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
|
|
|
|
#ifdef CONFIG_X86_LOCAL_APIC
|
|
|
|
-extern unsigned int apic_verbosity;
|
|
+extern int apic_verbosity;
|
|
extern int local_apic_timer_c2_ok;
|
|
|
|
extern int disable_apic;
|
|
diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
|
|
index 20370c6..a2eb9b0 100644
|
|
--- a/arch/x86/include/asm/apm.h
|
|
+++ b/arch/x86/include/asm/apm.h
|
|
@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
|
|
__asm__ __volatile__(APM_DO_ZERO_SEGS
|
|
"pushl %%edi\n\t"
|
|
"pushl %%ebp\n\t"
|
|
- "lcall *%%cs:apm_bios_entry\n\t"
|
|
+ "lcall *%%ss:apm_bios_entry\n\t"
|
|
"setc %%al\n\t"
|
|
"popl %%ebp\n\t"
|
|
"popl %%edi\n\t"
|
|
@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
|
|
__asm__ __volatile__(APM_DO_ZERO_SEGS
|
|
"pushl %%edi\n\t"
|
|
"pushl %%ebp\n\t"
|
|
- "lcall *%%cs:apm_bios_entry\n\t"
|
|
+ "lcall *%%ss:apm_bios_entry\n\t"
|
|
"setc %%bl\n\t"
|
|
"popl %%ebp\n\t"
|
|
"popl %%edi\n\t"
|
|
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
|
|
index 58cb6d4..a4b806c 100644
|
|
--- a/arch/x86/include/asm/atomic.h
|
|
+++ b/arch/x86/include/asm/atomic.h
|
|
@@ -22,7 +22,18 @@
|
|
*/
|
|
static inline int atomic_read(const atomic_t *v)
|
|
{
|
|
- return (*(volatile int *)&(v)->counter);
|
|
+ return (*(volatile const int *)&(v)->counter);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * atomic_read_unchecked - read atomic variable
|
|
+ * @v: pointer of type atomic_unchecked_t
|
|
+ *
|
|
+ * Atomically reads the value of @v.
|
|
+ */
|
|
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
|
|
+{
|
|
+ return (*(volatile const int *)&(v)->counter);
|
|
}
|
|
|
|
/**
|
|
@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
|
|
}
|
|
|
|
/**
|
|
+ * atomic_set_unchecked - set atomic variable
|
|
+ * @v: pointer of type atomic_unchecked_t
|
|
+ * @i: required value
|
|
+ *
|
|
+ * Atomically sets the value of @v to @i.
|
|
+ */
|
|
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
|
|
+{
|
|
+ v->counter = i;
|
|
+}
|
|
+
|
|
+/**
|
|
* atomic_add - add integer to atomic variable
|
|
* @i: integer value to add
|
|
* @v: pointer of type atomic_t
|
|
@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
|
|
*/
|
|
static inline void atomic_add(int i, atomic_t *v)
|
|
{
|
|
- asm volatile(LOCK_PREFIX "addl %1,%0"
|
|
+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX "subl %1,%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ : "+m" (v->counter)
|
|
+ : "ir" (i));
|
|
+}
|
|
+
|
|
+/**
|
|
+ * atomic_add_unchecked - add integer to atomic variable
|
|
+ * @i: integer value to add
|
|
+ * @v: pointer of type atomic_unchecked_t
|
|
+ *
|
|
+ * Atomically adds @i to @v.
|
|
+ */
|
|
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
|
|
+{
|
|
+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
|
|
: "+m" (v->counter)
|
|
: "ir" (i));
|
|
}
|
|
@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
|
|
*/
|
|
static inline void atomic_sub(int i, atomic_t *v)
|
|
{
|
|
- asm volatile(LOCK_PREFIX "subl %1,%0"
|
|
+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX "addl %1,%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ : "+m" (v->counter)
|
|
+ : "ir" (i));
|
|
+}
|
|
+
|
|
+/**
|
|
+ * atomic_sub_unchecked - subtract integer from atomic variable
|
|
+ * @i: integer value to subtract
|
|
+ * @v: pointer of type atomic_unchecked_t
|
|
+ *
|
|
+ * Atomically subtracts @i from @v.
|
|
+ */
|
|
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
|
|
+{
|
|
+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
|
|
: "+m" (v->counter)
|
|
: "ir" (i));
|
|
}
|
|
@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
|
|
{
|
|
unsigned char c;
|
|
|
|
- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
|
|
+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX "addl %2,%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ "sete %1\n"
|
|
: "+m" (v->counter), "=qm" (c)
|
|
: "ir" (i) : "memory");
|
|
return c;
|
|
@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
|
|
*/
|
|
static inline void atomic_inc(atomic_t *v)
|
|
{
|
|
- asm volatile(LOCK_PREFIX "incl %0"
|
|
+ asm volatile(LOCK_PREFIX "incl %0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX "decl %0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ : "+m" (v->counter));
|
|
+}
|
|
+
|
|
+/**
|
|
+ * atomic_inc_unchecked - increment atomic variable
|
|
+ * @v: pointer of type atomic_unchecked_t
|
|
+ *
|
|
+ * Atomically increments @v by 1.
|
|
+ */
|
|
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
|
|
+{
|
|
+ asm volatile(LOCK_PREFIX "incl %0\n"
|
|
: "+m" (v->counter));
|
|
}
|
|
|
|
@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
|
|
*/
|
|
static inline void atomic_dec(atomic_t *v)
|
|
{
|
|
- asm volatile(LOCK_PREFIX "decl %0"
|
|
+ asm volatile(LOCK_PREFIX "decl %0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX "incl %0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ : "+m" (v->counter));
|
|
+}
|
|
+
|
|
+/**
|
|
+ * atomic_dec_unchecked - decrement atomic variable
|
|
+ * @v: pointer of type atomic_unchecked_t
|
|
+ *
|
|
+ * Atomically decrements @v by 1.
|
|
+ */
|
|
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
|
|
+{
|
|
+ asm volatile(LOCK_PREFIX "decl %0\n"
|
|
: "+m" (v->counter));
|
|
}
|
|
|
|
@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
|
|
{
|
|
unsigned char c;
|
|
|
|
- asm volatile(LOCK_PREFIX "decl %0; sete %1"
|
|
+ asm volatile(LOCK_PREFIX "decl %0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX "incl %0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ "sete %1\n"
|
|
: "+m" (v->counter), "=qm" (c)
|
|
: : "memory");
|
|
return c != 0;
|
|
@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
|
|
{
|
|
unsigned char c;
|
|
|
|
- asm volatile(LOCK_PREFIX "incl %0; sete %1"
|
|
+ asm volatile(LOCK_PREFIX "incl %0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX "decl %0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ "sete %1\n"
|
|
+ : "+m" (v->counter), "=qm" (c)
|
|
+ : : "memory");
|
|
+ return c != 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * atomic_inc_and_test_unchecked - increment and test
|
|
+ * @v: pointer of type atomic_unchecked_t
|
|
+ *
|
|
+ * Atomically increments @v by 1
|
|
+ * and returns true if the result is zero, or false for all
|
|
+ * other cases.
|
|
+ */
|
|
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
|
|
+{
|
|
+ unsigned char c;
|
|
+
|
|
+ asm volatile(LOCK_PREFIX "incl %0\n"
|
|
+ "sete %1\n"
|
|
: "+m" (v->counter), "=qm" (c)
|
|
: : "memory");
|
|
return c != 0;
|
|
@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
|
|
{
|
|
unsigned char c;
|
|
|
|
- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
|
|
+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX "subl %2,%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ "sets %1\n"
|
|
: "+m" (v->counter), "=qm" (c)
|
|
: "ir" (i) : "memory");
|
|
return c;
|
|
@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
|
|
goto no_xadd;
|
|
#endif
|
|
/* Modern 486+ processor */
|
|
- return i + xadd(&v->counter, i);
|
|
+ return i + xadd_check_overflow(&v->counter, i);
|
|
|
|
#ifdef CONFIG_M386
|
|
no_xadd: /* Legacy 386 processor */
|
|
@@ -192,6 +354,34 @@ static inline int atomic_add_return(int i, atomic_t *v)
|
|
}
|
|
|
|
/**
|
|
+ * atomic_add_return_unchecked - add integer and return
|
|
+ * @i: integer value to add
|
|
+ * @v: pointer of type atomic_unchecked_t
|
|
+ *
|
|
+ * Atomically adds @i to @v and returns @i + @v
|
|
+ */
|
|
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
|
|
+{
|
|
+#ifdef CONFIG_M386
|
|
+ int __i;
|
|
+ unsigned long flags;
|
|
+ if (unlikely(boot_cpu_data.x86 <= 3))
|
|
+ goto no_xadd;
|
|
+#endif
|
|
+ /* Modern 486+ processor */
|
|
+ return i + xadd(&v->counter, i);
|
|
+
|
|
+#ifdef CONFIG_M386
|
|
+no_xadd: /* Legacy 386 processor */
|
|
+ raw_local_irq_save(flags);
|
|
+ __i = atomic_read_unchecked(v);
|
|
+ atomic_set_unchecked(v, i + __i);
|
|
+ raw_local_irq_restore(flags);
|
|
+ return i + __i;
|
|
+#endif
|
|
+}
|
|
+
|
|
+/**
|
|
* atomic_sub_return - subtract integer and return
|
|
* @v: pointer of type atomic_t
|
|
* @i: integer value to subtract
|
|
@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
|
|
}
|
|
|
|
#define atomic_inc_return(v) (atomic_add_return(1, v))
|
|
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
|
|
+{
|
|
+ return atomic_add_return_unchecked(1, v);
|
|
+}
|
|
#define atomic_dec_return(v) (atomic_sub_return(1, v))
|
|
|
|
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
|
|
@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
|
|
return cmpxchg(&v->counter, old, new);
|
|
}
|
|
|
|
+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
|
|
+{
|
|
+ return cmpxchg(&v->counter, old, new);
|
|
+}
|
|
+
|
|
static inline int atomic_xchg(atomic_t *v, int new)
|
|
{
|
|
return xchg(&v->counter, new);
|
|
}
|
|
|
|
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
|
|
+{
|
|
+ return xchg(&v->counter, new);
|
|
+}
|
|
+
|
|
/**
|
|
* __atomic_add_unless - add unless the number is already a given value
|
|
* @v: pointer of type atomic_t
|
|
@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
|
|
*/
|
|
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
|
|
{
|
|
- int c, old;
|
|
+ int c, old, new;
|
|
c = atomic_read(v);
|
|
for (;;) {
|
|
- if (unlikely(c == (u)))
|
|
+ if (unlikely(c == u))
|
|
break;
|
|
- old = atomic_cmpxchg((v), c, c + (a));
|
|
+
|
|
+ asm volatile("addl %2,%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ "subl %2,%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ : "=r" (new)
|
|
+ : "0" (c), "ir" (a));
|
|
+
|
|
+ old = atomic_cmpxchg(v, c, new);
|
|
if (likely(old == c))
|
|
break;
|
|
c = old;
|
|
@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
|
|
return c;
|
|
}
|
|
|
|
+/**
|
|
+ * atomic_inc_not_zero_hint - increment if not null
|
|
+ * @v: pointer of type atomic_t
|
|
+ * @hint: probable value of the atomic before the increment
|
|
+ *
|
|
+ * This version of atomic_inc_not_zero() gives a hint of probable
|
|
+ * value of the atomic. This helps processor to not read the memory
|
|
+ * before doing the atomic read/modify/write cycle, lowering
|
|
+ * number of bus transactions on some arches.
|
|
+ *
|
|
+ * Returns: 0 if increment was not done, 1 otherwise.
|
|
+ */
|
|
+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
|
|
+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
|
|
+{
|
|
+ int val, c = hint, new;
|
|
+
|
|
+ /* sanity test, should be removed by compiler if hint is a constant */
|
|
+ if (!hint)
|
|
+ return __atomic_add_unless(v, 1, 0);
|
|
+
|
|
+ do {
|
|
+ asm volatile("incl %0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ "decl %0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ : "=r" (new)
|
|
+ : "0" (c));
|
|
+
|
|
+ val = atomic_cmpxchg(v, c, new);
|
|
+ if (val == c)
|
|
+ return 1;
|
|
+ c = val;
|
|
+ } while (c);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
|
|
/*
|
|
* atomic_dec_if_positive - decrement by 1 if old value positive
|
|
@@ -293,14 +552,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
|
|
#endif
|
|
|
|
/* These are x86-specific, used by some header files */
|
|
-#define atomic_clear_mask(mask, addr) \
|
|
- asm volatile(LOCK_PREFIX "andl %0,%1" \
|
|
- : : "r" (~(mask)), "m" (*(addr)) : "memory")
|
|
-
|
|
-#define atomic_set_mask(mask, addr) \
|
|
- asm volatile(LOCK_PREFIX "orl %0,%1" \
|
|
- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
|
|
- : "memory")
|
|
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
|
|
+{
|
|
+ asm volatile(LOCK_PREFIX "andl %1,%0"
|
|
+ : "+m" (v->counter)
|
|
+ : "r" (~(mask))
|
|
+ : "memory");
|
|
+}
|
|
+
|
|
+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
|
|
+{
|
|
+ asm volatile(LOCK_PREFIX "andl %1,%0"
|
|
+ : "+m" (v->counter)
|
|
+ : "r" (~(mask))
|
|
+ : "memory");
|
|
+}
|
|
+
|
|
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
|
|
+{
|
|
+ asm volatile(LOCK_PREFIX "orl %1,%0"
|
|
+ : "+m" (v->counter)
|
|
+ : "r" (mask)
|
|
+ : "memory");
|
|
+}
|
|
+
|
|
+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
|
|
+{
|
|
+ asm volatile(LOCK_PREFIX "orl %1,%0"
|
|
+ : "+m" (v->counter)
|
|
+ : "r" (mask)
|
|
+ : "memory");
|
|
+}
|
|
|
|
/* Atomic operations are already serializing on x86 */
|
|
#define smp_mb__before_atomic_dec() barrier()
|
|
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
|
|
index 1981199..36b9dfb 100644
|
|
--- a/arch/x86/include/asm/atomic64_32.h
|
|
+++ b/arch/x86/include/asm/atomic64_32.h
|
|
@@ -12,6 +12,14 @@ typedef struct {
|
|
u64 __aligned(8) counter;
|
|
} atomic64_t;
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+typedef struct {
|
|
+ u64 __aligned(8) counter;
|
|
+} atomic64_unchecked_t;
|
|
+#else
|
|
+typedef atomic64_t atomic64_unchecked_t;
|
|
+#endif
|
|
+
|
|
#define ATOMIC64_INIT(val) { (val) }
|
|
|
|
#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
|
|
@@ -37,21 +45,31 @@ typedef struct {
|
|
ATOMIC64_DECL_ONE(sym##_386)
|
|
|
|
ATOMIC64_DECL_ONE(add_386);
|
|
+ATOMIC64_DECL_ONE(add_unchecked_386);
|
|
ATOMIC64_DECL_ONE(sub_386);
|
|
+ATOMIC64_DECL_ONE(sub_unchecked_386);
|
|
ATOMIC64_DECL_ONE(inc_386);
|
|
+ATOMIC64_DECL_ONE(inc_unchecked_386);
|
|
ATOMIC64_DECL_ONE(dec_386);
|
|
+ATOMIC64_DECL_ONE(dec_unchecked_386);
|
|
#endif
|
|
|
|
#define alternative_atomic64(f, out, in...) \
|
|
__alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
|
|
|
|
ATOMIC64_DECL(read);
|
|
+ATOMIC64_DECL(read_unchecked);
|
|
ATOMIC64_DECL(set);
|
|
+ATOMIC64_DECL(set_unchecked);
|
|
ATOMIC64_DECL(xchg);
|
|
ATOMIC64_DECL(add_return);
|
|
+ATOMIC64_DECL(add_return_unchecked);
|
|
ATOMIC64_DECL(sub_return);
|
|
+ATOMIC64_DECL(sub_return_unchecked);
|
|
ATOMIC64_DECL(inc_return);
|
|
+ATOMIC64_DECL(inc_return_unchecked);
|
|
ATOMIC64_DECL(dec_return);
|
|
+ATOMIC64_DECL(dec_return_unchecked);
|
|
ATOMIC64_DECL(dec_if_positive);
|
|
ATOMIC64_DECL(inc_not_zero);
|
|
ATOMIC64_DECL(add_unless);
|
|
@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
|
|
}
|
|
|
|
/**
|
|
+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
|
|
+ * @p: pointer to type atomic64_unchecked_t
|
|
+ * @o: expected value
|
|
+ * @n: new value
|
|
+ *
|
|
+ * Atomically sets @v to @n if it was equal to @o and returns
|
|
+ * the old value.
|
|
+ */
|
|
+
|
|
+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
|
|
+{
|
|
+ return cmpxchg64(&v->counter, o, n);
|
|
+}
|
|
+
|
|
+/**
|
|
* atomic64_xchg - xchg atomic64 variable
|
|
* @v: pointer to type atomic64_t
|
|
* @n: value to assign
|
|
@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
|
|
}
|
|
|
|
/**
|
|
+ * atomic64_set_unchecked - set atomic64 variable
|
|
+ * @v: pointer to type atomic64_unchecked_t
|
|
+ * @n: value to assign
|
|
+ *
|
|
+ * Atomically sets the value of @v to @n.
|
|
+ */
|
|
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
|
|
+{
|
|
+ unsigned high = (unsigned)(i >> 32);
|
|
+ unsigned low = (unsigned)i;
|
|
+ alternative_atomic64(set, /* no output */,
|
|
+ "S" (v), "b" (low), "c" (high)
|
|
+ : "eax", "edx", "memory");
|
|
+}
|
|
+
|
|
+/**
|
|
* atomic64_read - read atomic64 variable
|
|
* @v: pointer to type atomic64_t
|
|
*
|
|
@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
|
|
}
|
|
|
|
/**
|
|
+ * atomic64_read_unchecked - read atomic64 variable
|
|
+ * @v: pointer to type atomic64_unchecked_t
|
|
+ *
|
|
+ * Atomically reads the value of @v and returns it.
|
|
+ */
|
|
+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
|
|
+{
|
|
+ long long r;
|
|
+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
|
|
+ return r;
|
|
+ }
|
|
+
|
|
+/**
|
|
* atomic64_add_return - add and return
|
|
* @i: integer value to add
|
|
* @v: pointer to type atomic64_t
|
|
@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
|
|
return i;
|
|
}
|
|
|
|
+/**
|
|
+ * atomic64_add_return_unchecked - add and return
|
|
+ * @i: integer value to add
|
|
+ * @v: pointer to type atomic64_unchecked_t
|
|
+ *
|
|
+ * Atomically adds @i to @v and returns @i + *@v
|
|
+ */
|
|
+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
|
|
+{
|
|
+ alternative_atomic64(add_return_unchecked,
|
|
+ ASM_OUTPUT2("+A" (i), "+c" (v)),
|
|
+ ASM_NO_INPUT_CLOBBER("memory"));
|
|
+ return i;
|
|
+}
|
|
+
|
|
/*
|
|
* Other variants with different arithmetic operators:
|
|
*/
|
|
@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
|
|
return a;
|
|
}
|
|
|
|
+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
|
|
+{
|
|
+ long long a;
|
|
+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
|
|
+ "S" (v) : "memory", "ecx");
|
|
+ return a;
|
|
+}
|
|
+
|
|
static inline long long atomic64_dec_return(atomic64_t *v)
|
|
{
|
|
long long a;
|
|
@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
|
|
}
|
|
|
|
/**
|
|
+ * atomic64_add_unchecked - add integer to atomic64 variable
|
|
+ * @i: integer value to add
|
|
+ * @v: pointer to type atomic64_unchecked_t
|
|
+ *
|
|
+ * Atomically adds @i to @v.
|
|
+ */
|
|
+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
|
|
+{
|
|
+ __alternative_atomic64(add_unchecked, add_return_unchecked,
|
|
+ ASM_OUTPUT2("+A" (i), "+c" (v)),
|
|
+ ASM_NO_INPUT_CLOBBER("memory"));
|
|
+ return i;
|
|
+}
|
|
+
|
|
+/**
|
|
* atomic64_sub - subtract the atomic64 variable
|
|
* @i: integer value to subtract
|
|
* @v: pointer to type atomic64_t
|
|
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
|
|
index 0e1cbfc..5623683 100644
|
|
--- a/arch/x86/include/asm/atomic64_64.h
|
|
+++ b/arch/x86/include/asm/atomic64_64.h
|
|
@@ -18,7 +18,19 @@
|
|
*/
|
|
static inline long atomic64_read(const atomic64_t *v)
|
|
{
|
|
- return (*(volatile long *)&(v)->counter);
|
|
+ return (*(volatile const long *)&(v)->counter);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * atomic64_read_unchecked - read atomic64 variable
|
|
+ * @v: pointer of type atomic64_unchecked_t
|
|
+ *
|
|
+ * Atomically reads the value of @v.
|
|
+ * Doesn't imply a read memory barrier.
|
|
+ */
|
|
+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
|
|
+{
|
|
+ return (*(volatile const long *)&(v)->counter);
|
|
}
|
|
|
|
/**
|
|
@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
|
|
}
|
|
|
|
/**
|
|
+ * atomic64_set_unchecked - set atomic64 variable
|
|
+ * @v: pointer to type atomic64_unchecked_t
|
|
+ * @i: required value
|
|
+ *
|
|
+ * Atomically sets the value of @v to @i.
|
|
+ */
|
|
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
|
|
+{
|
|
+ v->counter = i;
|
|
+}
|
|
+
|
|
+/**
|
|
* atomic64_add - add integer to atomic64 variable
|
|
* @i: integer value to add
|
|
* @v: pointer to type atomic64_t
|
|
@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
|
|
*/
|
|
static inline void atomic64_add(long i, atomic64_t *v)
|
|
{
|
|
+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX "subq %1,%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ : "=m" (v->counter)
|
|
+ : "er" (i), "m" (v->counter));
|
|
+}
|
|
+
|
|
+/**
|
|
+ * atomic64_add_unchecked - add integer to atomic64 variable
|
|
+ * @i: integer value to add
|
|
+ * @v: pointer to type atomic64_unchecked_t
|
|
+ *
|
|
+ * Atomically adds @i to @v.
|
|
+ */
|
|
+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
|
|
+{
|
|
asm volatile(LOCK_PREFIX "addq %1,%0"
|
|
: "=m" (v->counter)
|
|
: "er" (i), "m" (v->counter));
|
|
@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
|
|
*/
|
|
static inline void atomic64_sub(long i, atomic64_t *v)
|
|
{
|
|
- asm volatile(LOCK_PREFIX "subq %1,%0"
|
|
+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX "addq %1,%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ : "=m" (v->counter)
|
|
+ : "er" (i), "m" (v->counter));
|
|
+}
|
|
+
|
|
+/**
|
|
+ * atomic64_sub_unchecked - subtract the atomic64 variable
|
|
+ * @i: integer value to subtract
|
|
+ * @v: pointer to type atomic64_unchecked_t
|
|
+ *
|
|
+ * Atomically subtracts @i from @v.
|
|
+ */
|
|
+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
|
|
+{
|
|
+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
|
|
: "=m" (v->counter)
|
|
: "er" (i), "m" (v->counter));
|
|
}
|
|
@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
|
|
{
|
|
unsigned char c;
|
|
|
|
- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
|
|
+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX "addq %2,%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ "sete %1\n"
|
|
: "=m" (v->counter), "=qm" (c)
|
|
: "er" (i), "m" (v->counter) : "memory");
|
|
return c;
|
|
@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
|
|
*/
|
|
static inline void atomic64_inc(atomic64_t *v)
|
|
{
|
|
+ asm volatile(LOCK_PREFIX "incq %0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX "decq %0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ : "=m" (v->counter)
|
|
+ : "m" (v->counter));
|
|
+}
|
|
+
|
|
+/**
|
|
+ * atomic64_inc_unchecked - increment atomic64 variable
|
|
+ * @v: pointer to type atomic64_unchecked_t
|
|
+ *
|
|
+ * Atomically increments @v by 1.
|
|
+ */
|
|
+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
|
|
+{
|
|
asm volatile(LOCK_PREFIX "incq %0"
|
|
: "=m" (v->counter)
|
|
: "m" (v->counter));
|
|
@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
|
|
*/
|
|
static inline void atomic64_dec(atomic64_t *v)
|
|
{
|
|
- asm volatile(LOCK_PREFIX "decq %0"
|
|
+ asm volatile(LOCK_PREFIX "decq %0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX "incq %0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ : "=m" (v->counter)
|
|
+ : "m" (v->counter));
|
|
+}
|
|
+
|
|
+/**
|
|
+ * atomic64_dec_unchecked - decrement atomic64 variable
|
|
+ * @v: pointer to type atomic64_t
|
|
+ *
|
|
+ * Atomically decrements @v by 1.
|
|
+ */
|
|
+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
|
|
+{
|
|
+ asm volatile(LOCK_PREFIX "decq %0\n"
|
|
: "=m" (v->counter)
|
|
: "m" (v->counter));
|
|
}
|
|
@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
|
|
{
|
|
unsigned char c;
|
|
|
|
- asm volatile(LOCK_PREFIX "decq %0; sete %1"
|
|
+ asm volatile(LOCK_PREFIX "decq %0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX "incq %0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ "sete %1\n"
|
|
: "=m" (v->counter), "=qm" (c)
|
|
: "m" (v->counter) : "memory");
|
|
return c != 0;
|
|
@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
|
|
{
|
|
unsigned char c;
|
|
|
|
- asm volatile(LOCK_PREFIX "incq %0; sete %1"
|
|
+ asm volatile(LOCK_PREFIX "incq %0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX "decq %0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ "sete %1\n"
|
|
: "=m" (v->counter), "=qm" (c)
|
|
: "m" (v->counter) : "memory");
|
|
return c != 0;
|
|
@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
|
|
{
|
|
unsigned char c;
|
|
|
|
- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
|
|
+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX "subq %2,%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ "sets %1\n"
|
|
: "=m" (v->counter), "=qm" (c)
|
|
: "er" (i), "m" (v->counter) : "memory");
|
|
return c;
|
|
@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
|
|
*/
|
|
static inline long atomic64_add_return(long i, atomic64_t *v)
|
|
{
|
|
+ return i + xadd_check_overflow(&v->counter, i);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * atomic64_add_return_unchecked - add and return
|
|
+ * @i: integer value to add
|
|
+ * @v: pointer to type atomic64_unchecked_t
|
|
+ *
|
|
+ * Atomically adds @i to @v and returns @i + @v
|
|
+ */
|
|
+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
|
|
+{
|
|
return i + xadd(&v->counter, i);
|
|
}
|
|
|
|
@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
|
|
}
|
|
|
|
#define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
|
|
+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
|
|
+{
|
|
+ return atomic64_add_return_unchecked(1, v);
|
|
+}
|
|
#define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
|
|
|
|
static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
|
|
@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
|
|
return cmpxchg(&v->counter, old, new);
|
|
}
|
|
|
|
+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
|
|
+{
|
|
+ return cmpxchg(&v->counter, old, new);
|
|
+}
|
|
+
|
|
static inline long atomic64_xchg(atomic64_t *v, long new)
|
|
{
|
|
return xchg(&v->counter, new);
|
|
@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
|
|
*/
|
|
static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
|
|
{
|
|
- long c, old;
|
|
+ long c, old, new;
|
|
c = atomic64_read(v);
|
|
for (;;) {
|
|
- if (unlikely(c == (u)))
|
|
+ if (unlikely(c == u))
|
|
break;
|
|
- old = atomic64_cmpxchg((v), c, c + (a));
|
|
+
|
|
+ asm volatile("add %2,%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ "sub %2,%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ : "=r" (new)
|
|
+ : "0" (c), "ir" (a));
|
|
+
|
|
+ old = atomic64_cmpxchg(v, c, new);
|
|
if (likely(old == c))
|
|
break;
|
|
c = old;
|
|
}
|
|
- return c != (u);
|
|
+ return c != u;
|
|
}
|
|
|
|
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
|
|
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
|
|
index b97596e..9bd48b06 100644
|
|
--- a/arch/x86/include/asm/bitops.h
|
|
+++ b/arch/x86/include/asm/bitops.h
|
|
@@ -38,7 +38,7 @@
|
|
* a mask operation on a byte.
|
|
*/
|
|
#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
|
|
-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
|
|
+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
|
|
#define CONST_MASK(nr) (1 << ((nr) & 7))
|
|
|
|
/**
|
|
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
|
|
index 5e1a2ee..c9f9533 100644
|
|
--- a/arch/x86/include/asm/boot.h
|
|
+++ b/arch/x86/include/asm/boot.h
|
|
@@ -11,10 +11,15 @@
|
|
#include <asm/pgtable_types.h>
|
|
|
|
/* Physical address where kernel should be loaded. */
|
|
-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
|
|
+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
|
|
+ (CONFIG_PHYSICAL_ALIGN - 1)) \
|
|
& ~(CONFIG_PHYSICAL_ALIGN - 1))
|
|
|
|
+#ifndef __ASSEMBLY__
|
|
+extern unsigned char __LOAD_PHYSICAL_ADDR[];
|
|
+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
|
|
+#endif
|
|
+
|
|
/* Minimum kernel alignment, as a power of two */
|
|
#ifdef CONFIG_X86_64
|
|
#define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
|
|
diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
|
|
index 48f99f1..d78ebf9 100644
|
|
--- a/arch/x86/include/asm/cache.h
|
|
+++ b/arch/x86/include/asm/cache.h
|
|
@@ -5,12 +5,13 @@
|
|
|
|
/* L1 cache line size */
|
|
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
|
|
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
|
|
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
|
|
|
|
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
|
|
+#define __read_only __attribute__((__section__(".data..read_only")))
|
|
|
|
#define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
|
|
-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
|
|
+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
|
|
|
|
#ifdef CONFIG_X86_VSMP
|
|
#ifdef CONFIG_SMP
|
|
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
|
|
index 9863ee3..4a1f8e1 100644
|
|
--- a/arch/x86/include/asm/cacheflush.h
|
|
+++ b/arch/x86/include/asm/cacheflush.h
|
|
@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
|
|
unsigned long pg_flags = pg->flags & _PGMT_MASK;
|
|
|
|
if (pg_flags == _PGMT_DEFAULT)
|
|
- return -1;
|
|
+ return ~0UL;
|
|
else if (pg_flags == _PGMT_WC)
|
|
return _PAGE_CACHE_WC;
|
|
else if (pg_flags == _PGMT_UC_MINUS)
|
|
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
|
|
index 46fc474..b02b0f9 100644
|
|
--- a/arch/x86/include/asm/checksum_32.h
|
|
+++ b/arch/x86/include/asm/checksum_32.h
|
|
@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
|
|
int len, __wsum sum,
|
|
int *src_err_ptr, int *dst_err_ptr);
|
|
|
|
+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
|
|
+ int len, __wsum sum,
|
|
+ int *src_err_ptr, int *dst_err_ptr);
|
|
+
|
|
+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
|
|
+ int len, __wsum sum,
|
|
+ int *src_err_ptr, int *dst_err_ptr);
|
|
+
|
|
/*
|
|
* Note: when you get a NULL pointer exception here this means someone
|
|
* passed in an incorrect kernel address to one of these functions.
|
|
@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
|
|
int *err_ptr)
|
|
{
|
|
might_sleep();
|
|
- return csum_partial_copy_generic((__force void *)src, dst,
|
|
+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
|
|
len, sum, err_ptr, NULL);
|
|
}
|
|
|
|
@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
|
|
{
|
|
might_sleep();
|
|
if (access_ok(VERIFY_WRITE, dst, len))
|
|
- return csum_partial_copy_generic(src, (__force void *)dst,
|
|
+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
|
|
len, sum, NULL, err_ptr);
|
|
|
|
if (len)
|
|
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
|
|
index 99480e5..d81165b 100644
|
|
--- a/arch/x86/include/asm/cmpxchg.h
|
|
+++ b/arch/x86/include/asm/cmpxchg.h
|
|
@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
|
|
__compiletime_error("Bad argument size for cmpxchg");
|
|
extern void __xadd_wrong_size(void)
|
|
__compiletime_error("Bad argument size for xadd");
|
|
+extern void __xadd_check_overflow_wrong_size(void)
|
|
+ __compiletime_error("Bad argument size for xadd_check_overflow");
|
|
extern void __add_wrong_size(void)
|
|
__compiletime_error("Bad argument size for add");
|
|
+extern void __add_check_overflow_wrong_size(void)
|
|
+ __compiletime_error("Bad argument size for add_check_overflow");
|
|
|
|
/*
|
|
* Constants for operation sizes. On 32-bit, the 64-bit size it set to
|
|
@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
|
|
__ret; \
|
|
})
|
|
|
|
+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
|
|
+ ({ \
|
|
+ __typeof__ (*(ptr)) __ret = (arg); \
|
|
+ switch (sizeof(*(ptr))) { \
|
|
+ case __X86_CASE_L: \
|
|
+ asm volatile (lock #op "l %0, %1\n" \
|
|
+ "jno 0f\n" \
|
|
+ "mov %0,%1\n" \
|
|
+ "int $4\n0:\n" \
|
|
+ _ASM_EXTABLE(0b, 0b) \
|
|
+ : "+r" (__ret), "+m" (*(ptr)) \
|
|
+ : : "memory", "cc"); \
|
|
+ break; \
|
|
+ case __X86_CASE_Q: \
|
|
+ asm volatile (lock #op "q %q0, %1\n" \
|
|
+ "jno 0f\n" \
|
|
+ "mov %0,%1\n" \
|
|
+ "int $4\n0:\n" \
|
|
+ _ASM_EXTABLE(0b, 0b) \
|
|
+ : "+r" (__ret), "+m" (*(ptr)) \
|
|
+ : : "memory", "cc"); \
|
|
+ break; \
|
|
+ default: \
|
|
+ __ ## op ## _check_overflow_wrong_size(); \
|
|
+ } \
|
|
+ __ret; \
|
|
+ })
|
|
+
|
|
/*
|
|
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
|
|
* Since this is generally used to protect other memory information, we
|
|
@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
|
|
#define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
|
|
#define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
|
|
|
|
+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
|
|
+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
|
|
+
|
|
#define __add(ptr, inc, lock) \
|
|
({ \
|
|
__typeof__ (*(ptr)) __ret = (inc); \
|
|
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
|
|
index 5bd3f9f..6bfa845 100644
|
|
--- a/arch/x86/include/asm/cpufeature.h
|
|
+++ b/arch/x86/include/asm/cpufeature.h
|
|
@@ -371,7 +371,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
|
|
".section .discard,\"aw\",@progbits\n"
|
|
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
|
|
".previous\n"
|
|
- ".section .altinstr_replacement,\"ax\"\n"
|
|
+ ".section .altinstr_replacement,\"a\"\n"
|
|
"3: movb $1,%0\n"
|
|
"4:\n"
|
|
".previous\n"
|
|
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
|
|
index fa9c8c7..476e877 100644
|
|
--- a/arch/x86/include/asm/desc.h
|
|
+++ b/arch/x86/include/asm/desc.h
|
|
@@ -4,6 +4,7 @@
|
|
#include <asm/desc_defs.h>
|
|
#include <asm/ldt.h>
|
|
#include <asm/mmu.h>
|
|
+#include <asm/pgtable.h>
|
|
|
|
#include <linux/smp.h>
|
|
|
|
@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
|
|
|
|
desc->type = (info->read_exec_only ^ 1) << 1;
|
|
desc->type |= info->contents << 2;
|
|
+ desc->type |= info->seg_not_present ^ 1;
|
|
|
|
desc->s = 1;
|
|
desc->dpl = 0x3;
|
|
@@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
|
|
}
|
|
|
|
extern struct desc_ptr idt_descr;
|
|
-extern gate_desc idt_table[];
|
|
extern struct desc_ptr nmi_idt_descr;
|
|
-extern gate_desc nmi_idt_table[];
|
|
-
|
|
-struct gdt_page {
|
|
- struct desc_struct gdt[GDT_ENTRIES];
|
|
-} __attribute__((aligned(PAGE_SIZE)));
|
|
-
|
|
-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
|
|
+extern gate_desc idt_table[256];
|
|
+extern gate_desc nmi_idt_table[256];
|
|
|
|
+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
|
|
static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
|
|
{
|
|
- return per_cpu(gdt_page, cpu).gdt;
|
|
+ return cpu_gdt_table[cpu];
|
|
}
|
|
|
|
#ifdef CONFIG_X86_64
|
|
@@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
|
|
unsigned long base, unsigned dpl, unsigned flags,
|
|
unsigned short seg)
|
|
{
|
|
- gate->a = (seg << 16) | (base & 0xffff);
|
|
- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
|
|
+ gate->gate.offset_low = base;
|
|
+ gate->gate.seg = seg;
|
|
+ gate->gate.reserved = 0;
|
|
+ gate->gate.type = type;
|
|
+ gate->gate.s = 0;
|
|
+ gate->gate.dpl = dpl;
|
|
+ gate->gate.p = 1;
|
|
+ gate->gate.offset_high = base >> 16;
|
|
}
|
|
|
|
#endif
|
|
@@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
|
|
|
|
static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
|
|
{
|
|
+ pax_open_kernel();
|
|
memcpy(&idt[entry], gate, sizeof(*gate));
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
|
|
{
|
|
+ pax_open_kernel();
|
|
memcpy(&ldt[entry], desc, 8);
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
static inline void
|
|
@@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
|
|
default: size = sizeof(*gdt); break;
|
|
}
|
|
|
|
+ pax_open_kernel();
|
|
memcpy(&gdt[entry], desc, size);
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
|
|
@@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
|
|
|
|
static inline void native_load_tr_desc(void)
|
|
{
|
|
+ pax_open_kernel();
|
|
asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
static inline void native_load_gdt(const struct desc_ptr *dtr)
|
|
@@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
|
|
struct desc_struct *gdt = get_cpu_gdt_table(cpu);
|
|
unsigned int i;
|
|
|
|
+ pax_open_kernel();
|
|
for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
|
|
gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
/* This intentionally ignores lm, since 32-bit apps don't have that field. */
|
|
@@ -318,7 +331,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
|
|
}
|
|
|
|
#ifdef CONFIG_X86_64
|
|
-static inline void set_nmi_gate(int gate, void *addr)
|
|
+static inline void set_nmi_gate(int gate, const void *addr)
|
|
{
|
|
gate_desc s;
|
|
|
|
@@ -327,7 +340,7 @@ static inline void set_nmi_gate(int gate, void *addr)
|
|
}
|
|
#endif
|
|
|
|
-static inline void _set_gate(int gate, unsigned type, void *addr,
|
|
+static inline void _set_gate(int gate, unsigned type, const void *addr,
|
|
unsigned dpl, unsigned ist, unsigned seg)
|
|
{
|
|
gate_desc s;
|
|
@@ -346,7 +359,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
|
|
* Pentium F0 0F bugfix can have resulted in the mapped
|
|
* IDT being write-protected.
|
|
*/
|
|
-static inline void set_intr_gate(unsigned int n, void *addr)
|
|
+static inline void set_intr_gate(unsigned int n, const void *addr)
|
|
{
|
|
BUG_ON((unsigned)n > 0xFF);
|
|
_set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
|
|
@@ -376,19 +389,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
|
|
/*
|
|
* This routine sets up an interrupt gate at directory privilege level 3.
|
|
*/
|
|
-static inline void set_system_intr_gate(unsigned int n, void *addr)
|
|
+static inline void set_system_intr_gate(unsigned int n, const void *addr)
|
|
{
|
|
BUG_ON((unsigned)n > 0xFF);
|
|
_set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
|
|
}
|
|
|
|
-static inline void set_system_trap_gate(unsigned int n, void *addr)
|
|
+static inline void set_system_trap_gate(unsigned int n, const void *addr)
|
|
{
|
|
BUG_ON((unsigned)n > 0xFF);
|
|
_set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
|
|
}
|
|
|
|
-static inline void set_trap_gate(unsigned int n, void *addr)
|
|
+static inline void set_trap_gate(unsigned int n, const void *addr)
|
|
{
|
|
BUG_ON((unsigned)n > 0xFF);
|
|
_set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
|
|
@@ -397,19 +410,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
|
|
static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
|
|
{
|
|
BUG_ON((unsigned)n > 0xFF);
|
|
- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
|
|
+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
|
|
}
|
|
|
|
-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
|
|
+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
|
|
{
|
|
BUG_ON((unsigned)n > 0xFF);
|
|
_set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
|
|
}
|
|
|
|
-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
|
|
+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
|
|
{
|
|
BUG_ON((unsigned)n > 0xFF);
|
|
_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
|
|
}
|
|
|
|
+#ifdef CONFIG_X86_32
|
|
+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
|
|
+{
|
|
+ struct desc_struct d;
|
|
+
|
|
+ if (likely(limit))
|
|
+ limit = (limit - 1UL) >> PAGE_SHIFT;
|
|
+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
|
|
+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
|
|
+}
|
|
+#endif
|
|
+
|
|
#endif /* _ASM_X86_DESC_H */
|
|
diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
|
|
index 278441f..b95a174 100644
|
|
--- a/arch/x86/include/asm/desc_defs.h
|
|
+++ b/arch/x86/include/asm/desc_defs.h
|
|
@@ -31,6 +31,12 @@ struct desc_struct {
|
|
unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
|
|
unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
|
|
};
|
|
+ struct {
|
|
+ u16 offset_low;
|
|
+ u16 seg;
|
|
+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
|
|
+ unsigned offset_high: 16;
|
|
+ } gate;
|
|
};
|
|
} __attribute__((packed));
|
|
|
|
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
|
|
index 3778256..c5d4fce 100644
|
|
--- a/arch/x86/include/asm/e820.h
|
|
+++ b/arch/x86/include/asm/e820.h
|
|
@@ -69,7 +69,7 @@ struct e820map {
|
|
#define ISA_START_ADDRESS 0xa0000
|
|
#define ISA_END_ADDRESS 0x100000
|
|
|
|
-#define BIOS_BEGIN 0x000a0000
|
|
+#define BIOS_BEGIN 0x000c0000
|
|
#define BIOS_END 0x00100000
|
|
|
|
#define BIOS_ROM_BASE 0xffe00000
|
|
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
|
|
index 06ec1fe..991397e 100644
|
|
--- a/arch/x86/include/asm/elf.h
|
|
+++ b/arch/x86/include/asm/elf.h
|
|
@@ -244,7 +244,25 @@ extern int force_personality32;
|
|
the loader. We need to make sure that it is out of the way of the program
|
|
that it will "exec", and that there is sufficient room for the brk. */
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
|
|
+#else
|
|
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_ASLR
|
|
+#ifdef CONFIG_X86_32
|
|
+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
|
|
+
|
|
+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
|
|
+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
|
|
+#else
|
|
+#define PAX_ELF_ET_DYN_BASE 0x400000UL
|
|
+
|
|
+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
|
|
+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
|
|
+#endif
|
|
+#endif
|
|
|
|
/* This yields a mask that user programs can use to figure out what
|
|
instruction set this CPU supports. This could be done in user space,
|
|
@@ -297,16 +315,12 @@ do { \
|
|
|
|
#define ARCH_DLINFO \
|
|
do { \
|
|
- if (vdso_enabled) \
|
|
- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
|
|
- (unsigned long)current->mm->context.vdso); \
|
|
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
|
|
} while (0)
|
|
|
|
#define ARCH_DLINFO_X32 \
|
|
do { \
|
|
- if (vdso_enabled) \
|
|
- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
|
|
- (unsigned long)current->mm->context.vdso); \
|
|
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
|
|
} while (0)
|
|
|
|
#define AT_SYSINFO 32
|
|
@@ -321,7 +335,7 @@ else \
|
|
|
|
#endif /* !CONFIG_X86_32 */
|
|
|
|
-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
|
|
+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
|
|
|
|
#define VDSO_ENTRY \
|
|
((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
|
|
@@ -337,9 +351,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
|
|
extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
|
|
#define compat_arch_setup_additional_pages syscall32_setup_pages
|
|
|
|
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
|
|
-#define arch_randomize_brk arch_randomize_brk
|
|
-
|
|
/*
|
|
* True on X86_32 or when emulating IA32 on X86_64
|
|
*/
|
|
diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
|
|
index cc70c1c..d96d011 100644
|
|
--- a/arch/x86/include/asm/emergency-restart.h
|
|
+++ b/arch/x86/include/asm/emergency-restart.h
|
|
@@ -15,6 +15,6 @@ enum reboot_type {
|
|
|
|
extern enum reboot_type reboot_type;
|
|
|
|
-extern void machine_emergency_restart(void);
|
|
+extern void machine_emergency_restart(void) __noreturn;
|
|
|
|
#endif /* _ASM_X86_EMERGENCY_RESTART_H */
|
|
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
|
|
index a65708f..1da9ad0 100644
|
|
--- a/arch/x86/include/asm/fpu-internal.h
|
|
+++ b/arch/x86/include/asm/fpu-internal.h
|
|
@@ -86,6 +86,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
|
|
{
|
|
int err;
|
|
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
|
|
+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
|
|
+#endif
|
|
+
|
|
/* See comment in fxsave() below. */
|
|
#ifdef CONFIG_AS_FXSAVEQ
|
|
asm volatile("1: fxrstorq %[fx]\n\t"
|
|
@@ -115,6 +120,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
|
|
{
|
|
int err;
|
|
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
|
|
+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
|
|
+#endif
|
|
+
|
|
/*
|
|
* Clear the bytes not touched by the fxsave and reserved
|
|
* for the SW usage.
|
|
diff --git a/arch/x86/include/asm/fpu-internal.h.rej b/arch/x86/include/asm/fpu-internal.h.rej
|
|
new file mode 100644
|
|
index 0000000..625c8ea
|
|
--- /dev/null
|
|
+++ b/arch/x86/include/asm/fpu-internal.h.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- arch/x86/include/asm/fpu-internal.h 2012-05-21 11:32:56.839927625 +0200
|
|
++++ arch/x86/include/asm/fpu-internal.h 2012-05-21 12:10:09.348048879 +0200
|
|
+@@ -281,7 +291,7 @@ static inline int restore_fpu_checking(s
|
|
+ "emms\n\t" /* clear stack tags */
|
|
+ "fildl %P[addr]", /* set F?P to defined value */
|
|
+ X86_FEATURE_FXSAVE_LEAK,
|
|
+- [addr] "m" (tsk->thread.fpu.has_fpu));
|
|
++ [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
|
|
+
|
|
+ return fpu_restore_checking(&tsk->thread.fpu);
|
|
+ }
|
|
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
|
|
index 71ecbcb..bac10b7 100644
|
|
--- a/arch/x86/include/asm/futex.h
|
|
+++ b/arch/x86/include/asm/futex.h
|
|
@@ -11,16 +11,18 @@
|
|
#include <asm/processor.h>
|
|
|
|
#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
|
|
+ typecheck(u32 __user *, uaddr); \
|
|
asm volatile("1:\t" insn "\n" \
|
|
"2:\t.section .fixup,\"ax\"\n" \
|
|
"3:\tmov\t%3, %1\n" \
|
|
"\tjmp\t2b\n" \
|
|
"\t.previous\n" \
|
|
_ASM_EXTABLE(1b, 3b) \
|
|
- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
|
|
+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
|
|
: "i" (-EFAULT), "0" (oparg), "1" (0))
|
|
|
|
#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
|
|
+ typecheck(u32 __user *, uaddr); \
|
|
asm volatile("1:\tmovl %2, %0\n" \
|
|
"\tmovl\t%0, %3\n" \
|
|
"\t" insn "\n" \
|
|
@@ -33,7 +35,7 @@
|
|
_ASM_EXTABLE(1b, 4b) \
|
|
_ASM_EXTABLE(2b, 4b) \
|
|
: "=&a" (oldval), "=&r" (ret), \
|
|
- "+m" (*uaddr), "=&r" (tem) \
|
|
+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
|
|
: "r" (oparg), "i" (-EFAULT), "1" (0))
|
|
|
|
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
|
|
@@ -60,10 +62,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
|
|
|
|
switch (op) {
|
|
case FUTEX_OP_SET:
|
|
- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
|
|
+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
|
|
break;
|
|
case FUTEX_OP_ADD:
|
|
- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
|
|
+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
|
|
uaddr, oparg);
|
|
break;
|
|
case FUTEX_OP_OR:
|
|
@@ -122,13 +124,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
|
|
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
|
|
return -EFAULT;
|
|
|
|
- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
|
|
+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
|
|
"2:\t.section .fixup, \"ax\"\n"
|
|
"3:\tmov %3, %0\n"
|
|
"\tjmp 2b\n"
|
|
"\t.previous\n"
|
|
_ASM_EXTABLE(1b, 3b)
|
|
- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
|
|
+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
|
|
: "i" (-EFAULT), "r" (newval), "1" (oldval)
|
|
: "memory"
|
|
);
|
|
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
|
|
index eb92a6e..b98b2f4 100644
|
|
--- a/arch/x86/include/asm/hw_irq.h
|
|
+++ b/arch/x86/include/asm/hw_irq.h
|
|
@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
|
|
extern void enable_IO_APIC(void);
|
|
|
|
/* Statistics */
|
|
-extern atomic_t irq_err_count;
|
|
-extern atomic_t irq_mis_count;
|
|
+extern atomic_unchecked_t irq_err_count;
|
|
+extern atomic_unchecked_t irq_mis_count;
|
|
|
|
/* EISA */
|
|
extern void eisa_set_level_irq(unsigned int irq);
|
|
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
|
|
index d8e8eef..99f81ae 100644
|
|
--- a/arch/x86/include/asm/io.h
|
|
+++ b/arch/x86/include/asm/io.h
|
|
@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
|
|
|
|
#include <linux/vmalloc.h>
|
|
|
|
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
|
|
+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
|
|
+{
|
|
+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
|
|
+}
|
|
+
|
|
+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
|
|
+{
|
|
+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
|
|
+}
|
|
+
|
|
/*
|
|
* Convert a virtual cached pointer to an uncached pointer
|
|
*/
|
|
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
|
|
index 0a8b519..80e7d5b 100644
|
|
--- a/arch/x86/include/asm/irqflags.h
|
|
+++ b/arch/x86/include/asm/irqflags.h
|
|
@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
|
|
sti; \
|
|
sysexit
|
|
|
|
+#define GET_CR0_INTO_RDI mov %cr0, %rdi
|
|
+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
|
|
+#define GET_CR3_INTO_RDI mov %cr3, %rdi
|
|
+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
|
|
+
|
|
#else
|
|
#define INTERRUPT_RETURN iret
|
|
#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
|
|
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
|
|
index 5478825..839e88c 100644
|
|
--- a/arch/x86/include/asm/kprobes.h
|
|
+++ b/arch/x86/include/asm/kprobes.h
|
|
@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
|
|
#define RELATIVEJUMP_SIZE 5
|
|
#define RELATIVECALL_OPCODE 0xe8
|
|
#define RELATIVE_ADDR_SIZE 4
|
|
-#define MAX_STACK_SIZE 64
|
|
-#define MIN_STACK_SIZE(ADDR) \
|
|
- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
|
|
- THREAD_SIZE - (unsigned long)(ADDR))) \
|
|
- ? (MAX_STACK_SIZE) \
|
|
- : (((unsigned long)current_thread_info()) + \
|
|
- THREAD_SIZE - (unsigned long)(ADDR)))
|
|
+#define MAX_STACK_SIZE 64UL
|
|
+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
|
|
|
|
#define flush_insn_slot(p) do { } while (0)
|
|
|
|
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
|
|
index d60facb..a651caa 100644
|
|
--- a/arch/x86/include/asm/kvm_host.h
|
|
+++ b/arch/x86/include/asm/kvm_host.h
|
|
@@ -680,7 +680,7 @@ struct kvm_x86_ops {
|
|
int (*check_intercept)(struct kvm_vcpu *vcpu,
|
|
struct x86_instruction_info *info,
|
|
enum x86_intercept_stage stage);
|
|
-};
|
|
+} __do_const;
|
|
|
|
struct kvm_arch_async_pf {
|
|
u32 token;
|
|
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
|
|
index c8bed0d..e5721fa 100644
|
|
--- a/arch/x86/include/asm/local.h
|
|
+++ b/arch/x86/include/asm/local.h
|
|
@@ -17,26 +17,58 @@ typedef struct {
|
|
|
|
static inline void local_inc(local_t *l)
|
|
{
|
|
- asm volatile(_ASM_INC "%0"
|
|
+ asm volatile(_ASM_INC "%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ _ASM_DEC "%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
: "+m" (l->a.counter));
|
|
}
|
|
|
|
static inline void local_dec(local_t *l)
|
|
{
|
|
- asm volatile(_ASM_DEC "%0"
|
|
+ asm volatile(_ASM_DEC "%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ _ASM_INC "%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
: "+m" (l->a.counter));
|
|
}
|
|
|
|
static inline void local_add(long i, local_t *l)
|
|
{
|
|
- asm volatile(_ASM_ADD "%1,%0"
|
|
+ asm volatile(_ASM_ADD "%1,%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ _ASM_SUB "%1,%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
: "+m" (l->a.counter)
|
|
: "ir" (i));
|
|
}
|
|
|
|
static inline void local_sub(long i, local_t *l)
|
|
{
|
|
- asm volatile(_ASM_SUB "%1,%0"
|
|
+ asm volatile(_ASM_SUB "%1,%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ _ASM_ADD "%1,%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
: "+m" (l->a.counter)
|
|
: "ir" (i));
|
|
}
|
|
@@ -54,7 +86,16 @@ static inline int local_sub_and_test(long i, local_t *l)
|
|
{
|
|
unsigned char c;
|
|
|
|
- asm volatile(_ASM_SUB "%2,%0; sete %1"
|
|
+ asm volatile(_ASM_SUB "%2,%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ _ASM_ADD "%2,%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ "sete %1\n"
|
|
: "+m" (l->a.counter), "=qm" (c)
|
|
: "ir" (i) : "memory");
|
|
return c;
|
|
@@ -72,7 +113,16 @@ static inline int local_dec_and_test(local_t *l)
|
|
{
|
|
unsigned char c;
|
|
|
|
- asm volatile(_ASM_DEC "%0; sete %1"
|
|
+ asm volatile(_ASM_DEC "%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ _ASM_INC "%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ "sete %1\n"
|
|
: "+m" (l->a.counter), "=qm" (c)
|
|
: : "memory");
|
|
return c != 0;
|
|
@@ -90,7 +140,16 @@ static inline int local_inc_and_test(local_t *l)
|
|
{
|
|
unsigned char c;
|
|
|
|
- asm volatile(_ASM_INC "%0; sete %1"
|
|
+ asm volatile(_ASM_INC "%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ _ASM_DEC "%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ "sete %1\n"
|
|
: "+m" (l->a.counter), "=qm" (c)
|
|
: : "memory");
|
|
return c != 0;
|
|
@@ -109,7 +168,16 @@ static inline int local_add_negative(long i, local_t *l)
|
|
{
|
|
unsigned char c;
|
|
|
|
- asm volatile(_ASM_ADD "%2,%0; sets %1"
|
|
+ asm volatile(_ASM_ADD "%2,%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ _ASM_SUB "%2,%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+ "sets %1\n"
|
|
: "+m" (l->a.counter), "=qm" (c)
|
|
: "ir" (i) : "memory");
|
|
return c;
|
|
@@ -132,7 +200,15 @@ static inline long local_add_return(long i, local_t *l)
|
|
#endif
|
|
/* Modern 486+ processor */
|
|
__i = i;
|
|
- asm volatile(_ASM_XADD "%0, %1;"
|
|
+ asm volatile(_ASM_XADD "%0, %1\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ _ASM_MOV "%0,%1\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
: "+r" (i), "+m" (l->a.counter)
|
|
: : "memory");
|
|
return i + __i;
|
|
diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
|
|
index 593e51d..fa69c9a 100644
|
|
--- a/arch/x86/include/asm/mman.h
|
|
+++ b/arch/x86/include/asm/mman.h
|
|
@@ -5,4 +5,14 @@
|
|
|
|
#include <asm-generic/mman.h>
|
|
|
|
+#ifdef __KERNEL__
|
|
+#ifndef __ASSEMBLY__
|
|
+#ifdef CONFIG_X86_32
|
|
+#define arch_mmap_check i386_mmap_check
|
|
+int i386_mmap_check(unsigned long addr, unsigned long len,
|
|
+ unsigned long flags);
|
|
+#endif
|
|
+#endif
|
|
+#endif
|
|
+
|
|
#endif /* _ASM_X86_MMAN_H */
|
|
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
|
|
index 5f55e69..e20bfb1 100644
|
|
--- a/arch/x86/include/asm/mmu.h
|
|
+++ b/arch/x86/include/asm/mmu.h
|
|
@@ -9,7 +9,7 @@
|
|
* we put the segment information here.
|
|
*/
|
|
typedef struct {
|
|
- void *ldt;
|
|
+ struct desc_struct *ldt;
|
|
int size;
|
|
|
|
#ifdef CONFIG_X86_64
|
|
@@ -18,7 +18,19 @@ typedef struct {
|
|
#endif
|
|
|
|
struct mutex lock;
|
|
- void *vdso;
|
|
+ unsigned long vdso;
|
|
+
|
|
+#ifdef CONFIG_X86_32
|
|
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
|
|
+ unsigned long user_cs_base;
|
|
+ unsigned long user_cs_limit;
|
|
+
|
|
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
|
|
+ cpumask_t cpu_user_cs_mask;
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+#endif
|
|
} mm_context_t;
|
|
|
|
#ifdef CONFIG_SMP
|
|
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
|
|
index 6902152..da4283a 100644
|
|
--- a/arch/x86/include/asm/mmu_context.h
|
|
+++ b/arch/x86/include/asm/mmu_context.h
|
|
@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
|
|
|
|
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
|
|
{
|
|
+
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ unsigned int i;
|
|
+ pgd_t *pgd;
|
|
+
|
|
+ pax_open_kernel();
|
|
+ pgd = get_cpu_pgd(smp_processor_id());
|
|
+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
|
|
+ set_pgd_batched(pgd+i, native_make_pgd(0));
|
|
+ pax_close_kernel();
|
|
+#endif
|
|
+
|
|
#ifdef CONFIG_SMP
|
|
if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
|
|
percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
|
|
@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
|
|
struct task_struct *tsk)
|
|
{
|
|
unsigned cpu = smp_processor_id();
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
|
|
+ int tlbstate = TLBSTATE_OK;
|
|
+#endif
|
|
|
|
if (likely(prev != next)) {
|
|
#ifdef CONFIG_SMP
|
|
+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
|
|
+ tlbstate = percpu_read(cpu_tlbstate.state);
|
|
+#endif
|
|
percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
|
|
percpu_write(cpu_tlbstate.active_mm, next);
|
|
#endif
|
|
cpumask_set_cpu(cpu, mm_cpumask(next));
|
|
|
|
/* Re-load page tables */
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+ pax_open_kernel();
|
|
+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
|
|
+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
|
|
+ pax_close_kernel();
|
|
+ load_cr3(get_cpu_pgd(cpu));
|
|
+#else
|
|
load_cr3(next->pgd);
|
|
+#endif
|
|
|
|
/* stop flush ipis for the previous mm */
|
|
cpumask_clear_cpu(cpu, mm_cpumask(prev));
|
|
@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
|
|
*/
|
|
if (unlikely(prev->context.ldt != next->context.ldt))
|
|
load_LDT_nolock(&next->context);
|
|
- }
|
|
+
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
|
|
+ if (!(__supported_pte_mask & _PAGE_NX)) {
|
|
+ smp_mb__before_clear_bit();
|
|
+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
|
|
+ smp_mb__after_clear_bit();
|
|
+ cpu_set(cpu, next->context.cpu_user_cs_mask);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
|
|
+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
|
|
+ prev->context.user_cs_limit != next->context.user_cs_limit))
|
|
+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
|
|
#ifdef CONFIG_SMP
|
|
+ else if (unlikely(tlbstate != TLBSTATE_OK))
|
|
+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+ }
|
|
else {
|
|
+
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+ pax_open_kernel();
|
|
+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
|
|
+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
|
|
+ pax_close_kernel();
|
|
+ load_cr3(get_cpu_pgd(cpu));
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_SMP
|
|
percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
|
|
BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
|
|
|
|
@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
|
|
* tlb flush IPI delivery. We must reload CR3
|
|
* to make sure to use no freed page tables.
|
|
*/
|
|
+
|
|
+#ifndef CONFIG_PAX_PER_CPU_PGD
|
|
load_cr3(next->pgd);
|
|
+#endif
|
|
+
|
|
load_LDT_nolock(&next->context);
|
|
+
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
|
|
+ if (!(__supported_pte_mask & _PAGE_NX))
|
|
+ cpu_set(cpu, next->context.cpu_user_cs_mask);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
|
|
+#endif
|
|
+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
|
|
+#endif
|
|
+
|
|
}
|
|
- }
|
|
#endif
|
|
+ }
|
|
}
|
|
|
|
#define activate_mm(prev, next) \
|
|
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
|
|
index 9eae775..c914fea 100644
|
|
--- a/arch/x86/include/asm/module.h
|
|
+++ b/arch/x86/include/asm/module.h
|
|
@@ -5,6 +5,7 @@
|
|
|
|
#ifdef CONFIG_X86_64
|
|
/* X86_64 does not define MODULE_PROC_FAMILY */
|
|
+#define MODULE_PROC_FAMILY ""
|
|
#elif defined CONFIG_M386
|
|
#define MODULE_PROC_FAMILY "386 "
|
|
#elif defined CONFIG_M486
|
|
@@ -59,8 +60,20 @@
|
|
#error unknown processor family
|
|
#endif
|
|
|
|
-#ifdef CONFIG_X86_32
|
|
-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
|
|
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
|
|
+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
|
|
+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
|
|
+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
|
|
+#else
|
|
+#define MODULE_PAX_KERNEXEC ""
|
|
#endif
|
|
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+#define MODULE_PAX_UDEREF "UDEREF "
|
|
+#else
|
|
+#define MODULE_PAX_UDEREF ""
|
|
+#endif
|
|
+
|
|
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
|
|
+
|
|
#endif /* _ASM_X86_MODULE_H */
|
|
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
|
|
index a9e9937..0caba23 100644
|
|
--- a/arch/x86/include/asm/page_64_types.h
|
|
+++ b/arch/x86/include/asm/page_64_types.h
|
|
@@ -55,7 +55,7 @@ void copy_page(void *to, void *from);
|
|
|
|
/* duplicated to the one in bootmem.h */
|
|
extern unsigned long max_pfn;
|
|
-extern unsigned long phys_base;
|
|
+extern const unsigned long phys_base;
|
|
|
|
extern unsigned long __phys_addr(unsigned long);
|
|
#define __phys_reloc_hide(x) (x)
|
|
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
|
|
index 25e9734..85a5bf8 100644
|
|
--- a/arch/x86/include/asm/paravirt.h
|
|
+++ b/arch/x86/include/asm/paravirt.h
|
|
@@ -668,6 +668,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
|
|
val);
|
|
}
|
|
|
|
+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
|
|
+{
|
|
+ pgdval_t val = native_pgd_val(pgd);
|
|
+
|
|
+ if (sizeof(pgdval_t) > sizeof(long))
|
|
+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
|
|
+ val, (u64)val >> 32);
|
|
+ else
|
|
+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
|
|
+ val);
|
|
+}
|
|
+
|
|
static inline void pgd_clear(pgd_t *pgdp)
|
|
{
|
|
set_pgd(pgdp, __pgd(0));
|
|
@@ -752,6 +764,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
|
|
pv_mmu_ops.set_fixmap(idx, phys, flags);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+static inline unsigned long pax_open_kernel(void)
|
|
+{
|
|
+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
|
|
+}
|
|
+
|
|
+static inline unsigned long pax_close_kernel(void)
|
|
+{
|
|
+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
|
|
+}
|
|
+#else
|
|
+static inline unsigned long pax_open_kernel(void) { return 0; }
|
|
+static inline unsigned long pax_close_kernel(void) { return 0; }
|
|
+#endif
|
|
+
|
|
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
|
|
|
|
static inline int arch_spin_is_locked(struct arch_spinlock *lock)
|
|
@@ -968,7 +995,7 @@ extern void default_banner(void);
|
|
|
|
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
|
|
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
|
|
-#define PARA_INDIRECT(addr) *%cs:addr
|
|
+#define PARA_INDIRECT(addr) *%ss:addr
|
|
#endif
|
|
|
|
#define INTERRUPT_RETURN \
|
|
@@ -1045,6 +1072,21 @@ extern void default_banner(void);
|
|
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
|
|
CLBR_NONE, \
|
|
jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
|
|
+
|
|
+#define GET_CR0_INTO_RDI \
|
|
+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
|
|
+ mov %rax,%rdi
|
|
+
|
|
+#define SET_RDI_INTO_CR0 \
|
|
+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
|
|
+
|
|
+#define GET_CR3_INTO_RDI \
|
|
+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
|
|
+ mov %rax,%rdi
|
|
+
|
|
+#define SET_RDI_INTO_CR3 \
|
|
+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
|
|
+
|
|
#endif /* CONFIG_X86_32 */
|
|
|
|
#endif /* __ASSEMBLY__ */
|
|
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
|
|
index faf2c04..8428be1 100644
|
|
--- a/arch/x86/include/asm/paravirt_types.h
|
|
+++ b/arch/x86/include/asm/paravirt_types.h
|
|
@@ -194,7 +194,7 @@ struct pv_cpu_ops {
|
|
|
|
void (*start_context_switch)(struct task_struct *prev);
|
|
void (*end_context_switch)(struct task_struct *next);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct pv_irq_ops {
|
|
/*
|
|
@@ -225,7 +225,7 @@ struct pv_apic_ops {
|
|
unsigned long start_eip,
|
|
unsigned long start_esp);
|
|
#endif
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct pv_mmu_ops {
|
|
unsigned long (*read_cr2)(void);
|
|
@@ -314,6 +314,7 @@ struct pv_mmu_ops {
|
|
struct paravirt_callee_save make_pud;
|
|
|
|
void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
|
|
+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
|
|
#endif /* PAGETABLE_LEVELS == 4 */
|
|
#endif /* PAGETABLE_LEVELS >= 3 */
|
|
|
|
@@ -325,6 +326,12 @@ struct pv_mmu_ops {
|
|
an mfn. We can tell which is which from the index. */
|
|
void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
|
|
phys_addr_t phys, pgprot_t flags);
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ unsigned long (*pax_open_kernel)(void);
|
|
+ unsigned long (*pax_close_kernel)(void);
|
|
+#endif
|
|
+
|
|
};
|
|
|
|
struct arch_spinlock;
|
|
@@ -335,7 +342,7 @@ struct pv_lock_ops {
|
|
void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
|
|
int (*spin_trylock)(struct arch_spinlock *lock);
|
|
void (*spin_unlock)(struct arch_spinlock *lock);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/* This contains all the paravirt structures: we get a convenient
|
|
* number for each function using the offset which we use to indicate
|
|
diff --git a/arch/x86/include/asm/paravirt_types.h.rej b/arch/x86/include/asm/paravirt_types.h.rej
|
|
new file mode 100644
|
|
index 0000000..72d0834
|
|
--- /dev/null
|
|
+++ b/arch/x86/include/asm/paravirt_types.h.rej
|
|
@@ -0,0 +1,26 @@
|
|
+--- arch/x86/include/asm/paravirt_types.h 2011-10-24 12:48:26.123091780 +0200
|
|
++++ arch/x86/include/asm/paravirt_types.h 2012-05-21 12:10:09.376048880 +0200
|
|
+@@ -84,20 +84,20 @@ struct pv_init_ops {
|
|
+ */
|
|
+ unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
|
|
+ unsigned long addr, unsigned len);
|
|
+-};
|
|
++} __no_const;
|
|
+
|
|
+
|
|
+ struct pv_lazy_ops {
|
|
+ /* Set deferred update mode, used for batching operations. */
|
|
+ void (*enter)(void);
|
|
+ void (*leave)(void);
|
|
+-};
|
|
++} __no_const;
|
|
+
|
|
+ struct pv_time_ops {
|
|
+ unsigned long long (*sched_clock)(void);
|
|
+ unsigned long long (*steal_clock)(int cpu);
|
|
+ unsigned long (*get_tsc_khz)(void);
|
|
+-};
|
|
++} __no_const;
|
|
+
|
|
+ struct pv_cpu_ops {
|
|
+ /* hooks for various privileged instructions */
|
|
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
|
|
index b4389a4..7024269 100644
|
|
--- a/arch/x86/include/asm/pgalloc.h
|
|
+++ b/arch/x86/include/asm/pgalloc.h
|
|
@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
|
|
pmd_t *pmd, pte_t *pte)
|
|
{
|
|
paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
|
|
+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
|
|
+}
|
|
+
|
|
+static inline void pmd_populate_user(struct mm_struct *mm,
|
|
+ pmd_t *pmd, pte_t *pte)
|
|
+{
|
|
+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
|
|
set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
|
|
}
|
|
|
|
@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
|
|
|
|
#ifdef CONFIG_X86_PAE
|
|
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
|
|
+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
|
|
+{
|
|
+ pud_populate(mm, pudp, pmd);
|
|
+}
|
|
#else /* !CONFIG_X86_PAE */
|
|
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
|
|
{
|
|
paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
|
|
set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
|
|
}
|
|
+
|
|
+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
|
|
+{
|
|
+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
|
|
+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
|
|
+}
|
|
#endif /* CONFIG_X86_PAE */
|
|
|
|
#if PAGETABLE_LEVELS > 3
|
|
@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
|
|
set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
|
|
}
|
|
|
|
+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
|
|
+{
|
|
+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
|
|
+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
|
|
+}
|
|
+
|
|
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
|
|
{
|
|
return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
|
|
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
|
|
index 98391db8..8f6984e 100644
|
|
--- a/arch/x86/include/asm/pgtable-2level.h
|
|
+++ b/arch/x86/include/asm/pgtable-2level.h
|
|
@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
|
|
|
|
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
|
|
{
|
|
+ pax_open_kernel();
|
|
*pmdp = pmd;
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
|
|
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
|
|
index cb00ccc..17e9054 100644
|
|
--- a/arch/x86/include/asm/pgtable-3level.h
|
|
+++ b/arch/x86/include/asm/pgtable-3level.h
|
|
@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
|
|
|
|
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
|
|
{
|
|
+ pax_open_kernel();
|
|
set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
static inline void native_set_pud(pud_t *pudp, pud_t pud)
|
|
{
|
|
+ pax_open_kernel();
|
|
set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
/*
|
|
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
|
|
index 3f3dd52..ef2b389 100644
|
|
--- a/arch/x86/include/asm/pgtable.h
|
|
+++ b/arch/x86/include/asm/pgtable.h
|
|
@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
|
|
|
|
#ifndef __PAGETABLE_PUD_FOLDED
|
|
#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
|
|
+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
|
|
#define pgd_clear(pgd) native_pgd_clear(pgd)
|
|
#endif
|
|
|
|
@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
|
|
|
|
#define arch_end_context_switch(prev) do {} while(0)
|
|
|
|
+#define pax_open_kernel() native_pax_open_kernel()
|
|
+#define pax_close_kernel() native_pax_close_kernel()
|
|
#endif /* CONFIG_PARAVIRT */
|
|
|
|
+#define __HAVE_ARCH_PAX_OPEN_KERNEL
|
|
+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+static inline unsigned long native_pax_open_kernel(void)
|
|
+{
|
|
+ unsigned long cr0;
|
|
+
|
|
+ preempt_disable();
|
|
+ barrier();
|
|
+ cr0 = read_cr0() ^ X86_CR0_WP;
|
|
+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
|
|
+ write_cr0(cr0);
|
|
+ return cr0 ^ X86_CR0_WP;
|
|
+}
|
|
+
|
|
+static inline unsigned long native_pax_close_kernel(void)
|
|
+{
|
|
+ unsigned long cr0;
|
|
+
|
|
+ cr0 = read_cr0() ^ X86_CR0_WP;
|
|
+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
|
|
+ write_cr0(cr0);
|
|
+ barrier();
|
|
+ preempt_enable_no_resched();
|
|
+ return cr0 ^ X86_CR0_WP;
|
|
+}
|
|
+#else
|
|
+static inline unsigned long native_pax_open_kernel(void) { return 0; }
|
|
+static inline unsigned long native_pax_close_kernel(void) { return 0; }
|
|
+#endif
|
|
+
|
|
/*
|
|
* The following only work if pte_present() is true.
|
|
* Undefined behaviour if not..
|
|
*/
|
|
+static inline int pte_user(pte_t pte)
|
|
+{
|
|
+ return pte_val(pte) & _PAGE_USER;
|
|
+}
|
|
+
|
|
static inline int pte_dirty(pte_t pte)
|
|
{
|
|
return pte_flags(pte) & _PAGE_DIRTY;
|
|
@@ -200,9 +240,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
|
|
return pte_clear_flags(pte, _PAGE_RW);
|
|
}
|
|
|
|
+static inline pte_t pte_mkread(pte_t pte)
|
|
+{
|
|
+ return __pte(pte_val(pte) | _PAGE_USER);
|
|
+}
|
|
+
|
|
static inline pte_t pte_mkexec(pte_t pte)
|
|
{
|
|
- return pte_clear_flags(pte, _PAGE_NX);
|
|
+#ifdef CONFIG_X86_PAE
|
|
+ if (__supported_pte_mask & _PAGE_NX)
|
|
+ return pte_clear_flags(pte, _PAGE_NX);
|
|
+ else
|
|
+#endif
|
|
+ return pte_set_flags(pte, _PAGE_USER);
|
|
+}
|
|
+
|
|
+static inline pte_t pte_exprotect(pte_t pte)
|
|
+{
|
|
+#ifdef CONFIG_X86_PAE
|
|
+ if (__supported_pte_mask & _PAGE_NX)
|
|
+ return pte_set_flags(pte, _PAGE_NX);
|
|
+ else
|
|
+#endif
|
|
+ return pte_clear_flags(pte, _PAGE_USER);
|
|
}
|
|
|
|
static inline pte_t pte_mkdirty(pte_t pte)
|
|
@@ -394,6 +454,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
|
|
#endif
|
|
|
|
#ifndef __ASSEMBLY__
|
|
+
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
|
|
+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
|
|
+{
|
|
+ return cpu_pgd[cpu];
|
|
+}
|
|
+#endif
|
|
+
|
|
#include <linux/mm_types.h>
|
|
|
|
static inline int pte_none(pte_t pte)
|
|
@@ -570,7 +639,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
|
|
|
|
static inline int pgd_bad(pgd_t pgd)
|
|
{
|
|
- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
|
|
+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
|
|
}
|
|
|
|
static inline int pgd_none(pgd_t pgd)
|
|
@@ -593,7 +662,12 @@ static inline int pgd_none(pgd_t pgd)
|
|
* pgd_offset() returns a (pgd_t *)
|
|
* pgd_index() is used get the offset into the pgd page's array of pgd_t's;
|
|
*/
|
|
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
|
|
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
|
|
+
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
|
|
+#endif
|
|
+
|
|
/*
|
|
* a shortcut which implies the use of the kernel's pgd, instead
|
|
* of a process's
|
|
@@ -604,6 +678,20 @@ static inline int pgd_none(pgd_t pgd)
|
|
#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
|
|
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
|
|
|
|
+#ifdef CONFIG_X86_32
|
|
+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
|
|
+#else
|
|
+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
|
|
+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
|
|
+#else
|
|
+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+
|
|
#ifndef __ASSEMBLY__
|
|
|
|
extern int direct_gbpages;
|
|
@@ -768,11 +856,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
|
|
* dst and src can be on the same page, but the range must not overlap,
|
|
* and must not cross a page boundary.
|
|
*/
|
|
-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
|
|
+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
|
|
{
|
|
- memcpy(dst, src, count * sizeof(pgd_t));
|
|
+ pax_open_kernel();
|
|
+ while (count--)
|
|
+ *dst++ = *src++;
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
|
|
+#else
|
|
+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
|
|
+#endif
|
|
|
|
#include <asm-generic/pgtable.h>
|
|
#endif /* __ASSEMBLY__ */
|
|
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
|
|
index 0c92113..34a77c6 100644
|
|
--- a/arch/x86/include/asm/pgtable_32.h
|
|
+++ b/arch/x86/include/asm/pgtable_32.h
|
|
@@ -25,9 +25,6 @@
|
|
struct mm_struct;
|
|
struct vm_area_struct;
|
|
|
|
-extern pgd_t swapper_pg_dir[1024];
|
|
-extern pgd_t initial_page_table[1024];
|
|
-
|
|
static inline void pgtable_cache_init(void) { }
|
|
static inline void check_pgt_cache(void) { }
|
|
void paging_init(void);
|
|
@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
|
|
# include <asm/pgtable-2level.h>
|
|
#endif
|
|
|
|
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
|
|
+extern pgd_t initial_page_table[PTRS_PER_PGD];
|
|
+#ifdef CONFIG_X86_PAE
|
|
+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
|
|
+#endif
|
|
+
|
|
#if defined(CONFIG_HIGHPTE)
|
|
#define pte_offset_map(dir, address) \
|
|
((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
|
|
@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
|
|
/* Clear a kernel PTE and flush it from the TLB */
|
|
#define kpte_clear_flush(ptep, vaddr) \
|
|
do { \
|
|
+ pax_open_kernel(); \
|
|
pte_clear(&init_mm, (vaddr), (ptep)); \
|
|
+ pax_close_kernel(); \
|
|
__flush_tlb_one((vaddr)); \
|
|
} while (0)
|
|
|
|
@@ -74,6 +79,9 @@ do { \
|
|
|
|
#endif /* !__ASSEMBLY__ */
|
|
|
|
+#define HAVE_ARCH_UNMAPPED_AREA
|
|
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
|
|
+
|
|
/*
|
|
* kern_addr_valid() is (1) for FLATMEM and (0) for
|
|
* SPARSEMEM and DISCONTIGMEM
|
|
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
|
|
index ed5903be..c7fe163 100644
|
|
--- a/arch/x86/include/asm/pgtable_32_types.h
|
|
+++ b/arch/x86/include/asm/pgtable_32_types.h
|
|
@@ -8,7 +8,7 @@
|
|
*/
|
|
#ifdef CONFIG_X86_PAE
|
|
# include <asm/pgtable-3level_types.h>
|
|
-# define PMD_SIZE (1UL << PMD_SHIFT)
|
|
+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
|
|
# define PMD_MASK (~(PMD_SIZE - 1))
|
|
#else
|
|
# include <asm/pgtable-2level_types.h>
|
|
@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
|
|
# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
|
|
#endif
|
|
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+#ifndef __ASSEMBLY__
|
|
+extern unsigned char MODULES_EXEC_VADDR[];
|
|
+extern unsigned char MODULES_EXEC_END[];
|
|
+#endif
|
|
+#include <asm/boot.h>
|
|
+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
|
|
+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
|
|
+#else
|
|
+#define ktla_ktva(addr) (addr)
|
|
+#define ktva_ktla(addr) (addr)
|
|
+#endif
|
|
+
|
|
#define MODULES_VADDR VMALLOC_START
|
|
#define MODULES_END VMALLOC_END
|
|
#define MODULES_LEN (MODULES_VADDR - MODULES_END)
|
|
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
|
|
index 975f709..9f779c9 100644
|
|
--- a/arch/x86/include/asm/pgtable_64.h
|
|
+++ b/arch/x86/include/asm/pgtable_64.h
|
|
@@ -16,10 +16,14 @@
|
|
|
|
extern pud_t level3_kernel_pgt[512];
|
|
extern pud_t level3_ident_pgt[512];
|
|
+extern pud_t level3_vmalloc_start_pgt[512];
|
|
+extern pud_t level3_vmalloc_end_pgt[512];
|
|
+extern pud_t level3_vmemmap_pgt[512];
|
|
+extern pud_t level2_vmemmap_pgt[512];
|
|
extern pmd_t level2_kernel_pgt[512];
|
|
extern pmd_t level2_fixmap_pgt[512];
|
|
-extern pmd_t level2_ident_pgt[512];
|
|
-extern pgd_t init_level4_pgt[];
|
|
+extern pmd_t level2_ident_pgt[512*2];
|
|
+extern pgd_t init_level4_pgt[512];
|
|
|
|
#define swapper_pg_dir init_level4_pgt
|
|
|
|
@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
|
|
|
|
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
|
|
{
|
|
+ pax_open_kernel();
|
|
*pmdp = pmd;
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
static inline void native_pmd_clear(pmd_t *pmd)
|
|
@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
|
|
|
|
static inline void native_set_pud(pud_t *pudp, pud_t pud)
|
|
{
|
|
+ pax_open_kernel();
|
|
*pudp = pud;
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
static inline void native_pud_clear(pud_t *pud)
|
|
@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
|
|
|
|
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
|
|
{
|
|
+ pax_open_kernel();
|
|
+ *pgdp = pgd;
|
|
+ pax_close_kernel();
|
|
+}
|
|
+
|
|
+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
|
|
+{
|
|
*pgdp = pgd;
|
|
}
|
|
|
|
diff --git a/arch/x86/include/asm/pgtable_64_types.h.rej b/arch/x86/include/asm/pgtable_64_types.h.rej
|
|
new file mode 100644
|
|
index 0000000..3bb8f29
|
|
--- /dev/null
|
|
+++ b/arch/x86/include/asm/pgtable_64_types.h.rej
|
|
@@ -0,0 +1,13 @@
|
|
+--- arch/x86/include/asm/pgtable_64_types.h 2011-07-22 04:17:23.000000000 +0200
|
|
++++ arch/x86/include/asm/pgtable_64_types.h 2012-05-21 12:10:09.388048880 +0200
|
|
+@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
|
|
+ #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
|
|
+ #define MODULES_END _AC(0xffffffffff000000, UL)
|
|
+ #define MODULES_LEN (MODULES_END - MODULES_VADDR)
|
|
++#define MODULES_EXEC_VADDR MODULES_VADDR
|
|
++#define MODULES_EXEC_END MODULES_END
|
|
++
|
|
++#define ktla_ktva(addr) (addr)
|
|
++#define ktva_ktla(addr) (addr)
|
|
+
|
|
+ #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
|
|
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
|
|
index 013286a..8b42f4f 100644
|
|
--- a/arch/x86/include/asm/pgtable_types.h
|
|
+++ b/arch/x86/include/asm/pgtable_types.h
|
|
@@ -16,13 +16,12 @@
|
|
#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
|
|
#define _PAGE_BIT_PAT 7 /* on 4KB pages */
|
|
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
|
|
-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
|
|
+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
|
|
#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
|
|
#define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
|
|
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
|
|
-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
|
|
-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
|
|
-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
|
|
+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
|
|
+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
|
|
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
|
|
|
|
/* If _PAGE_BIT_PRESENT is clear, we use these: */
|
|
@@ -40,7 +39,6 @@
|
|
#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
|
|
#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
|
|
#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
|
|
-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
|
|
#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
|
|
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
|
|
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
|
|
@@ -57,8 +55,10 @@
|
|
|
|
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
|
|
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
|
|
-#else
|
|
+#elif defined(CONFIG_KMEMCHECK)
|
|
#define _PAGE_NX (_AT(pteval_t, 0))
|
|
+#else
|
|
+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
|
|
#endif
|
|
|
|
#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
|
|
@@ -96,6 +96,9 @@
|
|
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
|
|
_PAGE_ACCESSED)
|
|
|
|
+#define PAGE_READONLY_NOEXEC PAGE_READONLY
|
|
+#define PAGE_SHARED_NOEXEC PAGE_SHARED
|
|
+
|
|
#define __PAGE_KERNEL_EXEC \
|
|
(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
|
|
#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
|
|
@@ -106,7 +109,7 @@
|
|
#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
|
|
#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
|
|
#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
|
|
-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
|
|
+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
|
|
#define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
|
|
#define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
|
|
#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
|
|
@@ -168,8 +171,8 @@
|
|
* bits are combined, this will alow user to access the high address mapped
|
|
* VDSO in the presence of CONFIG_COMPAT_VDSO
|
|
*/
|
|
-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
|
|
-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
|
|
+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
|
|
+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
|
|
#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
|
|
#endif
|
|
|
|
@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
|
|
{
|
|
return native_pgd_val(pgd) & PTE_FLAGS_MASK;
|
|
}
|
|
+#endif
|
|
|
|
+#if PAGETABLE_LEVELS == 3
|
|
+#include <asm-generic/pgtable-nopud.h>
|
|
+#endif
|
|
+
|
|
+#if PAGETABLE_LEVELS == 2
|
|
+#include <asm-generic/pgtable-nopmd.h>
|
|
+#endif
|
|
+
|
|
+#ifndef __ASSEMBLY__
|
|
#if PAGETABLE_LEVELS > 3
|
|
typedef struct { pudval_t pud; } pud_t;
|
|
|
|
@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
|
|
return pud.pud;
|
|
}
|
|
#else
|
|
-#include <asm-generic/pgtable-nopud.h>
|
|
-
|
|
static inline pudval_t native_pud_val(pud_t pud)
|
|
{
|
|
return native_pgd_val(pud.pgd);
|
|
@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
|
|
return pmd.pmd;
|
|
}
|
|
#else
|
|
-#include <asm-generic/pgtable-nopmd.h>
|
|
-
|
|
static inline pmdval_t native_pmd_val(pmd_t pmd)
|
|
{
|
|
return native_pgd_val(pmd.pud.pgd);
|
|
@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
|
|
|
|
extern pteval_t __supported_pte_mask;
|
|
extern void set_nx(void);
|
|
-extern int nx_enabled;
|
|
|
|
#define pgprot_writecombine pgprot_writecombine
|
|
extern pgprot_t pgprot_writecombine(pgprot_t prot);
|
|
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
|
|
index 4fa7dcc..764e33a 100644
|
|
--- a/arch/x86/include/asm/processor.h
|
|
+++ b/arch/x86/include/asm/processor.h
|
|
@@ -276,7 +276,7 @@ struct tss_struct {
|
|
|
|
} ____cacheline_aligned;
|
|
|
|
-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
|
|
+extern struct tss_struct init_tss[NR_CPUS];
|
|
|
|
/*
|
|
* Save the original ist values for checking stack pointers during debugging
|
|
@@ -807,11 +807,18 @@ static inline void spin_lock_prefetch(const void *x)
|
|
*/
|
|
#define TASK_SIZE PAGE_OFFSET
|
|
#define TASK_SIZE_MAX TASK_SIZE
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
|
|
+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
|
|
+#else
|
|
#define STACK_TOP TASK_SIZE
|
|
-#define STACK_TOP_MAX STACK_TOP
|
|
+#endif
|
|
+
|
|
+#define STACK_TOP_MAX TASK_SIZE
|
|
|
|
#define INIT_THREAD { \
|
|
- .sp0 = sizeof(init_stack) + (long)&init_stack, \
|
|
+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
|
|
.vm86_info = NULL, \
|
|
.sysenter_cs = __KERNEL_CS, \
|
|
.io_bitmap_ptr = NULL, \
|
|
@@ -825,7 +832,7 @@ static inline void spin_lock_prefetch(const void *x)
|
|
*/
|
|
#define INIT_TSS { \
|
|
.x86_tss = { \
|
|
- .sp0 = sizeof(init_stack) + (long)&init_stack, \
|
|
+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
|
|
.ss0 = __KERNEL_DS, \
|
|
.ss1 = __KERNEL_CS, \
|
|
.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
|
|
@@ -836,11 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
|
|
extern unsigned long thread_saved_pc(struct task_struct *tsk);
|
|
|
|
#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
|
|
-#define KSTK_TOP(info) \
|
|
-({ \
|
|
- unsigned long *__ptr = (unsigned long *)(info); \
|
|
- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
|
|
-})
|
|
+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
|
|
|
|
/*
|
|
* The below -8 is to reserve 8 bytes on top of the ring0 stack.
|
|
@@ -855,7 +858,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
|
|
#define task_pt_regs(task) \
|
|
({ \
|
|
struct pt_regs *__regs__; \
|
|
- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
|
|
+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
|
|
__regs__ - 1; \
|
|
})
|
|
|
|
@@ -865,13 +868,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
|
|
/*
|
|
* User space process size. 47bits minus one guard page.
|
|
*/
|
|
-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
|
|
+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
|
|
|
|
/* This decides where the kernel will search for a free chunk of vm
|
|
* space during mmap's.
|
|
*/
|
|
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
|
|
- 0xc0000000 : 0xFFFFe000)
|
|
+ 0xc0000000 : 0xFFFFf000)
|
|
|
|
#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
|
|
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
|
|
@@ -882,11 +885,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
|
|
#define STACK_TOP_MAX TASK_SIZE_MAX
|
|
|
|
#define INIT_THREAD { \
|
|
- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
|
|
+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
|
|
}
|
|
|
|
#define INIT_TSS { \
|
|
- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
|
|
+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
|
|
}
|
|
|
|
/*
|
|
@@ -914,6 +917,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
|
|
*/
|
|
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
|
|
+#endif
|
|
+
|
|
#define KSTK_EIP(task) (task_pt_regs(task)->ip)
|
|
|
|
/* Get/set a process' ability to use the timestamp counter instruction */
|
|
@@ -976,12 +983,12 @@ extern bool cpu_has_amd_erratum(const int *);
|
|
|
|
void cpu_idle_wait(void);
|
|
|
|
-extern unsigned long arch_align_stack(unsigned long sp);
|
|
+#define arch_align_stack(x) ((x) & ~0xfUL)
|
|
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
|
|
|
|
void default_idle(void);
|
|
bool set_pm_idle_to_default(void);
|
|
|
|
-void stop_this_cpu(void *dummy);
|
|
+void stop_this_cpu(void *dummy) __noreturn;
|
|
|
|
#endif /* _ASM_X86_PROCESSOR_H */
|
|
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
|
|
index 0b60cd9..ed443e6 100644
|
|
--- a/arch/x86/include/asm/ptrace.h
|
|
+++ b/arch/x86/include/asm/ptrace.h
|
|
@@ -155,28 +155,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
|
|
}
|
|
|
|
/*
|
|
- * user_mode_vm(regs) determines whether a register set came from user mode.
|
|
+ * user_mode(regs) determines whether a register set came from user mode.
|
|
* This is true if V8086 mode was enabled OR if the register set was from
|
|
* protected mode with RPL-3 CS value. This tricky test checks that with
|
|
* one comparison. Many places in the kernel can bypass this full check
|
|
- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
|
|
+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
|
|
+ * be used.
|
|
*/
|
|
-static inline int user_mode(struct pt_regs *regs)
|
|
+static inline int user_mode_novm(struct pt_regs *regs)
|
|
{
|
|
#ifdef CONFIG_X86_32
|
|
return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
|
|
#else
|
|
- return !!(regs->cs & 3);
|
|
+ return !!(regs->cs & SEGMENT_RPL_MASK);
|
|
#endif
|
|
}
|
|
|
|
-static inline int user_mode_vm(struct pt_regs *regs)
|
|
+static inline int user_mode(struct pt_regs *regs)
|
|
{
|
|
#ifdef CONFIG_X86_32
|
|
return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
|
|
USER_RPL;
|
|
#else
|
|
- return user_mode(regs);
|
|
+ return user_mode_novm(regs);
|
|
#endif
|
|
}
|
|
|
|
@@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_regs *regs)
|
|
#ifdef CONFIG_X86_64
|
|
static inline bool user_64bit_mode(struct pt_regs *regs)
|
|
{
|
|
+ unsigned long cs = regs->cs & 0xffff;
|
|
#ifndef CONFIG_PARAVIRT
|
|
/*
|
|
* On non-paravirt systems, this is the only long mode CPL 3
|
|
* selector. We do not allow long mode selectors in the LDT.
|
|
*/
|
|
- return regs->cs == __USER_CS;
|
|
+ return cs == __USER_CS;
|
|
#else
|
|
/* Headers are too twisted for this to go in paravirt.h. */
|
|
- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
|
|
+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
|
|
#endif
|
|
}
|
|
#endif
|
|
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
|
|
index 92f29706..a79cbbb 100644
|
|
--- a/arch/x86/include/asm/reboot.h
|
|
+++ b/arch/x86/include/asm/reboot.h
|
|
@@ -6,19 +6,19 @@
|
|
struct pt_regs;
|
|
|
|
struct machine_ops {
|
|
- void (*restart)(char *cmd);
|
|
- void (*halt)(void);
|
|
- void (*power_off)(void);
|
|
+ void (* __noreturn restart)(char *cmd);
|
|
+ void (* __noreturn halt)(void);
|
|
+ void (* __noreturn power_off)(void);
|
|
void (*shutdown)(void);
|
|
void (*crash_shutdown)(struct pt_regs *);
|
|
- void (*emergency_restart)(void);
|
|
-};
|
|
+ void (* __noreturn emergency_restart)(void);
|
|
+} __no_const;
|
|
|
|
extern struct machine_ops machine_ops;
|
|
|
|
void native_machine_crash_shutdown(struct pt_regs *regs);
|
|
void native_machine_shutdown(void);
|
|
-void machine_real_restart(unsigned int type);
|
|
+void machine_real_restart(unsigned int type) __noreturn;
|
|
/* These must match dispatch_table in reboot_32.S */
|
|
#define MRR_BIOS 0
|
|
#define MRR_APM 1
|
|
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
|
|
index 2dbe4a7..ce1db00 100644
|
|
--- a/arch/x86/include/asm/rwsem.h
|
|
+++ b/arch/x86/include/asm/rwsem.h
|
|
@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
|
|
{
|
|
asm volatile("# beginning down_read\n\t"
|
|
LOCK_PREFIX _ASM_INC "(%1)\n\t"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX _ASM_DEC "(%1)\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
/* adds 0x00000001 */
|
|
" jns 1f\n"
|
|
" call call_rwsem_down_read_failed\n"
|
|
@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
|
|
"1:\n\t"
|
|
" mov %1,%2\n\t"
|
|
" add %3,%2\n\t"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ "sub %3,%2\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
" jle 2f\n\t"
|
|
LOCK_PREFIX " cmpxchg %2,%0\n\t"
|
|
" jnz 1b\n\t"
|
|
@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
|
|
long tmp;
|
|
asm volatile("# beginning down_write\n\t"
|
|
LOCK_PREFIX " xadd %1,(%2)\n\t"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ "mov %1,(%2)\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
/* adds 0xffff0001, returns the old value */
|
|
" test %1,%1\n\t"
|
|
/* was the count 0 before? */
|
|
@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
|
|
long tmp;
|
|
asm volatile("# beginning __up_read\n\t"
|
|
LOCK_PREFIX " xadd %1,(%2)\n\t"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ "mov %1,(%2)\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
/* subtracts 1, returns the old value */
|
|
" jns 1f\n\t"
|
|
" call call_rwsem_wake\n" /* expects old value in %edx */
|
|
@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
|
|
long tmp;
|
|
asm volatile("# beginning __up_write\n\t"
|
|
LOCK_PREFIX " xadd %1,(%2)\n\t"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ "mov %1,(%2)\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
/* subtracts 0xffff0001, returns the old value */
|
|
" jns 1f\n\t"
|
|
" call call_rwsem_wake\n" /* expects old value in %edx */
|
|
@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
|
|
{
|
|
asm volatile("# beginning __downgrade_write\n\t"
|
|
LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
/*
|
|
* transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
|
|
* 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
|
|
@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
|
|
*/
|
|
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
|
|
{
|
|
- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
|
|
+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
: "+m" (sem->count)
|
|
: "er" (delta));
|
|
}
|
|
@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
|
|
*/
|
|
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
|
|
{
|
|
- return delta + xadd(&sem->count, delta);
|
|
+ return delta + xadd_check_overflow(&sem->count, delta);
|
|
}
|
|
|
|
#endif /* __KERNEL__ */
|
|
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
|
|
index 1654662..5af4157 100644
|
|
--- a/arch/x86/include/asm/segment.h
|
|
+++ b/arch/x86/include/asm/segment.h
|
|
@@ -64,10 +64,15 @@
|
|
* 26 - ESPFIX small SS
|
|
* 27 - per-cpu [ offset to per-cpu data area ]
|
|
* 28 - stack_canary-20 [ for stack protector ]
|
|
- * 29 - unused
|
|
- * 30 - unused
|
|
+ * 29 - PCI BIOS CS
|
|
+ * 30 - PCI BIOS DS
|
|
* 31 - TSS for double fault handler
|
|
*/
|
|
+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
|
|
+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
|
|
+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
|
|
+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
|
|
+
|
|
#define GDT_ENTRY_TLS_MIN 6
|
|
#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
|
|
|
|
@@ -79,6 +84,8 @@
|
|
|
|
#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
|
|
|
|
+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
|
|
+
|
|
#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
|
|
|
|
#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
|
|
@@ -104,6 +111,12 @@
|
|
#define __KERNEL_STACK_CANARY 0
|
|
#endif
|
|
|
|
+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
|
|
+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
|
|
+
|
|
+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
|
|
+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
|
|
+
|
|
#define GDT_ENTRY_DOUBLEFAULT_TSS 31
|
|
|
|
/*
|
|
@@ -141,7 +154,7 @@
|
|
*/
|
|
|
|
/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
|
|
-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
|
|
+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
|
|
|
|
|
|
#else
|
|
@@ -165,6 +178,8 @@
|
|
#define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
|
|
#define __USER32_DS __USER_DS
|
|
|
|
+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
|
|
+
|
|
#define GDT_ENTRY_TSS 8 /* needs two entries */
|
|
#define GDT_ENTRY_LDT 10 /* needs two entries */
|
|
#define GDT_ENTRY_TLS_MIN 12
|
|
@@ -185,6 +200,7 @@
|
|
#endif
|
|
|
|
#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
|
|
+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
|
|
#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
|
|
#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
|
|
#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
|
|
@@ -263,7 +279,7 @@ static inline unsigned long get_limit(unsigned long segment)
|
|
{
|
|
unsigned long __limit;
|
|
asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
|
|
- return __limit + 1;
|
|
+ return __limit;
|
|
}
|
|
|
|
#endif /* !__ASSEMBLY__ */
|
|
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
|
|
index 4eb3a74..bcc52eb 100644
|
|
--- a/arch/x86/include/asm/smp.h
|
|
+++ b/arch/x86/include/asm/smp.h
|
|
@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
|
|
/* cpus sharing the last level cache: */
|
|
DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
|
|
DECLARE_PER_CPU(u16, cpu_llc_id);
|
|
-DECLARE_PER_CPU(int, cpu_number);
|
|
+DECLARE_PER_CPU(unsigned int, cpu_number);
|
|
|
|
static inline struct cpumask *cpu_sibling_mask(int cpu)
|
|
{
|
|
@@ -79,7 +79,7 @@ struct smp_ops {
|
|
|
|
void (*send_call_func_ipi)(const struct cpumask *mask);
|
|
void (*send_call_func_single_ipi)(int cpu);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/* Globals due to paravirt */
|
|
extern void set_cpu_sibling_map(int cpu);
|
|
@@ -194,14 +194,8 @@ extern unsigned disabled_cpus __cpuinitdata;
|
|
extern int safe_smp_processor_id(void);
|
|
|
|
#elif defined(CONFIG_X86_64_SMP)
|
|
-#define raw_smp_processor_id() (percpu_read(cpu_number))
|
|
-
|
|
-#define stack_smp_processor_id() \
|
|
-({ \
|
|
- struct thread_info *ti; \
|
|
- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
|
|
- ti->cpu; \
|
|
-})
|
|
+#define raw_smp_processor_id() (percpu_read(cpu_number))
|
|
+#define stack_smp_processor_id() raw_smp_processor_id()
|
|
#define safe_smp_processor_id() smp_processor_id()
|
|
|
|
#endif
|
|
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
|
|
index 76bfa2c..12d3fe7 100644
|
|
--- a/arch/x86/include/asm/spinlock.h
|
|
+++ b/arch/x86/include/asm/spinlock.h
|
|
@@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
|
|
static inline void arch_read_lock(arch_rwlock_t *rw)
|
|
{
|
|
asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
"jns 1f\n"
|
|
"call __read_lock_failed\n\t"
|
|
"1:\n"
|
|
@@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
|
|
static inline void arch_write_lock(arch_rwlock_t *rw)
|
|
{
|
|
asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
"jz 1f\n"
|
|
"call __write_lock_failed\n\t"
|
|
"1:\n"
|
|
@@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
|
|
|
|
static inline void arch_read_unlock(arch_rwlock_t *rw)
|
|
{
|
|
- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
|
|
+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
:"+m" (rw->lock) : : "memory");
|
|
}
|
|
|
|
static inline void arch_write_unlock(arch_rwlock_t *rw)
|
|
{
|
|
- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
|
|
+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ "jno 0f\n"
|
|
+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
|
|
+ "int $4\n0:\n"
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
: "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
|
|
}
|
|
|
|
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
|
|
index b5d9533..41655fa 100644
|
|
--- a/arch/x86/include/asm/stackprotector.h
|
|
+++ b/arch/x86/include/asm/stackprotector.h
|
|
@@ -47,7 +47,7 @@
|
|
* head_32 for boot CPU and setup_per_cpu_areas() for others.
|
|
*/
|
|
#define GDT_STACK_CANARY_INIT \
|
|
- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
|
|
+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
|
|
|
|
/*
|
|
* Initialize the stackprotector canary value.
|
|
@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
|
|
|
|
static inline void load_stack_canary_segment(void)
|
|
{
|
|
-#ifdef CONFIG_X86_32
|
|
+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
asm volatile ("mov %0, %%gs" : : "r" (0));
|
|
#endif
|
|
}
|
|
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
|
|
index 70bbe39..4ae2bd4 100644
|
|
--- a/arch/x86/include/asm/stacktrace.h
|
|
+++ b/arch/x86/include/asm/stacktrace.h
|
|
@@ -11,28 +11,20 @@
|
|
|
|
extern int kstack_depth_to_print;
|
|
|
|
-struct thread_info;
|
|
+struct task_struct;
|
|
struct stacktrace_ops;
|
|
|
|
-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
|
|
- unsigned long *stack,
|
|
- unsigned long bp,
|
|
- const struct stacktrace_ops *ops,
|
|
- void *data,
|
|
- unsigned long *end,
|
|
- int *graph);
|
|
-
|
|
-extern unsigned long
|
|
-print_context_stack(struct thread_info *tinfo,
|
|
- unsigned long *stack, unsigned long bp,
|
|
- const struct stacktrace_ops *ops, void *data,
|
|
- unsigned long *end, int *graph);
|
|
-
|
|
-extern unsigned long
|
|
-print_context_stack_bp(struct thread_info *tinfo,
|
|
- unsigned long *stack, unsigned long bp,
|
|
- const struct stacktrace_ops *ops, void *data,
|
|
- unsigned long *end, int *graph);
|
|
+typedef unsigned long walk_stack_t(struct task_struct *task,
|
|
+ void *stack_start,
|
|
+ unsigned long *stack,
|
|
+ unsigned long bp,
|
|
+ const struct stacktrace_ops *ops,
|
|
+ void *data,
|
|
+ unsigned long *end,
|
|
+ int *graph);
|
|
+
|
|
+extern walk_stack_t print_context_stack;
|
|
+extern walk_stack_t print_context_stack_bp;
|
|
|
|
/* Generic stack tracer with callbacks */
|
|
|
|
@@ -40,7 +32,7 @@ struct stacktrace_ops {
|
|
void (*address)(void *data, unsigned long address, int reliable);
|
|
/* On negative return stop dumping */
|
|
int (*stack)(void *data, char *name);
|
|
- walk_stack_t walk_stack;
|
|
+ walk_stack_t *walk_stack;
|
|
};
|
|
|
|
void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
|
|
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
|
|
index 4ec45b3..a4f0a8a 100644
|
|
--- a/arch/x86/include/asm/switch_to.h
|
|
+++ b/arch/x86/include/asm/switch_to.h
|
|
@@ -108,7 +108,7 @@ do { \
|
|
"call __switch_to\n\t" \
|
|
"movq "__percpu_arg([current_task])",%%rsi\n\t" \
|
|
__switch_canary \
|
|
- "movq %P[thread_info](%%rsi),%%r8\n\t" \
|
|
+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
|
|
"movq %%rax,%%rdi\n\t" \
|
|
"testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
|
|
"jnz ret_from_fork\n\t" \
|
|
@@ -119,7 +119,7 @@ do { \
|
|
[threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
|
|
[ti_flags] "i" (offsetof(struct thread_info, flags)), \
|
|
[_tif_fork] "i" (_TIF_FORK), \
|
|
- [thread_info] "i" (offsetof(struct task_struct, stack)), \
|
|
+ [thread_info] "m" (current_tinfo), \
|
|
[current_task] "m" (current_task) \
|
|
__switch_canary_iparam \
|
|
: "memory", "cc" __EXTRA_CLOBBER)
|
|
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
|
|
index 3fda9db4..4ca1c61 100644
|
|
--- a/arch/x86/include/asm/sys_ia32.h
|
|
+++ b/arch/x86/include/asm/sys_ia32.h
|
|
@@ -40,7 +40,7 @@ asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
|
|
struct old_sigaction32 __user *);
|
|
asmlinkage long sys32_alarm(unsigned int);
|
|
|
|
-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
|
|
+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
|
|
asmlinkage long sys32_sysfs(int, u32, u32);
|
|
|
|
asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
|
|
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
|
|
index ad6df8c..617e4b4 100644
|
|
--- a/arch/x86/include/asm/thread_info.h
|
|
+++ b/arch/x86/include/asm/thread_info.h
|
|
@@ -10,6 +10,7 @@
|
|
#include <linux/compiler.h>
|
|
#include <asm/page.h>
|
|
#include <asm/types.h>
|
|
+#include <asm/percpu.h>
|
|
|
|
/*
|
|
* low level task data that entry.S needs immediate access to
|
|
@@ -24,7 +25,6 @@ struct exec_domain;
|
|
#include <linux/atomic.h>
|
|
|
|
struct thread_info {
|
|
- struct task_struct *task; /* main task structure */
|
|
struct exec_domain *exec_domain; /* execution domain */
|
|
__u32 flags; /* low level flags */
|
|
__u32 status; /* thread synchronous flags */
|
|
@@ -34,19 +34,13 @@ struct thread_info {
|
|
mm_segment_t addr_limit;
|
|
struct restart_block restart_block;
|
|
void __user *sysenter_return;
|
|
-#ifdef CONFIG_X86_32
|
|
- unsigned long previous_esp; /* ESP of the previous stack in
|
|
- case of nested (IRQ) stacks
|
|
- */
|
|
- __u8 supervisor_stack[0];
|
|
-#endif
|
|
+ unsigned long lowest_stack;
|
|
unsigned int sig_on_uaccess_error:1;
|
|
unsigned int uaccess_err:1; /* uaccess failed */
|
|
};
|
|
|
|
-#define INIT_THREAD_INFO(tsk) \
|
|
+#define INIT_THREAD_INFO \
|
|
{ \
|
|
- .task = &tsk, \
|
|
.exec_domain = &default_exec_domain, \
|
|
.flags = 0, \
|
|
.cpu = 0, \
|
|
@@ -57,7 +51,7 @@ struct thread_info {
|
|
}, \
|
|
}
|
|
|
|
-#define init_thread_info (init_thread_union.thread_info)
|
|
+#define init_thread_info (init_thread_union.stack)
|
|
#define init_stack (init_thread_union.stack)
|
|
|
|
#else /* !__ASSEMBLY__ */
|
|
@@ -173,6 +167,23 @@ struct thread_info {
|
|
ret; \
|
|
})
|
|
|
|
+#ifdef __ASSEMBLY__
|
|
+/* how to get the thread information struct from ASM */
|
|
+#define GET_THREAD_INFO(reg) \
|
|
+ mov PER_CPU_VAR(current_tinfo), reg
|
|
+
|
|
+/* use this one if reg already contains %esp */
|
|
+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
|
|
+#else
|
|
+/* how to get the thread information struct from C */
|
|
+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
|
|
+
|
|
+static __always_inline struct thread_info *current_thread_info(void)
|
|
+{
|
|
+ return percpu_read_stable(current_tinfo);
|
|
+}
|
|
+#endif
|
|
+
|
|
#ifdef CONFIG_X86_32
|
|
|
|
#define STACK_WARN (THREAD_SIZE/8)
|
|
@@ -183,35 +194,13 @@ struct thread_info {
|
|
*/
|
|
#ifndef __ASSEMBLY__
|
|
|
|
-
|
|
/* how to get the current stack pointer from C */
|
|
register unsigned long current_stack_pointer asm("esp") __used;
|
|
|
|
-/* how to get the thread information struct from C */
|
|
-static inline struct thread_info *current_thread_info(void)
|
|
-{
|
|
- return (struct thread_info *)
|
|
- (current_stack_pointer & ~(THREAD_SIZE - 1));
|
|
-}
|
|
-
|
|
-#else /* !__ASSEMBLY__ */
|
|
-
|
|
-/* how to get the thread information struct from ASM */
|
|
-#define GET_THREAD_INFO(reg) \
|
|
- movl $-THREAD_SIZE, reg; \
|
|
- andl %esp, reg
|
|
-
|
|
-/* use this one if reg already contains %esp */
|
|
-#define GET_THREAD_INFO_WITH_ESP(reg) \
|
|
- andl $-THREAD_SIZE, reg
|
|
-
|
|
#endif
|
|
|
|
#else /* X86_32 */
|
|
|
|
-#include <asm/percpu.h>
|
|
-#define KERNEL_STACK_OFFSET (5*8)
|
|
-
|
|
/*
|
|
* macros/functions for gaining access to the thread information structure
|
|
* preempt_count needs to be 1 initially, until the scheduler is functional.
|
|
@@ -219,27 +208,8 @@ static inline struct thread_info *current_thread_info(void)
|
|
#ifndef __ASSEMBLY__
|
|
DECLARE_PER_CPU(unsigned long, kernel_stack);
|
|
|
|
-static inline struct thread_info *current_thread_info(void)
|
|
-{
|
|
- struct thread_info *ti;
|
|
- ti = (void *)(percpu_read_stable(kernel_stack) +
|
|
- KERNEL_STACK_OFFSET - THREAD_SIZE);
|
|
- return ti;
|
|
-}
|
|
-
|
|
-#else /* !__ASSEMBLY__ */
|
|
-
|
|
-/* how to get the thread information struct from ASM */
|
|
-#define GET_THREAD_INFO(reg) \
|
|
- movq PER_CPU_VAR(kernel_stack),reg ; \
|
|
- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
|
|
-
|
|
-/*
|
|
- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
|
|
- * a certain register (to be used in assembler memory operands).
|
|
- */
|
|
-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
|
|
-
|
|
+/* how to get the current stack pointer from C */
|
|
+register unsigned long current_stack_pointer asm("rsp") __used;
|
|
#endif
|
|
|
|
#endif /* !X86_32 */
|
|
@@ -285,5 +255,16 @@ extern void arch_task_cache_init(void);
|
|
extern void free_thread_info(struct thread_info *ti);
|
|
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
|
|
#define arch_task_cache_init arch_task_cache_init
|
|
+
|
|
+#define __HAVE_THREAD_FUNCTIONS
|
|
+#define task_thread_info(task) (&(task)->tinfo)
|
|
+#define task_stack_page(task) ((task)->stack)
|
|
+#define setup_thread_stack(p, org) do {} while (0)
|
|
+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
|
|
+
|
|
+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
|
|
+extern struct task_struct *alloc_task_struct_node(int node);
|
|
+extern void free_task_struct(struct task_struct *);
|
|
+
|
|
#endif
|
|
#endif /* _ASM_X86_THREAD_INFO_H */
|
|
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
|
|
index e054459..14bc8a7 100644
|
|
--- a/arch/x86/include/asm/uaccess.h
|
|
+++ b/arch/x86/include/asm/uaccess.h
|
|
@@ -7,12 +7,15 @@
|
|
#include <linux/compiler.h>
|
|
#include <linux/thread_info.h>
|
|
#include <linux/string.h>
|
|
+#include <linux/sched.h>
|
|
#include <asm/asm.h>
|
|
#include <asm/page.h>
|
|
|
|
#define VERIFY_READ 0
|
|
#define VERIFY_WRITE 1
|
|
|
|
+extern void check_object_size(const void *ptr, unsigned long n, bool to);
|
|
+
|
|
/*
|
|
* The fs value determines whether argument validity checking should be
|
|
* performed or not. If get_fs() == USER_DS, checking is performed, with
|
|
@@ -28,7 +31,12 @@
|
|
|
|
#define get_ds() (KERNEL_DS)
|
|
#define get_fs() (current_thread_info()->addr_limit)
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+void __set_fs(mm_segment_t x);
|
|
+void set_fs(mm_segment_t x);
|
|
+#else
|
|
#define set_fs(x) (current_thread_info()->addr_limit = (x))
|
|
+#endif
|
|
|
|
#define segment_eq(a, b) ((a).seg == (b).seg)
|
|
|
|
@@ -76,7 +84,33 @@
|
|
* checks that the pointer is in the user space range - after calling
|
|
* this function, memory access functions may still return -EFAULT.
|
|
*/
|
|
-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
|
|
+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
|
|
+#define access_ok(type, addr, size) \
|
|
+({ \
|
|
+ long __size = size; \
|
|
+ unsigned long __addr = (unsigned long)addr; \
|
|
+ unsigned long __addr_ao = __addr & PAGE_MASK; \
|
|
+ unsigned long __end_ao = __addr + __size - 1; \
|
|
+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
|
|
+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
|
|
+ while(__addr_ao <= __end_ao) { \
|
|
+ char __c_ao; \
|
|
+ __addr_ao += PAGE_SIZE; \
|
|
+ if (__size > PAGE_SIZE) \
|
|
+ cond_resched(); \
|
|
+ if (__get_user(__c_ao, (char __user *)__addr)) \
|
|
+ break; \
|
|
+ if (type != VERIFY_WRITE) { \
|
|
+ __addr = __addr_ao; \
|
|
+ continue; \
|
|
+ } \
|
|
+ if (__put_user(__c_ao, (char __user *)__addr)) \
|
|
+ break; \
|
|
+ __addr = __addr_ao; \
|
|
+ } \
|
|
+ } \
|
|
+ __ret_ao; \
|
|
+})
|
|
|
|
/*
|
|
* The exception table consists of pairs of addresses: the first is the
|
|
@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
|
|
asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
|
|
: "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
|
|
|
|
-
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+#define __copyuser_seg "gs;"
|
|
+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
|
|
+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
|
|
+#else
|
|
+#define __copyuser_seg
|
|
+#define __COPYUSER_SET_ES
|
|
+#define __COPYUSER_RESTORE_ES
|
|
+#endif
|
|
|
|
#ifdef CONFIG_X86_32
|
|
#define __put_user_asm_u64(x, addr, err, errret) \
|
|
- asm volatile("1: movl %%eax,0(%2)\n" \
|
|
- "2: movl %%edx,4(%2)\n" \
|
|
+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
|
|
+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
|
|
"3:\n" \
|
|
".section .fixup,\"ax\"\n" \
|
|
"4: movl %3,%0\n" \
|
|
@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
|
|
: "A" (x), "r" (addr), "i" (errret), "0" (err))
|
|
|
|
#define __put_user_asm_ex_u64(x, addr) \
|
|
- asm volatile("1: movl %%eax,0(%1)\n" \
|
|
- "2: movl %%edx,4(%1)\n" \
|
|
+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
|
|
+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
|
|
"3:\n" \
|
|
_ASM_EXTABLE(1b, 2b - 1b) \
|
|
_ASM_EXTABLE(2b, 3b - 2b) \
|
|
@@ -252,7 +294,7 @@ extern void __put_user_8(void);
|
|
__typeof__(*(ptr)) __pu_val; \
|
|
__chk_user_ptr(ptr); \
|
|
might_fault(); \
|
|
- __pu_val = x; \
|
|
+ __pu_val = (x); \
|
|
switch (sizeof(*(ptr))) { \
|
|
case 1: \
|
|
__put_user_x(1, __pu_val, ptr, __ret_pu); \
|
|
@@ -373,7 +415,7 @@ do { \
|
|
} while (0)
|
|
|
|
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
|
|
- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
|
|
+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
|
|
"2:\n" \
|
|
".section .fixup,\"ax\"\n" \
|
|
"3: mov %3,%0\n" \
|
|
@@ -381,7 +423,7 @@ do { \
|
|
" jmp 2b\n" \
|
|
".previous\n" \
|
|
_ASM_EXTABLE(1b, 3b) \
|
|
- : "=r" (err), ltype(x) \
|
|
+ : "=r" (err), ltype (x) \
|
|
: "m" (__m(addr)), "i" (errret), "0" (err))
|
|
|
|
#define __get_user_size_ex(x, ptr, size) \
|
|
@@ -406,7 +448,7 @@ do { \
|
|
} while (0)
|
|
|
|
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
|
|
- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
|
|
+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
|
|
"2:\n" \
|
|
_ASM_EXTABLE(1b, 2b - 1b) \
|
|
: ltype(x) : "m" (__m(addr)))
|
|
@@ -423,13 +465,24 @@ do { \
|
|
int __gu_err; \
|
|
unsigned long __gu_val; \
|
|
__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
|
|
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
|
|
+ (x) = (__typeof__(*(ptr)))__gu_val; \
|
|
__gu_err; \
|
|
})
|
|
|
|
/* FIXME: this hack is definitely wrong -AK */
|
|
struct __large_struct { unsigned long buf[100]; };
|
|
-#define __m(x) (*(struct __large_struct __user *)(x))
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+#define ____m(x) \
|
|
+({ \
|
|
+ unsigned long ____x = (unsigned long)(x); \
|
|
+ if (____x < PAX_USER_SHADOW_BASE) \
|
|
+ ____x += PAX_USER_SHADOW_BASE; \
|
|
+ (void __user *)____x; \
|
|
+})
|
|
+#else
|
|
+#define ____m(x) (x)
|
|
+#endif
|
|
+#define __m(x) (*(struct __large_struct __user *)____m(x))
|
|
|
|
/*
|
|
* Tell gcc we read from memory instead of writing: this is because
|
|
@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
|
|
* aliasing issues.
|
|
*/
|
|
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
|
|
- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
|
|
+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
|
|
"2:\n" \
|
|
".section .fixup,\"ax\"\n" \
|
|
"3: mov %3,%0\n" \
|
|
@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
|
|
".previous\n" \
|
|
_ASM_EXTABLE(1b, 3b) \
|
|
: "=r"(err) \
|
|
- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
|
|
+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
|
|
|
|
#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
|
|
- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
|
|
+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
|
|
"2:\n" \
|
|
_ASM_EXTABLE(1b, 2b - 1b) \
|
|
: : ltype(x), "m" (__m(addr)))
|
|
@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
|
|
* On error, the variable @x is set to zero.
|
|
*/
|
|
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+#define __get_user(x, ptr) get_user((x), (ptr))
|
|
+#else
|
|
#define __get_user(x, ptr) \
|
|
__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
|
|
+#endif
|
|
|
|
/**
|
|
* __put_user: - Write a simple value into user space, with less checking.
|
|
@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
|
|
* Returns zero on success, or -EFAULT on error.
|
|
*/
|
|
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+#define __put_user(x, ptr) put_user((x), (ptr))
|
|
+#else
|
|
#define __put_user(x, ptr) \
|
|
__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
|
|
+#endif
|
|
|
|
#define __get_user_unaligned __get_user
|
|
#define __put_user_unaligned __put_user
|
|
@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
|
|
#define get_user_ex(x, ptr) do { \
|
|
unsigned long __gue_val; \
|
|
__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
|
|
- (x) = (__force __typeof__(*(ptr)))__gue_val; \
|
|
+ (x) = (__typeof__(*(ptr)))__gue_val; \
|
|
} while (0)
|
|
|
|
#ifdef CONFIG_X86_WP_WORKS_OK
|
|
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
|
|
index 8084bc7..3d6ec37 100644
|
|
--- a/arch/x86/include/asm/uaccess_32.h
|
|
+++ b/arch/x86/include/asm/uaccess_32.h
|
|
@@ -11,15 +11,15 @@
|
|
#include <asm/page.h>
|
|
|
|
unsigned long __must_check __copy_to_user_ll
|
|
- (void __user *to, const void *from, unsigned long n);
|
|
+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
|
|
unsigned long __must_check __copy_from_user_ll
|
|
- (void *to, const void __user *from, unsigned long n);
|
|
+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
|
|
unsigned long __must_check __copy_from_user_ll_nozero
|
|
- (void *to, const void __user *from, unsigned long n);
|
|
+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
|
|
unsigned long __must_check __copy_from_user_ll_nocache
|
|
- (void *to, const void __user *from, unsigned long n);
|
|
+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
|
|
unsigned long __must_check __copy_from_user_ll_nocache_nozero
|
|
- (void *to, const void __user *from, unsigned long n);
|
|
+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
|
|
|
|
/**
|
|
* __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
|
|
@@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
|
|
static __always_inline unsigned long __must_check
|
|
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
|
|
{
|
|
+ if ((long)n < 0)
|
|
+ return n;
|
|
+
|
|
if (__builtin_constant_p(n)) {
|
|
unsigned long ret;
|
|
|
|
@@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
|
|
return ret;
|
|
}
|
|
}
|
|
+ if (!__builtin_constant_p(n))
|
|
+ check_object_size(from, n, true);
|
|
return __copy_to_user_ll(to, from, n);
|
|
}
|
|
|
|
@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
|
|
__copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
{
|
|
might_fault();
|
|
+
|
|
return __copy_to_user_inatomic(to, from, n);
|
|
}
|
|
|
|
static __always_inline unsigned long
|
|
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
|
|
{
|
|
+ if ((long)n < 0)
|
|
+ return n;
|
|
+
|
|
/* Avoid zeroing the tail if the copy fails..
|
|
* If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
|
|
* but as the zeroing behaviour is only significant when n is not
|
|
@@ -137,6 +146,10 @@ static __always_inline unsigned long
|
|
__copy_from_user(void *to, const void __user *from, unsigned long n)
|
|
{
|
|
might_fault();
|
|
+
|
|
+ if ((long)n < 0)
|
|
+ return n;
|
|
+
|
|
if (__builtin_constant_p(n)) {
|
|
unsigned long ret;
|
|
|
|
@@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
|
|
return ret;
|
|
}
|
|
}
|
|
+ if (!__builtin_constant_p(n))
|
|
+ check_object_size(to, n, false);
|
|
return __copy_from_user_ll(to, from, n);
|
|
}
|
|
|
|
@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
|
|
const void __user *from, unsigned long n)
|
|
{
|
|
might_fault();
|
|
+
|
|
+ if ((long)n < 0)
|
|
+ return n;
|
|
+
|
|
if (__builtin_constant_p(n)) {
|
|
unsigned long ret;
|
|
|
|
@@ -181,15 +200,19 @@ static __always_inline unsigned long
|
|
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
|
|
unsigned long n)
|
|
{
|
|
- return __copy_from_user_ll_nocache_nozero(to, from, n);
|
|
-}
|
|
+ if ((long)n < 0)
|
|
+ return n;
|
|
|
|
-unsigned long __must_check copy_to_user(void __user *to,
|
|
- const void *from, unsigned long n);
|
|
-unsigned long __must_check _copy_from_user(void *to,
|
|
- const void __user *from,
|
|
- unsigned long n);
|
|
+ return __copy_from_user_ll_nocache_nozero(to, from, n);
|
|
+}
|
|
|
|
+extern void copy_to_user_overflow(void)
|
|
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
|
|
+ __compiletime_error("copy_to_user() buffer size is not provably correct")
|
|
+#else
|
|
+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
|
|
+#endif
|
|
+;
|
|
|
|
extern void copy_from_user_overflow(void)
|
|
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
|
|
@@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
|
|
#endif
|
|
;
|
|
|
|
-static inline unsigned long __must_check copy_from_user(void *to,
|
|
- const void __user *from,
|
|
- unsigned long n)
|
|
+/**
|
|
+ * copy_to_user: - Copy a block of data into user space.
|
|
+ * @to: Destination address, in user space.
|
|
+ * @from: Source address, in kernel space.
|
|
+ * @n: Number of bytes to copy.
|
|
+ *
|
|
+ * Context: User context only. This function may sleep.
|
|
+ *
|
|
+ * Copy data from kernel space to user space.
|
|
+ *
|
|
+ * Returns number of bytes that could not be copied.
|
|
+ * On success, this will be zero.
|
|
+ */
|
|
+static inline unsigned long __must_check
|
|
+copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
{
|
|
- int sz = __compiletime_object_size(to);
|
|
+ size_t sz = __compiletime_object_size(from);
|
|
|
|
- if (likely(sz == -1 || sz >= n))
|
|
- n = _copy_from_user(to, from, n);
|
|
- else
|
|
- copy_from_user_overflow();
|
|
+ if (unlikely(sz != (size_t)-1 && sz < n))
|
|
+ copy_to_user_overflow();
|
|
+ else if (access_ok(VERIFY_WRITE, to, n))
|
|
+ n = __copy_to_user(to, from, n);
|
|
+ return n;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * copy_from_user: - Copy a block of data from user space.
|
|
+ * @to: Destination address, in kernel space.
|
|
+ * @from: Source address, in user space.
|
|
+ * @n: Number of bytes to copy.
|
|
+ *
|
|
+ * Context: User context only. This function may sleep.
|
|
+ *
|
|
+ * Copy data from user space to kernel space.
|
|
+ *
|
|
+ * Returns number of bytes that could not be copied.
|
|
+ * On success, this will be zero.
|
|
+ *
|
|
+ * If some data could not be copied, this function will pad the copied
|
|
+ * data to the requested size using zero bytes.
|
|
+ */
|
|
+static inline unsigned long __must_check
|
|
+copy_from_user(void *to, const void __user *from, unsigned long n)
|
|
+{
|
|
+ size_t sz = __compiletime_object_size(to);
|
|
|
|
+ if (unlikely(sz != (size_t)-1 && sz < n))
|
|
+ copy_from_user_overflow();
|
|
+ else if (access_ok(VERIFY_READ, from, n))
|
|
+ n = __copy_from_user(to, from, n);
|
|
+ else if ((long)n > 0) {
|
|
+ if (!__builtin_constant_p(n))
|
|
+ check_object_size(to, n, false);
|
|
+ memset(to, 0, n);
|
|
+ }
|
|
return n;
|
|
}
|
|
|
|
@@ -230,7 +297,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
|
|
#define strlen_user(str) strnlen_user(str, LONG_MAX)
|
|
|
|
long strnlen_user(const char __user *str, long n);
|
|
-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
|
|
-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
|
|
+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
|
|
+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
|
|
|
|
#endif /* _ASM_X86_UACCESS_32_H */
|
|
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
|
|
index fcd4b6f..ef04f8f 100644
|
|
--- a/arch/x86/include/asm/uaccess_64.h
|
|
+++ b/arch/x86/include/asm/uaccess_64.h
|
|
@@ -10,6 +10,9 @@
|
|
#include <asm/alternative.h>
|
|
#include <asm/cpufeature.h>
|
|
#include <asm/page.h>
|
|
+#include <asm/pgtable.h>
|
|
+
|
|
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
|
|
|
|
/*
|
|
* Copy To/From Userspace
|
|
@@ -17,12 +20,14 @@
|
|
|
|
/* Handles exceptions in both to and from, but doesn't do access_ok */
|
|
__must_check unsigned long
|
|
-copy_user_generic_string(void *to, const void *from, unsigned len);
|
|
+copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
|
|
__must_check unsigned long
|
|
-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
|
|
+copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
|
|
|
|
static __always_inline __must_check unsigned long
|
|
-copy_user_generic(void *to, const void *from, unsigned len)
|
|
+copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
|
|
+static __always_inline __must_check unsigned long
|
|
+copy_user_generic(void *to, const void *from, unsigned long len)
|
|
{
|
|
unsigned ret;
|
|
|
|
@@ -32,142 +37,238 @@ copy_user_generic(void *to, const void *from, unsigned len)
|
|
ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
|
|
"=d" (len)),
|
|
"1" (to), "2" (from), "3" (len)
|
|
- : "memory", "rcx", "r8", "r9", "r10", "r11");
|
|
+ : "memory", "rcx", "r8", "r9", "r11");
|
|
return ret;
|
|
}
|
|
|
|
+static __always_inline __must_check unsigned long
|
|
+__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
|
|
+static __always_inline __must_check unsigned long
|
|
+__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
|
|
__must_check unsigned long
|
|
-_copy_to_user(void __user *to, const void *from, unsigned len);
|
|
-__must_check unsigned long
|
|
-_copy_from_user(void *to, const void __user *from, unsigned len);
|
|
-__must_check unsigned long
|
|
-copy_in_user(void __user *to, const void __user *from, unsigned len);
|
|
+copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
|
|
+
|
|
+extern void copy_to_user_overflow(void)
|
|
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
|
|
+ __compiletime_error("copy_to_user() buffer size is not provably correct")
|
|
+#else
|
|
+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
|
|
+#endif
|
|
+;
|
|
+
|
|
+extern void copy_from_user_overflow(void)
|
|
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
|
|
+ __compiletime_error("copy_from_user() buffer size is not provably correct")
|
|
+#else
|
|
+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
|
|
+#endif
|
|
+;
|
|
|
|
static inline unsigned long __must_check copy_from_user(void *to,
|
|
const void __user *from,
|
|
unsigned long n)
|
|
{
|
|
- int sz = __compiletime_object_size(to);
|
|
-
|
|
might_fault();
|
|
- if (likely(sz == -1 || sz >= n))
|
|
- n = _copy_from_user(to, from, n);
|
|
-#ifdef CONFIG_DEBUG_VM
|
|
- else
|
|
- WARN(1, "Buffer overflow detected!\n");
|
|
-#endif
|
|
+
|
|
+ if (access_ok(VERIFY_READ, from, n))
|
|
+ n = __copy_from_user(to, from, n);
|
|
+ else if (n < INT_MAX) {
|
|
+ if (!__builtin_constant_p(n))
|
|
+ check_object_size(to, n, false);
|
|
+ memset(to, 0, n);
|
|
+ }
|
|
return n;
|
|
}
|
|
|
|
static __always_inline __must_check
|
|
-int copy_to_user(void __user *dst, const void *src, unsigned size)
|
|
+int copy_to_user(void __user *dst, const void *src, unsigned long size)
|
|
{
|
|
might_fault();
|
|
|
|
- return _copy_to_user(dst, src, size);
|
|
+ if (access_ok(VERIFY_WRITE, dst, size))
|
|
+ size = __copy_to_user(dst, src, size);
|
|
+ return size;
|
|
}
|
|
|
|
static __always_inline __must_check
|
|
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
|
|
+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
|
|
{
|
|
- int ret = 0;
|
|
+ size_t sz = __compiletime_object_size(dst);
|
|
+ unsigned ret = 0;
|
|
|
|
might_fault();
|
|
- if (!__builtin_constant_p(size))
|
|
- return copy_user_generic(dst, (__force void *)src, size);
|
|
+
|
|
+ if (size > INT_MAX)
|
|
+ return size;
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ if (!__access_ok(VERIFY_READ, src, size))
|
|
+ return size;
|
|
+#endif
|
|
+
|
|
+ if (unlikely(sz != (size_t)-1 && sz < size)) {
|
|
+ copy_from_user_overflow();
|
|
+ return size;
|
|
+ }
|
|
+
|
|
+ if (!__builtin_constant_p(size)) {
|
|
+ check_object_size(dst, size, false);
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
|
|
+ src += PAX_USER_SHADOW_BASE;
|
|
+#endif
|
|
+
|
|
+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
|
|
+ }
|
|
switch (size) {
|
|
- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
|
|
+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
|
|
ret, "b", "b", "=q", 1);
|
|
return ret;
|
|
- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
|
|
+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
|
|
ret, "w", "w", "=r", 2);
|
|
return ret;
|
|
- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
|
|
+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
|
|
ret, "l", "k", "=r", 4);
|
|
return ret;
|
|
- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
|
|
+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
|
|
ret, "q", "", "=r", 8);
|
|
return ret;
|
|
case 10:
|
|
- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
|
|
+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
|
|
ret, "q", "", "=r", 10);
|
|
if (unlikely(ret))
|
|
return ret;
|
|
__get_user_asm(*(u16 *)(8 + (char *)dst),
|
|
- (u16 __user *)(8 + (char __user *)src),
|
|
+ (const u16 __user *)(8 + (const char __user *)src),
|
|
ret, "w", "w", "=r", 2);
|
|
return ret;
|
|
case 16:
|
|
- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
|
|
+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
|
|
ret, "q", "", "=r", 16);
|
|
if (unlikely(ret))
|
|
return ret;
|
|
__get_user_asm(*(u64 *)(8 + (char *)dst),
|
|
- (u64 __user *)(8 + (char __user *)src),
|
|
+ (const u64 __user *)(8 + (const char __user *)src),
|
|
ret, "q", "", "=r", 8);
|
|
return ret;
|
|
default:
|
|
- return copy_user_generic(dst, (__force void *)src, size);
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
|
|
+ src += PAX_USER_SHADOW_BASE;
|
|
+#endif
|
|
+
|
|
+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
|
|
}
|
|
}
|
|
|
|
static __always_inline __must_check
|
|
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
|
|
+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
|
|
{
|
|
- int ret = 0;
|
|
+ size_t sz = __compiletime_object_size(src);
|
|
+ unsigned ret = 0;
|
|
|
|
might_fault();
|
|
- if (!__builtin_constant_p(size))
|
|
- return copy_user_generic((__force void *)dst, src, size);
|
|
+
|
|
+ if (size > INT_MAX)
|
|
+ return size;
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ if (!__access_ok(VERIFY_WRITE, dst, size))
|
|
+ return size;
|
|
+#endif
|
|
+
|
|
+ if (unlikely(sz != (size_t)-1 && sz < size)) {
|
|
+ copy_to_user_overflow();
|
|
+ return size;
|
|
+ }
|
|
+
|
|
+ if (!__builtin_constant_p(size)) {
|
|
+ check_object_size(src, size, true);
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
|
|
+ dst += PAX_USER_SHADOW_BASE;
|
|
+#endif
|
|
+
|
|
+ return copy_user_generic((__force_kernel void *)dst, src, size);
|
|
+ }
|
|
switch (size) {
|
|
- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
|
|
+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
|
|
ret, "b", "b", "iq", 1);
|
|
return ret;
|
|
- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
|
|
+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
|
|
ret, "w", "w", "ir", 2);
|
|
return ret;
|
|
- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
|
|
+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
|
|
ret, "l", "k", "ir", 4);
|
|
return ret;
|
|
- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
|
|
+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
|
|
ret, "q", "", "er", 8);
|
|
return ret;
|
|
case 10:
|
|
- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
|
|
+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
|
|
ret, "q", "", "er", 10);
|
|
if (unlikely(ret))
|
|
return ret;
|
|
asm("":::"memory");
|
|
- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
|
|
+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
|
|
ret, "w", "w", "ir", 2);
|
|
return ret;
|
|
case 16:
|
|
- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
|
|
+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
|
|
ret, "q", "", "er", 16);
|
|
if (unlikely(ret))
|
|
return ret;
|
|
asm("":::"memory");
|
|
- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
|
|
+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
|
|
ret, "q", "", "er", 8);
|
|
return ret;
|
|
default:
|
|
- return copy_user_generic((__force void *)dst, src, size);
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
|
|
+ dst += PAX_USER_SHADOW_BASE;
|
|
+#endif
|
|
+
|
|
+ return copy_user_generic((__force_kernel void *)dst, src, size);
|
|
}
|
|
}
|
|
|
|
static __always_inline __must_check
|
|
-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
|
|
+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
|
|
{
|
|
- int ret = 0;
|
|
+ unsigned ret = 0;
|
|
|
|
might_fault();
|
|
- if (!__builtin_constant_p(size))
|
|
- return copy_user_generic((__force void *)dst,
|
|
- (__force void *)src, size);
|
|
+
|
|
+ if (size > INT_MAX)
|
|
+ return size;
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ if (!__access_ok(VERIFY_READ, src, size))
|
|
+ return size;
|
|
+ if (!__access_ok(VERIFY_WRITE, dst, size))
|
|
+ return size;
|
|
+#endif
|
|
+
|
|
+ if (!__builtin_constant_p(size)) {
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
|
|
+ src += PAX_USER_SHADOW_BASE;
|
|
+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
|
|
+ dst += PAX_USER_SHADOW_BASE;
|
|
+#endif
|
|
+
|
|
+ return copy_user_generic((__force_kernel void *)dst,
|
|
+ (__force_kernel const void *)src, size);
|
|
+ }
|
|
switch (size) {
|
|
case 1: {
|
|
u8 tmp;
|
|
- __get_user_asm(tmp, (u8 __user *)src,
|
|
+ __get_user_asm(tmp, (const u8 __user *)src,
|
|
ret, "b", "b", "=q", 1);
|
|
if (likely(!ret))
|
|
__put_user_asm(tmp, (u8 __user *)dst,
|
|
@@ -176,7 +277,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
|
|
}
|
|
case 2: {
|
|
u16 tmp;
|
|
- __get_user_asm(tmp, (u16 __user *)src,
|
|
+ __get_user_asm(tmp, (const u16 __user *)src,
|
|
ret, "w", "w", "=r", 2);
|
|
if (likely(!ret))
|
|
__put_user_asm(tmp, (u16 __user *)dst,
|
|
@@ -186,7 +287,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
|
|
|
|
case 4: {
|
|
u32 tmp;
|
|
- __get_user_asm(tmp, (u32 __user *)src,
|
|
+ __get_user_asm(tmp, (const u32 __user *)src,
|
|
ret, "l", "k", "=r", 4);
|
|
if (likely(!ret))
|
|
__put_user_asm(tmp, (u32 __user *)dst,
|
|
@@ -195,7 +296,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
|
|
}
|
|
case 8: {
|
|
u64 tmp;
|
|
- __get_user_asm(tmp, (u64 __user *)src,
|
|
+ __get_user_asm(tmp, (const u64 __user *)src,
|
|
ret, "q", "", "=r", 8);
|
|
if (likely(!ret))
|
|
__put_user_asm(tmp, (u64 __user *)dst,
|
|
@@ -203,47 +304,92 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
|
|
return ret;
|
|
}
|
|
default:
|
|
- return copy_user_generic((__force void *)dst,
|
|
- (__force void *)src, size);
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
|
|
+ src += PAX_USER_SHADOW_BASE;
|
|
+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
|
|
+ dst += PAX_USER_SHADOW_BASE;
|
|
+#endif
|
|
+
|
|
+ return copy_user_generic((__force_kernel void *)dst,
|
|
+ (__force_kernel const void *)src, size);
|
|
}
|
|
}
|
|
|
|
__must_check long strnlen_user(const char __user *str, long n);
|
|
__must_check long __strnlen_user(const char __user *str, long n);
|
|
__must_check long strlen_user(const char __user *str);
|
|
-__must_check unsigned long clear_user(void __user *mem, unsigned long len);
|
|
-__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
|
|
+__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
|
|
+__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
|
|
|
|
static __must_check __always_inline int
|
|
-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
|
|
+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
|
|
{
|
|
- return copy_user_generic(dst, (__force const void *)src, size);
|
|
+ if (size > INT_MAX)
|
|
+ return size;
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ if (!__access_ok(VERIFY_READ, src, size))
|
|
+ return size;
|
|
+
|
|
+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
|
|
+ src += PAX_USER_SHADOW_BASE;
|
|
+#endif
|
|
+
|
|
+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
|
|
}
|
|
|
|
-static __must_check __always_inline int
|
|
-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
|
|
+static __must_check __always_inline unsigned long
|
|
+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
|
|
{
|
|
- return copy_user_generic((__force void *)dst, src, size);
|
|
+ if (size > INT_MAX)
|
|
+ return size;
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ if (!__access_ok(VERIFY_WRITE, dst, size))
|
|
+ return size;
|
|
+
|
|
+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
|
|
+ dst += PAX_USER_SHADOW_BASE;
|
|
+#endif
|
|
+
|
|
+ return copy_user_generic((__force_kernel void *)dst, src, size);
|
|
}
|
|
|
|
-extern long __copy_user_nocache(void *dst, const void __user *src,
|
|
- unsigned size, int zerorest);
|
|
+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
|
|
+ unsigned long size, int zerorest) __size_overflow(3);
|
|
|
|
-static inline int
|
|
-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
|
|
+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
|
|
{
|
|
might_sleep();
|
|
+
|
|
+ if (size > INT_MAX)
|
|
+ return size;
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ if (!__access_ok(VERIFY_READ, src, size))
|
|
+ return size;
|
|
+#endif
|
|
+
|
|
return __copy_user_nocache(dst, src, size, 1);
|
|
}
|
|
|
|
-static inline int
|
|
-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
|
|
- unsigned size)
|
|
+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
|
|
+ unsigned long size)
|
|
{
|
|
+ if (size > INT_MAX)
|
|
+ return size;
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ if (!__access_ok(VERIFY_READ, src, size))
|
|
+ return size;
|
|
+#endif
|
|
+
|
|
return __copy_user_nocache(dst, src, size, 0);
|
|
}
|
|
|
|
-unsigned long
|
|
-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
|
|
+extern unsigned long
|
|
+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
|
|
|
|
#endif /* _ASM_X86_UACCESS_64_H */
|
|
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
|
|
index bb05228..d763d5bd 100644
|
|
--- a/arch/x86/include/asm/vdso.h
|
|
+++ b/arch/x86/include/asm/vdso.h
|
|
@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
|
|
#define VDSO32_SYMBOL(base, name) \
|
|
({ \
|
|
extern const char VDSO32_##name[]; \
|
|
- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
|
|
+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
|
|
})
|
|
#endif
|
|
|
|
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
|
|
index 764b66a..ad3cfc8 100644
|
|
--- a/arch/x86/include/asm/x86_init.h
|
|
+++ b/arch/x86/include/asm/x86_init.h
|
|
@@ -29,7 +29,7 @@ struct x86_init_mpparse {
|
|
void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
|
|
void (*find_smp_config)(void);
|
|
void (*get_smp_config)(unsigned int early);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/**
|
|
* struct x86_init_resources - platform specific resource related ops
|
|
@@ -43,7 +43,7 @@ struct x86_init_resources {
|
|
void (*probe_roms)(void);
|
|
void (*reserve_resources)(void);
|
|
char *(*memory_setup)(void);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/**
|
|
* struct x86_init_irqs - platform specific interrupt setup
|
|
@@ -56,7 +56,7 @@ struct x86_init_irqs {
|
|
void (*pre_vector_init)(void);
|
|
void (*intr_init)(void);
|
|
void (*trap_init)(void);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/**
|
|
* struct x86_init_oem - oem platform specific customizing functions
|
|
@@ -66,7 +66,7 @@ struct x86_init_irqs {
|
|
struct x86_init_oem {
|
|
void (*arch_setup)(void);
|
|
void (*banner)(void);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/**
|
|
* struct x86_init_mapping - platform specific initial kernel pagetable setup
|
|
@@ -77,7 +77,7 @@ struct x86_init_oem {
|
|
*/
|
|
struct x86_init_mapping {
|
|
void (*pagetable_reserve)(u64 start, u64 end);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/**
|
|
* struct x86_init_paging - platform specific paging functions
|
|
@@ -87,7 +87,7 @@ struct x86_init_mapping {
|
|
struct x86_init_paging {
|
|
void (*pagetable_setup_start)(pgd_t *base);
|
|
void (*pagetable_setup_done)(pgd_t *base);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/**
|
|
* struct x86_init_timers - platform specific timer setup
|
|
@@ -102,7 +102,7 @@ struct x86_init_timers {
|
|
void (*tsc_pre_init)(void);
|
|
void (*timer_init)(void);
|
|
void (*wallclock_init)(void);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/**
|
|
* struct x86_init_iommu - platform specific iommu setup
|
|
@@ -110,7 +110,7 @@ struct x86_init_timers {
|
|
*/
|
|
struct x86_init_iommu {
|
|
int (*iommu_init)(void);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/**
|
|
* struct x86_init_pci - platform specific pci init functions
|
|
@@ -124,7 +124,7 @@ struct x86_init_pci {
|
|
int (*init)(void);
|
|
void (*init_irq)(void);
|
|
void (*fixup_irqs)(void);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/**
|
|
* struct x86_init_ops - functions for platform specific setup
|
|
@@ -140,7 +140,7 @@ struct x86_init_ops {
|
|
struct x86_init_timers timers;
|
|
struct x86_init_iommu iommu;
|
|
struct x86_init_pci pci;
|
|
-};
|
|
+} __no_const;
|
|
|
|
/**
|
|
* struct x86_cpuinit_ops - platform specific cpu hotplug setups
|
|
@@ -151,7 +151,7 @@ struct x86_cpuinit_ops {
|
|
void (*setup_percpu_clockev)(void);
|
|
void (*early_percpu_clock_init)(void);
|
|
void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/**
|
|
* struct x86_platform_ops - platform specific runtime functions
|
|
@@ -177,7 +177,7 @@ struct x86_platform_ops {
|
|
int (*i8042_detect)(void);
|
|
void (*save_sched_clock_state)(void);
|
|
void (*restore_sched_clock_state)(void);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct pci_dev;
|
|
|
|
@@ -186,7 +186,7 @@ struct x86_msi_ops {
|
|
void (*teardown_msi_irq)(unsigned int irq);
|
|
void (*teardown_msi_irqs)(struct pci_dev *dev);
|
|
void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
|
|
-};
|
|
+} __no_const;
|
|
|
|
extern struct x86_init_ops x86_init;
|
|
extern struct x86_cpuinit_ops x86_cpuinit;
|
|
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
|
|
index c6ce245..ffbdab7 100644
|
|
--- a/arch/x86/include/asm/xsave.h
|
|
+++ b/arch/x86/include/asm/xsave.h
|
|
@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
|
|
{
|
|
int err;
|
|
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
|
|
+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
|
|
+#endif
|
|
+
|
|
/*
|
|
* Clear the xsave header first, so that reserved fields are
|
|
* initialized to zero.
|
|
@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
|
|
static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
|
|
{
|
|
int err;
|
|
- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
|
|
+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
|
|
u32 lmask = mask;
|
|
u32 hmask = mask >> 32;
|
|
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
|
|
+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
|
|
+#endif
|
|
+
|
|
__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
|
|
"2:\n"
|
|
".section .fixup,\"ax\"\n"
|
|
diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
|
|
index 6a564ac..9b1340c 100644
|
|
--- a/arch/x86/kernel/acpi/realmode/Makefile
|
|
+++ b/arch/x86/kernel/acpi/realmode/Makefile
|
|
@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
|
|
$(call cc-option, -fno-stack-protector) \
|
|
$(call cc-option, -mpreferred-stack-boundary=2)
|
|
KBUILD_CFLAGS += $(call cc-option, -m32)
|
|
+ifdef CONSTIFY_PLUGIN
|
|
+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
|
|
+endif
|
|
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
|
|
GCOV_PROFILE := n
|
|
|
|
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
|
|
index 146a49c..1b5338b 100644
|
|
--- a/arch/x86/kernel/acpi/sleep.c
|
|
+++ b/arch/x86/kernel/acpi/sleep.c
|
|
@@ -98,8 +98,12 @@ int acpi_suspend_lowlevel(void)
|
|
header->trampoline_segment = trampoline_address() >> 4;
|
|
#ifdef CONFIG_SMP
|
|
stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
|
|
+
|
|
+ pax_open_kernel();
|
|
early_gdt_descr.address =
|
|
(unsigned long)get_cpu_gdt_table(smp_processor_id());
|
|
+ pax_close_kernel();
|
|
+
|
|
initial_gs = per_cpu_offset(smp_processor_id());
|
|
#endif
|
|
initial_code = (unsigned long)wakeup_long64;
|
|
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
|
|
index 7261083..5c12053 100644
|
|
--- a/arch/x86/kernel/acpi/wakeup_32.S
|
|
+++ b/arch/x86/kernel/acpi/wakeup_32.S
|
|
@@ -30,13 +30,11 @@ wakeup_pmode_return:
|
|
# and restore the stack ... but you need gdt for this to work
|
|
movl saved_context_esp, %esp
|
|
|
|
- movl %cs:saved_magic, %eax
|
|
- cmpl $0x12345678, %eax
|
|
+ cmpl $0x12345678, saved_magic
|
|
jne bogus_magic
|
|
|
|
# jump to place where we left off
|
|
- movl saved_eip, %eax
|
|
- jmp *%eax
|
|
+ jmp *(saved_eip)
|
|
|
|
bogus_magic:
|
|
jmp bogus_magic
|
|
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
|
|
index bda833c..a9bdd97 100644
|
|
--- a/arch/x86/kernel/alternative.c
|
|
+++ b/arch/x86/kernel/alternative.c
|
|
@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
|
|
*/
|
|
for (a = start; a < end; a++) {
|
|
instr = (u8 *)&a->instr_offset + a->instr_offset;
|
|
+
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
|
|
+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
|
|
+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
|
|
+#endif
|
|
+
|
|
replacement = (u8 *)&a->repl_offset + a->repl_offset;
|
|
BUG_ON(a->replacementlen > a->instrlen);
|
|
BUG_ON(a->instrlen > sizeof(insnbuf));
|
|
@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
|
|
for (poff = start; poff < end; poff++) {
|
|
u8 *ptr = (u8 *)poff + *poff;
|
|
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
|
|
+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
|
|
+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
|
|
+#endif
|
|
+
|
|
if (!*poff || ptr < text || ptr >= text_end)
|
|
continue;
|
|
/* turn DS segment override prefix into lock prefix */
|
|
- if (*ptr == 0x3e)
|
|
+ if (*ktla_ktva(ptr) == 0x3e)
|
|
text_poke(ptr, ((unsigned char []){0xf0}), 1);
|
|
};
|
|
mutex_unlock(&text_mutex);
|
|
@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
|
|
for (poff = start; poff < end; poff++) {
|
|
u8 *ptr = (u8 *)poff + *poff;
|
|
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
|
|
+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
|
|
+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
|
|
+#endif
|
|
+
|
|
if (!*poff || ptr < text || ptr >= text_end)
|
|
continue;
|
|
/* turn lock prefix into DS segment override prefix */
|
|
- if (*ptr == 0xf0)
|
|
+ if (*ktla_ktva(ptr) == 0xf0)
|
|
text_poke(ptr, ((unsigned char []){0x3E}), 1);
|
|
};
|
|
mutex_unlock(&text_mutex);
|
|
@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
|
|
|
|
BUG_ON(p->len > MAX_PATCH_LEN);
|
|
/* prep the buffer with the original instructions */
|
|
- memcpy(insnbuf, p->instr, p->len);
|
|
+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
|
|
used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
|
|
(unsigned long)p->instr, p->len);
|
|
|
|
@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
|
|
if (smp_alt_once)
|
|
free_init_pages("SMP alternatives",
|
|
(unsigned long)__smp_locks,
|
|
- (unsigned long)__smp_locks_end);
|
|
+ PAGE_ALIGN((unsigned long)__smp_locks_end));
|
|
|
|
restart_nmi();
|
|
}
|
|
@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
|
|
* instructions. And on the local CPU you need to be protected again NMI or MCE
|
|
* handlers seeing an inconsistent instruction while you patch.
|
|
*/
|
|
-void *__init_or_module text_poke_early(void *addr, const void *opcode,
|
|
+void *__kprobes text_poke_early(void *addr, const void *opcode,
|
|
size_t len)
|
|
{
|
|
unsigned long flags;
|
|
local_irq_save(flags);
|
|
- memcpy(addr, opcode, len);
|
|
+
|
|
+ pax_open_kernel();
|
|
+ memcpy(ktla_ktva(addr), opcode, len);
|
|
sync_core();
|
|
+ pax_close_kernel();
|
|
+
|
|
local_irq_restore(flags);
|
|
/* Could also do a CLFLUSH here to speed up CPU recovery; but
|
|
that causes hangs on some VIA CPUs. */
|
|
@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
|
|
*/
|
|
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
|
|
{
|
|
- unsigned long flags;
|
|
- char *vaddr;
|
|
+ unsigned char *vaddr = ktla_ktva(addr);
|
|
struct page *pages[2];
|
|
- int i;
|
|
+ size_t i;
|
|
|
|
if (!core_kernel_text((unsigned long)addr)) {
|
|
- pages[0] = vmalloc_to_page(addr);
|
|
- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
|
|
+ pages[0] = vmalloc_to_page(vaddr);
|
|
+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
|
|
} else {
|
|
- pages[0] = virt_to_page(addr);
|
|
+ pages[0] = virt_to_page(vaddr);
|
|
WARN_ON(!PageReserved(pages[0]));
|
|
- pages[1] = virt_to_page(addr + PAGE_SIZE);
|
|
+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
|
|
}
|
|
BUG_ON(!pages[0]);
|
|
- local_irq_save(flags);
|
|
- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
|
|
- if (pages[1])
|
|
- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
|
|
- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
|
|
- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
|
|
- clear_fixmap(FIX_TEXT_POKE0);
|
|
- if (pages[1])
|
|
- clear_fixmap(FIX_TEXT_POKE1);
|
|
- local_flush_tlb();
|
|
- sync_core();
|
|
- /* Could also do a CLFLUSH here to speed up CPU recovery; but
|
|
- that causes hangs on some VIA CPUs. */
|
|
+ text_poke_early(addr, opcode, len);
|
|
for (i = 0; i < len; i++)
|
|
- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
|
|
- local_irq_restore(flags);
|
|
+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
|
|
return addr;
|
|
}
|
|
|
|
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
|
|
index cb5b54e..6ad18da 100644
|
|
--- a/arch/x86/kernel/apic/apic.c
|
|
+++ b/arch/x86/kernel/apic/apic.c
|
|
@@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
|
|
/*
|
|
* Debug level, exported for io_apic.c
|
|
*/
|
|
-unsigned int apic_verbosity;
|
|
+int apic_verbosity;
|
|
|
|
int pic_mode;
|
|
|
|
@@ -1917,7 +1917,7 @@ void smp_error_interrupt(struct pt_regs *regs)
|
|
apic_write(APIC_ESR, 0);
|
|
v1 = apic_read(APIC_ESR);
|
|
ack_APIC_irq();
|
|
- atomic_inc(&irq_err_count);
|
|
+ atomic_inc_unchecked(&irq_err_count);
|
|
|
|
apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
|
|
smp_processor_id(), v0 , v1);
|
|
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
|
|
index e88300d..cd5a87a 100644
|
|
--- a/arch/x86/kernel/apic/io_apic.c
|
|
+++ b/arch/x86/kernel/apic/io_apic.c
|
|
@@ -83,7 +83,9 @@ static struct io_apic_ops io_apic_ops = {
|
|
|
|
void __init set_io_apic_ops(const struct io_apic_ops *ops)
|
|
{
|
|
- io_apic_ops = *ops;
|
|
+ pax_open_kernel();
|
|
+ memcpy((void*)&io_apic_ops, ops, sizeof io_apic_ops);
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
/*
|
|
@@ -1135,7 +1137,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
|
|
}
|
|
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
|
|
|
|
-void lock_vector_lock(void)
|
|
+void lock_vector_lock(void) __acquires(vector_lock)
|
|
{
|
|
/* Used to the online set of cpus does not change
|
|
* during assign_irq_vector.
|
|
@@ -1143,7 +1145,7 @@ void lock_vector_lock(void)
|
|
raw_spin_lock(&vector_lock);
|
|
}
|
|
|
|
-void unlock_vector_lock(void)
|
|
+void unlock_vector_lock(void) __releases(vector_lock)
|
|
{
|
|
raw_spin_unlock(&vector_lock);
|
|
}
|
|
@@ -2549,7 +2551,7 @@ static void ack_apic_edge(struct irq_data *data)
|
|
ack_APIC_irq();
|
|
}
|
|
|
|
-atomic_t irq_mis_count;
|
|
+atomic_unchecked_t irq_mis_count;
|
|
|
|
#ifdef CONFIG_GENERIC_PENDING_IRQ
|
|
static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
|
|
@@ -2667,7 +2669,7 @@ static void ack_apic_level(struct irq_data *data)
|
|
* at the cpu.
|
|
*/
|
|
if (!(v & (1 << (i & 0x1f)))) {
|
|
- atomic_inc(&irq_mis_count);
|
|
+ atomic_inc_unchecked(&irq_mis_count);
|
|
|
|
eoi_ioapic_irq(irq, cfg);
|
|
}
|
|
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
|
|
index 459e78c..f037006 100644
|
|
--- a/arch/x86/kernel/apm_32.c
|
|
+++ b/arch/x86/kernel/apm_32.c
|
|
@@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex);
|
|
* This is for buggy BIOS's that refer to (real mode) segment 0x40
|
|
* even though they are called in protected mode.
|
|
*/
|
|
-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
|
|
+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
|
|
(unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
|
|
|
|
static const char driver_version[] = "1.16ac"; /* no spaces */
|
|
@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
|
|
BUG_ON(cpu != 0);
|
|
gdt = get_cpu_gdt_table(cpu);
|
|
save_desc_40 = gdt[0x40 / 8];
|
|
+
|
|
+ pax_open_kernel();
|
|
gdt[0x40 / 8] = bad_bios_desc;
|
|
+ pax_close_kernel();
|
|
|
|
apm_irq_save(flags);
|
|
APM_DO_SAVE_SEGS;
|
|
@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
|
|
&call->esi);
|
|
APM_DO_RESTORE_SEGS;
|
|
apm_irq_restore(flags);
|
|
+
|
|
+ pax_open_kernel();
|
|
gdt[0x40 / 8] = save_desc_40;
|
|
+ pax_close_kernel();
|
|
+
|
|
put_cpu();
|
|
|
|
return call->eax & 0xff;
|
|
@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
|
|
BUG_ON(cpu != 0);
|
|
gdt = get_cpu_gdt_table(cpu);
|
|
save_desc_40 = gdt[0x40 / 8];
|
|
+
|
|
+ pax_open_kernel();
|
|
gdt[0x40 / 8] = bad_bios_desc;
|
|
+ pax_close_kernel();
|
|
|
|
apm_irq_save(flags);
|
|
APM_DO_SAVE_SEGS;
|
|
@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
|
|
&call->eax);
|
|
APM_DO_RESTORE_SEGS;
|
|
apm_irq_restore(flags);
|
|
+
|
|
+ pax_open_kernel();
|
|
gdt[0x40 / 8] = save_desc_40;
|
|
+ pax_close_kernel();
|
|
+
|
|
put_cpu();
|
|
return error;
|
|
}
|
|
@@ -2345,12 +2359,15 @@ static int __init apm_init(void)
|
|
* code to that CPU.
|
|
*/
|
|
gdt = get_cpu_gdt_table(0);
|
|
+
|
|
+ pax_open_kernel();
|
|
set_desc_base(&gdt[APM_CS >> 3],
|
|
(unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
|
|
set_desc_base(&gdt[APM_CS_16 >> 3],
|
|
(unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
|
|
set_desc_base(&gdt[APM_DS >> 3],
|
|
(unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
|
|
+ pax_close_kernel();
|
|
|
|
proc_create("apm", 0, NULL, &apm_file_ops);
|
|
|
|
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
|
|
index 68de2dc..1f3c720 100644
|
|
--- a/arch/x86/kernel/asm-offsets.c
|
|
+++ b/arch/x86/kernel/asm-offsets.c
|
|
@@ -33,6 +33,8 @@ void common(void) {
|
|
OFFSET(TI_status, thread_info, status);
|
|
OFFSET(TI_addr_limit, thread_info, addr_limit);
|
|
OFFSET(TI_preempt_count, thread_info, preempt_count);
|
|
+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
|
|
+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
|
|
|
|
BLANK();
|
|
OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
|
|
@@ -53,8 +55,26 @@ void common(void) {
|
|
OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
|
|
OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
|
|
OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
|
|
+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
|
|
+#ifdef CONFIG_X86_64
|
|
+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
|
|
+#endif
|
|
#endif
|
|
|
|
+#endif
|
|
+
|
|
+ BLANK();
|
|
+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
|
|
+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
|
|
+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
|
|
+
|
|
#ifdef CONFIG_XEN
|
|
BLANK();
|
|
OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
|
|
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
|
|
index 1b4754f..fbb4227 100644
|
|
--- a/arch/x86/kernel/asm-offsets_64.c
|
|
+++ b/arch/x86/kernel/asm-offsets_64.c
|
|
@@ -76,6 +76,7 @@ int main(void)
|
|
BLANK();
|
|
#undef ENTRY
|
|
|
|
+ DEFINE(TSS_size, sizeof(struct tss_struct));
|
|
OFFSET(TSS_ist, tss_struct, x86_tss.ist);
|
|
BLANK();
|
|
|
|
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
|
|
index 6ab6aa2..8f71507 100644
|
|
--- a/arch/x86/kernel/cpu/Makefile
|
|
+++ b/arch/x86/kernel/cpu/Makefile
|
|
@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
|
|
CFLAGS_REMOVE_perf_event.o = -pg
|
|
endif
|
|
|
|
-# Make sure load_percpu_segment has no stackprotector
|
|
-nostackp := $(call cc-option, -fno-stack-protector)
|
|
-CFLAGS_common.o := $(nostackp)
|
|
-
|
|
obj-y := intel_cacheinfo.o scattered.o topology.o
|
|
obj-y += proc.o capflags.o powerflags.o common.o
|
|
obj-y += vmware.o hypervisor.o sched.o mshyperv.o
|
|
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
|
|
index 5beec8a..83b6959 100644
|
|
--- a/arch/x86/kernel/cpu/amd.c
|
|
+++ b/arch/x86/kernel/cpu/amd.c
|
|
@@ -729,7 +729,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
|
|
unsigned int size)
|
|
{
|
|
/* AMD errata T13 (order #21922) */
|
|
- if ((c->x86 == 6)) {
|
|
+ if (c->x86 == 6) {
|
|
/* Duron Rev A0 */
|
|
if (c->x86_model == 3 && c->x86_mask == 0)
|
|
size = 64;
|
|
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
|
|
index 114db0f..1f0e264 100644
|
|
--- a/arch/x86/kernel/cpu/common.c
|
|
+++ b/arch/x86/kernel/cpu/common.c
|
|
@@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
|
|
|
|
static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
|
|
|
|
-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
|
|
-#ifdef CONFIG_X86_64
|
|
- /*
|
|
- * We need valid kernel segments for data and code in long mode too
|
|
- * IRET will check the segment types kkeil 2000/10/28
|
|
- * Also sysret mandates a special GDT layout
|
|
- *
|
|
- * TLS descriptors are currently at a different place compared to i386.
|
|
- * Hopefully nobody expects them at a fixed place (Wine?)
|
|
- */
|
|
- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
|
|
- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
|
|
- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
|
|
- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
|
|
- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
|
|
- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
|
|
-#else
|
|
- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
|
|
- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
|
|
- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
|
|
- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
|
|
- /*
|
|
- * Segments used for calling PnP BIOS have byte granularity.
|
|
- * They code segments and data segments have fixed 64k limits,
|
|
- * the transfer segment sizes are set at run time.
|
|
- */
|
|
- /* 32-bit code */
|
|
- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
|
|
- /* 16-bit code */
|
|
- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
|
|
- /* 16-bit data */
|
|
- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
|
|
- /* 16-bit data */
|
|
- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
|
|
- /* 16-bit data */
|
|
- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
|
|
- /*
|
|
- * The APM segments have byte granularity and their bases
|
|
- * are set at run time. All have 64k limits.
|
|
- */
|
|
- /* 32-bit code */
|
|
- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
|
|
- /* 16-bit code */
|
|
- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
|
|
- /* data */
|
|
- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
|
|
-
|
|
- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
|
|
- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
|
|
- GDT_STACK_CANARY_INIT
|
|
-#endif
|
|
-} };
|
|
-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
|
|
-
|
|
static int __init x86_xsave_setup(char *s)
|
|
{
|
|
if (strlen(s))
|
|
@@ -376,7 +322,7 @@ void switch_to_new_gdt(int cpu)
|
|
{
|
|
struct desc_ptr gdt_descr;
|
|
|
|
- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
|
|
+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
|
|
gdt_descr.size = GDT_SIZE - 1;
|
|
load_gdt(&gdt_descr);
|
|
/* Reload the per-cpu base */
|
|
@@ -843,6 +789,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
|
|
/* Filter out anything that depends on CPUID levels we don't have */
|
|
filter_cpuid_features(c, true);
|
|
|
|
+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
|
|
+ setup_clear_cpu_cap(X86_FEATURE_SEP);
|
|
+#endif
|
|
+
|
|
/* If the model name is still unset, do table lookup. */
|
|
if (!c->x86_model_id[0]) {
|
|
const char *p;
|
|
@@ -1023,10 +973,12 @@ static __init int setup_disablecpuid(char *arg)
|
|
}
|
|
__setup("clearcpuid=", setup_disablecpuid);
|
|
|
|
+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
|
|
+EXPORT_PER_CPU_SYMBOL(current_tinfo);
|
|
+
|
|
#ifdef CONFIG_X86_64
|
|
struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
|
|
-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
|
|
- (unsigned long) nmi_idt_table };
|
|
+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
|
|
|
|
DEFINE_PER_CPU_FIRST(union irq_stack_union,
|
|
irq_stack_union) __aligned(PAGE_SIZE);
|
|
@@ -1040,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
|
|
EXPORT_PER_CPU_SYMBOL(current_task);
|
|
|
|
DEFINE_PER_CPU(unsigned long, kernel_stack) =
|
|
- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
|
|
+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
|
|
EXPORT_PER_CPU_SYMBOL(kernel_stack);
|
|
|
|
DEFINE_PER_CPU(char *, irq_stack_ptr) =
|
|
@@ -1128,7 +1080,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
|
|
{
|
|
memset(regs, 0, sizeof(struct pt_regs));
|
|
regs->fs = __KERNEL_PERCPU;
|
|
- regs->gs = __KERNEL_STACK_CANARY;
|
|
+ savesegment(gs, regs->gs);
|
|
|
|
return regs;
|
|
}
|
|
@@ -1183,7 +1135,7 @@ void __cpuinit cpu_init(void)
|
|
int i;
|
|
|
|
cpu = stack_smp_processor_id();
|
|
- t = &per_cpu(init_tss, cpu);
|
|
+ t = init_tss + cpu;
|
|
oist = &per_cpu(orig_ist, cpu);
|
|
|
|
#ifdef CONFIG_NUMA
|
|
@@ -1209,7 +1161,7 @@ void __cpuinit cpu_init(void)
|
|
switch_to_new_gdt(cpu);
|
|
loadsegment(fs, 0);
|
|
|
|
- load_idt((const struct desc_ptr *)&idt_descr);
|
|
+ load_idt(&idt_descr);
|
|
|
|
memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
|
|
syscall_init();
|
|
@@ -1218,7 +1170,6 @@ void __cpuinit cpu_init(void)
|
|
wrmsrl(MSR_KERNEL_GS_BASE, 0);
|
|
barrier();
|
|
|
|
- x86_configure_nx();
|
|
if (cpu != 0)
|
|
enable_x2apic();
|
|
|
|
@@ -1274,7 +1225,7 @@ void __cpuinit cpu_init(void)
|
|
{
|
|
int cpu = smp_processor_id();
|
|
struct task_struct *curr = current;
|
|
- struct tss_struct *t = &per_cpu(init_tss, cpu);
|
|
+ struct tss_struct *t = init_tss + cpu;
|
|
struct thread_struct *thread = &curr->thread;
|
|
|
|
if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
|
|
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
|
|
index e7a64dd..6a192f6 100644
|
|
--- a/arch/x86/kernel/cpu/intel.c
|
|
+++ b/arch/x86/kernel/cpu/intel.c
|
|
@@ -189,7 +189,7 @@ static void __cpuinit trap_init_f00f_bug(void)
|
|
* Update the IDT descriptor and reload the IDT so that
|
|
* it uses the read-only mapped virtual address.
|
|
*/
|
|
- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
|
|
+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
|
|
load_idt(&idt_descr);
|
|
}
|
|
#endif
|
|
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
|
|
index 9eeaed4..4b9d2b2 100644
|
|
--- a/arch/x86/kernel/cpu/mcheck/mce.c
|
|
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
|
|
@@ -42,6 +42,7 @@
|
|
#include <asm/processor.h>
|
|
#include <asm/mce.h>
|
|
#include <asm/msr.h>
|
|
+#include <asm/local.h>
|
|
|
|
#include "mce-internal.h"
|
|
|
|
@@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
|
|
!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
|
|
m->cs, m->ip);
|
|
|
|
- if (m->cs == __KERNEL_CS)
|
|
+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
|
|
print_symbol("{%s}", m->ip);
|
|
pr_cont("\n");
|
|
}
|
|
@@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
|
|
|
|
#define PANIC_TIMEOUT 5 /* 5 seconds */
|
|
|
|
-static atomic_t mce_paniced;
|
|
+static atomic_unchecked_t mce_paniced;
|
|
|
|
static int fake_panic;
|
|
-static atomic_t mce_fake_paniced;
|
|
+static atomic_unchecked_t mce_fake_paniced;
|
|
|
|
/* Panic in progress. Enable interrupts and wait for final IPI */
|
|
static void wait_for_panic(void)
|
|
@@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
|
|
/*
|
|
* Make sure only one CPU runs in machine check panic
|
|
*/
|
|
- if (atomic_inc_return(&mce_paniced) > 1)
|
|
+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
|
|
wait_for_panic();
|
|
barrier();
|
|
|
|
@@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
|
|
console_verbose();
|
|
} else {
|
|
/* Don't log too much for fake panic */
|
|
- if (atomic_inc_return(&mce_fake_paniced) > 1)
|
|
+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
|
|
return;
|
|
}
|
|
/* First print corrected ones that are still unlogged */
|
|
@@ -687,7 +688,7 @@ static int mce_timed_out(u64 *t)
|
|
* might have been modified by someone else.
|
|
*/
|
|
rmb();
|
|
- if (atomic_read(&mce_paniced))
|
|
+ if (atomic_read_unchecked(&mce_paniced))
|
|
wait_for_panic();
|
|
if (!monarch_timeout)
|
|
goto out;
|
|
@@ -1540,7 +1541,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
|
|
}
|
|
|
|
/* Call the installed machine check handler for this CPU setup. */
|
|
-void (*machine_check_vector)(struct pt_regs *, long error_code) =
|
|
+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
|
|
unexpected_machine_check;
|
|
|
|
/*
|
|
@@ -1563,7 +1564,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
|
|
return;
|
|
}
|
|
|
|
+ pax_open_kernel();
|
|
machine_check_vector = do_machine_check;
|
|
+ pax_close_kernel();
|
|
|
|
__mcheck_cpu_init_generic();
|
|
__mcheck_cpu_init_vendor(c);
|
|
@@ -1577,7 +1580,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
|
|
*/
|
|
|
|
static DEFINE_SPINLOCK(mce_chrdev_state_lock);
|
|
-static int mce_chrdev_open_count; /* #times opened */
|
|
+static local_t mce_chrdev_open_count; /* #times opened */
|
|
static int mce_chrdev_open_exclu; /* already open exclusive? */
|
|
|
|
static int mce_chrdev_open(struct inode *inode, struct file *file)
|
|
@@ -1585,7 +1588,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
|
|
spin_lock(&mce_chrdev_state_lock);
|
|
|
|
if (mce_chrdev_open_exclu ||
|
|
- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
|
|
+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
|
|
spin_unlock(&mce_chrdev_state_lock);
|
|
|
|
return -EBUSY;
|
|
@@ -1593,7 +1596,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
|
|
|
|
if (file->f_flags & O_EXCL)
|
|
mce_chrdev_open_exclu = 1;
|
|
- mce_chrdev_open_count++;
|
|
+ local_inc(&mce_chrdev_open_count);
|
|
|
|
spin_unlock(&mce_chrdev_state_lock);
|
|
|
|
@@ -1604,7 +1607,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
|
|
{
|
|
spin_lock(&mce_chrdev_state_lock);
|
|
|
|
- mce_chrdev_open_count--;
|
|
+ local_dec(&mce_chrdev_open_count);
|
|
mce_chrdev_open_exclu = 0;
|
|
|
|
spin_unlock(&mce_chrdev_state_lock);
|
|
@@ -2329,7 +2332,7 @@ struct dentry *mce_get_debugfs_dir(void)
|
|
static void mce_reset(void)
|
|
{
|
|
cpu_missing = 0;
|
|
- atomic_set(&mce_fake_paniced, 0);
|
|
+ atomic_set_unchecked(&mce_fake_paniced, 0);
|
|
atomic_set(&mce_executing, 0);
|
|
atomic_set(&mce_callin, 0);
|
|
atomic_set(&global_nwo, 0);
|
|
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
|
|
index 2d5454c..51987eb 100644
|
|
--- a/arch/x86/kernel/cpu/mcheck/p5.c
|
|
+++ b/arch/x86/kernel/cpu/mcheck/p5.c
|
|
@@ -11,6 +11,7 @@
|
|
#include <asm/processor.h>
|
|
#include <asm/mce.h>
|
|
#include <asm/msr.h>
|
|
+#include <asm/pgtable.h>
|
|
|
|
/* By default disabled */
|
|
int mce_p5_enabled __read_mostly;
|
|
@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
|
|
if (!cpu_has(c, X86_FEATURE_MCE))
|
|
return;
|
|
|
|
+ pax_open_kernel();
|
|
machine_check_vector = pentium_machine_check;
|
|
+ pax_close_kernel();
|
|
/* Make sure the vector pointer is visible before we enable MCEs: */
|
|
wmb();
|
|
|
|
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
|
|
index 2d7998f..17c9de1 100644
|
|
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
|
|
+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
|
|
@@ -10,6 +10,7 @@
|
|
#include <asm/processor.h>
|
|
#include <asm/mce.h>
|
|
#include <asm/msr.h>
|
|
+#include <asm/pgtable.h>
|
|
|
|
/* Machine check handler for WinChip C6: */
|
|
static void winchip_machine_check(struct pt_regs *regs, long error_code)
|
|
@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
|
|
{
|
|
u32 lo, hi;
|
|
|
|
+ pax_open_kernel();
|
|
machine_check_vector = winchip_machine_check;
|
|
+ pax_close_kernel();
|
|
/* Make sure the vector pointer is visible before we enable MCEs: */
|
|
wmb();
|
|
|
|
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
|
|
index 6b96110..0da73eb 100644
|
|
--- a/arch/x86/kernel/cpu/mtrr/main.c
|
|
+++ b/arch/x86/kernel/cpu/mtrr/main.c
|
|
@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
|
|
u64 size_or_mask, size_and_mask;
|
|
static bool mtrr_aps_delayed_init;
|
|
|
|
-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
|
|
+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
|
|
|
|
const struct mtrr_ops *mtrr_if;
|
|
|
|
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
|
|
index df5e41f..816c719 100644
|
|
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
|
|
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
|
|
@@ -25,7 +25,7 @@ struct mtrr_ops {
|
|
int (*validate_add_page)(unsigned long base, unsigned long size,
|
|
unsigned int type);
|
|
int (*have_wrcomb)(void);
|
|
-};
|
|
+} __do_const;
|
|
|
|
extern int generic_get_free_region(unsigned long base, unsigned long size,
|
|
int replace_reg);
|
|
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
|
|
index 87477a1..f92e7b3 100644
|
|
--- a/arch/x86/kernel/cpu/perf_event.c
|
|
+++ b/arch/x86/kernel/cpu/perf_event.c
|
|
@@ -1838,7 +1838,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
|
|
break;
|
|
|
|
perf_callchain_store(entry, frame.return_address);
|
|
- fp = frame.next_frame;
|
|
+ fp = (const void __force_user *)frame.next_frame;
|
|
}
|
|
}
|
|
|
|
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
|
|
index 69e231b..8b4e1c6 100644
|
|
--- a/arch/x86/kernel/crash.c
|
|
+++ b/arch/x86/kernel/crash.c
|
|
@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
|
|
{
|
|
#ifdef CONFIG_X86_32
|
|
struct pt_regs fixed_regs;
|
|
-#endif
|
|
|
|
-#ifdef CONFIG_X86_32
|
|
- if (!user_mode_vm(regs)) {
|
|
+ if (!user_mode(regs)) {
|
|
crash_fixup_ss_esp(&fixed_regs, regs);
|
|
regs = &fixed_regs;
|
|
}
|
|
diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
|
|
index 37250fe..bf2ec74 100644
|
|
--- a/arch/x86/kernel/doublefault_32.c
|
|
+++ b/arch/x86/kernel/doublefault_32.c
|
|
@@ -11,7 +11,7 @@
|
|
|
|
#define DOUBLEFAULT_STACKSIZE (1024)
|
|
static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
|
|
-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
|
|
+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
|
|
|
|
#define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
|
|
|
|
@@ -21,7 +21,7 @@ static void doublefault_fn(void)
|
|
unsigned long gdt, tss;
|
|
|
|
store_gdt(&gdt_desc);
|
|
- gdt = gdt_desc.address;
|
|
+ gdt = (unsigned long)gdt_desc.address;
|
|
|
|
printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
|
|
|
|
@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
|
|
/* 0x2 bit is always set */
|
|
.flags = X86_EFLAGS_SF | 0x2,
|
|
.sp = STACK_START,
|
|
- .es = __USER_DS,
|
|
+ .es = __KERNEL_DS,
|
|
.cs = __KERNEL_CS,
|
|
.ss = __KERNEL_DS,
|
|
- .ds = __USER_DS,
|
|
+ .ds = __KERNEL_DS,
|
|
.fs = __KERNEL_PERCPU,
|
|
|
|
.__cr3 = __pa_nodebug(swapper_pg_dir),
|
|
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
|
|
index 1b81839..6027784 100644
|
|
--- a/arch/x86/kernel/dumpstack.c
|
|
+++ b/arch/x86/kernel/dumpstack.c
|
|
@@ -35,16 +35,14 @@ void printk_address(unsigned long address, int reliable)
|
|
static void
|
|
print_ftrace_graph_addr(unsigned long addr, void *data,
|
|
const struct stacktrace_ops *ops,
|
|
- struct thread_info *tinfo, int *graph)
|
|
+ struct task_struct *task, int *graph)
|
|
{
|
|
- struct task_struct *task;
|
|
unsigned long ret_addr;
|
|
int index;
|
|
|
|
if (addr != (unsigned long)return_to_handler)
|
|
return;
|
|
|
|
- task = tinfo->task;
|
|
index = task->curr_ret_stack;
|
|
|
|
if (!task->ret_stack || index < *graph)
|
|
@@ -61,7 +59,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
|
|
static inline void
|
|
print_ftrace_graph_addr(unsigned long addr, void *data,
|
|
const struct stacktrace_ops *ops,
|
|
- struct thread_info *tinfo, int *graph)
|
|
+ struct task_struct *task, int *graph)
|
|
{ }
|
|
#endif
|
|
|
|
@@ -72,10 +70,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
|
|
* severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
|
|
*/
|
|
|
|
-static inline int valid_stack_ptr(struct thread_info *tinfo,
|
|
- void *p, unsigned int size, void *end)
|
|
+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
|
|
{
|
|
- void *t = tinfo;
|
|
if (end) {
|
|
if (p < end && p >= (end-THREAD_SIZE))
|
|
return 1;
|
|
@@ -86,14 +82,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
|
|
}
|
|
|
|
unsigned long
|
|
-print_context_stack(struct thread_info *tinfo,
|
|
+print_context_stack(struct task_struct *task, void *stack_start,
|
|
unsigned long *stack, unsigned long bp,
|
|
const struct stacktrace_ops *ops, void *data,
|
|
unsigned long *end, int *graph)
|
|
{
|
|
struct stack_frame *frame = (struct stack_frame *)bp;
|
|
|
|
- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
|
|
+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
|
|
unsigned long addr;
|
|
|
|
addr = *stack;
|
|
@@ -105,7 +101,7 @@ print_context_stack(struct thread_info *tinfo,
|
|
} else {
|
|
ops->address(data, addr, 0);
|
|
}
|
|
- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
|
|
+ print_ftrace_graph_addr(addr, data, ops, task, graph);
|
|
}
|
|
stack++;
|
|
}
|
|
@@ -114,7 +110,7 @@ print_context_stack(struct thread_info *tinfo,
|
|
EXPORT_SYMBOL_GPL(print_context_stack);
|
|
|
|
unsigned long
|
|
-print_context_stack_bp(struct thread_info *tinfo,
|
|
+print_context_stack_bp(struct task_struct *task, void *stack_start,
|
|
unsigned long *stack, unsigned long bp,
|
|
const struct stacktrace_ops *ops, void *data,
|
|
unsigned long *end, int *graph)
|
|
@@ -122,7 +118,7 @@ print_context_stack_bp(struct thread_info *tinfo,
|
|
struct stack_frame *frame = (struct stack_frame *)bp;
|
|
unsigned long *ret_addr = &frame->return_address;
|
|
|
|
- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
|
|
+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
|
|
unsigned long addr = *ret_addr;
|
|
|
|
if (!__kernel_text_address(addr))
|
|
@@ -131,7 +127,7 @@ print_context_stack_bp(struct thread_info *tinfo,
|
|
ops->address(data, addr, 1);
|
|
frame = frame->next_frame;
|
|
ret_addr = &frame->return_address;
|
|
- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
|
|
+ print_ftrace_graph_addr(addr, data, ops, task, graph);
|
|
}
|
|
|
|
return (unsigned long)frame;
|
|
@@ -189,7 +185,7 @@ void dump_stack(void)
|
|
|
|
bp = stack_frame(current, NULL);
|
|
printk("Pid: %d, comm: %.20s %s %s %.*s\n",
|
|
- current->pid, current->comm, print_tainted(),
|
|
+ task_pid_nr(current), current->comm, print_tainted(),
|
|
init_utsname()->release,
|
|
(int)strcspn(init_utsname()->version, " "),
|
|
init_utsname()->version);
|
|
@@ -246,7 +242,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
|
|
panic("Fatal exception in interrupt");
|
|
if (panic_on_oops)
|
|
panic("Fatal exception");
|
|
- do_exit(signr);
|
|
+ do_group_exit(signr);
|
|
}
|
|
|
|
int __kprobes __die(const char *str, struct pt_regs *regs, long err)
|
|
@@ -273,7 +269,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
|
|
|
|
show_registers(regs);
|
|
#ifdef CONFIG_X86_32
|
|
- if (user_mode_vm(regs)) {
|
|
+ if (user_mode(regs)) {
|
|
sp = regs->sp;
|
|
ss = regs->ss & 0xffff;
|
|
} else {
|
|
@@ -301,7 +297,7 @@ void die(const char *str, struct pt_regs *regs, long err)
|
|
unsigned long flags = oops_begin();
|
|
int sig = SIGSEGV;
|
|
|
|
- if (!user_mode_vm(regs))
|
|
+ if (!user_mode(regs))
|
|
report_bug(regs->ip, regs);
|
|
|
|
if (__die(str, regs, err))
|
|
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
|
|
index 88ec912..e95e935 100644
|
|
--- a/arch/x86/kernel/dumpstack_32.c
|
|
+++ b/arch/x86/kernel/dumpstack_32.c
|
|
@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
|
|
bp = stack_frame(task, regs);
|
|
|
|
for (;;) {
|
|
- struct thread_info *context;
|
|
+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
|
|
|
|
- context = (struct thread_info *)
|
|
- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
|
|
- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
|
|
+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
|
|
|
|
- stack = (unsigned long *)context->previous_esp;
|
|
- if (!stack)
|
|
+ if (stack_start == task_stack_page(task))
|
|
break;
|
|
+ stack = *(unsigned long **)stack_start;
|
|
if (ops->stack(data, "IRQ") < 0)
|
|
break;
|
|
touch_nmi_watchdog();
|
|
@@ -87,7 +85,7 @@ void show_registers(struct pt_regs *regs)
|
|
int i;
|
|
|
|
print_modules();
|
|
- __show_regs(regs, !user_mode_vm(regs));
|
|
+ __show_regs(regs, !user_mode(regs));
|
|
|
|
printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
|
|
TASK_COMM_LEN, current->comm, task_pid_nr(current),
|
|
@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
|
|
* When in-kernel, we also print out the stack and code at the
|
|
* time of the fault..
|
|
*/
|
|
- if (!user_mode_vm(regs)) {
|
|
+ if (!user_mode(regs)) {
|
|
unsigned int code_prologue = code_bytes * 43 / 64;
|
|
unsigned int code_len = code_bytes;
|
|
unsigned char c;
|
|
u8 *ip;
|
|
+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
|
|
|
|
printk(KERN_EMERG "Stack:\n");
|
|
show_stack_log_lvl(NULL, regs, ®s->sp, 0, KERN_EMERG);
|
|
|
|
printk(KERN_EMERG "Code: ");
|
|
|
|
- ip = (u8 *)regs->ip - code_prologue;
|
|
+ ip = (u8 *)regs->ip - code_prologue + cs_base;
|
|
if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
|
|
/* try starting at IP */
|
|
- ip = (u8 *)regs->ip;
|
|
+ ip = (u8 *)regs->ip + cs_base;
|
|
code_len = code_len - code_prologue + 1;
|
|
}
|
|
for (i = 0; i < code_len; i++, ip++) {
|
|
@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
|
|
printk(KERN_CONT " Bad EIP value.");
|
|
break;
|
|
}
|
|
- if (ip == (u8 *)regs->ip)
|
|
+ if (ip == (u8 *)regs->ip + cs_base)
|
|
printk(KERN_CONT "<%02x> ", c);
|
|
else
|
|
printk(KERN_CONT "%02x ", c);
|
|
@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
|
|
{
|
|
unsigned short ud2;
|
|
|
|
+ ip = ktla_ktva(ip);
|
|
if (ip < PAGE_OFFSET)
|
|
return 0;
|
|
if (probe_kernel_address((unsigned short *)ip, ud2))
|
|
@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
|
|
|
|
return ud2 == 0x0b0f;
|
|
}
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
|
|
+void pax_check_alloca(unsigned long size)
|
|
+{
|
|
+ unsigned long sp = (unsigned long)&sp, stack_left;
|
|
+
|
|
+ /* all kernel stacks are of the same size */
|
|
+ stack_left = sp & (THREAD_SIZE - 1);
|
|
+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
|
|
+}
|
|
+EXPORT_SYMBOL(pax_check_alloca);
|
|
+#endif
|
|
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
|
|
index e820606..ecf211d 100644
|
|
--- a/arch/x86/kernel/dumpstack_64.c
|
|
+++ b/arch/x86/kernel/dumpstack_64.c
|
|
@@ -118,9 +118,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
|
|
unsigned long *irq_stack_end =
|
|
(unsigned long *)per_cpu(irq_stack_ptr, cpu);
|
|
unsigned used = 0;
|
|
- struct thread_info *tinfo;
|
|
int graph = 0;
|
|
unsigned long dummy;
|
|
+ void *stack_start;
|
|
|
|
if (!task)
|
|
task = current;
|
|
@@ -141,10 +141,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
|
|
* current stack address. If the stacks consist of nested
|
|
* exceptions
|
|
*/
|
|
- tinfo = task_thread_info(task);
|
|
for (;;) {
|
|
char *id;
|
|
unsigned long *estack_end;
|
|
+
|
|
estack_end = in_exception_stack(cpu, (unsigned long)stack,
|
|
&used, &id);
|
|
|
|
@@ -152,7 +152,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
|
|
if (ops->stack(data, id) < 0)
|
|
break;
|
|
|
|
- bp = ops->walk_stack(tinfo, stack, bp, ops,
|
|
+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
|
|
data, estack_end, &graph);
|
|
ops->stack(data, "<EOE>");
|
|
/*
|
|
@@ -160,6 +160,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
|
|
* second-to-last pointer (index -2 to end) in the
|
|
* exception stack:
|
|
*/
|
|
+ if ((u16)estack_end[-1] != __KERNEL_DS)
|
|
+ goto out;
|
|
stack = (unsigned long *) estack_end[-2];
|
|
continue;
|
|
}
|
|
@@ -171,7 +173,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
|
|
if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
|
|
if (ops->stack(data, "IRQ") < 0)
|
|
break;
|
|
- bp = ops->walk_stack(tinfo, stack, bp,
|
|
+ bp = ops->walk_stack(task, irq_stack, stack, bp,
|
|
ops, data, irq_stack_end, &graph);
|
|
/*
|
|
* We link to the next stack (which would be
|
|
@@ -190,7 +192,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
|
|
/*
|
|
* This handles the process stack:
|
|
*/
|
|
- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
|
|
+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
|
|
+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
|
|
+out:
|
|
put_cpu();
|
|
}
|
|
EXPORT_SYMBOL(dump_trace);
|
|
@@ -304,3 +308,50 @@ int is_valid_bugaddr(unsigned long ip)
|
|
|
|
return ud2 == 0x0b0f;
|
|
}
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
|
|
+void pax_check_alloca(unsigned long size)
|
|
+{
|
|
+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
|
|
+ unsigned cpu, used;
|
|
+ char *id;
|
|
+
|
|
+ /* check the process stack first */
|
|
+ stack_start = (unsigned long)task_stack_page(current);
|
|
+ stack_end = stack_start + THREAD_SIZE;
|
|
+ if (likely(stack_start <= sp && sp < stack_end)) {
|
|
+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
|
|
+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ cpu = get_cpu();
|
|
+
|
|
+ /* check the irq stacks */
|
|
+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
|
|
+ stack_start = stack_end - IRQ_STACK_SIZE;
|
|
+ if (stack_start <= sp && sp < stack_end) {
|
|
+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
|
|
+ put_cpu();
|
|
+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* check the exception stacks */
|
|
+ used = 0;
|
|
+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
|
|
+ stack_start = stack_end - EXCEPTION_STKSZ;
|
|
+ if (stack_end && stack_start <= sp && sp < stack_end) {
|
|
+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
|
|
+ put_cpu();
|
|
+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ put_cpu();
|
|
+
|
|
+ /* unknown stack */
|
|
+ BUG();
|
|
+}
|
|
+EXPORT_SYMBOL(pax_check_alloca);
|
|
+#endif
|
|
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
|
|
index 9b9f18b..9fcaa04 100644
|
|
--- a/arch/x86/kernel/early_printk.c
|
|
+++ b/arch/x86/kernel/early_printk.c
|
|
@@ -7,6 +7,7 @@
|
|
#include <linux/pci_regs.h>
|
|
#include <linux/pci_ids.h>
|
|
#include <linux/errno.h>
|
|
+#include <linux/sched.h>
|
|
#include <asm/io.h>
|
|
#include <asm/processor.h>
|
|
#include <asm/fcntl.h>
|
|
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
|
|
index a075124..80cdf03 100644
|
|
--- a/arch/x86/kernel/entry_32.S
|
|
+++ b/arch/x86/kernel/entry_32.S
|
|
@@ -179,13 +179,146 @@
|
|
/*CFI_REL_OFFSET gs, PT_GS*/
|
|
.endm
|
|
.macro SET_KERNEL_GS reg
|
|
+
|
|
+#ifdef CONFIG_CC_STACKPROTECTOR
|
|
movl $(__KERNEL_STACK_CANARY), \reg
|
|
+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ movl $(__USER_DS), \reg
|
|
+#else
|
|
+ xorl \reg, \reg
|
|
+#endif
|
|
+
|
|
movl \reg, %gs
|
|
.endm
|
|
|
|
#endif /* CONFIG_X86_32_LAZY_GS */
|
|
|
|
-.macro SAVE_ALL
|
|
+.macro pax_enter_kernel
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ call pax_enter_kernel
|
|
+#endif
|
|
+.endm
|
|
+
|
|
+.macro pax_exit_kernel
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ call pax_exit_kernel
|
|
+#endif
|
|
+.endm
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ENTRY(pax_enter_kernel)
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+ pushl %eax
|
|
+ pushl %ecx
|
|
+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
|
|
+ mov %eax, %esi
|
|
+#else
|
|
+ mov %cr0, %esi
|
|
+#endif
|
|
+ bts $16, %esi
|
|
+ jnc 1f
|
|
+ mov %cs, %esi
|
|
+ cmp $__KERNEL_CS, %esi
|
|
+ jz 3f
|
|
+ ljmp $__KERNEL_CS, $3f
|
|
+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
|
|
+2:
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+ mov %esi, %eax
|
|
+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
|
|
+#else
|
|
+ mov %esi, %cr0
|
|
+#endif
|
|
+3:
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+ popl %ecx
|
|
+ popl %eax
|
|
+#endif
|
|
+ ret
|
|
+ENDPROC(pax_enter_kernel)
|
|
+
|
|
+ENTRY(pax_exit_kernel)
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+ pushl %eax
|
|
+ pushl %ecx
|
|
+#endif
|
|
+ mov %cs, %esi
|
|
+ cmp $__KERNEXEC_KERNEL_CS, %esi
|
|
+ jnz 2f
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
|
|
+ mov %eax, %esi
|
|
+#else
|
|
+ mov %cr0, %esi
|
|
+#endif
|
|
+ btr $16, %esi
|
|
+ ljmp $__KERNEL_CS, $1f
|
|
+1:
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+ mov %esi, %eax
|
|
+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
|
|
+#else
|
|
+ mov %esi, %cr0
|
|
+#endif
|
|
+2:
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+ popl %ecx
|
|
+ popl %eax
|
|
+#endif
|
|
+ ret
|
|
+ENDPROC(pax_exit_kernel)
|
|
+#endif
|
|
+
|
|
+.macro pax_erase_kstack
|
|
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
|
|
+ call pax_erase_kstack
|
|
+#endif
|
|
+.endm
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
|
|
+/*
|
|
+ * ebp: thread_info
|
|
+ * ecx, edx: can be clobbered
|
|
+ */
|
|
+ENTRY(pax_erase_kstack)
|
|
+ pushl %edi
|
|
+ pushl %eax
|
|
+
|
|
+ mov TI_lowest_stack(%ebp), %edi
|
|
+ mov $0xB4DD00D5, %eax
|
|
+ std
|
|
+
|
|
+1: mov %edi, %ecx
|
|
+ and $THREAD_SIZE_asm - 1, %ecx
|
|
+ shr $2, %ecx
|
|
+ repne scasl
|
|
+ jecxz 2f
|
|
+
|
|
+ cmp $2*16, %ecx
|
|
+ jc 2f
|
|
+
|
|
+ mov $2*16, %ecx
|
|
+ repe scasl
|
|
+ jecxz 2f
|
|
+ jne 1b
|
|
+
|
|
+2: cld
|
|
+ mov %esp, %ecx
|
|
+ sub %edi, %ecx
|
|
+ shr $2, %ecx
|
|
+ rep stosl
|
|
+
|
|
+ mov TI_task_thread_sp0(%ebp), %edi
|
|
+ sub $128, %edi
|
|
+ mov %edi, TI_lowest_stack(%ebp)
|
|
+
|
|
+ popl %eax
|
|
+ popl %edi
|
|
+ ret
|
|
+ENDPROC(pax_erase_kstack)
|
|
+#endif
|
|
+
|
|
+.macro __SAVE_ALL _DS
|
|
cld
|
|
PUSH_GS
|
|
pushl_cfi %fs
|
|
@@ -208,7 +341,7 @@
|
|
CFI_REL_OFFSET ecx, 0
|
|
pushl_cfi %ebx
|
|
CFI_REL_OFFSET ebx, 0
|
|
- movl $(__USER_DS), %edx
|
|
+ movl $\_DS, %edx
|
|
movl %edx, %ds
|
|
movl %edx, %es
|
|
movl $(__KERNEL_PERCPU), %edx
|
|
@@ -216,6 +349,15 @@
|
|
SET_KERNEL_GS %edx
|
|
.endm
|
|
|
|
+.macro SAVE_ALL
|
|
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ __SAVE_ALL __KERNEL_DS
|
|
+ pax_enter_kernel
|
|
+#else
|
|
+ __SAVE_ALL __USER_DS
|
|
+#endif
|
|
+.endm
|
|
+
|
|
.macro RESTORE_INT_REGS
|
|
popl_cfi %ebx
|
|
CFI_RESTORE ebx
|
|
@@ -301,7 +443,7 @@ ENTRY(ret_from_fork)
|
|
popfl_cfi
|
|
jmp syscall_exit
|
|
CFI_ENDPROC
|
|
-END(ret_from_fork)
|
|
+ENDPROC(ret_from_fork)
|
|
|
|
/*
|
|
* Interrupt exit functions should be protected against kprobes
|
|
@@ -335,7 +477,15 @@ resume_userspace_sig:
|
|
andl $SEGMENT_RPL_MASK, %eax
|
|
#endif
|
|
cmpl $USER_RPL, %eax
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ jae resume_userspace
|
|
+
|
|
+ pax_exit_kernel
|
|
+ jmp resume_kernel
|
|
+#else
|
|
jb resume_kernel # not returning to v8086 or userspace
|
|
+#endif
|
|
|
|
ENTRY(resume_userspace)
|
|
LOCKDEP_SYS_EXIT
|
|
@@ -347,8 +497,8 @@ ENTRY(resume_userspace)
|
|
andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
|
|
# int/exception return?
|
|
jne work_pending
|
|
- jmp restore_all
|
|
-END(ret_from_exception)
|
|
+ jmp restore_all_pax
|
|
+ENDPROC(ret_from_exception)
|
|
|
|
#ifdef CONFIG_PREEMPT
|
|
ENTRY(resume_kernel)
|
|
@@ -363,7 +513,7 @@ need_resched:
|
|
jz restore_all
|
|
call preempt_schedule_irq
|
|
jmp need_resched
|
|
-END(resume_kernel)
|
|
+ENDPROC(resume_kernel)
|
|
#endif
|
|
CFI_ENDPROC
|
|
/*
|
|
@@ -397,23 +547,34 @@ sysenter_past_esp:
|
|
/*CFI_REL_OFFSET cs, 0*/
|
|
/*
|
|
* Push current_thread_info()->sysenter_return to the stack.
|
|
- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
|
|
- * pushed above; +8 corresponds to copy_thread's esp0 setting.
|
|
*/
|
|
- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
|
|
+ pushl_cfi $0
|
|
CFI_REL_OFFSET eip, 0
|
|
|
|
pushl_cfi %eax
|
|
SAVE_ALL
|
|
+ GET_THREAD_INFO(%ebp)
|
|
+ movl TI_sysenter_return(%ebp),%ebp
|
|
+ movl %ebp,PT_EIP(%esp)
|
|
ENABLE_INTERRUPTS(CLBR_NONE)
|
|
|
|
/*
|
|
* Load the potential sixth argument from user stack.
|
|
* Careful about security.
|
|
*/
|
|
+ movl PT_OLDESP(%esp),%ebp
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ mov PT_OLDSS(%esp),%ds
|
|
+1: movl %ds:(%ebp),%ebp
|
|
+ push %ss
|
|
+ pop %ds
|
|
+#else
|
|
cmpl $__PAGE_OFFSET-3,%ebp
|
|
jae syscall_fault
|
|
1: movl (%ebp),%ebp
|
|
+#endif
|
|
+
|
|
movl %ebp,PT_EBP(%esp)
|
|
.section __ex_table,"a"
|
|
.align 4
|
|
@@ -437,12 +598,24 @@ sysenter_after_call:
|
|
testl $_TIF_ALLWORK_MASK, %ecx
|
|
jne sysexit_audit
|
|
sysenter_exit:
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDKSTACK
|
|
+ pushl_cfi %eax
|
|
+ movl %esp, %eax
|
|
+ call pax_randomize_kstack
|
|
+ popl_cfi %eax
|
|
+#endif
|
|
+
|
|
+ pax_erase_kstack
|
|
+
|
|
/* if something modifies registers it must also disable sysexit */
|
|
movl PT_EIP(%esp), %edx
|
|
movl PT_OLDESP(%esp), %ecx
|
|
xorl %ebp,%ebp
|
|
TRACE_IRQS_ON
|
|
1: mov PT_FS(%esp), %fs
|
|
+2: mov PT_DS(%esp), %ds
|
|
+3: mov PT_ES(%esp), %es
|
|
PTGS_TO_GS
|
|
ENABLE_INTERRUPTS_SYSEXIT
|
|
|
|
@@ -459,6 +632,9 @@ sysenter_audit:
|
|
movl %eax,%edx /* 2nd arg: syscall number */
|
|
movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
|
|
call __audit_syscall_entry
|
|
+
|
|
+ pax_erase_kstack
|
|
+
|
|
pushl_cfi %ebx
|
|
movl PT_EAX(%esp),%eax /* reload syscall number */
|
|
jmp sysenter_do_call
|
|
@@ -484,11 +660,17 @@ sysexit_audit:
|
|
|
|
CFI_ENDPROC
|
|
.pushsection .fixup,"ax"
|
|
-2: movl $0,PT_FS(%esp)
|
|
+4: movl $0,PT_FS(%esp)
|
|
+ jmp 1b
|
|
+5: movl $0,PT_DS(%esp)
|
|
+ jmp 1b
|
|
+6: movl $0,PT_ES(%esp)
|
|
jmp 1b
|
|
.section __ex_table,"a"
|
|
.align 4
|
|
- .long 1b,2b
|
|
+ .long 1b,4b
|
|
+ .long 2b,5b
|
|
+ .long 3b,6b
|
|
.popsection
|
|
PTGS_TO_GS_EX
|
|
ENDPROC(ia32_sysenter_target)
|
|
@@ -522,6 +704,15 @@ syscall_exit:
|
|
testl $_TIF_ALLWORK_MASK, %ecx # current->work
|
|
jne syscall_exit_work
|
|
|
|
+restore_all_pax:
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDKSTACK
|
|
+ movl %esp, %eax
|
|
+ call pax_randomize_kstack
|
|
+#endif
|
|
+
|
|
+ pax_erase_kstack
|
|
+
|
|
restore_all:
|
|
TRACE_IRQS_IRET
|
|
restore_all_notrace:
|
|
@@ -579,14 +770,34 @@ ldt_ss:
|
|
* compensating for the offset by changing to the ESPFIX segment with
|
|
* a base address that matches for the difference.
|
|
*/
|
|
-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
|
|
+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
|
|
mov %esp, %edx /* load kernel esp */
|
|
mov PT_OLDESP(%esp), %eax /* load userspace esp */
|
|
mov %dx, %ax /* eax: new kernel esp */
|
|
sub %eax, %edx /* offset (low word is 0) */
|
|
+#ifdef CONFIG_SMP
|
|
+ movl PER_CPU_VAR(cpu_number), %ebx
|
|
+ shll $PAGE_SHIFT_asm, %ebx
|
|
+ addl $cpu_gdt_table, %ebx
|
|
+#else
|
|
+ movl $cpu_gdt_table, %ebx
|
|
+#endif
|
|
shr $16, %edx
|
|
- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
|
|
- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ mov %cr0, %esi
|
|
+ btr $16, %esi
|
|
+ mov %esi, %cr0
|
|
+#endif
|
|
+
|
|
+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
|
|
+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ bts $16, %esi
|
|
+ mov %esi, %cr0
|
|
+#endif
|
|
+
|
|
pushl_cfi $__ESPFIX_SS
|
|
pushl_cfi %eax /* new kernel esp */
|
|
/* Disable interrupts, but do not irqtrace this section: we
|
|
@@ -616,38 +827,30 @@ work_resched:
|
|
movl TI_flags(%ebp), %ecx
|
|
andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
|
|
# than syscall tracing?
|
|
- jz restore_all
|
|
+ jz restore_all_pax
|
|
testb $_TIF_NEED_RESCHED, %cl
|
|
jnz work_resched
|
|
|
|
work_notifysig: # deal with pending signals and
|
|
# notify-resume requests
|
|
+ movl %esp, %eax
|
|
#ifdef CONFIG_VM86
|
|
testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
|
|
- movl %esp, %eax
|
|
- jne work_notifysig_v86 # returning to kernel-space or
|
|
+ jz 1f # returning to kernel-space or
|
|
# vm86-space
|
|
- TRACE_IRQS_ON
|
|
- ENABLE_INTERRUPTS(CLBR_NONE)
|
|
- xorl %edx, %edx
|
|
- call do_notify_resume
|
|
- jmp resume_userspace_sig
|
|
|
|
- ALIGN
|
|
-work_notifysig_v86:
|
|
pushl_cfi %ecx # save ti_flags for do_notify_resume
|
|
call save_v86_state # %eax contains pt_regs pointer
|
|
popl_cfi %ecx
|
|
movl %eax, %esp
|
|
-#else
|
|
- movl %esp, %eax
|
|
+1:
|
|
#endif
|
|
TRACE_IRQS_ON
|
|
ENABLE_INTERRUPTS(CLBR_NONE)
|
|
xorl %edx, %edx
|
|
call do_notify_resume
|
|
jmp resume_userspace_sig
|
|
-END(work_pending)
|
|
+ENDPROC(work_pending)
|
|
|
|
# perform syscall exit tracing
|
|
ALIGN
|
|
@@ -655,11 +858,14 @@ syscall_trace_entry:
|
|
movl $-ENOSYS,PT_EAX(%esp)
|
|
movl %esp, %eax
|
|
call syscall_trace_enter
|
|
+
|
|
+ pax_erase_kstack
|
|
+
|
|
/* What it returned is what we'll actually use. */
|
|
cmpl $(NR_syscalls), %eax
|
|
jnae syscall_call
|
|
jmp syscall_exit
|
|
-END(syscall_trace_entry)
|
|
+ENDPROC(syscall_trace_entry)
|
|
|
|
# perform syscall exit tracing
|
|
ALIGN
|
|
@@ -762,6 +968,36 @@ ENTRY(ptregs_clone)
|
|
CFI_ENDPROC
|
|
ENDPROC(ptregs_clone)
|
|
|
|
+ ALIGN;
|
|
+ENTRY(kernel_execve)
|
|
+ CFI_STARTPROC
|
|
+ pushl_cfi %ebp
|
|
+ sub $PT_OLDSS+4,%esp
|
|
+ pushl_cfi %edi
|
|
+ pushl_cfi %ecx
|
|
+ pushl_cfi %eax
|
|
+ lea 3*4(%esp),%edi
|
|
+ mov $PT_OLDSS/4+1,%ecx
|
|
+ xorl %eax,%eax
|
|
+ rep stosl
|
|
+ popl_cfi %eax
|
|
+ popl_cfi %ecx
|
|
+ popl_cfi %edi
|
|
+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
|
|
+ pushl_cfi %esp
|
|
+ call sys_execve
|
|
+ add $4,%esp
|
|
+ CFI_ADJUST_CFA_OFFSET -4
|
|
+ GET_THREAD_INFO(%ebp)
|
|
+ test %eax,%eax
|
|
+ jz syscall_exit
|
|
+ add $PT_OLDSS+4,%esp
|
|
+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
|
|
+ popl_cfi %ebp
|
|
+ ret
|
|
+ CFI_ENDPROC
|
|
+ENDPROC(kernel_execve)
|
|
+
|
|
.macro FIXUP_ESPFIX_STACK
|
|
/*
|
|
* Switch back for ESPFIX stack to the normal zerobased stack
|
|
@@ -772,8 +1008,15 @@ ENDPROC(ptregs_clone)
|
|
*/
|
|
#ifdef CONFIG_X86_ESPFIX32
|
|
/* fixup the stack */
|
|
- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
|
|
- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
|
|
+#ifdef CONFIG_SMP
|
|
+ movl PER_CPU_VAR(cpu_number), %ebx
|
|
+ shll $PAGE_SHIFT_asm, %ebx
|
|
+ addl $cpu_gdt_table, %ebx
|
|
+#else
|
|
+ movl $cpu_gdt_table, %ebx
|
|
+#endif
|
|
+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
|
|
+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
|
|
shl $16, %eax
|
|
addl %esp, %eax /* the adjusted stack pointer */
|
|
pushl_cfi $__KERNEL_DS
|
|
@@ -829,7 +1072,7 @@ vector=vector+1
|
|
.endr
|
|
2: jmp common_interrupt
|
|
.endr
|
|
-END(irq_entries_start)
|
|
+ENDPROC(irq_entries_start)
|
|
|
|
.previous
|
|
END(interrupt)
|
|
@@ -877,7 +1120,7 @@ ENTRY(coprocessor_error)
|
|
pushl_cfi $do_coprocessor_error
|
|
jmp error_code
|
|
CFI_ENDPROC
|
|
-END(coprocessor_error)
|
|
+ENDPROC(coprocessor_error)
|
|
|
|
ENTRY(simd_coprocessor_error)
|
|
RING0_INT_FRAME
|
|
@@ -898,7 +1141,7 @@ ENTRY(simd_coprocessor_error)
|
|
#endif
|
|
jmp error_code
|
|
CFI_ENDPROC
|
|
-END(simd_coprocessor_error)
|
|
+ENDPROC(simd_coprocessor_error)
|
|
|
|
ENTRY(device_not_available)
|
|
RING0_INT_FRAME
|
|
@@ -906,7 +1149,7 @@ ENTRY(device_not_available)
|
|
pushl_cfi $do_device_not_available
|
|
jmp error_code
|
|
CFI_ENDPROC
|
|
-END(device_not_available)
|
|
+ENDPROC(device_not_available)
|
|
|
|
#ifdef CONFIG_PARAVIRT
|
|
ENTRY(native_iret)
|
|
@@ -915,12 +1158,12 @@ ENTRY(native_iret)
|
|
.align 4
|
|
.long native_iret, iret_exc
|
|
.previous
|
|
-END(native_iret)
|
|
+ENDPROC(native_iret)
|
|
|
|
ENTRY(native_irq_enable_sysexit)
|
|
sti
|
|
sysexit
|
|
-END(native_irq_enable_sysexit)
|
|
+ENDPROC(native_irq_enable_sysexit)
|
|
#endif
|
|
|
|
ENTRY(overflow)
|
|
@@ -929,7 +1172,7 @@ ENTRY(overflow)
|
|
pushl_cfi $do_overflow
|
|
jmp error_code
|
|
CFI_ENDPROC
|
|
-END(overflow)
|
|
+ENDPROC(overflow)
|
|
|
|
ENTRY(bounds)
|
|
RING0_INT_FRAME
|
|
@@ -937,7 +1180,7 @@ ENTRY(bounds)
|
|
pushl_cfi $do_bounds
|
|
jmp error_code
|
|
CFI_ENDPROC
|
|
-END(bounds)
|
|
+ENDPROC(bounds)
|
|
|
|
ENTRY(invalid_op)
|
|
RING0_INT_FRAME
|
|
@@ -945,7 +1188,7 @@ ENTRY(invalid_op)
|
|
pushl_cfi $do_invalid_op
|
|
jmp error_code
|
|
CFI_ENDPROC
|
|
-END(invalid_op)
|
|
+ENDPROC(invalid_op)
|
|
|
|
ENTRY(coprocessor_segment_overrun)
|
|
RING0_INT_FRAME
|
|
@@ -953,35 +1196,35 @@ ENTRY(coprocessor_segment_overrun)
|
|
pushl_cfi $do_coprocessor_segment_overrun
|
|
jmp error_code
|
|
CFI_ENDPROC
|
|
-END(coprocessor_segment_overrun)
|
|
+ENDPROC(coprocessor_segment_overrun)
|
|
|
|
ENTRY(invalid_TSS)
|
|
RING0_EC_FRAME
|
|
pushl_cfi $do_invalid_TSS
|
|
jmp error_code
|
|
CFI_ENDPROC
|
|
-END(invalid_TSS)
|
|
+ENDPROC(invalid_TSS)
|
|
|
|
ENTRY(segment_not_present)
|
|
RING0_EC_FRAME
|
|
pushl_cfi $do_segment_not_present
|
|
jmp error_code
|
|
CFI_ENDPROC
|
|
-END(segment_not_present)
|
|
+ENDPROC(segment_not_present)
|
|
|
|
ENTRY(stack_segment)
|
|
RING0_EC_FRAME
|
|
pushl_cfi $do_stack_segment
|
|
jmp error_code
|
|
CFI_ENDPROC
|
|
-END(stack_segment)
|
|
+ENDPROC(stack_segment)
|
|
|
|
ENTRY(alignment_check)
|
|
RING0_EC_FRAME
|
|
pushl_cfi $do_alignment_check
|
|
jmp error_code
|
|
CFI_ENDPROC
|
|
-END(alignment_check)
|
|
+ENDPROC(alignment_check)
|
|
|
|
ENTRY(divide_error)
|
|
RING0_INT_FRAME
|
|
@@ -989,7 +1232,7 @@ ENTRY(divide_error)
|
|
pushl_cfi $do_divide_error
|
|
jmp error_code
|
|
CFI_ENDPROC
|
|
-END(divide_error)
|
|
+ENDPROC(divide_error)
|
|
|
|
#ifdef CONFIG_X86_MCE
|
|
ENTRY(machine_check)
|
|
@@ -998,7 +1241,7 @@ ENTRY(machine_check)
|
|
pushl_cfi machine_check_vector
|
|
jmp error_code
|
|
CFI_ENDPROC
|
|
-END(machine_check)
|
|
+ENDPROC(machine_check)
|
|
#endif
|
|
|
|
ENTRY(spurious_interrupt_bug)
|
|
@@ -1007,7 +1250,7 @@ ENTRY(spurious_interrupt_bug)
|
|
pushl_cfi $do_spurious_interrupt_bug
|
|
jmp error_code
|
|
CFI_ENDPROC
|
|
-END(spurious_interrupt_bug)
|
|
+ENDPROC(spurious_interrupt_bug)
|
|
/*
|
|
* End of kprobes section
|
|
*/
|
|
@@ -1123,7 +1366,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
|
|
|
|
ENTRY(mcount)
|
|
ret
|
|
-END(mcount)
|
|
+ENDPROC(mcount)
|
|
|
|
ENTRY(ftrace_caller)
|
|
cmpl $0, function_trace_stop
|
|
@@ -1152,7 +1395,7 @@ ftrace_graph_call:
|
|
.globl ftrace_stub
|
|
ftrace_stub:
|
|
ret
|
|
-END(ftrace_caller)
|
|
+ENDPROC(ftrace_caller)
|
|
|
|
#else /* ! CONFIG_DYNAMIC_FTRACE */
|
|
|
|
@@ -1188,7 +1431,7 @@ trace:
|
|
popl %ecx
|
|
popl %eax
|
|
jmp ftrace_stub
|
|
-END(mcount)
|
|
+ENDPROC(mcount)
|
|
#endif /* CONFIG_DYNAMIC_FTRACE */
|
|
#endif /* CONFIG_FUNCTION_TRACER */
|
|
|
|
@@ -1209,7 +1452,7 @@ ENTRY(ftrace_graph_caller)
|
|
popl %ecx
|
|
popl %eax
|
|
ret
|
|
-END(ftrace_graph_caller)
|
|
+ENDPROC(ftrace_graph_caller)
|
|
|
|
.globl return_to_handler
|
|
return_to_handler:
|
|
@@ -1264,15 +1507,18 @@ error_code:
|
|
movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
|
|
REG_TO_PTGS %ecx
|
|
SET_KERNEL_GS %ecx
|
|
- movl $(__USER_DS), %ecx
|
|
+ movl $(__KERNEL_DS), %ecx
|
|
movl %ecx, %ds
|
|
movl %ecx, %es
|
|
+
|
|
+ pax_enter_kernel
|
|
+
|
|
TRACE_IRQS_OFF
|
|
movl %esp,%eax # pt_regs pointer
|
|
call *%edi
|
|
jmp ret_from_exception
|
|
CFI_ENDPROC
|
|
-END(page_fault)
|
|
+ENDPROC(page_fault)
|
|
|
|
/*
|
|
* Debug traps and NMI can happen at the one SYSENTER instruction
|
|
@@ -1314,7 +1560,7 @@ debug_stack_correct:
|
|
call do_debug
|
|
jmp ret_from_exception
|
|
CFI_ENDPROC
|
|
-END(debug)
|
|
+ENDPROC(debug)
|
|
|
|
/*
|
|
* NMI is doubly nasty. It can happen _while_ we're handling
|
|
@@ -1353,6 +1599,9 @@ nmi_stack_correct:
|
|
xorl %edx,%edx # zero error code
|
|
movl %esp,%eax # pt_regs pointer
|
|
call do_nmi
|
|
+
|
|
+ pax_exit_kernel
|
|
+
|
|
jmp restore_all_notrace
|
|
CFI_ENDPROC
|
|
|
|
@@ -1408,14 +1657,14 @@ ENTRY(int3)
|
|
call do_int3
|
|
jmp ret_from_exception
|
|
CFI_ENDPROC
|
|
-END(int3)
|
|
+ENDPROC(int3)
|
|
|
|
ENTRY(general_protection)
|
|
RING0_EC_FRAME
|
|
pushl_cfi $do_general_protection
|
|
jmp error_code
|
|
CFI_ENDPROC
|
|
-END(general_protection)
|
|
+ENDPROC(general_protection)
|
|
|
|
#ifdef CONFIG_KVM_GUEST
|
|
ENTRY(async_page_fault)
|
|
@@ -1423,7 +1672,7 @@ ENTRY(async_page_fault)
|
|
pushl_cfi $do_async_page_fault
|
|
jmp error_code
|
|
CFI_ENDPROC
|
|
-END(async_page_fault)
|
|
+ENDPROC(async_page_fault)
|
|
#endif
|
|
|
|
/*
|
|
diff --git a/arch/x86/kernel/entry_32.S.rej b/arch/x86/kernel/entry_32.S.rej
|
|
new file mode 100644
|
|
index 0000000..39bbe03
|
|
--- /dev/null
|
|
+++ b/arch/x86/kernel/entry_32.S.rej
|
|
@@ -0,0 +1,47 @@
|
|
+--- arch/x86/kernel/entry_32.S 2012-05-21 11:32:57.475927660 +0200
|
|
++++ arch/x86/kernel/entry_32.S 2012-05-21 12:10:09.520048888 +0200
|
|
+@@ -877,20 +1083,24 @@ syscall_exit_work:
|
|
+ movl %esp, %eax
|
|
+ call syscall_trace_leave
|
|
+ jmp resume_userspace
|
|
+-END(syscall_exit_work)
|
|
++ENDPROC(syscall_exit_work)
|
|
+ CFI_ENDPROC
|
|
+
|
|
+ RING0_INT_FRAME # can't unwind into user space anyway
|
|
+ syscall_fault:
|
|
++#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
++ push %ss
|
|
++ pop %ds
|
|
++#endif
|
|
+ GET_THREAD_INFO(%ebp)
|
|
+ movl $-EFAULT,PT_EAX(%esp)
|
|
+ jmp resume_userspace
|
|
+-END(syscall_fault)
|
|
++ENDPROC(syscall_fault)
|
|
+
|
|
+ syscall_badsys:
|
|
+ movl $-ENOSYS,PT_EAX(%esp)
|
|
+ jmp resume_userspace
|
|
+-END(syscall_badsys)
|
|
++ENDPROC(syscall_badsys)
|
|
+ CFI_ENDPROC
|
|
+ /*
|
|
+ * End of kprobes section
|
|
+@@ -1625,12 +1878,15 @@ nmi_espfix_stack:
|
|
+ FIXUP_ESPFIX_STACK # %eax == %esp
|
|
+ xorl %edx,%edx # zero error code
|
|
+ call do_nmi
|
|
++
|
|
++ pax_exit_kernel
|
|
++
|
|
+ RESTORE_REGS
|
|
+ lss 12+4(%esp), %esp # back to espfix stack
|
|
+ CFI_ADJUST_CFA_OFFSET -24
|
|
+ jmp irq_return
|
|
+ CFI_ENDPROC
|
|
+-END(nmi)
|
|
++ENDPROC(nmi)
|
|
+
|
|
+ ENTRY(int3)
|
|
+ RING0_INT_FRAME
|
|
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
|
|
index 8b49131..d5e04292 100644
|
|
--- a/arch/x86/kernel/entry_64.S
|
|
+++ b/arch/x86/kernel/entry_64.S
|
|
@@ -57,6 +57,8 @@
|
|
#include <asm/percpu.h>
|
|
#include <asm/pgtable_types.h>
|
|
#include <linux/err.h>
|
|
+#include <asm/pgtable.h>
|
|
+#include <asm/alternative-asm.h>
|
|
|
|
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
|
|
#include <linux/elf-em.h>
|
|
@@ -70,8 +72,9 @@
|
|
#ifdef CONFIG_FUNCTION_TRACER
|
|
#ifdef CONFIG_DYNAMIC_FTRACE
|
|
ENTRY(mcount)
|
|
+ pax_force_retaddr
|
|
retq
|
|
-END(mcount)
|
|
+ENDPROC(mcount)
|
|
|
|
ENTRY(ftrace_caller)
|
|
cmpl $0, function_trace_stop
|
|
@@ -94,8 +97,9 @@ GLOBAL(ftrace_graph_call)
|
|
#endif
|
|
|
|
GLOBAL(ftrace_stub)
|
|
+ pax_force_retaddr
|
|
retq
|
|
-END(ftrace_caller)
|
|
+ENDPROC(ftrace_caller)
|
|
|
|
#else /* ! CONFIG_DYNAMIC_FTRACE */
|
|
ENTRY(mcount)
|
|
@@ -114,6 +118,7 @@ ENTRY(mcount)
|
|
#endif
|
|
|
|
GLOBAL(ftrace_stub)
|
|
+ pax_force_retaddr
|
|
retq
|
|
|
|
trace:
|
|
@@ -123,12 +128,13 @@ trace:
|
|
movq 8(%rbp), %rsi
|
|
subq $MCOUNT_INSN_SIZE, %rdi
|
|
|
|
+ pax_force_fptr ftrace_trace_function
|
|
call *ftrace_trace_function
|
|
|
|
MCOUNT_RESTORE_FRAME
|
|
|
|
jmp ftrace_stub
|
|
-END(mcount)
|
|
+ENDPROC(mcount)
|
|
#endif /* CONFIG_DYNAMIC_FTRACE */
|
|
#endif /* CONFIG_FUNCTION_TRACER */
|
|
|
|
@@ -148,8 +154,9 @@ ENTRY(ftrace_graph_caller)
|
|
|
|
MCOUNT_RESTORE_FRAME
|
|
|
|
+ pax_force_retaddr
|
|
retq
|
|
-END(ftrace_graph_caller)
|
|
+ENDPROC(ftrace_graph_caller)
|
|
|
|
GLOBAL(return_to_handler)
|
|
subq $24, %rsp
|
|
@@ -165,6 +172,7 @@ GLOBAL(return_to_handler)
|
|
movq 8(%rsp), %rdx
|
|
movq (%rsp), %rax
|
|
addq $24, %rsp
|
|
+ pax_force_fptr %rdi
|
|
jmp *%rdi
|
|
#endif
|
|
|
|
@@ -180,6 +188,282 @@ ENTRY(native_usergs_sysret64)
|
|
ENDPROC(native_usergs_sysret64)
|
|
#endif /* CONFIG_PARAVIRT */
|
|
|
|
+ .macro ljmpq sel, off
|
|
+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
|
|
+ .byte 0x48; ljmp *1234f(%rip)
|
|
+ .pushsection .rodata
|
|
+ .align 16
|
|
+ 1234: .quad \off; .word \sel
|
|
+ .popsection
|
|
+#else
|
|
+ pushq $\sel
|
|
+ pushq $\off
|
|
+ lretq
|
|
+#endif
|
|
+ .endm
|
|
+
|
|
+ .macro pax_enter_kernel
|
|
+ pax_set_fptr_mask
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ call pax_enter_kernel
|
|
+#endif
|
|
+ .endm
|
|
+
|
|
+ .macro pax_exit_kernel
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ call pax_exit_kernel
|
|
+#endif
|
|
+ .endm
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ENTRY(pax_enter_kernel)
|
|
+ pushq %rdi
|
|
+
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+ PV_SAVE_REGS(CLBR_RDI)
|
|
+#endif
|
|
+
|
|
+ GET_CR0_INTO_RDI
|
|
+ bts $16,%rdi
|
|
+ jnc 3f
|
|
+ mov %cs,%edi
|
|
+ cmp $__KERNEL_CS,%edi
|
|
+ jnz 2f
|
|
+1:
|
|
+
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+ PV_RESTORE_REGS(CLBR_RDI)
|
|
+#endif
|
|
+
|
|
+ popq %rdi
|
|
+ pax_force_retaddr
|
|
+ retq
|
|
+
|
|
+2: ljmpq __KERNEL_CS,1f
|
|
+3: ljmpq __KERNEXEC_KERNEL_CS,4f
|
|
+4: SET_RDI_INTO_CR0
|
|
+ jmp 1b
|
|
+ENDPROC(pax_enter_kernel)
|
|
+
|
|
+ENTRY(pax_exit_kernel)
|
|
+ pushq %rdi
|
|
+
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+ PV_SAVE_REGS(CLBR_RDI)
|
|
+#endif
|
|
+
|
|
+ mov %cs,%rdi
|
|
+ cmp $__KERNEXEC_KERNEL_CS,%edi
|
|
+ jz 2f
|
|
+1:
|
|
+
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+ PV_RESTORE_REGS(CLBR_RDI);
|
|
+#endif
|
|
+
|
|
+ popq %rdi
|
|
+ pax_force_retaddr
|
|
+ retq
|
|
+
|
|
+2: GET_CR0_INTO_RDI
|
|
+ btr $16,%rdi
|
|
+ ljmpq __KERNEL_CS,3f
|
|
+3: SET_RDI_INTO_CR0
|
|
+ jmp 1b
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+ PV_RESTORE_REGS(CLBR_RDI);
|
|
+#endif
|
|
+
|
|
+ popq %rdi
|
|
+ pax_force_retaddr
|
|
+ retq
|
|
+ENDPROC(pax_exit_kernel)
|
|
+#endif
|
|
+
|
|
+ .macro pax_enter_kernel_user
|
|
+ pax_set_fptr_mask
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ call pax_enter_kernel_user
|
|
+#endif
|
|
+ .endm
|
|
+
|
|
+ .macro pax_exit_kernel_user
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ call pax_exit_kernel_user
|
|
+#endif
|
|
+#ifdef CONFIG_PAX_RANDKSTACK
|
|
+ pushq %rax
|
|
+ call pax_randomize_kstack
|
|
+ popq %rax
|
|
+#endif
|
|
+ .endm
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ENTRY(pax_enter_kernel_user)
|
|
+ pushq %rdi
|
|
+ pushq %rbx
|
|
+
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+ PV_SAVE_REGS(CLBR_RDI)
|
|
+#endif
|
|
+
|
|
+ GET_CR3_INTO_RDI
|
|
+ mov %rdi,%rbx
|
|
+ add $__START_KERNEL_map,%rbx
|
|
+ sub phys_base(%rip),%rbx
|
|
+
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+ pushq %rdi
|
|
+ cmpl $0, pv_info+PARAVIRT_enabled
|
|
+ jz 1f
|
|
+ i = 0
|
|
+ .rept USER_PGD_PTRS
|
|
+ mov i*8(%rbx),%rsi
|
|
+ mov $0,%sil
|
|
+ lea i*8(%rbx),%rdi
|
|
+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
|
|
+ i = i + 1
|
|
+ .endr
|
|
+ jmp 2f
|
|
+1:
|
|
+#endif
|
|
+
|
|
+ i = 0
|
|
+ .rept USER_PGD_PTRS
|
|
+ movb $0,i*8(%rbx)
|
|
+ i = i + 1
|
|
+ .endr
|
|
+
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+2: popq %rdi
|
|
+#endif
|
|
+ SET_RDI_INTO_CR3
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ GET_CR0_INTO_RDI
|
|
+ bts $16,%rdi
|
|
+ SET_RDI_INTO_CR0
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+ PV_RESTORE_REGS(CLBR_RDI)
|
|
+#endif
|
|
+
|
|
+ popq %rbx
|
|
+ popq %rdi
|
|
+ pax_force_retaddr
|
|
+ retq
|
|
+ENDPROC(pax_enter_kernel_user)
|
|
+
|
|
+ENTRY(pax_exit_kernel_user)
|
|
+ push %rdi
|
|
+
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+ pushq %rbx
|
|
+ PV_SAVE_REGS(CLBR_RDI)
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ GET_CR0_INTO_RDI
|
|
+ btr $16,%rdi
|
|
+ SET_RDI_INTO_CR0
|
|
+#endif
|
|
+
|
|
+ GET_CR3_INTO_RDI
|
|
+ add $__START_KERNEL_map,%rdi
|
|
+ sub phys_base(%rip),%rdi
|
|
+
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+ cmpl $0, pv_info+PARAVIRT_enabled
|
|
+ jz 1f
|
|
+ mov %rdi,%rbx
|
|
+ i = 0
|
|
+ .rept USER_PGD_PTRS
|
|
+ mov i*8(%rbx),%rsi
|
|
+ mov $0x67,%sil
|
|
+ lea i*8(%rbx),%rdi
|
|
+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
|
|
+ i = i + 1
|
|
+ .endr
|
|
+ jmp 2f
|
|
+1:
|
|
+#endif
|
|
+
|
|
+ i = 0
|
|
+ .rept USER_PGD_PTRS
|
|
+ movb $0x67,i*8(%rdi)
|
|
+ i = i + 1
|
|
+ .endr
|
|
+
|
|
+#ifdef CONFIG_PARAVIRT
|
|
+2: PV_RESTORE_REGS(CLBR_RDI)
|
|
+ popq %rbx
|
|
+#endif
|
|
+
|
|
+ popq %rdi
|
|
+ pax_force_retaddr
|
|
+ retq
|
|
+ENDPROC(pax_exit_kernel_user)
|
|
+#endif
|
|
+
|
|
+.macro pax_erase_kstack
|
|
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
|
|
+ call pax_erase_kstack
|
|
+#endif
|
|
+.endm
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
|
|
+/*
|
|
+ * r11: thread_info
|
|
+ * rcx, rdx: can be clobbered
|
|
+ */
|
|
+ENTRY(pax_erase_kstack)
|
|
+ pushq %rdi
|
|
+ pushq %rax
|
|
+ pushq %r11
|
|
+
|
|
+ GET_THREAD_INFO(%r11)
|
|
+ mov TI_lowest_stack(%r11), %rdi
|
|
+ mov $0xB4DD00D5BADBABE5, %rax
|
|
+ std
|
|
+
|
|
+1: mov %edi, %ecx
|
|
+ and $THREAD_SIZE_asm - 1, %ecx
|
|
+ shr $3, %ecx
|
|
+ repne scasq
|
|
+ jecxz 2f
|
|
+
|
|
+ cmp $2*8, %ecx
|
|
+ jc 2f
|
|
+
|
|
+ mov $2*8, %ecx
|
|
+ repe scasq
|
|
+ jecxz 2f
|
|
+ jne 1b
|
|
+
|
|
+2: cld
|
|
+ mov %esp, %ecx
|
|
+ sub %edi, %ecx
|
|
+
|
|
+ cmp $THREAD_SIZE_asm, %rcx
|
|
+ jb 3f
|
|
+ ud2
|
|
+3:
|
|
+
|
|
+ shr $3, %ecx
|
|
+ rep stosq
|
|
+
|
|
+ mov TI_task_thread_sp0(%r11), %rdi
|
|
+ sub $256, %rdi
|
|
+ mov %rdi, TI_lowest_stack(%r11)
|
|
+
|
|
+ popq %r11
|
|
+ popq %rax
|
|
+ popq %rdi
|
|
+ pax_force_retaddr
|
|
+ ret
|
|
+ENDPROC(pax_erase_kstack)
|
|
+#endif
|
|
|
|
.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
|
|
#ifdef CONFIG_TRACE_IRQFLAGS
|
|
@@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
|
|
.endm
|
|
|
|
.macro UNFAKE_STACK_FRAME
|
|
- addq $8*6, %rsp
|
|
- CFI_ADJUST_CFA_OFFSET -(6*8)
|
|
+ addq $8*6 + ARG_SKIP, %rsp
|
|
+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
|
|
.endm
|
|
|
|
/*
|
|
@@ -321,7 +605,7 @@ ENDPROC(native_usergs_sysret64)
|
|
movq %rsp, %rsi
|
|
|
|
leaq -RBP(%rsp),%rdi /* arg1 for handler */
|
|
- testl $3, CS-RBP(%rsi)
|
|
+ testb $3, CS-RBP(%rsi)
|
|
je 1f
|
|
SWAPGS
|
|
/*
|
|
@@ -356,9 +640,10 @@ ENTRY(save_rest)
|
|
movq_cfi r15, R15+16
|
|
movq %r11, 8(%rsp) /* return address */
|
|
FIXUP_TOP_OF_STACK %r11, 16
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
-END(save_rest)
|
|
+ENDPROC(save_rest)
|
|
|
|
/* save complete stack frame */
|
|
.pushsection .kprobes.text, "ax"
|
|
@@ -387,9 +672,10 @@ ENTRY(save_paranoid)
|
|
js 1f /* negative -> in kernel */
|
|
SWAPGS
|
|
xorl %ebx,%ebx
|
|
-1: ret
|
|
+1: pax_force_retaddr_bts
|
|
+ ret
|
|
CFI_ENDPROC
|
|
-END(save_paranoid)
|
|
+ENDPROC(save_paranoid)
|
|
.popsection
|
|
|
|
/*
|
|
@@ -460,7 +746,7 @@ END(ret_from_fork)
|
|
ENTRY(system_call)
|
|
CFI_STARTPROC simple
|
|
CFI_SIGNAL_FRAME
|
|
- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
|
|
+ CFI_DEF_CFA rsp,0
|
|
CFI_REGISTER rip,rcx
|
|
/*CFI_REGISTER rflags,r11*/
|
|
SWAPGS_UNSAFE_STACK
|
|
@@ -473,16 +759,18 @@ GLOBAL(system_call_after_swapgs)
|
|
|
|
movq %rsp,PER_CPU_VAR(old_rsp)
|
|
movq PER_CPU_VAR(kernel_stack),%rsp
|
|
+ SAVE_ARGS 8*6,0
|
|
+ pax_enter_kernel_user
|
|
/*
|
|
* No need to follow this irqs off/on section - it's straight
|
|
* and short:
|
|
*/
|
|
ENABLE_INTERRUPTS(CLBR_NONE)
|
|
- SAVE_ARGS 8,0
|
|
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
|
|
movq %rcx,RIP-ARGOFFSET(%rsp)
|
|
CFI_REL_OFFSET rip,RIP-ARGOFFSET
|
|
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
+ GET_THREAD_INFO(%rcx)
|
|
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
|
|
jnz tracesys
|
|
system_call_fastpath:
|
|
#if __SYSCALL_MASK == ~0
|
|
@@ -492,7 +780,7 @@ system_call_fastpath:
|
|
cmpl $__NR_syscall_max,%eax
|
|
#endif
|
|
ja badsys
|
|
- movq %r10,%rcx
|
|
+ movq R10-ARGOFFSET(%rsp),%rcx
|
|
call *sys_call_table(,%rax,8) # XXX: rip relative
|
|
movq %rax,RAX-ARGOFFSET(%rsp)
|
|
/*
|
|
@@ -506,10 +794,13 @@ sysret_check:
|
|
LOCKDEP_SYS_EXIT
|
|
DISABLE_INTERRUPTS(CLBR_NONE)
|
|
TRACE_IRQS_OFF
|
|
- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
|
|
+ GET_THREAD_INFO(%rcx)
|
|
+ movl TI_flags(%rcx),%edx
|
|
andl %edi,%edx
|
|
jnz sysret_careful
|
|
CFI_REMEMBER_STATE
|
|
+ pax_exit_kernel_user
|
|
+ pax_erase_kstack
|
|
/*
|
|
* sysretq will re-enable interrupts:
|
|
*/
|
|
@@ -561,14 +852,18 @@ badsys:
|
|
* jump back to the normal fast path.
|
|
*/
|
|
auditsys:
|
|
- movq %r10,%r9 /* 6th arg: 4th syscall arg */
|
|
+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
|
|
movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
|
|
movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
|
|
movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
|
|
movq %rax,%rsi /* 2nd arg: syscall number */
|
|
movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
|
|
call __audit_syscall_entry
|
|
+
|
|
+ pax_erase_kstack
|
|
+
|
|
LOAD_ARGS 0 /* reload call-clobbered registers */
|
|
+ pax_set_fptr_mask
|
|
jmp system_call_fastpath
|
|
|
|
/*
|
|
@@ -589,7 +884,7 @@ sysret_audit:
|
|
/* Do syscall tracing */
|
|
tracesys:
|
|
#ifdef CONFIG_AUDITSYSCALL
|
|
- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
|
|
+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
|
|
jz auditsys
|
|
#endif
|
|
SAVE_REST
|
|
@@ -597,12 +892,16 @@ tracesys:
|
|
FIXUP_TOP_OF_STACK %rdi
|
|
movq %rsp,%rdi
|
|
call syscall_trace_enter
|
|
+
|
|
+ pax_erase_kstack
|
|
+
|
|
/*
|
|
* Reload arg registers from stack in case ptrace changed them.
|
|
* We don't reload %rax because syscall_trace_enter() returned
|
|
* the value it wants us to use in the table lookup.
|
|
*/
|
|
LOAD_ARGS ARGOFFSET, 1
|
|
+ pax_set_fptr_mask
|
|
RESTORE_REST
|
|
#if __SYSCALL_MASK == ~0
|
|
cmpq $__NR_syscall_max,%rax
|
|
@@ -611,7 +910,7 @@ tracesys:
|
|
cmpl $__NR_syscall_max,%eax
|
|
#endif
|
|
ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
|
|
- movq %r10,%rcx /* fixup for C */
|
|
+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
|
|
call *sys_call_table(,%rax,8)
|
|
movq %rax,RAX-ARGOFFSET(%rsp)
|
|
/* Use IRET because user could have changed frame */
|
|
@@ -632,6 +931,7 @@ GLOBAL(int_with_check)
|
|
andl %edi,%edx
|
|
jnz int_careful
|
|
andl $~TS_COMPAT,TI_status(%rcx)
|
|
+ pax_erase_kstack
|
|
jmp retint_swapgs
|
|
|
|
/* Either reschedule or signal or syscall exit tracking needed. */
|
|
@@ -678,7 +978,7 @@ int_restore_rest:
|
|
TRACE_IRQS_OFF
|
|
jmp int_with_check
|
|
CFI_ENDPROC
|
|
-END(system_call)
|
|
+ENDPROC(system_call)
|
|
|
|
/*
|
|
* Certain special system calls that need to save a complete full stack frame.
|
|
@@ -694,7 +994,7 @@ ENTRY(\label)
|
|
call \func
|
|
jmp ptregscall_common
|
|
CFI_ENDPROC
|
|
-END(\label)
|
|
+ENDPROC(\label)
|
|
.endm
|
|
|
|
PTREGSCALL stub_clone, sys_clone, %r8
|
|
@@ -712,9 +1012,10 @@ ENTRY(ptregscall_common)
|
|
movq_cfi_restore R12+8, r12
|
|
movq_cfi_restore RBP+8, rbp
|
|
movq_cfi_restore RBX+8, rbx
|
|
+ pax_force_retaddr
|
|
ret $REST_SKIP /* pop extended registers */
|
|
CFI_ENDPROC
|
|
-END(ptregscall_common)
|
|
+ENDPROC(ptregscall_common)
|
|
|
|
ENTRY(stub_execve)
|
|
CFI_STARTPROC
|
|
@@ -729,7 +1030,7 @@ ENTRY(stub_execve)
|
|
RESTORE_REST
|
|
jmp int_ret_from_sys_call
|
|
CFI_ENDPROC
|
|
-END(stub_execve)
|
|
+ENDPROC(stub_execve)
|
|
|
|
/*
|
|
* sigreturn is special because it needs to restore all registers on return.
|
|
@@ -747,7 +1048,7 @@ ENTRY(stub_rt_sigreturn)
|
|
RESTORE_REST
|
|
jmp int_ret_from_sys_call
|
|
CFI_ENDPROC
|
|
-END(stub_rt_sigreturn)
|
|
+ENDPROC(stub_rt_sigreturn)
|
|
|
|
#ifdef CONFIG_X86_X32_ABI
|
|
PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
|
|
@@ -816,7 +1117,7 @@ vector=vector+1
|
|
2: jmp common_interrupt
|
|
.endr
|
|
CFI_ENDPROC
|
|
-END(irq_entries_start)
|
|
+ENDPROC(irq_entries_start)
|
|
|
|
.previous
|
|
END(interrupt)
|
|
@@ -836,6 +1137,16 @@ END(interrupt)
|
|
subq $ORIG_RAX-RBP, %rsp
|
|
CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
|
|
SAVE_ARGS_IRQ
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ testb $3, CS(%rdi)
|
|
+ jnz 1f
|
|
+ pax_enter_kernel
|
|
+ jmp 2f
|
|
+1: pax_enter_kernel_user
|
|
+2:
|
|
+#else
|
|
+ pax_enter_kernel
|
|
+#endif
|
|
call \func
|
|
.endm
|
|
|
|
@@ -867,7 +1178,7 @@ ret_from_intr:
|
|
|
|
exit_intr:
|
|
GET_THREAD_INFO(%rcx)
|
|
- testl $3,CS-ARGOFFSET(%rsp)
|
|
+ testb $3,CS-ARGOFFSET(%rsp)
|
|
je retint_kernel
|
|
|
|
/* Interrupt came from user space */
|
|
@@ -889,12 +1200,15 @@ retint_swapgs: /* return to user-space */
|
|
* The iretq could re-enable interrupts:
|
|
*/
|
|
DISABLE_INTERRUPTS(CLBR_ANY)
|
|
+ pax_exit_kernel_user
|
|
TRACE_IRQS_IRETQ
|
|
SWAPGS
|
|
jmp restore_args
|
|
|
|
retint_restore_args: /* return to kernel space */
|
|
DISABLE_INTERRUPTS(CLBR_ANY)
|
|
+ pax_exit_kernel
|
|
+ pax_force_retaddr RIP-ARGOFFSET
|
|
/*
|
|
* The iretq could re-enable interrupts:
|
|
*/
|
|
@@ -1014,7 +1328,7 @@ ENTRY(\sym)
|
|
interrupt \do_sym
|
|
jmp ret_from_intr
|
|
CFI_ENDPROC
|
|
-END(\sym)
|
|
+ENDPROC(\sym)
|
|
.endm
|
|
|
|
#ifdef CONFIG_SMP
|
|
@@ -1087,12 +1401,22 @@ ENTRY(\sym)
|
|
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
|
|
call error_entry
|
|
DEFAULT_FRAME 0
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ testb $3, CS(%rsp)
|
|
+ jnz 1f
|
|
+ pax_enter_kernel
|
|
+ jmp 2f
|
|
+1: pax_enter_kernel_user
|
|
+2:
|
|
+#else
|
|
+ pax_enter_kernel
|
|
+#endif
|
|
movq %rsp,%rdi /* pt_regs pointer */
|
|
xorl %esi,%esi /* no error code */
|
|
call \do_sym
|
|
jmp error_exit /* %ebx: no swapgs flag */
|
|
CFI_ENDPROC
|
|
-END(\sym)
|
|
+ENDPROC(\sym)
|
|
.endm
|
|
|
|
.macro paranoidzeroentry sym do_sym
|
|
@@ -1104,15 +1428,25 @@ ENTRY(\sym)
|
|
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
|
|
call save_paranoid
|
|
TRACE_IRQS_OFF
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ testb $3, CS(%rsp)
|
|
+ jnz 1f
|
|
+ pax_enter_kernel
|
|
+ jmp 2f
|
|
+1: pax_enter_kernel_user
|
|
+2:
|
|
+#else
|
|
+ pax_enter_kernel
|
|
+#endif
|
|
movq %rsp,%rdi /* pt_regs pointer */
|
|
xorl %esi,%esi /* no error code */
|
|
call \do_sym
|
|
jmp paranoid_exit /* %ebx: no swapgs flag */
|
|
CFI_ENDPROC
|
|
-END(\sym)
|
|
+ENDPROC(\sym)
|
|
.endm
|
|
|
|
-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
|
|
+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
|
|
.macro paranoidzeroentry_ist sym do_sym ist
|
|
ENTRY(\sym)
|
|
INTR_FRAME
|
|
@@ -1122,14 +1456,30 @@ ENTRY(\sym)
|
|
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
|
|
call save_paranoid
|
|
TRACE_IRQS_OFF
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ testb $3, CS(%rsp)
|
|
+ jnz 1f
|
|
+ pax_enter_kernel
|
|
+ jmp 2f
|
|
+1: pax_enter_kernel_user
|
|
+2:
|
|
+#else
|
|
+ pax_enter_kernel
|
|
+#endif
|
|
movq %rsp,%rdi /* pt_regs pointer */
|
|
xorl %esi,%esi /* no error code */
|
|
+#ifdef CONFIG_SMP
|
|
+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
|
|
+ lea init_tss(%r12), %r12
|
|
+#else
|
|
+ lea init_tss(%rip), %r12
|
|
+#endif
|
|
subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
|
|
call \do_sym
|
|
addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
|
|
jmp paranoid_exit /* %ebx: no swapgs flag */
|
|
CFI_ENDPROC
|
|
-END(\sym)
|
|
+ENDPROC(\sym)
|
|
.endm
|
|
|
|
.macro errorentry sym do_sym
|
|
@@ -1140,13 +1490,23 @@ ENTRY(\sym)
|
|
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
|
|
call error_entry
|
|
DEFAULT_FRAME 0
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ testb $3, CS(%rsp)
|
|
+ jnz 1f
|
|
+ pax_enter_kernel
|
|
+ jmp 2f
|
|
+1: pax_enter_kernel_user
|
|
+2:
|
|
+#else
|
|
+ pax_enter_kernel
|
|
+#endif
|
|
movq %rsp,%rdi /* pt_regs pointer */
|
|
movq ORIG_RAX(%rsp),%rsi /* get error code */
|
|
movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
|
|
call \do_sym
|
|
jmp error_exit /* %ebx: no swapgs flag */
|
|
CFI_ENDPROC
|
|
-END(\sym)
|
|
+ENDPROC(\sym)
|
|
.endm
|
|
|
|
/* error code is on the stack already */
|
|
@@ -1159,13 +1519,23 @@ ENTRY(\sym)
|
|
call save_paranoid
|
|
DEFAULT_FRAME 0
|
|
TRACE_IRQS_OFF
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ testb $3, CS(%rsp)
|
|
+ jnz 1f
|
|
+ pax_enter_kernel
|
|
+ jmp 2f
|
|
+1: pax_enter_kernel_user
|
|
+2:
|
|
+#else
|
|
+ pax_enter_kernel
|
|
+#endif
|
|
movq %rsp,%rdi /* pt_regs pointer */
|
|
movq ORIG_RAX(%rsp),%rsi /* get error code */
|
|
movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
|
|
call \do_sym
|
|
jmp paranoid_exit /* %ebx: no swapgs flag */
|
|
CFI_ENDPROC
|
|
-END(\sym)
|
|
+ENDPROC(\sym)
|
|
.endm
|
|
|
|
zeroentry divide_error do_divide_error
|
|
@@ -1195,9 +1565,10 @@ gs_change:
|
|
2: mfence /* workaround */
|
|
SWAPGS
|
|
popfq_cfi
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
-END(native_load_gs_index)
|
|
+ENDPROC(native_load_gs_index)
|
|
|
|
.section __ex_table,"a"
|
|
.align 8
|
|
@@ -1219,13 +1590,14 @@ ENTRY(kernel_thread_helper)
|
|
* Here we are in the child and the registers are set as they were
|
|
* at kernel_thread() invocation in the parent.
|
|
*/
|
|
+ pax_force_fptr %rsi
|
|
call *%rsi
|
|
# exit
|
|
mov %eax, %edi
|
|
call do_exit
|
|
ud2 # padding for call trace
|
|
CFI_ENDPROC
|
|
-END(kernel_thread_helper)
|
|
+ENDPROC(kernel_thread_helper)
|
|
|
|
/*
|
|
* execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
|
|
@@ -1252,11 +1624,11 @@ ENTRY(kernel_execve)
|
|
RESTORE_REST
|
|
testq %rax,%rax
|
|
je int_ret_from_sys_call
|
|
- RESTORE_ARGS
|
|
UNFAKE_STACK_FRAME
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
-END(kernel_execve)
|
|
+ENDPROC(kernel_execve)
|
|
|
|
/* Call softirq on interrupt stack. Interrupts are off. */
|
|
ENTRY(call_softirq)
|
|
@@ -1274,9 +1646,10 @@ ENTRY(call_softirq)
|
|
CFI_DEF_CFA_REGISTER rsp
|
|
CFI_ADJUST_CFA_OFFSET -8
|
|
decl PER_CPU_VAR(irq_count)
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
-END(call_softirq)
|
|
+ENDPROC(call_softirq)
|
|
|
|
#ifdef CONFIG_XEN
|
|
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
|
|
@@ -1314,7 +1687,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
|
|
decl PER_CPU_VAR(irq_count)
|
|
jmp error_exit
|
|
CFI_ENDPROC
|
|
-END(xen_do_hypervisor_callback)
|
|
+ENDPROC(xen_do_hypervisor_callback)
|
|
|
|
/*
|
|
* Hypervisor uses this for application faults while it executes.
|
|
@@ -1373,7 +1746,7 @@ ENTRY(xen_failsafe_callback)
|
|
SAVE_ALL
|
|
jmp error_exit
|
|
CFI_ENDPROC
|
|
-END(xen_failsafe_callback)
|
|
+ENDPROC(xen_failsafe_callback)
|
|
|
|
apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
|
|
xen_hvm_callback_vector xen_evtchn_do_upcall
|
|
@@ -1422,16 +1795,31 @@ ENTRY(paranoid_exit)
|
|
TRACE_IRQS_OFF
|
|
testl %ebx,%ebx /* swapgs needed? */
|
|
jnz paranoid_restore
|
|
- testl $3,CS(%rsp)
|
|
+ testb $3,CS(%rsp)
|
|
jnz paranoid_userspace
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ pax_exit_kernel
|
|
+ TRACE_IRQS_IRETQ 0
|
|
+ SWAPGS_UNSAFE_STACK
|
|
+ RESTORE_ALL 8
|
|
+ pax_force_retaddr_bts
|
|
+ jmp irq_return
|
|
+#endif
|
|
paranoid_swapgs:
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ pax_exit_kernel_user
|
|
+#else
|
|
+ pax_exit_kernel
|
|
+#endif
|
|
TRACE_IRQS_IRETQ 0
|
|
SWAPGS_UNSAFE_STACK
|
|
RESTORE_ALL 8
|
|
jmp irq_return
|
|
paranoid_restore:
|
|
+ pax_exit_kernel
|
|
TRACE_IRQS_IRETQ 0
|
|
RESTORE_ALL 8
|
|
+ pax_force_retaddr_bts
|
|
jmp irq_return
|
|
paranoid_userspace:
|
|
GET_THREAD_INFO(%rcx)
|
|
@@ -1460,7 +1848,7 @@ paranoid_schedule:
|
|
TRACE_IRQS_OFF
|
|
jmp paranoid_userspace
|
|
CFI_ENDPROC
|
|
-END(paranoid_exit)
|
|
+ENDPROC(paranoid_exit)
|
|
|
|
/*
|
|
* Exception entry point. This expects an error code/orig_rax on the stack.
|
|
@@ -1487,12 +1875,13 @@ ENTRY(error_entry)
|
|
movq_cfi r14, R14+8
|
|
movq_cfi r15, R15+8
|
|
xorl %ebx,%ebx
|
|
- testl $3,CS+8(%rsp)
|
|
+ testb $3,CS+8(%rsp)
|
|
je error_kernelspace
|
|
error_swapgs:
|
|
SWAPGS
|
|
error_sti:
|
|
TRACE_IRQS_OFF
|
|
+ pax_force_retaddr_bts
|
|
ret
|
|
|
|
/*
|
|
@@ -1526,7 +1915,7 @@ error_bad_iret:
|
|
decl %ebx /* Return to usergs */
|
|
jmp error_sti
|
|
CFI_ENDPROC
|
|
-END(error_entry)
|
|
+ENDPROC(error_entry)
|
|
|
|
|
|
/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
|
|
@@ -1546,7 +1935,7 @@ ENTRY(error_exit)
|
|
jnz retint_careful
|
|
jmp retint_swapgs
|
|
CFI_ENDPROC
|
|
-END(error_exit)
|
|
+ENDPROC(error_exit)
|
|
|
|
/*
|
|
* Test if a given stack is an NMI stack or not.
|
|
@@ -1604,9 +1993,11 @@ ENTRY(nmi)
|
|
* If %cs was not the kernel segment, then the NMI triggered in user
|
|
* space, which means it is definitely not nested.
|
|
*/
|
|
+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
|
|
+ je 1f
|
|
cmpl $__KERNEL_CS, 16(%rsp)
|
|
jne first_nmi
|
|
-
|
|
+1:
|
|
/*
|
|
* Check the special variable on the stack to see if NMIs are
|
|
* executing.
|
|
@@ -1753,6 +2144,16 @@ end_repeat_nmi:
|
|
*/
|
|
call save_paranoid
|
|
DEFAULT_FRAME 0
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ testb $3, CS(%rsp)
|
|
+ jnz 1f
|
|
+ pax_enter_kernel
|
|
+ jmp 2f
|
|
+1: pax_enter_kernel_user
|
|
+2:
|
|
+#else
|
|
+ pax_enter_kernel
|
|
+#endif
|
|
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
|
|
movq %rsp,%rdi
|
|
movq $-1,%rsi
|
|
@@ -1760,21 +2161,32 @@ end_repeat_nmi:
|
|
testl %ebx,%ebx /* swapgs needed? */
|
|
jnz nmi_restore
|
|
nmi_swapgs:
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ pax_exit_kernel_user
|
|
+#else
|
|
+ pax_exit_kernel
|
|
+#endif
|
|
SWAPGS_UNSAFE_STACK
|
|
+ RESTORE_ALL 8
|
|
+ /* Clear the NMI executing stack variable */
|
|
+ movq $0, 10*8(%rsp)
|
|
+ jmp irq_return
|
|
nmi_restore:
|
|
+ pax_exit_kernel
|
|
RESTORE_ALL 8
|
|
+ pax_force_retaddr_bts
|
|
/* Clear the NMI executing stack variable */
|
|
movq $0, 10*8(%rsp)
|
|
jmp irq_return
|
|
CFI_ENDPROC
|
|
-END(nmi)
|
|
+ENDPROC(nmi)
|
|
|
|
ENTRY(ignore_sysret)
|
|
CFI_STARTPROC
|
|
mov $-ENOSYS,%eax
|
|
sysret
|
|
CFI_ENDPROC
|
|
-END(ignore_sysret)
|
|
+ENDPROC(ignore_sysret)
|
|
|
|
/*
|
|
* End of kprobes section
|
|
diff --git a/arch/x86/kernel/entry_64.S.rej b/arch/x86/kernel/entry_64.S.rej
|
|
new file mode 100644
|
|
index 0000000..9d787a754
|
|
--- /dev/null
|
|
+++ b/arch/x86/kernel/entry_64.S.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- arch/x86/kernel/entry_64.S 2012-05-21 11:32:57.479927660 +0200
|
|
++++ arch/x86/kernel/entry_64.S 2012-05-21 12:10:09.524048888 +0200
|
|
+@@ -1293,7 +1607,7 @@ ENTRY(retint_kernel)
|
|
+ #endif
|
|
+
|
|
+ CFI_ENDPROC
|
|
+-END(common_interrupt)
|
|
++ENDPROC(common_interrupt)
|
|
+ /*
|
|
+ * End of kprobes section
|
|
+ */
|
|
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
|
|
index c9a281f..ce2f317 100644
|
|
--- a/arch/x86/kernel/ftrace.c
|
|
+++ b/arch/x86/kernel/ftrace.c
|
|
@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
|
|
static const void *mod_code_newcode; /* holds the text to write to the IP */
|
|
|
|
static unsigned nmi_wait_count;
|
|
-static atomic_t nmi_update_count = ATOMIC_INIT(0);
|
|
+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
|
|
|
|
int ftrace_arch_read_dyn_info(char *buf, int size)
|
|
{
|
|
@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
|
|
|
|
r = snprintf(buf, size, "%u %u",
|
|
nmi_wait_count,
|
|
- atomic_read(&nmi_update_count));
|
|
+ atomic_read_unchecked(&nmi_update_count));
|
|
return r;
|
|
}
|
|
|
|
@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
|
|
|
|
if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
|
|
smp_rmb();
|
|
+ pax_open_kernel();
|
|
ftrace_mod_code();
|
|
- atomic_inc(&nmi_update_count);
|
|
+ pax_close_kernel();
|
|
+ atomic_inc_unchecked(&nmi_update_count);
|
|
}
|
|
/* Must have previous changes seen before executions */
|
|
smp_mb();
|
|
@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
|
|
{
|
|
unsigned char replaced[MCOUNT_INSN_SIZE];
|
|
|
|
+ ip = ktla_ktva(ip);
|
|
+
|
|
/*
|
|
* Note: Due to modules and __init, code can
|
|
* disappear and change, we need to protect against faulting
|
|
@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
|
|
unsigned char old[MCOUNT_INSN_SIZE], *new;
|
|
int ret;
|
|
|
|
- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
|
|
+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
|
|
new = ftrace_call_replace(ip, (unsigned long)func);
|
|
ret = ftrace_modify_code(ip, old, new);
|
|
|
|
@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
|
|
{
|
|
unsigned char code[MCOUNT_INSN_SIZE];
|
|
|
|
+ ip = ktla_ktva(ip);
|
|
+
|
|
if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
|
|
return -EFAULT;
|
|
|
|
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
|
|
index 51ff186..9e77418 100644
|
|
--- a/arch/x86/kernel/head32.c
|
|
+++ b/arch/x86/kernel/head32.c
|
|
@@ -19,6 +19,7 @@
|
|
#include <asm/io_apic.h>
|
|
#include <asm/bios_ebda.h>
|
|
#include <asm/tlbflush.h>
|
|
+#include <asm/boot.h>
|
|
|
|
static void __init i386_default_early_setup(void)
|
|
{
|
|
@@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
|
|
|
|
void __init i386_start_kernel(void)
|
|
{
|
|
- memblock_reserve(__pa_symbol(&_text),
|
|
- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
|
|
+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
|
|
|
|
#ifdef CONFIG_BLK_DEV_INITRD
|
|
/* Reserve INITRD */
|
|
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
|
|
index ce0be7c..c41476e 100644
|
|
--- a/arch/x86/kernel/head_32.S
|
|
+++ b/arch/x86/kernel/head_32.S
|
|
@@ -25,6 +25,12 @@
|
|
/* Physical address */
|
|
#define pa(X) ((X) - __PAGE_OFFSET)
|
|
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+#define ta(X) (X)
|
|
+#else
|
|
+#define ta(X) ((X) - __PAGE_OFFSET)
|
|
+#endif
|
|
+
|
|
/*
|
|
* References to members of the new_cpu_data structure.
|
|
*/
|
|
@@ -54,11 +60,7 @@
|
|
* and small than max_low_pfn, otherwise will waste some page table entries
|
|
*/
|
|
|
|
-#if PTRS_PER_PMD > 1
|
|
-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
|
|
-#else
|
|
-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
|
|
-#endif
|
|
+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
|
|
|
|
/* Number of possible pages in the lowmem region */
|
|
LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
|
|
@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
|
|
RESERVE_BRK(pagetables, INIT_MAP_SIZE)
|
|
|
|
/*
|
|
+ * Real beginning of normal "text" segment
|
|
+ */
|
|
+ENTRY(stext)
|
|
+ENTRY(_stext)
|
|
+
|
|
+/*
|
|
* 32-bit kernel entrypoint; only used by the boot CPU. On entry,
|
|
* %esi points to the real-mode code as a 32-bit pointer.
|
|
* CS and DS must be 4 GB flat segments, but we don't depend on
|
|
@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
|
|
* can.
|
|
*/
|
|
__HEAD
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ jmp startup_32
|
|
+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
|
|
+.fill PAGE_SIZE-5,1,0xcc
|
|
+#endif
|
|
+
|
|
ENTRY(startup_32)
|
|
movl pa(stack_start),%ecx
|
|
|
|
@@ -105,6 +120,57 @@ ENTRY(startup_32)
|
|
2:
|
|
leal -__PAGE_OFFSET(%ecx),%esp
|
|
|
|
+#ifdef CONFIG_SMP
|
|
+ movl $pa(cpu_gdt_table),%edi
|
|
+ movl $__per_cpu_load,%eax
|
|
+ movw %ax,__KERNEL_PERCPU + 2(%edi)
|
|
+ rorl $16,%eax
|
|
+ movb %al,__KERNEL_PERCPU + 4(%edi)
|
|
+ movb %ah,__KERNEL_PERCPU + 7(%edi)
|
|
+ movl $__per_cpu_end - 1,%eax
|
|
+ subl $__per_cpu_start,%eax
|
|
+ movw %ax,__KERNEL_PERCPU + 0(%edi)
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ movl $NR_CPUS,%ecx
|
|
+ movl $pa(cpu_gdt_table),%edi
|
|
+1:
|
|
+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
|
|
+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
|
|
+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
|
|
+ addl $PAGE_SIZE_asm,%edi
|
|
+ loop 1b
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ movl $pa(boot_gdt),%edi
|
|
+ movl $__LOAD_PHYSICAL_ADDR,%eax
|
|
+ movw %ax,__BOOT_CS + 2(%edi)
|
|
+ rorl $16,%eax
|
|
+ movb %al,__BOOT_CS + 4(%edi)
|
|
+ movb %ah,__BOOT_CS + 7(%edi)
|
|
+ rorl $16,%eax
|
|
+
|
|
+ ljmp $(__BOOT_CS),$1f
|
|
+1:
|
|
+
|
|
+ movl $NR_CPUS,%ecx
|
|
+ movl $pa(cpu_gdt_table),%edi
|
|
+ addl $__PAGE_OFFSET,%eax
|
|
+1:
|
|
+ movw %ax,__KERNEL_CS + 2(%edi)
|
|
+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
|
|
+ rorl $16,%eax
|
|
+ movb %al,__KERNEL_CS + 4(%edi)
|
|
+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
|
|
+ movb %ah,__KERNEL_CS + 7(%edi)
|
|
+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
|
|
+ rorl $16,%eax
|
|
+ addl $PAGE_SIZE_asm,%edi
|
|
+ loop 1b
|
|
+#endif
|
|
+
|
|
/*
|
|
* Clear BSS first so that there are no surprises...
|
|
*/
|
|
@@ -195,8 +261,11 @@ ENTRY(startup_32)
|
|
movl %eax, pa(max_pfn_mapped)
|
|
|
|
/* Do early initialization of the fixmap area */
|
|
- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
|
|
- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
|
|
+#ifdef CONFIG_COMPAT_VDSO
|
|
+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
|
|
+#else
|
|
+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
|
|
+#endif
|
|
#else /* Not PAE */
|
|
|
|
page_pde_offset = (__PAGE_OFFSET >> 20);
|
|
@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
|
|
movl %eax, pa(max_pfn_mapped)
|
|
|
|
/* Do early initialization of the fixmap area */
|
|
- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
|
|
- movl %eax,pa(initial_page_table+0xffc)
|
|
+#ifdef CONFIG_COMPAT_VDSO
|
|
+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
|
|
+#else
|
|
+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
|
|
+#endif
|
|
#endif
|
|
|
|
#ifdef CONFIG_PARAVIRT
|
|
@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
|
|
cmpl $num_subarch_entries, %eax
|
|
jae bad_subarch
|
|
|
|
- movl pa(subarch_entries)(,%eax,4), %eax
|
|
- subl $__PAGE_OFFSET, %eax
|
|
- jmp *%eax
|
|
+ jmp *pa(subarch_entries)(,%eax,4)
|
|
|
|
bad_subarch:
|
|
WEAK(lguest_entry)
|
|
@@ -255,10 +325,10 @@ WEAK(xen_entry)
|
|
__INITDATA
|
|
|
|
subarch_entries:
|
|
- .long default_entry /* normal x86/PC */
|
|
- .long lguest_entry /* lguest hypervisor */
|
|
- .long xen_entry /* Xen hypervisor */
|
|
- .long default_entry /* Moorestown MID */
|
|
+ .long ta(default_entry) /* normal x86/PC */
|
|
+ .long ta(lguest_entry) /* lguest hypervisor */
|
|
+ .long ta(xen_entry) /* Xen hypervisor */
|
|
+ .long ta(default_entry) /* Moorestown MID */
|
|
num_subarch_entries = (. - subarch_entries) / 4
|
|
.previous
|
|
#else
|
|
@@ -312,6 +382,7 @@ default_entry:
|
|
orl %edx,%eax
|
|
movl %eax,%cr4
|
|
|
|
+#ifdef CONFIG_X86_PAE
|
|
testb $X86_CR4_PAE, %al # check if PAE is enabled
|
|
jz 6f
|
|
|
|
@@ -340,6 +411,9 @@ default_entry:
|
|
/* Make changes effective */
|
|
wrmsr
|
|
|
|
+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
|
|
+#endif
|
|
+
|
|
6:
|
|
|
|
/*
|
|
@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
|
|
1: movl $(__KERNEL_DS),%eax # reload all the segment registers
|
|
movl %eax,%ss # after changing gdt.
|
|
|
|
- movl $(__USER_DS),%eax # DS/ES contains default USER segment
|
|
+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
|
|
movl %eax,%ds
|
|
movl %eax,%es
|
|
|
|
@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
|
|
*/
|
|
cmpb $0,ready
|
|
jne 1f
|
|
- movl $gdt_page,%eax
|
|
+ movl $cpu_gdt_table,%eax
|
|
movl $stack_canary,%ecx
|
|
+#ifdef CONFIG_SMP
|
|
+ addl $__per_cpu_load,%ecx
|
|
+#endif
|
|
movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
|
|
shrl $16, %ecx
|
|
movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
|
|
movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
|
|
1:
|
|
-#endif
|
|
movl $(__KERNEL_STACK_CANARY),%eax
|
|
+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ movl $(__USER_DS),%eax
|
|
+#else
|
|
+ xorl %eax,%eax
|
|
+#endif
|
|
movl %eax,%gs
|
|
|
|
xorl %eax,%eax # Clear LDT
|
|
@@ -558,22 +639,22 @@ early_page_fault:
|
|
jmp early_fault
|
|
|
|
early_fault:
|
|
- cld
|
|
#ifdef CONFIG_PRINTK
|
|
+ cmpl $1,%ss:early_recursion_flag
|
|
+ je hlt_loop
|
|
+ incl %ss:early_recursion_flag
|
|
+ cld
|
|
pusha
|
|
movl $(__KERNEL_DS),%eax
|
|
movl %eax,%ds
|
|
movl %eax,%es
|
|
- cmpl $2,early_recursion_flag
|
|
- je hlt_loop
|
|
- incl early_recursion_flag
|
|
movl %cr2,%eax
|
|
pushl %eax
|
|
pushl %edx /* trapno */
|
|
pushl $fault_msg
|
|
call printk
|
|
+; call dump_stack
|
|
#endif
|
|
- call dump_stack
|
|
hlt_loop:
|
|
hlt
|
|
jmp hlt_loop
|
|
@@ -581,8 +662,11 @@ hlt_loop:
|
|
/* This is the default interrupt "handler" :-) */
|
|
ALIGN
|
|
ignore_int:
|
|
- cld
|
|
#ifdef CONFIG_PRINTK
|
|
+ cmpl $2,%ss:early_recursion_flag
|
|
+ je hlt_loop
|
|
+ incl %ss:early_recursion_flag
|
|
+ cld
|
|
pushl %eax
|
|
pushl %ecx
|
|
pushl %edx
|
|
@@ -591,9 +675,6 @@ ignore_int:
|
|
movl $(__KERNEL_DS),%eax
|
|
movl %eax,%ds
|
|
movl %eax,%es
|
|
- cmpl $2,early_recursion_flag
|
|
- je hlt_loop
|
|
- incl early_recursion_flag
|
|
pushl 16(%esp)
|
|
pushl 24(%esp)
|
|
pushl 32(%esp)
|
|
@@ -622,29 +703,43 @@ ENTRY(initial_code)
|
|
/*
|
|
* BSS section
|
|
*/
|
|
-__PAGE_ALIGNED_BSS
|
|
- .align PAGE_SIZE
|
|
#ifdef CONFIG_X86_PAE
|
|
+.section .initial_pg_pmd,"a",@progbits
|
|
initial_pg_pmd:
|
|
.fill 1024*KPMDS,4,0
|
|
#else
|
|
+.section .initial_page_table,"a",@progbits
|
|
ENTRY(initial_page_table)
|
|
.fill 1024,4,0
|
|
#endif
|
|
+.section .initial_pg_fixmap,"a",@progbits
|
|
initial_pg_fixmap:
|
|
.fill 1024,4,0
|
|
+.section .empty_zero_page,"a",@progbits
|
|
ENTRY(empty_zero_page)
|
|
.fill 4096,1,0
|
|
+.section .swapper_pg_dir,"a",@progbits
|
|
ENTRY(swapper_pg_dir)
|
|
+#ifdef CONFIG_X86_PAE
|
|
+ .fill 4,8,0
|
|
+#else
|
|
.fill 1024,4,0
|
|
+#endif
|
|
+
|
|
+/*
|
|
+ * The IDT has to be page-aligned to simplify the Pentium
|
|
+ * F0 0F bug workaround.. We have a special link segment
|
|
+ * for this.
|
|
+ */
|
|
+.section .idt,"a",@progbits
|
|
+ENTRY(idt_table)
|
|
+ .fill 256,8,0
|
|
|
|
/*
|
|
* This starts the data section.
|
|
*/
|
|
#ifdef CONFIG_X86_PAE
|
|
-__PAGE_ALIGNED_DATA
|
|
- /* Page-aligned for the benefit of paravirt? */
|
|
- .align PAGE_SIZE
|
|
+.section .initial_page_table,"a",@progbits
|
|
ENTRY(initial_page_table)
|
|
.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
|
|
# if KPMDS == 3
|
|
@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
|
|
# error "Kernel PMDs should be 1, 2 or 3"
|
|
# endif
|
|
.align PAGE_SIZE /* needs to be page-sized too */
|
|
+
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+ENTRY(cpu_pgd)
|
|
+ .rept NR_CPUS
|
|
+ .fill 4,8,0
|
|
+ .endr
|
|
+#endif
|
|
+
|
|
#endif
|
|
|
|
.data
|
|
.balign 4
|
|
ENTRY(stack_start)
|
|
- .long init_thread_union+THREAD_SIZE
|
|
+ .long init_thread_union+THREAD_SIZE-8
|
|
|
|
+ready: .byte 0
|
|
+
|
|
+.section .rodata,"a",@progbits
|
|
early_recursion_flag:
|
|
.long 0
|
|
|
|
-ready: .byte 0
|
|
-
|
|
int_msg:
|
|
.asciz "Unknown interrupt or fault at: %p %p %p\n"
|
|
|
|
@@ -707,7 +811,7 @@ fault_msg:
|
|
.word 0 # 32 bit align gdt_desc.address
|
|
boot_gdt_descr:
|
|
.word __BOOT_DS+7
|
|
- .long boot_gdt - __PAGE_OFFSET
|
|
+ .long pa(boot_gdt)
|
|
|
|
.word 0 # 32-bit align idt_desc.address
|
|
idt_descr:
|
|
@@ -718,7 +822,7 @@ idt_descr:
|
|
.word 0 # 32 bit align gdt_desc.address
|
|
ENTRY(early_gdt_descr)
|
|
.word GDT_ENTRIES*8-1
|
|
- .long gdt_page /* Overwritten for secondary CPUs */
|
|
+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
|
|
|
|
/*
|
|
* The boot_gdt must mirror the equivalent in setup.S and is
|
|
@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
|
|
.align L1_CACHE_BYTES
|
|
ENTRY(boot_gdt)
|
|
.fill GDT_ENTRY_BOOT_CS,8,0
|
|
- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
|
|
- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
|
|
+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
|
|
+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
|
|
+
|
|
+ .align PAGE_SIZE_asm
|
|
+ENTRY(cpu_gdt_table)
|
|
+ .rept NR_CPUS
|
|
+ .quad 0x0000000000000000 /* NULL descriptor */
|
|
+ .quad 0x0000000000000000 /* 0x0b reserved */
|
|
+ .quad 0x0000000000000000 /* 0x13 reserved */
|
|
+ .quad 0x0000000000000000 /* 0x1b reserved */
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
|
|
+#else
|
|
+ .quad 0x0000000000000000 /* 0x20 unused */
|
|
+#endif
|
|
+
|
|
+ .quad 0x0000000000000000 /* 0x28 unused */
|
|
+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
|
|
+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
|
|
+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
|
|
+ .quad 0x0000000000000000 /* 0x4b reserved */
|
|
+ .quad 0x0000000000000000 /* 0x53 reserved */
|
|
+ .quad 0x0000000000000000 /* 0x5b reserved */
|
|
+
|
|
+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
|
|
+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
|
|
+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
|
|
+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
|
|
+
|
|
+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
|
|
+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
|
|
+
|
|
+ /*
|
|
+ * Segments used for calling PnP BIOS have byte granularity.
|
|
+ * The code segments and data segments have fixed 64k limits,
|
|
+ * the transfer segment sizes are set at run time.
|
|
+ */
|
|
+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
|
|
+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
|
|
+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
|
|
+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
|
|
+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
|
|
+
|
|
+ /*
|
|
+ * The APM segments have byte granularity and their bases
|
|
+ * are set at run time. All have 64k limits.
|
|
+ */
|
|
+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
|
|
+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
|
|
+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
|
|
+
|
|
+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
|
|
+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
|
|
+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
|
|
+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
|
|
+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
|
|
+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
|
|
+
|
|
+ /* Be sure this is zeroed to avoid false validations in Xen */
|
|
+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
|
|
+ .endr
|
|
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
|
|
index 40f4eb3..6d24d9d 100644
|
|
--- a/arch/x86/kernel/head_64.S
|
|
+++ b/arch/x86/kernel/head_64.S
|
|
@@ -19,6 +19,8 @@
|
|
#include <asm/cache.h>
|
|
#include <asm/processor-flags.h>
|
|
#include <asm/percpu.h>
|
|
+#include <asm/cpufeature.h>
|
|
+#include <asm/alternative-asm.h>
|
|
|
|
#ifdef CONFIG_PARAVIRT
|
|
#include <asm/asm-offsets.h>
|
|
@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
|
|
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
|
|
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
|
|
L3_START_KERNEL = pud_index(__START_KERNEL_map)
|
|
+L4_VMALLOC_START = pgd_index(VMALLOC_START)
|
|
+L3_VMALLOC_START = pud_index(VMALLOC_START)
|
|
+L4_VMALLOC_END = pgd_index(VMALLOC_END)
|
|
+L3_VMALLOC_END = pud_index(VMALLOC_END)
|
|
+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
|
|
+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
|
|
|
|
.text
|
|
__HEAD
|
|
@@ -85,35 +93,23 @@ startup_64:
|
|
*/
|
|
addq %rbp, init_level4_pgt + 0(%rip)
|
|
addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
|
|
+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
|
|
+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
|
|
+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
|
|
addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
|
|
|
|
addq %rbp, level3_ident_pgt + 0(%rip)
|
|
+#ifndef CONFIG_XEN
|
|
+ addq %rbp, level3_ident_pgt + 8(%rip)
|
|
+#endif
|
|
|
|
- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
|
|
- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
|
|
-
|
|
- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
|
|
-
|
|
- /* Add an Identity mapping if I am above 1G */
|
|
- leaq _text(%rip), %rdi
|
|
- andq $PMD_PAGE_MASK, %rdi
|
|
-
|
|
- movq %rdi, %rax
|
|
- shrq $PUD_SHIFT, %rax
|
|
- andq $(PTRS_PER_PUD - 1), %rax
|
|
- jz ident_complete
|
|
+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
|
|
|
|
- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
|
|
- leaq level3_ident_pgt(%rip), %rbx
|
|
- movq %rdx, 0(%rbx, %rax, 8)
|
|
+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
|
|
+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
|
|
|
|
- movq %rdi, %rax
|
|
- shrq $PMD_SHIFT, %rax
|
|
- andq $(PTRS_PER_PMD - 1), %rax
|
|
- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
|
|
- leaq level2_spare_pgt(%rip), %rbx
|
|
- movq %rdx, 0(%rbx, %rax, 8)
|
|
-ident_complete:
|
|
+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
|
|
+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
|
|
|
|
/*
|
|
* Fixup the kernel text+data virtual addresses. Note that
|
|
@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
|
|
* after the boot processor executes this code.
|
|
*/
|
|
|
|
- /* Enable PAE mode and PGE */
|
|
- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
|
|
+ /* Enable PAE mode and PSE/PGE */
|
|
+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
|
|
movq %rax, %cr4
|
|
|
|
/* Setup early boot stage 4 level pagetables. */
|
|
@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
|
|
movl $MSR_EFER, %ecx
|
|
rdmsr
|
|
btsl $_EFER_SCE, %eax /* Enable System Call */
|
|
- btl $20,%edi /* No Execute supported? */
|
|
+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
|
|
jnc 1f
|
|
btsl $_EFER_NX, %eax
|
|
+ leaq init_level4_pgt(%rip), %rdi
|
|
+#ifndef CONFIG_EFI
|
|
+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
|
|
+#endif
|
|
+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
|
|
+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
|
|
+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
|
|
+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
|
|
1: wrmsr /* Make changes effective */
|
|
|
|
/* Setup cr0 */
|
|
@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
|
|
* jump. In addition we need to ensure %cs is set so we make this
|
|
* a far return.
|
|
*/
|
|
+ pax_set_fptr_mask
|
|
movq initial_code(%rip),%rax
|
|
pushq $0 # fake return address to stop unwinder
|
|
pushq $__KERNEL_CS # set correct cs
|
|
@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
|
|
bad_address:
|
|
jmp bad_address
|
|
|
|
- .section ".init.text","ax"
|
|
+ __INIT
|
|
#ifdef CONFIG_EARLY_PRINTK
|
|
.globl early_idt_handlers
|
|
early_idt_handlers:
|
|
@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
|
|
#endif /* EARLY_PRINTK */
|
|
1: hlt
|
|
jmp 1b
|
|
+ .previous
|
|
|
|
#ifdef CONFIG_EARLY_PRINTK
|
|
+ __INITDATA
|
|
early_recursion_flag:
|
|
.long 0
|
|
+ .previous
|
|
|
|
+ .section .rodata,"a",@progbits
|
|
early_idt_msg:
|
|
.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
|
|
early_idt_ripmsg:
|
|
.asciz "RIP %s\n"
|
|
-#endif /* CONFIG_EARLY_PRINTK */
|
|
.previous
|
|
+#endif /* CONFIG_EARLY_PRINTK */
|
|
|
|
+ .section .rodata,"a",@progbits
|
|
#define NEXT_PAGE(name) \
|
|
.balign PAGE_SIZE; \
|
|
ENTRY(name)
|
|
@@ -338,7 +348,6 @@ ENTRY(name)
|
|
i = i + 1 ; \
|
|
.endr
|
|
|
|
- .data
|
|
/*
|
|
* This default setting generates an ident mapping at address 0x100000
|
|
* and a mapping for the kernel that precisely maps virtual address
|
|
@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
|
|
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
|
|
.org init_level4_pgt + L4_PAGE_OFFSET*8, 0
|
|
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
|
|
+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
|
|
+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
|
|
+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
|
|
+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
|
|
+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
|
|
+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
|
|
.org init_level4_pgt + L4_START_KERNEL*8, 0
|
|
/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
|
|
.quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
|
|
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+NEXT_PAGE(cpu_pgd)
|
|
+ .rept NR_CPUS
|
|
+ .fill 512,8,0
|
|
+ .endr
|
|
+#endif
|
|
+
|
|
NEXT_PAGE(level3_ident_pgt)
|
|
.quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
|
|
+#ifdef CONFIG_XEN
|
|
.fill 511,8,0
|
|
+#else
|
|
+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
|
|
+ .fill 510,8,0
|
|
+#endif
|
|
+
|
|
+NEXT_PAGE(level3_vmalloc_start_pgt)
|
|
+ .fill 512,8,0
|
|
+
|
|
+NEXT_PAGE(level3_vmalloc_end_pgt)
|
|
+ .fill 512,8,0
|
|
+
|
|
+NEXT_PAGE(level3_vmemmap_pgt)
|
|
+ .fill L3_VMEMMAP_START,8,0
|
|
+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
|
|
|
|
NEXT_PAGE(level3_kernel_pgt)
|
|
.fill L3_START_KERNEL,8,0
|
|
@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
|
|
.quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
|
|
.quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
|
|
|
|
+NEXT_PAGE(level2_vmemmap_pgt)
|
|
+ .fill 512,8,0
|
|
+
|
|
NEXT_PAGE(level2_fixmap_pgt)
|
|
- .fill 506,8,0
|
|
- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
|
|
- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
|
|
- .fill 5,8,0
|
|
+ .fill 507,8,0
|
|
+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
|
|
+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
|
|
+ .fill 4,8,0
|
|
|
|
-NEXT_PAGE(level1_fixmap_pgt)
|
|
+NEXT_PAGE(level1_vsyscall_pgt)
|
|
.fill 512,8,0
|
|
|
|
-NEXT_PAGE(level2_ident_pgt)
|
|
- /* Since I easily can, map the first 1G.
|
|
+ /* Since I easily can, map the first 2G.
|
|
* Don't set NX because code runs from these pages.
|
|
*/
|
|
- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
|
|
+NEXT_PAGE(level2_ident_pgt)
|
|
+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
|
|
|
|
NEXT_PAGE(level2_kernel_pgt)
|
|
/*
|
|
@@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
|
|
* If you want to increase this then increase MODULES_VADDR
|
|
* too.)
|
|
*/
|
|
- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
|
|
- KERNEL_IMAGE_SIZE/PMD_SIZE)
|
|
-
|
|
-NEXT_PAGE(level2_spare_pgt)
|
|
- .fill 512, 8, 0
|
|
+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
|
|
|
|
#undef PMDS
|
|
#undef NEXT_PAGE
|
|
|
|
- .data
|
|
+ .align PAGE_SIZE
|
|
+ENTRY(cpu_gdt_table)
|
|
+ .rept NR_CPUS
|
|
+ .quad 0x0000000000000000 /* NULL descriptor */
|
|
+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
|
|
+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
|
|
+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
|
|
+ .quad 0x00cffb000000ffff /* __USER32_CS */
|
|
+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
|
|
+ .quad 0x00affb000000ffff /* __USER_CS */
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
|
|
+#else
|
|
+ .quad 0x0 /* unused */
|
|
+#endif
|
|
+
|
|
+ .quad 0,0 /* TSS */
|
|
+ .quad 0,0 /* LDT */
|
|
+ .quad 0,0,0 /* three TLS descriptors */
|
|
+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
|
|
+ /* asm/segment.h:GDT_ENTRIES must match this */
|
|
+
|
|
+ /* zero the remaining page */
|
|
+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
|
|
+ .endr
|
|
+
|
|
.align 16
|
|
.globl early_gdt_descr
|
|
early_gdt_descr:
|
|
.word GDT_ENTRIES*8-1
|
|
early_gdt_descr_base:
|
|
- .quad INIT_PER_CPU_VAR(gdt_page)
|
|
+ .quad cpu_gdt_table
|
|
|
|
ENTRY(phys_base)
|
|
/* This must match the first entry in level2_kernel_pgt */
|
|
.quad 0x0000000000000000
|
|
|
|
#include "../../x86/xen/xen-head.S"
|
|
-
|
|
- .section .bss, "aw", @nobits
|
|
+
|
|
+ .section .rodata,"a",@progbits
|
|
.align L1_CACHE_BYTES
|
|
ENTRY(idt_table)
|
|
- .skip IDT_ENTRIES * 16
|
|
+ .fill 512,8,0
|
|
|
|
.align L1_CACHE_BYTES
|
|
ENTRY(nmi_idt_table)
|
|
- .skip IDT_ENTRIES * 16
|
|
+ .fill 512,8,0
|
|
|
|
__PAGE_ALIGNED_BSS
|
|
.align PAGE_SIZE
|
|
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
|
|
index 9c3bd4a..e1d9b35 100644
|
|
--- a/arch/x86/kernel/i386_ksyms_32.c
|
|
+++ b/arch/x86/kernel/i386_ksyms_32.c
|
|
@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
|
|
EXPORT_SYMBOL(cmpxchg8b_emu);
|
|
#endif
|
|
|
|
+EXPORT_SYMBOL_GPL(cpu_gdt_table);
|
|
+
|
|
/* Networking helper routines. */
|
|
EXPORT_SYMBOL(csum_partial_copy_generic);
|
|
+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
|
|
+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
|
|
|
|
EXPORT_SYMBOL(__get_user_1);
|
|
EXPORT_SYMBOL(__get_user_2);
|
|
@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
|
|
|
|
EXPORT_SYMBOL(csum_partial);
|
|
EXPORT_SYMBOL(empty_zero_page);
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
|
|
+#endif
|
|
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
|
|
index 7aa728d..986df68 100644
|
|
--- a/arch/x86/kernel/i387.c
|
|
+++ b/arch/x86/kernel/i387.c
|
|
@@ -59,7 +59,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
|
|
static inline bool interrupted_user_mode(void)
|
|
{
|
|
struct pt_regs *regs = get_irq_regs();
|
|
- return regs && user_mode_vm(regs);
|
|
+ return regs && user_mode(regs);
|
|
}
|
|
|
|
/*
|
|
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
|
|
index 36d1853..bf25736 100644
|
|
--- a/arch/x86/kernel/i8259.c
|
|
+++ b/arch/x86/kernel/i8259.c
|
|
@@ -209,7 +209,7 @@ static void mask_and_ack_8259A(struct irq_data *data)
|
|
"spurious 8259A interrupt: IRQ%d.\n", irq);
|
|
spurious_irq_mask |= irqmask;
|
|
}
|
|
- atomic_inc(&irq_err_count);
|
|
+ atomic_inc_unchecked(&irq_err_count);
|
|
/*
|
|
* Theoretically we do not have to handle this IRQ,
|
|
* but in Linux this does not cause problems and is
|
|
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
|
|
index 43e9ccf..44ccf6f 100644
|
|
--- a/arch/x86/kernel/init_task.c
|
|
+++ b/arch/x86/kernel/init_task.c
|
|
@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
|
|
* way process stacks are handled. This is done by having a special
|
|
* "init_task" linker map entry..
|
|
*/
|
|
-union thread_union init_thread_union __init_task_data =
|
|
- { INIT_THREAD_INFO(init_task) };
|
|
+union thread_union init_thread_union __init_task_data;
|
|
|
|
/*
|
|
* Initial task structure.
|
|
@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
|
|
* section. Since TSS's are completely CPU-local, we want them
|
|
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
|
|
*/
|
|
-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
|
|
-
|
|
+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
|
|
+EXPORT_SYMBOL(init_tss);
|
|
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
|
|
index 8c96897..3ddf87c 100644
|
|
--- a/arch/x86/kernel/ioport.c
|
|
+++ b/arch/x86/kernel/ioport.c
|
|
@@ -54,7 +54,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
|
|
* because the ->io_bitmap_max value must match the bitmap
|
|
* contents:
|
|
*/
|
|
- tss = &per_cpu(init_tss, get_cpu());
|
|
+ tss = init_tss + get_cpu();
|
|
|
|
if (turn_on)
|
|
bitmap_clear(t->io_bitmap_ptr, from, num);
|
|
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
|
|
index a14a835..145b03af 100644
|
|
--- a/arch/x86/kernel/irq.c
|
|
+++ b/arch/x86/kernel/irq.c
|
|
@@ -18,7 +18,7 @@
|
|
#include <asm/mce.h>
|
|
#include <asm/hw_irq.h>
|
|
|
|
-atomic_t irq_err_count;
|
|
+atomic_unchecked_t irq_err_count;
|
|
|
|
/* Function pointer for generic interrupt vector handling */
|
|
void (*x86_platform_ipi_callback)(void) = NULL;
|
|
@@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
|
|
seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
|
|
seq_printf(p, " Machine check polls\n");
|
|
#endif
|
|
- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
|
|
+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
|
|
#if defined(CONFIG_X86_IO_APIC)
|
|
- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
|
|
+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
|
|
#endif
|
|
return 0;
|
|
}
|
|
diff --git a/arch/x86/kernel/irq.c.rej b/arch/x86/kernel/irq.c.rej
|
|
new file mode 100644
|
|
index 0000000..a8779b5
|
|
--- /dev/null
|
|
+++ b/arch/x86/kernel/irq.c.rej
|
|
@@ -0,0 +1,15 @@
|
|
+--- arch/x86/kernel/irq.c 2012-05-21 11:32:57.491927661 +0200
|
|
++++ arch/x86/kernel/irq.c 2012-05-21 12:10:09.552048890 +0200
|
|
+@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
|
|
+
|
|
+ u64 arch_irq_stat(void)
|
|
+ {
|
|
+- u64 sum = atomic_read(&irq_err_count);
|
|
++ u64 sum = atomic_read_unchecked(&irq_err_count);
|
|
+
|
|
+ #ifdef CONFIG_X86_IO_APIC
|
|
+- sum += atomic_read(&irq_mis_count);
|
|
++ sum += atomic_read_unchecked(&irq_mis_count);
|
|
+ #endif
|
|
+ return sum;
|
|
+ }
|
|
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
|
|
index 58b7f27..e112d08 100644
|
|
--- a/arch/x86/kernel/irq_32.c
|
|
+++ b/arch/x86/kernel/irq_32.c
|
|
@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
|
|
__asm__ __volatile__("andl %%esp,%0" :
|
|
"=r" (sp) : "0" (THREAD_SIZE - 1));
|
|
|
|
- return sp < (sizeof(struct thread_info) + STACK_WARN);
|
|
+ return sp < STACK_WARN;
|
|
}
|
|
|
|
static void print_stack_overflow(void)
|
|
@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
|
|
* per-CPU IRQ handling contexts (thread information and stack)
|
|
*/
|
|
union irq_ctx {
|
|
- struct thread_info tinfo;
|
|
- u32 stack[THREAD_SIZE/sizeof(u32)];
|
|
+ unsigned long previous_esp;
|
|
+ u32 stack[THREAD_SIZE/sizeof(u32)];
|
|
} __attribute__((aligned(THREAD_SIZE)));
|
|
|
|
static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
|
|
@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
|
|
static inline int
|
|
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
|
|
{
|
|
- union irq_ctx *curctx, *irqctx;
|
|
+ union irq_ctx *irqctx;
|
|
u32 *isp, arg1, arg2;
|
|
|
|
- curctx = (union irq_ctx *) current_thread_info();
|
|
irqctx = __this_cpu_read(hardirq_ctx);
|
|
|
|
/*
|
|
@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
|
|
* handler) we can't do that and just have to keep using the
|
|
* current stack (which is the irq stack already after all)
|
|
*/
|
|
- if (unlikely(curctx == irqctx))
|
|
+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
|
|
return 0;
|
|
|
|
/* build the stack frame on the IRQ stack */
|
|
- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
|
|
- irqctx->tinfo.task = curctx->tinfo.task;
|
|
- irqctx->tinfo.previous_esp = current_stack_pointer;
|
|
+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
|
|
+ irqctx->previous_esp = current_stack_pointer;
|
|
|
|
- /* Copy the preempt_count so that the [soft]irq checks work. */
|
|
- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ __set_fs(MAKE_MM_SEG(0));
|
|
+#endif
|
|
|
|
if (unlikely(overflow))
|
|
call_on_stack(print_stack_overflow, isp);
|
|
@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
|
|
: "0" (irq), "1" (desc), "2" (isp),
|
|
"D" (desc->handle_irq)
|
|
: "memory", "cc", "ecx");
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ __set_fs(current_thread_info()->addr_limit);
|
|
+#endif
|
|
+
|
|
return 1;
|
|
}
|
|
|
|
@@ -121,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
|
|
*/
|
|
void __cpuinit irq_ctx_init(int cpu)
|
|
{
|
|
- union irq_ctx *irqctx;
|
|
-
|
|
if (per_cpu(hardirq_ctx, cpu))
|
|
return;
|
|
|
|
- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
|
|
- THREAD_FLAGS,
|
|
- THREAD_ORDER));
|
|
- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
|
|
- irqctx->tinfo.cpu = cpu;
|
|
- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
|
|
- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
|
|
-
|
|
- per_cpu(hardirq_ctx, cpu) = irqctx;
|
|
-
|
|
- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
|
|
- THREAD_FLAGS,
|
|
- THREAD_ORDER));
|
|
- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
|
|
- irqctx->tinfo.cpu = cpu;
|
|
- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
|
|
-
|
|
- per_cpu(softirq_ctx, cpu) = irqctx;
|
|
+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
|
|
+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
|
|
|
|
printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
|
|
cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
|
|
@@ -152,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
|
|
asmlinkage void do_softirq(void)
|
|
{
|
|
unsigned long flags;
|
|
- struct thread_info *curctx;
|
|
union irq_ctx *irqctx;
|
|
u32 *isp;
|
|
|
|
@@ -162,15 +147,22 @@ asmlinkage void do_softirq(void)
|
|
local_irq_save(flags);
|
|
|
|
if (local_softirq_pending()) {
|
|
- curctx = current_thread_info();
|
|
irqctx = __this_cpu_read(softirq_ctx);
|
|
- irqctx->tinfo.task = curctx->task;
|
|
- irqctx->tinfo.previous_esp = current_stack_pointer;
|
|
+ irqctx->previous_esp = current_stack_pointer;
|
|
|
|
/* build the stack frame on the softirq stack */
|
|
- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
|
|
+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ __set_fs(MAKE_MM_SEG(0));
|
|
+#endif
|
|
|
|
call_on_stack(__do_softirq, isp);
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ __set_fs(current_thread_info()->addr_limit);
|
|
+#endif
|
|
+
|
|
/*
|
|
* Shouldn't happen, we returned above if in_interrupt():
|
|
*/
|
|
@@ -191,7 +183,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
|
|
if (unlikely(!desc))
|
|
return false;
|
|
|
|
- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
|
|
+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
|
|
if (unlikely(overflow))
|
|
print_stack_overflow();
|
|
desc->handle_irq(irq, desc);
|
|
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
|
|
index d04d3ec..ea4b374 100644
|
|
--- a/arch/x86/kernel/irq_64.c
|
|
+++ b/arch/x86/kernel/irq_64.c
|
|
@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
|
|
u64 estack_top, estack_bottom;
|
|
u64 curbase = (u64)task_stack_page(current);
|
|
|
|
- if (user_mode_vm(regs))
|
|
+ if (user_mode(regs))
|
|
return;
|
|
|
|
if (regs->sp >= curbase + sizeof(struct thread_info) +
|
|
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
|
|
index 1d5d31e..72731d4 100644
|
|
--- a/arch/x86/kernel/kdebugfs.c
|
|
+++ b/arch/x86/kernel/kdebugfs.c
|
|
@@ -27,7 +27,7 @@ struct setup_data_node {
|
|
u32 len;
|
|
};
|
|
|
|
-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
|
|
+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
|
|
size_t count, loff_t *ppos)
|
|
{
|
|
struct setup_data_node *node = file->private_data;
|
|
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
|
|
index 8bfb614..2b3b35f 100644
|
|
--- a/arch/x86/kernel/kgdb.c
|
|
+++ b/arch/x86/kernel/kgdb.c
|
|
@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
|
|
#ifdef CONFIG_X86_32
|
|
switch (regno) {
|
|
case GDB_SS:
|
|
- if (!user_mode_vm(regs))
|
|
+ if (!user_mode(regs))
|
|
*(unsigned long *)mem = __KERNEL_DS;
|
|
break;
|
|
case GDB_SP:
|
|
- if (!user_mode_vm(regs))
|
|
+ if (!user_mode(regs))
|
|
*(unsigned long *)mem = kernel_stack_pointer(regs);
|
|
break;
|
|
case GDB_GS:
|
|
@@ -476,12 +476,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
|
|
case 'k':
|
|
/* clear the trace bit */
|
|
linux_regs->flags &= ~X86_EFLAGS_TF;
|
|
- atomic_set(&kgdb_cpu_doing_single_step, -1);
|
|
+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
|
|
|
|
/* set the trace bit if we're stepping */
|
|
if (remcomInBuffer[0] == 's') {
|
|
linux_regs->flags |= X86_EFLAGS_TF;
|
|
- atomic_set(&kgdb_cpu_doing_single_step,
|
|
+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
|
|
raw_smp_processor_id());
|
|
}
|
|
|
|
@@ -546,7 +546,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
|
|
|
|
switch (cmd) {
|
|
case DIE_DEBUG:
|
|
- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
|
|
+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
|
|
if (user_mode(regs))
|
|
return single_step_cont(regs, args);
|
|
break;
|
|
diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
|
|
index c5e410e..da6aaf9 100644
|
|
--- a/arch/x86/kernel/kprobes-opt.c
|
|
+++ b/arch/x86/kernel/kprobes-opt.c
|
|
@@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
|
|
* Verify if the address gap is in 2GB range, because this uses
|
|
* a relative jump.
|
|
*/
|
|
- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
|
|
+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
|
|
if (abs(rel) > 0x7fffffff)
|
|
return -ERANGE;
|
|
|
|
@@ -359,11 +359,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
|
|
synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
|
|
|
|
/* Set probe function call */
|
|
- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
|
|
+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
|
|
|
|
/* Set returning jmp instruction at the tail of out-of-line buffer */
|
|
synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
|
|
- (u8 *)op->kp.addr + op->optinsn.size);
|
|
+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
|
|
|
|
flush_icache_range((unsigned long) buf,
|
|
(unsigned long) buf + TMPL_END_IDX +
|
|
@@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
|
|
((long)op->kp.addr + RELATIVEJUMP_SIZE));
|
|
|
|
/* Backup instructions which will be replaced by jump address */
|
|
- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
|
|
+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
|
|
RELATIVE_ADDR_SIZE);
|
|
|
|
insn_buf[0] = RELATIVEJUMP_OPCODE;
|
|
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
|
|
index 15d3c45..2d281b37 100644
|
|
--- a/arch/x86/kernel/kprobes.c
|
|
+++ b/arch/x86/kernel/kprobes.c
|
|
@@ -120,8 +120,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
|
|
} __attribute__((packed)) *insn;
|
|
|
|
insn = (struct __arch_relative_insn *)from;
|
|
+
|
|
+ pax_open_kernel();
|
|
insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
|
|
insn->op = op;
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
|
|
@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
|
|
kprobe_opcode_t opcode;
|
|
kprobe_opcode_t *orig_opcodes = opcodes;
|
|
|
|
- if (search_exception_tables((unsigned long)opcodes))
|
|
+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
|
|
return 0; /* Page fault may occur on this address. */
|
|
|
|
retry:
|
|
@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
|
|
/* Another subsystem puts a breakpoint, failed to recover */
|
|
if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
|
|
return 0;
|
|
+ pax_open_kernel();
|
|
memcpy(dest, insn.kaddr, insn.length);
|
|
+ pax_close_kernel();
|
|
|
|
#ifdef CONFIG_X86_64
|
|
if (insn_rip_relative(&insn)) {
|
|
@@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
|
|
newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
|
|
BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
|
|
disp = (u8 *) dest + insn_offset_displacement(&insn);
|
|
+ pax_open_kernel();
|
|
*(s32 *) disp = (s32) newdisp;
|
|
+ pax_close_kernel();
|
|
}
|
|
#endif
|
|
return insn.length;
|
|
@@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
|
|
* nor set current_kprobe, because it doesn't use single
|
|
* stepping.
|
|
*/
|
|
- regs->ip = (unsigned long)p->ainsn.insn;
|
|
+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
|
|
preempt_enable_no_resched();
|
|
return;
|
|
}
|
|
@@ -504,7 +511,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
|
|
if (p->opcode == BREAKPOINT_INSTRUCTION)
|
|
regs->ip = (unsigned long)p->addr;
|
|
else
|
|
- regs->ip = (unsigned long)p->ainsn.insn;
|
|
+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
|
|
}
|
|
|
|
/*
|
|
@@ -583,7 +590,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
|
|
setup_singlestep(p, regs, kcb, 0);
|
|
return 1;
|
|
}
|
|
- } else if (*addr != BREAKPOINT_INSTRUCTION) {
|
|
+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
|
|
/*
|
|
* The breakpoint instruction was removed right
|
|
* after we hit it. Another cpu has removed
|
|
@@ -628,6 +635,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
|
|
" movq %rax, 152(%rsp)\n"
|
|
RESTORE_REGS_STRING
|
|
" popfq\n"
|
|
+#ifdef KERNEXEC_PLUGIN
|
|
+ " btsq $63,(%rsp)\n"
|
|
+#endif
|
|
#else
|
|
" pushf\n"
|
|
SAVE_REGS_STRING
|
|
@@ -765,7 +775,7 @@ static void __kprobes
|
|
resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
|
|
{
|
|
unsigned long *tos = stack_addr(regs);
|
|
- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
|
|
+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
|
|
unsigned long orig_ip = (unsigned long)p->addr;
|
|
kprobe_opcode_t *insn = p->ainsn.insn;
|
|
|
|
@@ -947,7 +957,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
|
|
struct die_args *args = data;
|
|
int ret = NOTIFY_DONE;
|
|
|
|
- if (args->regs && user_mode_vm(args->regs))
|
|
+ if (args->regs && user_mode(args->regs))
|
|
return ret;
|
|
|
|
switch (val) {
|
|
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
|
|
index c37886d..5b33955 100644
|
|
--- a/arch/x86/kernel/ldt.c
|
|
+++ b/arch/x86/kernel/ldt.c
|
|
@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
|
|
if (reload) {
|
|
#ifdef CONFIG_SMP
|
|
preempt_disable();
|
|
- load_LDT(pc);
|
|
+ load_LDT_nolock(pc);
|
|
if (!cpumask_equal(mm_cpumask(current->mm),
|
|
cpumask_of(smp_processor_id())))
|
|
smp_call_function(flush_ldt, current->mm, 1);
|
|
preempt_enable();
|
|
#else
|
|
- load_LDT(pc);
|
|
+ load_LDT_nolock(pc);
|
|
#endif
|
|
}
|
|
if (oldsize) {
|
|
@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
|
|
return err;
|
|
|
|
for (i = 0; i < old->size; i++)
|
|
- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
|
|
+ write_ldt_entry(new->ldt, i, old->ldt + i);
|
|
return 0;
|
|
}
|
|
|
|
@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
|
|
retval = copy_ldt(&mm->context, &old_mm->context);
|
|
mutex_unlock(&old_mm->context.lock);
|
|
}
|
|
+
|
|
+ if (tsk == current) {
|
|
+ mm->context.vdso = 0;
|
|
+
|
|
+#ifdef CONFIG_X86_32
|
|
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
|
|
+ mm->context.user_cs_base = 0UL;
|
|
+ mm->context.user_cs_limit = ~0UL;
|
|
+
|
|
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
|
|
+ cpus_clear(mm->context.cpu_user_cs_mask);
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+ }
|
|
+
|
|
return retval;
|
|
}
|
|
|
|
@@ -234,6 +252,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
|
|
goto out_unlock;
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
|
|
+ error = -EINVAL;
|
|
+ goto out_unlock;
|
|
+ }
|
|
+#endif
|
|
+
|
|
fill_ldt(&ldt, &ldt_info);
|
|
if (oldmode)
|
|
ldt.avl = 0;
|
|
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
|
|
index 5b19e4d..6476a76 100644
|
|
--- a/arch/x86/kernel/machine_kexec_32.c
|
|
+++ b/arch/x86/kernel/machine_kexec_32.c
|
|
@@ -26,7 +26,7 @@
|
|
#include <asm/cacheflush.h>
|
|
#include <asm/debugreg.h>
|
|
|
|
-static void set_idt(void *newidt, __u16 limit)
|
|
+static void set_idt(struct desc_struct *newidt, __u16 limit)
|
|
{
|
|
struct desc_ptr curidt;
|
|
|
|
@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
|
|
}
|
|
|
|
|
|
-static void set_gdt(void *newgdt, __u16 limit)
|
|
+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
|
|
{
|
|
struct desc_ptr curgdt;
|
|
|
|
@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
|
|
}
|
|
|
|
control_page = page_address(image->control_code_page);
|
|
- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
|
|
+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
|
|
|
|
relocate_kernel_ptr = control_page;
|
|
page_list[PA_CONTROL_PAGE] = __pa(control_page);
|
|
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
|
|
index 0327e2b..e43737b 100644
|
|
--- a/arch/x86/kernel/microcode_intel.c
|
|
+++ b/arch/x86/kernel/microcode_intel.c
|
|
@@ -430,13 +430,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
|
|
|
|
static int get_ucode_user(void *to, const void *from, size_t n)
|
|
{
|
|
- return copy_from_user(to, from, n);
|
|
+ return copy_from_user(to, (const void __force_user *)from, n);
|
|
}
|
|
|
|
static enum ucode_state
|
|
request_microcode_user(int cpu, const void __user *buf, size_t size)
|
|
{
|
|
- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
|
|
+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
|
|
}
|
|
|
|
static void microcode_fini_cpu(int cpu)
|
|
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
|
|
index f21fd94..61565cd 100644
|
|
--- a/arch/x86/kernel/module.c
|
|
+++ b/arch/x86/kernel/module.c
|
|
@@ -35,15 +35,60 @@
|
|
#define DEBUGP(fmt...)
|
|
#endif
|
|
|
|
-void *module_alloc(unsigned long size)
|
|
+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
|
|
{
|
|
- if (PAGE_ALIGN(size) > MODULES_LEN)
|
|
+ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
|
|
return NULL;
|
|
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
|
|
- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
|
|
+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
|
|
-1, __builtin_return_address(0));
|
|
}
|
|
|
|
+void *module_alloc(unsigned long size)
|
|
+{
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ return __module_alloc(size, PAGE_KERNEL);
|
|
+#else
|
|
+ return __module_alloc(size, PAGE_KERNEL_EXEC);
|
|
+#endif
|
|
+
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+#ifdef CONFIG_X86_32
|
|
+void *module_alloc_exec(unsigned long size)
|
|
+{
|
|
+ struct vm_struct *area;
|
|
+
|
|
+ if (size == 0)
|
|
+ return NULL;
|
|
+
|
|
+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
|
|
+ return area ? area->addr : NULL;
|
|
+}
|
|
+EXPORT_SYMBOL(module_alloc_exec);
|
|
+
|
|
+void module_free_exec(struct module *mod, void *module_region)
|
|
+{
|
|
+ vunmap(module_region);
|
|
+}
|
|
+EXPORT_SYMBOL(module_free_exec);
|
|
+#else
|
|
+void module_free_exec(struct module *mod, void *module_region)
|
|
+{
|
|
+ module_free(mod, module_region);
|
|
+}
|
|
+EXPORT_SYMBOL(module_free_exec);
|
|
+
|
|
+void *module_alloc_exec(unsigned long size)
|
|
+{
|
|
+ return __module_alloc(size, PAGE_KERNEL_RX);
|
|
+}
|
|
+EXPORT_SYMBOL(module_alloc_exec);
|
|
+#endif
|
|
+#endif
|
|
+
|
|
#ifdef CONFIG_X86_32
|
|
int apply_relocate(Elf32_Shdr *sechdrs,
|
|
const char *strtab,
|
|
@@ -54,14 +99,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
|
|
unsigned int i;
|
|
Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
|
|
Elf32_Sym *sym;
|
|
- uint32_t *location;
|
|
+ uint32_t *plocation, location;
|
|
|
|
DEBUGP("Applying relocate section %u to %u\n", relsec,
|
|
sechdrs[relsec].sh_info);
|
|
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
|
|
/* This is where to make the change */
|
|
- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
|
|
- + rel[i].r_offset;
|
|
+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
|
|
+ location = (uint32_t)plocation;
|
|
+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
|
|
+ plocation = ktla_ktva((void *)plocation);
|
|
/* This is the symbol it is referring to. Note that all
|
|
undefined symbols have been resolved. */
|
|
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
|
|
@@ -70,11 +117,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
|
|
switch (ELF32_R_TYPE(rel[i].r_info)) {
|
|
case R_386_32:
|
|
/* We add the value into the location given */
|
|
- *location += sym->st_value;
|
|
+ pax_open_kernel();
|
|
+ *plocation += sym->st_value;
|
|
+ pax_close_kernel();
|
|
break;
|
|
case R_386_PC32:
|
|
/* Add the value, subtract its postition */
|
|
- *location += sym->st_value - (uint32_t)location;
|
|
+ pax_open_kernel();
|
|
+ *plocation += sym->st_value - location;
|
|
+ pax_close_kernel();
|
|
break;
|
|
default:
|
|
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
|
|
@@ -119,21 +170,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
|
|
case R_X86_64_NONE:
|
|
break;
|
|
case R_X86_64_64:
|
|
+ pax_open_kernel();
|
|
*(u64 *)loc = val;
|
|
+ pax_close_kernel();
|
|
break;
|
|
case R_X86_64_32:
|
|
+ pax_open_kernel();
|
|
*(u32 *)loc = val;
|
|
+ pax_close_kernel();
|
|
if (val != *(u32 *)loc)
|
|
goto overflow;
|
|
break;
|
|
case R_X86_64_32S:
|
|
+ pax_open_kernel();
|
|
*(s32 *)loc = val;
|
|
+ pax_close_kernel();
|
|
if ((s64)val != *(s32 *)loc)
|
|
goto overflow;
|
|
break;
|
|
case R_X86_64_PC32:
|
|
val -= (u64)loc;
|
|
+ pax_open_kernel();
|
|
*(u32 *)loc = val;
|
|
+ pax_close_kernel();
|
|
+
|
|
#if 0
|
|
if ((s64)val != *(s32 *)loc)
|
|
goto overflow;
|
|
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
|
|
index 32856fa..ce95eaa 100644
|
|
--- a/arch/x86/kernel/nmi.c
|
|
+++ b/arch/x86/kernel/nmi.c
|
|
@@ -507,6 +507,17 @@ static inline void nmi_nesting_postprocess(void)
|
|
dotraplinkage notrace __kprobes void
|
|
do_nmi(struct pt_regs *regs, long error_code)
|
|
{
|
|
+
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ if (!user_mode(regs)) {
|
|
+ unsigned long cs = regs->cs & 0xFFFF;
|
|
+ unsigned long ip = ktva_ktla(regs->ip);
|
|
+
|
|
+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
|
|
+ regs->ip = ip;
|
|
+ }
|
|
+#endif
|
|
+
|
|
nmi_nesting_preprocess(regs);
|
|
|
|
nmi_enter();
|
|
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
|
|
index 676b8c7..870ba04 100644
|
|
--- a/arch/x86/kernel/paravirt-spinlocks.c
|
|
+++ b/arch/x86/kernel/paravirt-spinlocks.c
|
|
@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
|
|
arch_spin_lock(lock);
|
|
}
|
|
|
|
-struct pv_lock_ops pv_lock_ops = {
|
|
+struct pv_lock_ops pv_lock_ops __read_only = {
|
|
#ifdef CONFIG_SMP
|
|
.spin_is_locked = __ticket_spin_is_locked,
|
|
.spin_is_contended = __ticket_spin_is_contended,
|
|
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
|
|
index 128323e..c09b79f 100644
|
|
--- a/arch/x86/kernel/paravirt.c
|
|
+++ b/arch/x86/kernel/paravirt.c
|
|
@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
|
|
{
|
|
return x;
|
|
}
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
|
|
+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
|
|
+#endif
|
|
|
|
void __init default_banner(void)
|
|
{
|
|
@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
|
|
if (opfunc == NULL)
|
|
/* If there's no function, patch it with a ud2a (BUG) */
|
|
ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
|
|
- else if (opfunc == _paravirt_nop)
|
|
+ else if (opfunc == (void *)_paravirt_nop)
|
|
/* If the operation is a nop, then nop the callsite */
|
|
ret = paravirt_patch_nop();
|
|
|
|
/* identity functions just return their single argument */
|
|
- else if (opfunc == _paravirt_ident_32)
|
|
+ else if (opfunc == (void *)_paravirt_ident_32)
|
|
ret = paravirt_patch_ident_32(insnbuf, len);
|
|
- else if (opfunc == _paravirt_ident_64)
|
|
+ else if (opfunc == (void *)_paravirt_ident_64)
|
|
+ ret = paravirt_patch_ident_64(insnbuf, len);
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
|
|
+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
|
|
ret = paravirt_patch_ident_64(insnbuf, len);
|
|
+#endif
|
|
|
|
else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
|
|
type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
|
|
@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
|
|
if (insn_len > len || start == NULL)
|
|
insn_len = len;
|
|
else
|
|
- memcpy(insnbuf, start, insn_len);
|
|
+ memcpy(insnbuf, ktla_ktva(start), insn_len);
|
|
|
|
return insn_len;
|
|
}
|
|
@@ -304,7 +311,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
|
|
return percpu_read(paravirt_lazy_mode);
|
|
}
|
|
|
|
-struct pv_info pv_info = {
|
|
+struct pv_info pv_info __read_only = {
|
|
.name = "bare hardware",
|
|
.paravirt_enabled = 0,
|
|
.kernel_rpl = 0,
|
|
@@ -315,16 +322,16 @@ struct pv_info pv_info = {
|
|
#endif
|
|
};
|
|
|
|
-struct pv_init_ops pv_init_ops = {
|
|
+struct pv_init_ops pv_init_ops __read_only = {
|
|
.patch = native_patch,
|
|
};
|
|
|
|
-struct pv_time_ops pv_time_ops = {
|
|
+struct pv_time_ops pv_time_ops __read_only = {
|
|
.sched_clock = native_sched_clock,
|
|
.steal_clock = native_steal_clock,
|
|
};
|
|
|
|
-struct pv_irq_ops pv_irq_ops = {
|
|
+struct pv_irq_ops pv_irq_ops __read_only = {
|
|
.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
|
|
.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
|
|
.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
|
|
@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
|
|
#endif
|
|
};
|
|
|
|
-struct pv_cpu_ops pv_cpu_ops = {
|
|
+struct pv_cpu_ops pv_cpu_ops __read_only = {
|
|
.cpuid = native_cpuid,
|
|
.get_debugreg = native_get_debugreg,
|
|
.set_debugreg = native_set_debugreg,
|
|
@@ -397,21 +404,26 @@ struct pv_cpu_ops pv_cpu_ops = {
|
|
.end_context_switch = paravirt_nop,
|
|
};
|
|
|
|
-struct pv_apic_ops pv_apic_ops = {
|
|
+struct pv_apic_ops pv_apic_ops __read_only = {
|
|
#ifdef CONFIG_X86_LOCAL_APIC
|
|
.startup_ipi_hook = paravirt_nop,
|
|
#endif
|
|
};
|
|
|
|
-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
|
|
+#ifdef CONFIG_X86_32
|
|
+#ifdef CONFIG_X86_PAE
|
|
+/* 64-bit pagetable entries */
|
|
+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
|
|
+#else
|
|
/* 32-bit pagetable entries */
|
|
#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
|
|
+#endif
|
|
#else
|
|
/* 64-bit pagetable entries */
|
|
#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
|
|
#endif
|
|
|
|
-struct pv_mmu_ops pv_mmu_ops = {
|
|
+struct pv_mmu_ops pv_mmu_ops __read_only = {
|
|
|
|
.read_cr2 = native_read_cr2,
|
|
.write_cr2 = native_write_cr2,
|
|
@@ -461,6 +473,7 @@ struct pv_mmu_ops pv_mmu_ops = {
|
|
.make_pud = PTE_IDENT,
|
|
|
|
.set_pgd = native_set_pgd,
|
|
+ .set_pgd_batched = native_set_pgd_batched,
|
|
#endif
|
|
#endif /* PAGETABLE_LEVELS >= 3 */
|
|
|
|
@@ -481,6 +494,12 @@ struct pv_mmu_ops pv_mmu_ops = {
|
|
},
|
|
|
|
.set_fixmap = native_set_fixmap,
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ .pax_open_kernel = native_pax_open_kernel,
|
|
+ .pax_close_kernel = native_pax_close_kernel,
|
|
+#endif
|
|
+
|
|
};
|
|
|
|
EXPORT_SYMBOL_GPL(pv_time_ops);
|
|
diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
|
|
index 35ccf75..7a15747 100644
|
|
--- a/arch/x86/kernel/pci-iommu_table.c
|
|
+++ b/arch/x86/kernel/pci-iommu_table.c
|
|
@@ -2,7 +2,7 @@
|
|
#include <asm/iommu_table.h>
|
|
#include <linux/string.h>
|
|
#include <linux/kallsyms.h>
|
|
-
|
|
+#include <linux/sched.h>
|
|
|
|
#define DEBUG 1
|
|
|
|
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
|
|
index fdd151c..66b0855 100644
|
|
--- a/arch/x86/kernel/process.c
|
|
+++ b/arch/x86/kernel/process.c
|
|
@@ -56,16 +56,33 @@ void free_thread_xstate(struct task_struct *tsk)
|
|
|
|
void free_thread_info(struct thread_info *ti)
|
|
{
|
|
- free_thread_xstate(ti->task);
|
|
free_pages((unsigned long)ti, THREAD_ORDER);
|
|
}
|
|
|
|
+static struct kmem_cache *task_struct_cachep;
|
|
+
|
|
void arch_task_cache_init(void)
|
|
{
|
|
- task_xstate_cachep =
|
|
- kmem_cache_create("task_xstate", xstate_size,
|
|
+ /* create a slab on which task_structs can be allocated */
|
|
+ task_struct_cachep =
|
|
+ kmem_cache_create("task_struct", sizeof(struct task_struct),
|
|
+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
|
|
+
|
|
+ task_xstate_cachep =
|
|
+ kmem_cache_create("task_xstate", xstate_size,
|
|
__alignof__(union thread_xstate),
|
|
- SLAB_PANIC | SLAB_NOTRACK, NULL);
|
|
+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
|
|
+}
|
|
+
|
|
+struct task_struct *alloc_task_struct_node(int node)
|
|
+{
|
|
+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
|
|
+}
|
|
+
|
|
+void free_task_struct(struct task_struct *task)
|
|
+{
|
|
+ free_thread_xstate(task);
|
|
+ kmem_cache_free(task_struct_cachep, task);
|
|
}
|
|
|
|
/*
|
|
@@ -78,7 +95,7 @@ void exit_thread(void)
|
|
unsigned long *bp = t->io_bitmap_ptr;
|
|
|
|
if (bp) {
|
|
- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
|
|
+ struct tss_struct *tss = init_tss + get_cpu();
|
|
|
|
t->io_bitmap_ptr = NULL;
|
|
clear_thread_flag(TIF_IO_BITMAP);
|
|
@@ -114,7 +131,7 @@ void show_regs_common(void)
|
|
|
|
printk(KERN_CONT "\n");
|
|
printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
|
|
- current->pid, current->comm, print_tainted(),
|
|
+ task_pid_nr(current), current->comm, print_tainted(),
|
|
init_utsname()->release,
|
|
(int)strcspn(init_utsname()->version, " "),
|
|
init_utsname()->version);
|
|
@@ -128,6 +145,9 @@ void flush_thread(void)
|
|
{
|
|
struct task_struct *tsk = current;
|
|
|
|
+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ loadsegment(gs, 0);
|
|
+#endif
|
|
flush_ptrace_hw_breakpoint(tsk);
|
|
memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
|
|
/*
|
|
@@ -290,10 +310,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
|
|
regs.di = (unsigned long) arg;
|
|
|
|
#ifdef CONFIG_X86_32
|
|
- regs.ds = __USER_DS;
|
|
- regs.es = __USER_DS;
|
|
+ regs.ds = __KERNEL_DS;
|
|
+ regs.es = __KERNEL_DS;
|
|
regs.fs = __KERNEL_PERCPU;
|
|
- regs.gs = __KERNEL_STACK_CANARY;
|
|
+ savesegment(gs, regs.gs);
|
|
#else
|
|
regs.ss = __KERNEL_DS;
|
|
#endif
|
|
@@ -379,7 +399,7 @@ static void __exit_idle(void)
|
|
void exit_idle(void)
|
|
{
|
|
/* idle loop has pid 0 */
|
|
- if (current->pid)
|
|
+ if (task_pid_nr(current))
|
|
return;
|
|
__exit_idle();
|
|
}
|
|
@@ -488,7 +508,7 @@ bool set_pm_idle_to_default(void)
|
|
|
|
return ret;
|
|
}
|
|
-void stop_this_cpu(void *dummy)
|
|
+__noreturn void stop_this_cpu(void *dummy)
|
|
{
|
|
local_irq_disable();
|
|
/*
|
|
@@ -730,16 +750,37 @@ static int __init idle_setup(char *str)
|
|
}
|
|
early_param("idle", idle_setup);
|
|
|
|
-unsigned long arch_align_stack(unsigned long sp)
|
|
+#ifdef CONFIG_PAX_RANDKSTACK
|
|
+void pax_randomize_kstack(struct pt_regs *regs)
|
|
{
|
|
- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
|
|
- sp -= get_random_int() % 8192;
|
|
- return sp & ~0xf;
|
|
-}
|
|
+ struct thread_struct *thread = ¤t->thread;
|
|
+ unsigned long time;
|
|
|
|
-unsigned long arch_randomize_brk(struct mm_struct *mm)
|
|
-{
|
|
- unsigned long range_end = mm->brk + 0x02000000;
|
|
- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
|
|
-}
|
|
+ if (!randomize_va_space)
|
|
+ return;
|
|
+
|
|
+ if (v8086_mode(regs))
|
|
+ return;
|
|
+
|
|
+ rdtscl(time);
|
|
+
|
|
+ /* P4 seems to return a 0 LSB, ignore it */
|
|
+#ifdef CONFIG_MPENTIUM4
|
|
+ time &= 0x3EUL;
|
|
+ time <<= 2;
|
|
+#elif defined(CONFIG_X86_64)
|
|
+ time &= 0xFUL;
|
|
+ time <<= 4;
|
|
+#else
|
|
+ time &= 0x1FUL;
|
|
+ time <<= 3;
|
|
+#endif
|
|
+
|
|
+ thread->sp0 ^= time;
|
|
+ load_sp0(init_tss + smp_processor_id(), thread);
|
|
|
|
+#ifdef CONFIG_X86_64
|
|
+ percpu_write(kernel_stack, thread->sp0);
|
|
+#endif
|
|
+}
|
|
+#endif
|
|
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
|
|
index ae68473..7b0bb71 100644
|
|
--- a/arch/x86/kernel/process_32.c
|
|
+++ b/arch/x86/kernel/process_32.c
|
|
@@ -64,6 +64,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
|
|
unsigned long thread_saved_pc(struct task_struct *tsk)
|
|
{
|
|
return ((unsigned long *)tsk->thread.sp)[3];
|
|
+//XXX return tsk->thread.eip;
|
|
}
|
|
|
|
void __show_regs(struct pt_regs *regs, int all)
|
|
@@ -73,15 +74,14 @@ void __show_regs(struct pt_regs *regs, int all)
|
|
unsigned long sp;
|
|
unsigned short ss, gs;
|
|
|
|
- if (user_mode_vm(regs)) {
|
|
+ if (user_mode(regs)) {
|
|
sp = regs->sp;
|
|
ss = regs->ss & 0xffff;
|
|
- gs = get_user_gs(regs);
|
|
} else {
|
|
sp = kernel_stack_pointer(regs);
|
|
savesegment(ss, ss);
|
|
- savesegment(gs, gs);
|
|
}
|
|
+ gs = get_user_gs(regs);
|
|
|
|
show_regs_common();
|
|
|
|
@@ -143,13 +143,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
|
|
struct task_struct *tsk;
|
|
int err;
|
|
|
|
- childregs = task_pt_regs(p);
|
|
+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
|
|
*childregs = *regs;
|
|
childregs->ax = 0;
|
|
childregs->sp = sp;
|
|
|
|
p->thread.sp = (unsigned long) childregs;
|
|
p->thread.sp0 = (unsigned long) (childregs+1);
|
|
+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
|
|
|
|
p->thread.ip = (unsigned long) ret_from_fork;
|
|
|
|
@@ -240,7 +241,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
|
struct thread_struct *prev = &prev_p->thread,
|
|
*next = &next_p->thread;
|
|
int cpu = smp_processor_id();
|
|
- struct tss_struct *tss = &per_cpu(init_tss, cpu);
|
|
+ struct tss_struct *tss = init_tss + cpu;
|
|
fpu_switch_t fpu;
|
|
|
|
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
|
|
@@ -264,6 +265,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
|
*/
|
|
lazy_save_gs(prev->gs);
|
|
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ __set_fs(task_thread_info(next_p)->addr_limit);
|
|
+#endif
|
|
+
|
|
/*
|
|
* Load the per-thread Thread-Local Storage descriptor.
|
|
*/
|
|
@@ -294,6 +299,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
|
*/
|
|
arch_end_context_switch(next_p);
|
|
|
|
+ percpu_write(current_task, next_p);
|
|
+ percpu_write(current_tinfo, &next_p->tinfo);
|
|
+
|
|
/*
|
|
* Restore %gs if needed (which is common)
|
|
*/
|
|
@@ -302,8 +310,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
|
|
|
switch_fpu_finish(next_p, fpu);
|
|
|
|
- percpu_write(current_task, next_p);
|
|
-
|
|
return prev_p;
|
|
}
|
|
|
|
@@ -333,4 +339,3 @@ unsigned long get_wchan(struct task_struct *p)
|
|
} while (count++ < 16);
|
|
return 0;
|
|
}
|
|
-
|
|
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
|
|
index bb390e1..f386b16 100644
|
|
--- a/arch/x86/kernel/process_64.c
|
|
+++ b/arch/x86/kernel/process_64.c
|
|
@@ -162,8 +162,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
|
|
struct pt_regs *childregs;
|
|
struct task_struct *me = current;
|
|
|
|
- childregs = ((struct pt_regs *)
|
|
- (THREAD_SIZE + task_stack_page(p))) - 1;
|
|
+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
|
|
*childregs = *regs;
|
|
|
|
childregs->ax = 0;
|
|
@@ -175,6 +174,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
|
|
p->thread.sp = (unsigned long) childregs;
|
|
p->thread.sp0 = (unsigned long) (childregs+1);
|
|
p->thread.usersp = me->thread.usersp;
|
|
+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
|
|
|
|
set_tsk_thread_flag(p, TIF_FORK);
|
|
|
|
@@ -280,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
|
struct thread_struct *prev = &prev_p->thread;
|
|
struct thread_struct *next = &next_p->thread;
|
|
int cpu = smp_processor_id();
|
|
- struct tss_struct *tss = &per_cpu(init_tss, cpu);
|
|
+ struct tss_struct *tss = init_tss + cpu;
|
|
unsigned fsindex, gsindex;
|
|
fpu_switch_t fpu;
|
|
|
|
@@ -407,10 +407,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
|
prev->usersp = percpu_read(old_rsp);
|
|
percpu_write(old_rsp, next->usersp);
|
|
percpu_write(current_task, next_p);
|
|
+ percpu_write(current_tinfo, &next_p->tinfo);
|
|
|
|
- percpu_write(kernel_stack,
|
|
- (unsigned long)task_stack_page(next_p) +
|
|
- THREAD_SIZE - KERNEL_STACK_OFFSET);
|
|
+ percpu_write(kernel_stack, next->sp0);
|
|
|
|
/*
|
|
* Now maybe reload the debug registers and handle I/O bitmaps
|
|
@@ -479,12 +478,11 @@ unsigned long get_wchan(struct task_struct *p)
|
|
if (!p || p == current || p->state == TASK_RUNNING)
|
|
return 0;
|
|
stack = (unsigned long)task_stack_page(p);
|
|
- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
|
|
+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
|
|
return 0;
|
|
fp = *(u64 *)(p->thread.sp);
|
|
do {
|
|
- if (fp < (unsigned long)stack ||
|
|
- fp >= (unsigned long)stack+THREAD_SIZE)
|
|
+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
|
|
return 0;
|
|
ip = *(u64 *)(fp+8);
|
|
if (!in_sched_functions(ip))
|
|
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
|
|
index 9ee1787..73d8000 100644
|
|
--- a/arch/x86/kernel/ptrace.c
|
|
+++ b/arch/x86/kernel/ptrace.c
|
|
@@ -854,7 +854,7 @@ long arch_ptrace(struct task_struct *child, long request,
|
|
unsigned long addr, unsigned long data)
|
|
{
|
|
int ret;
|
|
- unsigned long __user *datap = (unsigned long __user *)data;
|
|
+ unsigned long __user *datap = (__force unsigned long __user *)data;
|
|
|
|
switch (request) {
|
|
/* read the word at location addr in the USER area. */
|
|
@@ -939,14 +939,14 @@ long arch_ptrace(struct task_struct *child, long request,
|
|
if ((int) addr < 0)
|
|
return -EIO;
|
|
ret = do_get_thread_area(child, addr,
|
|
- (struct user_desc __user *)data);
|
|
+ (__force struct user_desc __user *) data);
|
|
break;
|
|
|
|
case PTRACE_SET_THREAD_AREA:
|
|
if ((int) addr < 0)
|
|
return -EIO;
|
|
ret = do_set_thread_area(child, addr,
|
|
- (struct user_desc __user *)data, 0);
|
|
+ (__force struct user_desc __user *) data, 0);
|
|
break;
|
|
#endif
|
|
|
|
@@ -1456,7 +1456,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
|
|
memset(info, 0, sizeof(*info));
|
|
info->si_signo = SIGTRAP;
|
|
info->si_code = si_code;
|
|
- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
|
|
+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
|
|
}
|
|
|
|
void user_single_step_siginfo(struct task_struct *tsk,
|
|
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
|
|
index 42eb330..139955c 100644
|
|
--- a/arch/x86/kernel/pvclock.c
|
|
+++ b/arch/x86/kernel/pvclock.c
|
|
@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
|
|
return pv_tsc_khz;
|
|
}
|
|
|
|
-static atomic64_t last_value = ATOMIC64_INIT(0);
|
|
+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
|
|
|
|
void pvclock_resume(void)
|
|
{
|
|
- atomic64_set(&last_value, 0);
|
|
+ atomic64_set_unchecked(&last_value, 0);
|
|
}
|
|
|
|
cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
|
|
@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
|
|
* updating at the same time, and one of them could be slightly behind,
|
|
* making the assumption that last_value always go forward fail to hold.
|
|
*/
|
|
- last = atomic64_read(&last_value);
|
|
+ last = atomic64_read_unchecked(&last_value);
|
|
do {
|
|
if (ret < last)
|
|
return last;
|
|
- last = atomic64_cmpxchg(&last_value, last, ret);
|
|
+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
|
|
} while (unlikely(last != ret));
|
|
|
|
return ret;
|
|
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
|
|
index 310cb01..864bed3 100644
|
|
--- a/arch/x86/kernel/reboot.c
|
|
+++ b/arch/x86/kernel/reboot.c
|
|
@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
|
|
EXPORT_SYMBOL(pm_power_off);
|
|
|
|
static const struct desc_ptr no_idt = {};
|
|
-static int reboot_mode;
|
|
+static unsigned short reboot_mode;
|
|
enum reboot_type reboot_type = BOOT_ACPI;
|
|
int reboot_force;
|
|
|
|
@@ -335,13 +335,17 @@ core_initcall(reboot_init);
|
|
extern const unsigned char machine_real_restart_asm[];
|
|
extern const u64 machine_real_restart_gdt[3];
|
|
|
|
-void machine_real_restart(unsigned int type)
|
|
+__noreturn void machine_real_restart(unsigned int type)
|
|
{
|
|
void *restart_va;
|
|
unsigned long restart_pa;
|
|
- void (*restart_lowmem)(unsigned int);
|
|
+ void (* __noreturn restart_lowmem)(unsigned int);
|
|
u64 *lowmem_gdt;
|
|
|
|
+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
|
|
+ struct desc_struct *gdt;
|
|
+#endif
|
|
+
|
|
local_irq_disable();
|
|
|
|
/* Write zero to CMOS register number 0x0f, which the BIOS POST
|
|
@@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
|
|
boot)". This seems like a fairly standard thing that gets set by
|
|
REBOOT.COM programs, and the previous reset routine did this
|
|
too. */
|
|
- *((unsigned short *)0x472) = reboot_mode;
|
|
+ *(unsigned short *)(__va(0x472)) = reboot_mode;
|
|
|
|
/* Patch the GDT in the low memory trampoline */
|
|
lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
|
|
|
|
restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
|
|
restart_pa = virt_to_phys(restart_va);
|
|
- restart_lowmem = (void (*)(unsigned int))restart_pa;
|
|
+ restart_lowmem = (void *)restart_pa;
|
|
|
|
/* GDT[0]: GDT self-pointer */
|
|
lowmem_gdt[0] =
|
|
@@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
|
|
GDT_ENTRY(0x009b, restart_pa, 0xffff);
|
|
|
|
/* Jump to the identity-mapped low memory code */
|
|
+
|
|
+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
|
|
+ gdt = get_cpu_gdt_table(smp_processor_id());
|
|
+ pax_open_kernel();
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
|
|
+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
|
|
+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
|
|
+#endif
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
|
|
+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
|
|
+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
|
|
+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
|
|
+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
|
|
+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
|
|
+#endif
|
|
+ pax_close_kernel();
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
|
|
+ unreachable();
|
|
+#else
|
|
restart_lowmem(type);
|
|
+#endif
|
|
+
|
|
}
|
|
#ifdef CONFIG_APM_MODULE
|
|
EXPORT_SYMBOL(machine_real_restart);
|
|
@@ -591,7 +621,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
|
|
* try to force a triple fault and then cycle between hitting the keyboard
|
|
* controller and doing that
|
|
*/
|
|
-static void native_machine_emergency_restart(void)
|
|
+__noreturn static void native_machine_emergency_restart(void)
|
|
{
|
|
int i;
|
|
int attempt = 0;
|
|
@@ -721,13 +751,13 @@ void native_machine_shutdown(void)
|
|
#endif
|
|
}
|
|
|
|
-static void __machine_emergency_restart(int emergency)
|
|
+static __noreturn void __machine_emergency_restart(int emergency)
|
|
{
|
|
reboot_emergency = emergency;
|
|
machine_ops.emergency_restart();
|
|
}
|
|
|
|
-static void native_machine_restart(char *__unused)
|
|
+static __noreturn void native_machine_restart(char *__unused)
|
|
{
|
|
printk("machine restart\n");
|
|
|
|
@@ -736,7 +766,7 @@ static void native_machine_restart(char *__unused)
|
|
__machine_emergency_restart(0);
|
|
}
|
|
|
|
-static void native_machine_halt(void)
|
|
+static __noreturn void native_machine_halt(void)
|
|
{
|
|
/* stop other cpus and apics */
|
|
machine_shutdown();
|
|
@@ -747,7 +777,7 @@ static void native_machine_halt(void)
|
|
stop_this_cpu(NULL);
|
|
}
|
|
|
|
-static void native_machine_power_off(void)
|
|
+__noreturn static void native_machine_power_off(void)
|
|
{
|
|
if (pm_power_off) {
|
|
if (!reboot_force)
|
|
@@ -756,6 +786,7 @@ static void native_machine_power_off(void)
|
|
}
|
|
/* a fallback in case there is no PM info available */
|
|
tboot_shutdown(TB_SHUTDOWN_HALT);
|
|
+ unreachable();
|
|
}
|
|
|
|
struct machine_ops machine_ops = {
|
|
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
|
|
index f2bb9c9..b5a911a 100644
|
|
--- a/arch/x86/kernel/relocate_kernel_64.S
|
|
+++ b/arch/x86/kernel/relocate_kernel_64.S
|
|
@@ -11,6 +11,7 @@
|
|
#include <asm/kexec.h>
|
|
#include <asm/processor-flags.h>
|
|
#include <asm/pgtable_types.h>
|
|
+#include <asm/alternative-asm.h>
|
|
|
|
/*
|
|
* Must be relocatable PIC code callable as a C function
|
|
diff --git a/arch/x86/kernel/relocate_kernel_64.S.rej b/arch/x86/kernel/relocate_kernel_64.S.rej
|
|
new file mode 100644
|
|
index 0000000..2ff6a6df
|
|
--- /dev/null
|
|
+++ b/arch/x86/kernel/relocate_kernel_64.S.rej
|
|
@@ -0,0 +1,18 @@
|
|
+--- arch/x86/kernel/relocate_kernel_64.S 2011-10-24 12:48:26.271091772 +0200
|
|
++++ arch/x86/kernel/relocate_kernel_64.S 2012-05-21 12:10:09.608048893 +0200
|
|
+@@ -161,13 +162,14 @@ identity_mapped:
|
|
+ xorq %rbp, %rbp
|
|
+ xorq %r8, %r8
|
|
+ xorq %r9, %r9
|
|
+- xorq %r10, %r9
|
|
++ xorq %r10, %r10
|
|
+ xorq %r11, %r11
|
|
+ xorq %r12, %r12
|
|
+ xorq %r13, %r13
|
|
+ xorq %r14, %r14
|
|
+ xorq %r15, %r15
|
|
+
|
|
++ pax_force_retaddr 0, 1
|
|
+ ret
|
|
+
|
|
+ 1:
|
|
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
|
|
index 436aa6d..e0723df 100644
|
|
--- a/arch/x86/kernel/setup.c
|
|
+++ b/arch/x86/kernel/setup.c
|
|
@@ -448,7 +448,7 @@ static void __init parse_setup_data(void)
|
|
|
|
switch (data->type) {
|
|
case SETUP_E820_EXT:
|
|
- parse_e820_ext(data);
|
|
+ parse_e820_ext((struct setup_data __force_kernel *)data);
|
|
break;
|
|
case SETUP_DTB:
|
|
add_dtb(pa_data);
|
|
@@ -842,14 +842,14 @@ void __init setup_arch(char **cmdline_p)
|
|
|
|
if (!boot_params.hdr.root_flags)
|
|
root_mountflags &= ~MS_RDONLY;
|
|
- init_mm.start_code = (unsigned long) _text;
|
|
- init_mm.end_code = (unsigned long) _etext;
|
|
+ init_mm.start_code = ktla_ktva((unsigned long) _text);
|
|
+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
|
|
init_mm.end_data = (unsigned long) _edata;
|
|
init_mm.brk = _brk_end;
|
|
|
|
- code_resource.start = virt_to_phys(_text);
|
|
- code_resource.end = virt_to_phys(_etext)-1;
|
|
- data_resource.start = virt_to_phys(_etext);
|
|
+ code_resource.start = virt_to_phys(ktla_ktva(_text));
|
|
+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
|
|
+ data_resource.start = virt_to_phys(_sdata);
|
|
data_resource.end = virt_to_phys(_edata)-1;
|
|
bss_resource.start = virt_to_phys(&__bss_start);
|
|
bss_resource.end = virt_to_phys(&__bss_stop)-1;
|
|
diff --git a/arch/x86/kernel/setup.c.rej b/arch/x86/kernel/setup.c.rej
|
|
new file mode 100644
|
|
index 0000000..27c0bc0
|
|
--- /dev/null
|
|
+++ b/arch/x86/kernel/setup.c.rej
|
|
@@ -0,0 +1,10 @@
|
|
+--- arch/x86/kernel/setup.c 2012-05-21 11:32:57.635927669 +0200
|
|
++++ arch/x86/kernel/setup.c 2012-05-21 12:10:09.608048893 +0200
|
|
+@@ -639,7 +639,7 @@ static void __init trim_bios_range(void)
|
|
+ * area (640->1Mb) as ram even though it is not.
|
|
+ * take them out.
|
|
+ */
|
|
+- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
|
|
++ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
|
|
+ sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
|
|
+ }
|
|
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
|
|
index 5a98aa2..2f9288d 100644
|
|
--- a/arch/x86/kernel/setup_percpu.c
|
|
+++ b/arch/x86/kernel/setup_percpu.c
|
|
@@ -21,19 +21,17 @@
|
|
#include <asm/cpu.h>
|
|
#include <asm/stackprotector.h>
|
|
|
|
-DEFINE_PER_CPU(int, cpu_number);
|
|
+#ifdef CONFIG_SMP
|
|
+DEFINE_PER_CPU(unsigned int, cpu_number);
|
|
EXPORT_PER_CPU_SYMBOL(cpu_number);
|
|
+#endif
|
|
|
|
-#ifdef CONFIG_X86_64
|
|
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
|
|
-#else
|
|
-#define BOOT_PERCPU_OFFSET 0
|
|
-#endif
|
|
|
|
DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
|
|
EXPORT_PER_CPU_SYMBOL(this_cpu_off);
|
|
|
|
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
|
|
+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
|
|
[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
|
|
};
|
|
EXPORT_SYMBOL(__per_cpu_offset);
|
|
@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
|
|
{
|
|
#ifdef CONFIG_X86_32
|
|
struct desc_struct gdt;
|
|
+ unsigned long base = per_cpu_offset(cpu);
|
|
|
|
- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
|
|
- 0x2 | DESCTYPE_S, 0x8);
|
|
- gdt.s = 1;
|
|
+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
|
|
+ 0x83 | DESCTYPE_S, 0xC);
|
|
write_gdt_entry(get_cpu_gdt_table(cpu),
|
|
GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
|
|
#endif
|
|
@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
|
|
/* alrighty, percpu areas up and running */
|
|
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
|
|
for_each_possible_cpu(cpu) {
|
|
+#ifdef CONFIG_CC_STACKPROTECTOR
|
|
+#ifdef CONFIG_X86_32
|
|
+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
|
|
+#endif
|
|
+#endif
|
|
per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
|
|
per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
|
|
per_cpu(cpu_number, cpu) = cpu;
|
|
@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
|
|
*/
|
|
set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
|
|
#endif
|
|
+#ifdef CONFIG_CC_STACKPROTECTOR
|
|
+#ifdef CONFIG_X86_32
|
|
+ if (!cpu)
|
|
+ per_cpu(stack_canary.canary, cpu) = canary;
|
|
+#endif
|
|
+#endif
|
|
/*
|
|
* Up to this point, the boot CPU has been using .init.data
|
|
* area. Reload any changed state for the boot CPU.
|
|
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
|
|
index 115eac4..c0591d5 100644
|
|
--- a/arch/x86/kernel/signal.c
|
|
+++ b/arch/x86/kernel/signal.c
|
|
@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
|
|
* Align the stack pointer according to the i386 ABI,
|
|
* i.e. so that on function entry ((sp + 4) & 15) == 0.
|
|
*/
|
|
- sp = ((sp + 4) & -16ul) - 4;
|
|
+ sp = ((sp - 12) & -16ul) - 4;
|
|
#else /* !CONFIG_X86_32 */
|
|
sp = round_down(sp, 16) - 8;
|
|
#endif
|
|
@@ -241,11 +241,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
|
|
* Return an always-bogus address instead so we will die with SIGSEGV.
|
|
*/
|
|
if (onsigstack && !likely(on_sig_stack(sp)))
|
|
- return (void __user *)-1L;
|
|
+ return (__force void __user *)-1L;
|
|
|
|
/* save i387 state */
|
|
if (used_math() && save_i387_xstate(*fpstate) < 0)
|
|
- return (void __user *)-1L;
|
|
+ return (__force void __user *)-1L;
|
|
|
|
return (void __user *)sp;
|
|
}
|
|
@@ -300,9 +300,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
|
|
}
|
|
|
|
if (current->mm->context.vdso)
|
|
- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
|
|
+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
|
|
else
|
|
- restorer = &frame->retcode;
|
|
+ restorer = (void __user *)&frame->retcode;
|
|
if (ka->sa.sa_flags & SA_RESTORER)
|
|
restorer = ka->sa.sa_restorer;
|
|
|
|
@@ -316,7 +316,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
|
|
* reasons and because gdb uses it as a signature to notice
|
|
* signal handler stack frames.
|
|
*/
|
|
- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
|
|
+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
|
|
|
|
if (err)
|
|
return -EFAULT;
|
|
@@ -370,7 +370,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
|
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
|
|
|
|
/* Set up to return from userspace. */
|
|
- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
|
|
+ if (current->mm->context.vdso)
|
|
+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
|
|
+ else
|
|
+ restorer = (void __user *)&frame->retcode;
|
|
if (ka->sa.sa_flags & SA_RESTORER)
|
|
restorer = ka->sa.sa_restorer;
|
|
put_user_ex(restorer, &frame->pretcode);
|
|
@@ -382,7 +385,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
|
* reasons and because gdb uses it as a signature to notice
|
|
* signal handler stack frames.
|
|
*/
|
|
- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
|
|
+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
|
|
} put_user_catch(err);
|
|
|
|
if (err)
|
|
@@ -773,7 +776,7 @@ static void do_signal(struct pt_regs *regs)
|
|
* X86_32: vm86 regs switched out by assembly code before reaching
|
|
* here, so testing against kernel CS suffices.
|
|
*/
|
|
- if (!user_mode(regs))
|
|
+ if (!user_mode_novm(regs))
|
|
return;
|
|
|
|
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
|
|
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
|
|
index c7dbf02..f2ce791 100644
|
|
--- a/arch/x86/kernel/smpboot.c
|
|
+++ b/arch/x86/kernel/smpboot.c
|
|
@@ -708,17 +708,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
|
|
set_idle_for_cpu(cpu, c_idle.idle);
|
|
do_rest:
|
|
per_cpu(current_task, cpu) = c_idle.idle;
|
|
+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
|
|
#ifdef CONFIG_X86_32
|
|
/* Stack for startup_32 can be just as for start_secondary onwards */
|
|
irq_ctx_init(cpu);
|
|
#else
|
|
clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
|
|
initial_gs = per_cpu_offset(cpu);
|
|
- per_cpu(kernel_stack, cpu) =
|
|
- (unsigned long)task_stack_page(c_idle.idle) -
|
|
- KERNEL_STACK_OFFSET + THREAD_SIZE;
|
|
+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
|
|
#endif
|
|
+
|
|
+ pax_open_kernel();
|
|
early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
|
|
+ pax_close_kernel();
|
|
+
|
|
initial_code = (unsigned long)start_secondary;
|
|
stack_start = c_idle.idle->thread.sp;
|
|
|
|
@@ -863,6 +866,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
|
|
/* the FPU context is blank, nobody can own it */
|
|
__cpu_disable_lazy_restore(cpu);
|
|
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
|
|
+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
|
|
+ KERNEL_PGD_PTRS);
|
|
+#endif
|
|
+
|
|
err = do_boot_cpu(apicid, cpu);
|
|
if (err) {
|
|
pr_debug("do_boot_cpu failed %d\n", err);
|
|
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
|
|
index f89cdc6..6020875 100644
|
|
--- a/arch/x86/kernel/step.c
|
|
+++ b/arch/x86/kernel/step.c
|
|
@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
|
|
struct desc_struct *desc;
|
|
unsigned long base;
|
|
|
|
- seg &= ~7UL;
|
|
+ seg >>= 3;
|
|
|
|
mutex_lock(&child->mm->context.lock);
|
|
- if (unlikely((seg >> 3) >= child->mm->context.size))
|
|
+ if (unlikely(seg >= child->mm->context.size))
|
|
addr = -1L; /* bogus selector, access would fault */
|
|
else {
|
|
desc = child->mm->context.ldt + seg;
|
|
@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
|
|
addr += base;
|
|
}
|
|
mutex_unlock(&child->mm->context.lock);
|
|
- }
|
|
+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
|
|
+ addr = ktla_ktva(addr);
|
|
|
|
return addr;
|
|
}
|
|
@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
|
|
unsigned char opcode[15];
|
|
unsigned long addr = convert_ip_to_linear(child, regs);
|
|
|
|
+ if (addr == -EINVAL)
|
|
+ return 0;
|
|
+
|
|
copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
|
|
for (i = 0; i < copied; i++) {
|
|
switch (opcode[i]) {
|
|
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
|
|
index 0b0cb5f..db6b9ed 100644
|
|
--- a/arch/x86/kernel/sys_i386_32.c
|
|
+++ b/arch/x86/kernel/sys_i386_32.c
|
|
@@ -24,17 +24,224 @@
|
|
|
|
#include <asm/syscalls.h>
|
|
|
|
-/*
|
|
- * Do a system call from kernel instead of calling sys_execve so we
|
|
- * end up with proper pt_regs.
|
|
- */
|
|
-int kernel_execve(const char *filename,
|
|
- const char *const argv[],
|
|
- const char *const envp[])
|
|
+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
|
|
+{
|
|
+ unsigned long pax_task_size = TASK_SIZE;
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
|
|
+ pax_task_size = SEGMEXEC_TASK_SIZE;
|
|
+#endif
|
|
+
|
|
+ if (len > pax_task_size || addr > pax_task_size - len)
|
|
+ return -EINVAL;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+unsigned long
|
|
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|
+ unsigned long len, unsigned long pgoff, unsigned long flags)
|
|
+{
|
|
+ struct mm_struct *mm = current->mm;
|
|
+ struct vm_area_struct *vma;
|
|
+ unsigned long start_addr, pax_task_size = TASK_SIZE;
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
|
|
+ pax_task_size = SEGMEXEC_TASK_SIZE;
|
|
+#endif
|
|
+
|
|
+ pax_task_size -= PAGE_SIZE;
|
|
+
|
|
+ if (len > pax_task_size)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ if (flags & MAP_FIXED)
|
|
+ return addr;
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
|
|
+#endif
|
|
+
|
|
+ if (addr) {
|
|
+ addr = PAGE_ALIGN(addr);
|
|
+ if (pax_task_size - len >= addr) {
|
|
+ vma = find_vma(mm, addr);
|
|
+ if (check_heap_stack_gap(vma, addr, len))
|
|
+ return addr;
|
|
+ }
|
|
+ }
|
|
+ if (len > mm->cached_hole_size) {
|
|
+ start_addr = addr = mm->free_area_cache;
|
|
+ } else {
|
|
+ start_addr = addr = mm->mmap_base;
|
|
+ mm->cached_hole_size = 0;
|
|
+ }
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
|
|
+ start_addr = 0x00110000UL;
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
+ start_addr += mm->delta_mmap & 0x03FFF000UL;
|
|
+#endif
|
|
+
|
|
+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
|
|
+ start_addr = addr = mm->mmap_base;
|
|
+ else
|
|
+ addr = start_addr;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+full_search:
|
|
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
|
|
+ /* At this point: (!vma || addr < vma->vm_end). */
|
|
+ if (pax_task_size - len < addr) {
|
|
+ /*
|
|
+ * Start a new search - just in case we missed
|
|
+ * some holes.
|
|
+ */
|
|
+ if (start_addr != mm->mmap_base) {
|
|
+ start_addr = addr = mm->mmap_base;
|
|
+ mm->cached_hole_size = 0;
|
|
+ goto full_search;
|
|
+ }
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ if (check_heap_stack_gap(vma, addr, len))
|
|
+ break;
|
|
+ if (addr + mm->cached_hole_size < vma->vm_start)
|
|
+ mm->cached_hole_size = vma->vm_start - addr;
|
|
+ addr = vma->vm_end;
|
|
+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
|
|
+ start_addr = addr = mm->mmap_base;
|
|
+ mm->cached_hole_size = 0;
|
|
+ goto full_search;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Remember the place where we stopped the search:
|
|
+ */
|
|
+ mm->free_area_cache = addr + len;
|
|
+ return addr;
|
|
+}
|
|
+
|
|
+unsigned long
|
|
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
+ const unsigned long len, const unsigned long pgoff,
|
|
+ const unsigned long flags)
|
|
{
|
|
- long __res;
|
|
- asm volatile ("int $0x80"
|
|
- : "=a" (__res)
|
|
- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
|
|
- return __res;
|
|
+ struct vm_area_struct *vma;
|
|
+ struct mm_struct *mm = current->mm;
|
|
+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
|
|
+ pax_task_size = SEGMEXEC_TASK_SIZE;
|
|
+#endif
|
|
+
|
|
+ pax_task_size -= PAGE_SIZE;
|
|
+
|
|
+ /* requested length too big for entire address space */
|
|
+ if (len > pax_task_size)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ if (flags & MAP_FIXED)
|
|
+ return addr;
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
|
|
+ goto bottomup;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
|
|
+#endif
|
|
+
|
|
+ /* requesting a specific address */
|
|
+ if (addr) {
|
|
+ addr = PAGE_ALIGN(addr);
|
|
+ if (pax_task_size - len >= addr) {
|
|
+ vma = find_vma(mm, addr);
|
|
+ if (check_heap_stack_gap(vma, addr, len))
|
|
+ return addr;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* check if free_area_cache is useful for us */
|
|
+ if (len <= mm->cached_hole_size) {
|
|
+ mm->cached_hole_size = 0;
|
|
+ mm->free_area_cache = mm->mmap_base;
|
|
+ }
|
|
+
|
|
+ /* either no address requested or can't fit in requested address hole */
|
|
+ addr = mm->free_area_cache;
|
|
+
|
|
+ /* make sure it can fit in the remaining address space */
|
|
+ if (addr > len) {
|
|
+ vma = find_vma(mm, addr-len);
|
|
+ if (check_heap_stack_gap(vma, addr - len, len))
|
|
+ /* remember the address as a hint for next time */
|
|
+ return (mm->free_area_cache = addr-len);
|
|
+ }
|
|
+
|
|
+ if (mm->mmap_base < len)
|
|
+ goto bottomup;
|
|
+
|
|
+ addr = mm->mmap_base-len;
|
|
+
|
|
+ do {
|
|
+ /*
|
|
+ * Lookup failure means no vma is above this address,
|
|
+ * else if new region fits below vma->vm_start,
|
|
+ * return with success:
|
|
+ */
|
|
+ vma = find_vma(mm, addr);
|
|
+ if (check_heap_stack_gap(vma, addr, len))
|
|
+ /* remember the address as a hint for next time */
|
|
+ return (mm->free_area_cache = addr);
|
|
+
|
|
+ /* remember the largest hole we saw so far */
|
|
+ if (addr + mm->cached_hole_size < vma->vm_start)
|
|
+ mm->cached_hole_size = vma->vm_start - addr;
|
|
+
|
|
+ /* try just below the current vma->vm_start */
|
|
+ addr = skip_heap_stack_gap(vma, len);
|
|
+ } while (!IS_ERR_VALUE(addr));
|
|
+
|
|
+bottomup:
|
|
+ /*
|
|
+ * A failed mmap() very likely causes application failure,
|
|
+ * so fall back to the bottom-up function here. This scenario
|
|
+ * can happen with large stack limits and large mmap()
|
|
+ * allocations.
|
|
+ */
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
|
|
+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
|
|
+ else
|
|
+#endif
|
|
+
|
|
+ mm->mmap_base = TASK_UNMAPPED_BASE;
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
+ mm->mmap_base += mm->delta_mmap;
|
|
+#endif
|
|
+
|
|
+ mm->free_area_cache = mm->mmap_base;
|
|
+ mm->cached_hole_size = ~0UL;
|
|
+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
|
|
+ /*
|
|
+ * Restore the topdown base:
|
|
+ */
|
|
+ mm->mmap_base = base;
|
|
+ mm->free_area_cache = base;
|
|
+ mm->cached_hole_size = ~0UL;
|
|
+
|
|
+ return addr;
|
|
}
|
|
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
|
|
index 51534ef..0d8a376 100644
|
|
--- a/arch/x86/kernel/sys_x86_64.c
|
|
+++ b/arch/x86/kernel/sys_x86_64.c
|
|
@@ -95,8 +95,8 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
|
|
return error;
|
|
}
|
|
|
|
-static void find_start_end(unsigned long flags, unsigned long *begin,
|
|
- unsigned long *end)
|
|
+static void find_start_end(struct mm_struct *mm, unsigned long flags,
|
|
+ unsigned long *begin, unsigned long *end)
|
|
{
|
|
if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
|
|
unsigned long new_begin;
|
|
@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|
if (flags & MAP_FIXED)
|
|
return addr;
|
|
|
|
- find_start_end(flags, &begin, &end);
|
|
+ find_start_end(mm, flags, &begin, &end);
|
|
|
|
if (len > end)
|
|
return -ENOMEM;
|
|
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
|
|
+#endif
|
|
+
|
|
if (addr) {
|
|
addr = PAGE_ALIGN(addr);
|
|
vma = find_vma(mm, addr);
|
|
- if (end - len >= addr &&
|
|
- (!vma || addr + len <= vma->vm_start))
|
|
+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
|
|
return addr;
|
|
}
|
|
if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
|
|
@@ -172,7 +175,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|
}
|
|
return -ENOMEM;
|
|
}
|
|
- if (!vma || addr + len <= vma->vm_start) {
|
|
+ if (check_heap_stack_gap(vma, addr, len)) {
|
|
/*
|
|
* Remember the place where we stopped the search:
|
|
*/
|
|
@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
{
|
|
struct vm_area_struct *vma;
|
|
struct mm_struct *mm = current->mm;
|
|
- unsigned long addr = addr0, start_addr;
|
|
+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
|
|
|
|
/* requested length too big for entire address space */
|
|
if (len > TASK_SIZE)
|
|
@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
|
|
goto bottomup;
|
|
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
|
|
+#endif
|
|
+
|
|
/* requesting a specific address */
|
|
if (addr) {
|
|
addr = PAGE_ALIGN(addr);
|
|
- vma = find_vma(mm, addr);
|
|
- if (TASK_SIZE - len >= addr &&
|
|
- (!vma || addr + len <= vma->vm_start))
|
|
- return addr;
|
|
+ if (TASK_SIZE - len >= addr) {
|
|
+ vma = find_vma(mm, addr);
|
|
+ if (check_heap_stack_gap(vma, addr, len))
|
|
+ return addr;
|
|
+ }
|
|
}
|
|
|
|
/* check if free_area_cache is useful for us */
|
|
@@ -240,7 +248,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
* return with success:
|
|
*/
|
|
vma = find_vma(mm, addr);
|
|
- if (!vma || addr+len <= vma->vm_start)
|
|
+ if (check_heap_stack_gap(vma, addr, len))
|
|
/* remember the address as a hint for next time */
|
|
return mm->free_area_cache = addr;
|
|
|
|
@@ -249,8 +257,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
mm->cached_hole_size = vma->vm_start - addr;
|
|
|
|
/* try just below the current vma->vm_start */
|
|
- addr = vma->vm_start-len;
|
|
- } while (len < vma->vm_start);
|
|
+ addr = skip_heap_stack_gap(vma, len);
|
|
+ } while (!IS_ERR_VALUE(addr));
|
|
|
|
fail:
|
|
/*
|
|
@@ -270,13 +278,21 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
* can happen with large stack limits and large mmap()
|
|
* allocations.
|
|
*/
|
|
+ mm->mmap_base = TASK_UNMAPPED_BASE;
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
+ mm->mmap_base += mm->delta_mmap;
|
|
+#endif
|
|
+
|
|
+ mm->free_area_cache = mm->mmap_base;
|
|
mm->cached_hole_size = ~0UL;
|
|
- mm->free_area_cache = TASK_UNMAPPED_BASE;
|
|
addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
|
|
/*
|
|
* Restore the topdown base:
|
|
*/
|
|
- mm->free_area_cache = mm->mmap_base;
|
|
+ mm->mmap_base = base;
|
|
+ mm->free_area_cache = base;
|
|
mm->cached_hole_size = ~0UL;
|
|
|
|
return addr;
|
|
diff --git a/arch/x86/kernel/sys_x86_64.c.rej b/arch/x86/kernel/sys_x86_64.c.rej
|
|
new file mode 100644
|
|
index 0000000..d74c3f9
|
|
--- /dev/null
|
|
+++ b/arch/x86/kernel/sys_x86_64.c.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- arch/x86/kernel/sys_x86_64.c 2012-05-21 11:32:57.659927670 +0200
|
|
++++ arch/x86/kernel/sys_x86_64.c 2012-05-21 12:10:09.624048894 +0200
|
|
+@@ -115,7 +115,7 @@ static void find_start_end(unsigned long
|
|
+ *begin = new_begin;
|
|
+ }
|
|
+ } else {
|
|
+- *begin = TASK_UNMAPPED_BASE;
|
|
++ *begin = mm->mmap_base;
|
|
+ *end = TASK_SIZE;
|
|
+ }
|
|
+ }
|
|
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
|
|
index 6410744..79758f0 100644
|
|
--- a/arch/x86/kernel/tboot.c
|
|
+++ b/arch/x86/kernel/tboot.c
|
|
@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
|
|
|
|
void tboot_shutdown(u32 shutdown_type)
|
|
{
|
|
- void (*shutdown)(void);
|
|
+ void (* __noreturn shutdown)(void);
|
|
|
|
if (!tboot_enabled())
|
|
return;
|
|
@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
|
|
|
|
switch_to_tboot_pt();
|
|
|
|
- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
|
|
+ shutdown = (void *)tboot->shutdown_entry;
|
|
shutdown();
|
|
|
|
/* should not reach here */
|
|
@@ -299,7 +299,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
|
|
return 0;
|
|
}
|
|
|
|
-static atomic_t ap_wfs_count;
|
|
+static atomic_unchecked_t ap_wfs_count;
|
|
|
|
static int tboot_wait_for_aps(int num_aps)
|
|
{
|
|
@@ -323,9 +323,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
|
|
{
|
|
switch (action) {
|
|
case CPU_DYING:
|
|
- atomic_inc(&ap_wfs_count);
|
|
+ atomic_inc_unchecked(&ap_wfs_count);
|
|
if (num_online_cpus() == 1)
|
|
- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
|
|
+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
|
|
return NOTIFY_BAD;
|
|
break;
|
|
}
|
|
@@ -344,7 +344,7 @@ static __init int tboot_late_init(void)
|
|
|
|
tboot_create_trampoline();
|
|
|
|
- atomic_set(&ap_wfs_count, 0);
|
|
+ atomic_set_unchecked(&ap_wfs_count, 0);
|
|
register_hotcpu_notifier(&tboot_cpu_notifier);
|
|
|
|
acpi_os_set_prepare_sleep(&tboot_sleep);
|
|
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
|
|
index c6eba2b..3303326 100644
|
|
--- a/arch/x86/kernel/time.c
|
|
+++ b/arch/x86/kernel/time.c
|
|
@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
|
|
{
|
|
unsigned long pc = instruction_pointer(regs);
|
|
|
|
- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
|
|
+ if (!user_mode(regs) && in_lock_functions(pc)) {
|
|
#ifdef CONFIG_FRAME_POINTER
|
|
- return *(unsigned long *)(regs->bp + sizeof(long));
|
|
+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
|
|
#else
|
|
unsigned long *sp =
|
|
(unsigned long *)kernel_stack_pointer(regs);
|
|
@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
|
|
* or above a saved flags. Eflags has bits 22-31 zero,
|
|
* kernel addresses don't.
|
|
*/
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ return ktla_ktva(sp[0]);
|
|
+#else
|
|
if (sp[0] >> 22)
|
|
return sp[0];
|
|
if (sp[1] >> 22)
|
|
return sp[1];
|
|
#endif
|
|
+
|
|
+#endif
|
|
}
|
|
return pc;
|
|
}
|
|
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
|
|
index fe2e363..f50a1a9 100644
|
|
--- a/arch/x86/kernel/tls.c
|
|
+++ b/arch/x86/kernel/tls.c
|
|
@@ -139,6 +139,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
|
|
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
|
|
return -EINVAL;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
|
|
+ return -EINVAL;
|
|
+#endif
|
|
+
|
|
set_tls_desc(p, idx, &info, 1);
|
|
|
|
return 0;
|
|
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
|
|
index 451c0a7..e57f551 100644
|
|
--- a/arch/x86/kernel/trampoline_32.S
|
|
+++ b/arch/x86/kernel/trampoline_32.S
|
|
@@ -32,6 +32,12 @@
|
|
#include <asm/segment.h>
|
|
#include <asm/page_types.h>
|
|
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+#define ta(X) (X)
|
|
+#else
|
|
+#define ta(X) ((X) - __PAGE_OFFSET)
|
|
+#endif
|
|
+
|
|
#ifdef CONFIG_SMP
|
|
|
|
.section ".x86_trampoline","a"
|
|
@@ -62,7 +68,7 @@ r_base = .
|
|
inc %ax # protected mode (PE) bit
|
|
lmsw %ax # into protected mode
|
|
# flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
|
|
- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
|
|
+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
|
|
|
|
# These need to be in the same 64K segment as the above;
|
|
# hence we don't use the boot_gdt_descr defined in head.S
|
|
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
|
|
index 09ff517..df19fbff 100644
|
|
--- a/arch/x86/kernel/trampoline_64.S
|
|
+++ b/arch/x86/kernel/trampoline_64.S
|
|
@@ -90,7 +90,7 @@ startup_32:
|
|
movl $__KERNEL_DS, %eax # Initialize the %ds segment register
|
|
movl %eax, %ds
|
|
|
|
- movl $X86_CR4_PAE, %eax
|
|
+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
|
|
movl %eax, %cr4 # Enable PAE mode
|
|
|
|
# Setup trampoline 4 level pagetables
|
|
@@ -138,7 +138,7 @@ tidt:
|
|
# so the kernel can live anywhere
|
|
.balign 4
|
|
tgdt:
|
|
- .short tgdt_end - tgdt # gdt limit
|
|
+ .short tgdt_end - tgdt - 1 # gdt limit
|
|
.long tgdt - r_base
|
|
.short 0
|
|
.quad 0x00cf9b000000ffff # __KERNEL32_CS
|
|
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
|
|
index adc049f..f131681 100644
|
|
--- a/arch/x86/kernel/traps.c
|
|
+++ b/arch/x86/kernel/traps.c
|
|
@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
|
|
|
|
/* Do we ignore FPU interrupts ? */
|
|
char ignore_fpu_irq;
|
|
-
|
|
-/*
|
|
- * The IDT has to be page-aligned to simplify the Pentium
|
|
- * F0 0F bug workaround.
|
|
- */
|
|
-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
|
|
#endif
|
|
|
|
DECLARE_BITMAP(used_vectors, NR_VECTORS);
|
|
@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
|
|
}
|
|
|
|
static void __kprobes
|
|
-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
|
|
+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
|
|
long error_code, siginfo_t *info)
|
|
{
|
|
struct task_struct *tsk = current;
|
|
|
|
#ifdef CONFIG_X86_32
|
|
- if (regs->flags & X86_VM_MASK) {
|
|
+ if (v8086_mode(regs)) {
|
|
/*
|
|
* traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
|
|
* On nmi (interrupt 2), do_trap should not be called.
|
|
@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
|
|
}
|
|
#endif
|
|
|
|
- if (!user_mode(regs))
|
|
+ if (!user_mode_novm(regs))
|
|
goto kernel_trap;
|
|
|
|
#ifdef CONFIG_X86_32
|
|
@@ -148,7 +142,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
|
|
printk_ratelimit()) {
|
|
printk(KERN_INFO
|
|
"%s[%d] trap %s ip:%lx sp:%lx error:%lx",
|
|
- tsk->comm, tsk->pid, str,
|
|
+ tsk->comm, task_pid_nr(tsk), str,
|
|
regs->ip, regs->sp, error_code);
|
|
print_vma_addr(" in ", regs->ip);
|
|
printk("\n");
|
|
@@ -165,8 +159,20 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
|
|
if (!fixup_exception(regs)) {
|
|
tsk->thread.error_code = error_code;
|
|
tsk->thread.trap_nr = trapnr;
|
|
+
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
|
|
+ str = "PAX: suspicious stack segment fault";
|
|
+#endif
|
|
+
|
|
die(str, regs, error_code);
|
|
}
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ if (trapnr == 4)
|
|
+ pax_report_refcount_overflow(regs);
|
|
+#endif
|
|
+
|
|
return;
|
|
|
|
#ifdef CONFIG_X86_32
|
|
@@ -271,14 +277,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
|
|
conditional_sti(regs);
|
|
|
|
#ifdef CONFIG_X86_32
|
|
- if (regs->flags & X86_VM_MASK)
|
|
+ if (v8086_mode(regs))
|
|
goto gp_in_vm86;
|
|
#endif
|
|
|
|
tsk = current;
|
|
- if (!user_mode(regs))
|
|
+ if (!user_mode_novm(regs))
|
|
goto gp_in_kernel;
|
|
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
|
|
+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
|
|
+ struct mm_struct *mm = tsk->mm;
|
|
+ unsigned long limit;
|
|
+
|
|
+ down_write(&mm->mmap_sem);
|
|
+ limit = mm->context.user_cs_limit;
|
|
+ if (limit < TASK_SIZE) {
|
|
+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
|
|
+ up_write(&mm->mmap_sem);
|
|
+ return;
|
|
+ }
|
|
+ up_write(&mm->mmap_sem);
|
|
+ }
|
|
+#endif
|
|
+
|
|
tsk->thread.error_code = error_code;
|
|
tsk->thread.trap_nr = X86_TRAP_GP;
|
|
|
|
@@ -311,6 +333,13 @@ do_general_protection(struct pt_regs *regs, long error_code)
|
|
if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
|
|
X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
|
|
return;
|
|
+
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
|
|
+ die("PAX: suspicious general protection fault", regs, error_code);
|
|
+ else
|
|
+#endif
|
|
+
|
|
die("general protection fault", regs, error_code);
|
|
}
|
|
|
|
@@ -466,7 +495,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
|
|
/* It's safe to allow irq's after DR6 has been saved */
|
|
preempt_conditional_sti(regs);
|
|
|
|
- if (regs->flags & X86_VM_MASK) {
|
|
+ if (v8086_mode(regs)) {
|
|
handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
|
|
X86_TRAP_DB);
|
|
preempt_conditional_cli(regs);
|
|
@@ -481,7 +510,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
|
|
* We already checked v86 mode above, so we can check for kernel mode
|
|
* by just checking the CPL of CS.
|
|
*/
|
|
- if ((dr6 & DR_STEP) && !user_mode(regs)) {
|
|
+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
|
|
tsk->thread.debugreg6 &= ~DR_STEP;
|
|
set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
|
|
regs->flags &= ~X86_EFLAGS_TF;
|
|
@@ -512,7 +541,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
|
|
return;
|
|
conditional_sti(regs);
|
|
|
|
- if (!user_mode_vm(regs))
|
|
+ if (!user_mode(regs))
|
|
{
|
|
if (!fixup_exception(regs)) {
|
|
task->thread.error_code = error_code;
|
|
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
|
|
index 255f58a..de006bf 100644
|
|
--- a/arch/x86/kernel/vm86_32.c
|
|
+++ b/arch/x86/kernel/vm86_32.c
|
|
@@ -148,7 +148,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
|
|
do_exit(SIGSEGV);
|
|
}
|
|
|
|
- tss = &per_cpu(init_tss, get_cpu());
|
|
+ tss = init_tss + get_cpu();
|
|
current->thread.sp0 = current->thread.saved_sp0;
|
|
current->thread.sysenter_cs = __KERNEL_CS;
|
|
load_sp0(tss, ¤t->thread);
|
|
@@ -326,7 +326,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
|
|
tsk->thread.saved_fs = info->regs32->fs;
|
|
tsk->thread.saved_gs = get_user_gs(info->regs32);
|
|
|
|
- tss = &per_cpu(init_tss, get_cpu());
|
|
+ tss = init_tss + get_cpu();
|
|
tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
|
|
if (cpu_has_sep)
|
|
tsk->thread.sysenter_cs = 0;
|
|
@@ -533,7 +533,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
|
|
goto cannot_handle;
|
|
if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
|
|
goto cannot_handle;
|
|
- intr_ptr = (unsigned long __user *) (i << 2);
|
|
+ intr_ptr = (__force unsigned long __user *) (i << 2);
|
|
if (get_user(segoffs, intr_ptr))
|
|
goto cannot_handle;
|
|
if ((segoffs >> 16) == BIOSSEG)
|
|
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
|
|
index 0f703f1..3b426f3 100644
|
|
--- a/arch/x86/kernel/vmlinux.lds.S
|
|
+++ b/arch/x86/kernel/vmlinux.lds.S
|
|
@@ -26,6 +26,13 @@
|
|
#include <asm/page_types.h>
|
|
#include <asm/cache.h>
|
|
#include <asm/boot.h>
|
|
+#include <asm/segment.h>
|
|
+
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
|
|
+#else
|
|
+#define __KERNEL_TEXT_OFFSET 0
|
|
+#endif
|
|
|
|
#undef i386 /* in case the preprocessor is a 32bit one */
|
|
|
|
@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
|
|
|
|
PHDRS {
|
|
text PT_LOAD FLAGS(5); /* R_E */
|
|
+#ifdef CONFIG_X86_32
|
|
+ module PT_LOAD FLAGS(5); /* R_E */
|
|
+#endif
|
|
+#ifdef CONFIG_XEN
|
|
+ rodata PT_LOAD FLAGS(5); /* R_E */
|
|
+#else
|
|
+ rodata PT_LOAD FLAGS(4); /* R__ */
|
|
+#endif
|
|
data PT_LOAD FLAGS(6); /* RW_ */
|
|
-#ifdef CONFIG_X86_64
|
|
+ init.begin PT_LOAD FLAGS(6); /* RW_ */
|
|
#ifdef CONFIG_SMP
|
|
percpu PT_LOAD FLAGS(6); /* RW_ */
|
|
#endif
|
|
+ text.init PT_LOAD FLAGS(5); /* R_E */
|
|
+ text.exit PT_LOAD FLAGS(5); /* R_E */
|
|
init PT_LOAD FLAGS(7); /* RWE */
|
|
-#endif
|
|
note PT_NOTE FLAGS(0); /* ___ */
|
|
}
|
|
|
|
SECTIONS
|
|
{
|
|
#ifdef CONFIG_X86_32
|
|
- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
|
|
- phys_startup_32 = startup_32 - LOAD_OFFSET;
|
|
+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
|
|
#else
|
|
- . = __START_KERNEL;
|
|
- phys_startup_64 = startup_64 - LOAD_OFFSET;
|
|
+ . = __START_KERNEL;
|
|
#endif
|
|
|
|
/* Text and read-only data */
|
|
- .text : AT(ADDR(.text) - LOAD_OFFSET) {
|
|
- _text = .;
|
|
+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
|
|
/* bootstrapping code */
|
|
+#ifdef CONFIG_X86_32
|
|
+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
|
|
+#else
|
|
+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
|
|
+#endif
|
|
+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
|
|
+ _text = .;
|
|
HEAD_TEXT
|
|
#ifdef CONFIG_X86_32
|
|
. = ALIGN(PAGE_SIZE);
|
|
@@ -108,13 +128,48 @@ SECTIONS
|
|
IRQENTRY_TEXT
|
|
*(.fixup)
|
|
*(.gnu.warning)
|
|
- /* End of text section */
|
|
- _etext = .;
|
|
} :text = 0x9090
|
|
|
|
- NOTES :text :note
|
|
+ . += __KERNEL_TEXT_OFFSET;
|
|
|
|
- EXCEPTION_TABLE(16) :text = 0x9090
|
|
+#ifdef CONFIG_X86_32
|
|
+ . = ALIGN(PAGE_SIZE);
|
|
+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
|
|
+
|
|
+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
|
|
+ MODULES_EXEC_VADDR = .;
|
|
+ BYTE(0)
|
|
+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
|
|
+ . = ALIGN(HPAGE_SIZE) - 1;
|
|
+ MODULES_EXEC_END = .;
|
|
+#endif
|
|
+
|
|
+ } :module
|
|
+#endif
|
|
+
|
|
+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
|
|
+ /* End of text section */
|
|
+ BYTE(0)
|
|
+ _etext = . - __KERNEL_TEXT_OFFSET;
|
|
+ }
|
|
+
|
|
+#ifdef CONFIG_X86_32
|
|
+ . = ALIGN(PAGE_SIZE);
|
|
+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
|
|
+ *(.idt)
|
|
+ . = ALIGN(PAGE_SIZE);
|
|
+ *(.empty_zero_page)
|
|
+ *(.initial_pg_fixmap)
|
|
+ *(.initial_pg_pmd)
|
|
+ *(.initial_page_table)
|
|
+ *(.swapper_pg_dir)
|
|
+ } :rodata
|
|
+#endif
|
|
+
|
|
+ . = ALIGN(PAGE_SIZE);
|
|
+ NOTES :rodata :note
|
|
+
|
|
+ EXCEPTION_TABLE(16) :rodata
|
|
|
|
#if defined(CONFIG_DEBUG_RODATA)
|
|
/* .text should occupy whole number of pages */
|
|
@@ -126,16 +181,20 @@ SECTIONS
|
|
|
|
/* Data */
|
|
.data : AT(ADDR(.data) - LOAD_OFFSET) {
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ . = ALIGN(HPAGE_SIZE);
|
|
+#else
|
|
+ . = ALIGN(PAGE_SIZE);
|
|
+#endif
|
|
+
|
|
/* Start of data section */
|
|
_sdata = .;
|
|
|
|
/* init_task */
|
|
INIT_TASK_DATA(THREAD_SIZE)
|
|
|
|
-#ifdef CONFIG_X86_32
|
|
- /* 32 bit has nosave before _edata */
|
|
NOSAVE_DATA
|
|
-#endif
|
|
|
|
PAGE_ALIGNED_DATA(PAGE_SIZE)
|
|
|
|
@@ -176,12 +235,19 @@ SECTIONS
|
|
#endif /* CONFIG_X86_64 */
|
|
|
|
/* Init code and data - will be freed after init */
|
|
- . = ALIGN(PAGE_SIZE);
|
|
.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
|
|
+ BYTE(0)
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ . = ALIGN(HPAGE_SIZE);
|
|
+#else
|
|
+ . = ALIGN(PAGE_SIZE);
|
|
+#endif
|
|
+
|
|
__init_begin = .; /* paired with __init_end */
|
|
- }
|
|
+ } :init.begin
|
|
|
|
-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
|
|
+#ifdef CONFIG_SMP
|
|
/*
|
|
* percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
|
|
* output PHDR, so the next output section - .init.text - should
|
|
@@ -190,12 +256,27 @@ SECTIONS
|
|
PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
|
|
#endif
|
|
|
|
- INIT_TEXT_SECTION(PAGE_SIZE)
|
|
-#ifdef CONFIG_X86_64
|
|
- :init
|
|
-#endif
|
|
+ . = ALIGN(PAGE_SIZE);
|
|
+ init_begin = .;
|
|
+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
|
|
+ VMLINUX_SYMBOL(_sinittext) = .;
|
|
+ INIT_TEXT
|
|
+ VMLINUX_SYMBOL(_einittext) = .;
|
|
+ . = ALIGN(PAGE_SIZE);
|
|
+ } :text.init
|
|
|
|
- INIT_DATA_SECTION(16)
|
|
+ /*
|
|
+ * .exit.text is discard at runtime, not link time, to deal with
|
|
+ * references from .altinstructions and .eh_frame
|
|
+ */
|
|
+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
|
|
+ EXIT_TEXT
|
|
+ . = ALIGN(16);
|
|
+ } :text.exit
|
|
+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
|
|
+
|
|
+ . = ALIGN(PAGE_SIZE);
|
|
+ INIT_DATA_SECTION(16) :init
|
|
|
|
/*
|
|
* Code and data for a variety of lowlevel trampolines, to be
|
|
@@ -269,19 +350,12 @@ SECTIONS
|
|
}
|
|
|
|
. = ALIGN(8);
|
|
- /*
|
|
- * .exit.text is discard at runtime, not link time, to deal with
|
|
- * references from .altinstructions and .eh_frame
|
|
- */
|
|
- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
|
|
- EXIT_TEXT
|
|
- }
|
|
|
|
.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
|
|
EXIT_DATA
|
|
}
|
|
|
|
-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
|
|
+#ifndef CONFIG_SMP
|
|
PERCPU_SECTION(INTERNODE_CACHE_BYTES)
|
|
#endif
|
|
|
|
@@ -300,16 +374,10 @@ SECTIONS
|
|
.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
|
|
__smp_locks = .;
|
|
*(.smp_locks)
|
|
- . = ALIGN(PAGE_SIZE);
|
|
__smp_locks_end = .;
|
|
+ . = ALIGN(PAGE_SIZE);
|
|
}
|
|
|
|
-#ifdef CONFIG_X86_64
|
|
- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
|
|
- NOSAVE_DATA
|
|
- }
|
|
-#endif
|
|
-
|
|
/* BSS */
|
|
. = ALIGN(PAGE_SIZE);
|
|
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
|
|
@@ -325,6 +393,7 @@ SECTIONS
|
|
__brk_base = .;
|
|
. += 64 * 1024; /* 64k alignment slop space */
|
|
*(.brk_reservation) /* areas brk users have reserved */
|
|
+ . = ALIGN(HPAGE_SIZE);
|
|
__brk_limit = .;
|
|
}
|
|
|
|
@@ -351,13 +420,12 @@ SECTIONS
|
|
* for the boot processor.
|
|
*/
|
|
#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
|
|
-INIT_PER_CPU(gdt_page);
|
|
INIT_PER_CPU(irq_stack_union);
|
|
|
|
/*
|
|
* Build-time check on the image size:
|
|
*/
|
|
-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
|
|
+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
|
|
"kernel image bigger than KERNEL_IMAGE_SIZE");
|
|
|
|
#ifdef CONFIG_SMP
|
|
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
|
|
index 7515cf0..331a1a0 100644
|
|
--- a/arch/x86/kernel/vsyscall_64.c
|
|
+++ b/arch/x86/kernel/vsyscall_64.c
|
|
@@ -54,15 +54,13 @@
|
|
DEFINE_VVAR(int, vgetcpu_mode);
|
|
DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
|
|
|
|
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
|
|
+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
|
|
|
|
static int __init vsyscall_setup(char *str)
|
|
{
|
|
if (str) {
|
|
if (!strcmp("emulate", str))
|
|
vsyscall_mode = EMULATE;
|
|
- else if (!strcmp("native", str))
|
|
- vsyscall_mode = NATIVE;
|
|
else if (!strcmp("none", str))
|
|
vsyscall_mode = NONE;
|
|
else
|
|
@@ -206,7 +204,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
|
|
|
|
tsk = current;
|
|
if (seccomp_mode(&tsk->seccomp))
|
|
- do_exit(SIGKILL);
|
|
+ do_group_exit(SIGKILL);
|
|
|
|
/*
|
|
* With a real vsyscall, page faults cause SIGSEGV. We want to
|
|
@@ -278,8 +276,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
|
|
return true;
|
|
|
|
sigsegv:
|
|
- force_sig(SIGSEGV, current);
|
|
- return true;
|
|
+ do_group_exit(SIGKILL);
|
|
}
|
|
|
|
/*
|
|
@@ -332,10 +329,7 @@ void __init map_vsyscall(void)
|
|
extern char __vvar_page;
|
|
unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
|
|
|
|
- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
|
|
- vsyscall_mode == NATIVE
|
|
- ? PAGE_KERNEL_VSYSCALL
|
|
- : PAGE_KERNEL_VVAR);
|
|
+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
|
|
BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
|
|
(unsigned long)VSYSCALL_START);
|
|
|
|
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
|
|
index 9796c2f..f686fbf 100644
|
|
--- a/arch/x86/kernel/x8664_ksyms_64.c
|
|
+++ b/arch/x86/kernel/x8664_ksyms_64.c
|
|
@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
|
|
EXPORT_SYMBOL(copy_user_generic_string);
|
|
EXPORT_SYMBOL(copy_user_generic_unrolled);
|
|
EXPORT_SYMBOL(__copy_user_nocache);
|
|
-EXPORT_SYMBOL(_copy_from_user);
|
|
-EXPORT_SYMBOL(_copy_to_user);
|
|
|
|
EXPORT_SYMBOL(copy_page);
|
|
EXPORT_SYMBOL(clear_page);
|
|
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
|
|
index e62728e..5fc3a07 100644
|
|
--- a/arch/x86/kernel/xsave.c
|
|
+++ b/arch/x86/kernel/xsave.c
|
|
@@ -131,7 +131,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
|
|
fx_sw_user->xstate_size > fx_sw_user->extended_size)
|
|
return -EINVAL;
|
|
|
|
- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
|
|
+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
|
|
fx_sw_user->extended_size -
|
|
FP_XSTATE_MAGIC2_SIZE));
|
|
if (err)
|
|
@@ -267,7 +267,7 @@ static int restore_user_xstate(void __user *buf)
|
|
* the other extended state.
|
|
*/
|
|
xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
|
|
- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
|
|
+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
|
|
}
|
|
|
|
/*
|
|
@@ -296,7 +296,7 @@ int restore_i387_xstate(void __user *buf)
|
|
if (use_xsave())
|
|
err = restore_user_xstate(buf);
|
|
else
|
|
- err = fxrstor_checking((__force struct i387_fxsave_struct *)
|
|
+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
|
|
buf);
|
|
if (unlikely(err)) {
|
|
/*
|
|
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
|
|
index 9fed5be..18fd595 100644
|
|
--- a/arch/x86/kvm/cpuid.c
|
|
+++ b/arch/x86/kvm/cpuid.c
|
|
@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
|
|
struct kvm_cpuid2 *cpuid,
|
|
struct kvm_cpuid_entry2 __user *entries)
|
|
{
|
|
- int r;
|
|
+ int r, i;
|
|
|
|
r = -E2BIG;
|
|
if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
|
|
goto out;
|
|
r = -EFAULT;
|
|
- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
|
|
- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
|
|
+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
|
|
goto out;
|
|
+ for (i = 0; i < cpuid->nent; ++i) {
|
|
+ struct kvm_cpuid_entry2 cpuid_entry;
|
|
+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
|
|
+ goto out;
|
|
+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
|
|
+ }
|
|
vcpu->arch.cpuid_nent = cpuid->nent;
|
|
kvm_apic_set_version(vcpu);
|
|
kvm_x86_ops->cpuid_update(vcpu);
|
|
@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
|
|
struct kvm_cpuid2 *cpuid,
|
|
struct kvm_cpuid_entry2 __user *entries)
|
|
{
|
|
- int r;
|
|
+ int r, i;
|
|
|
|
r = -E2BIG;
|
|
if (cpuid->nent < vcpu->arch.cpuid_nent)
|
|
goto out;
|
|
r = -EFAULT;
|
|
- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
|
|
- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
|
|
+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
|
|
goto out;
|
|
+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
|
|
+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
|
|
+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
|
|
+ goto out;
|
|
+ }
|
|
return 0;
|
|
|
|
out:
|
|
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
|
|
index db08e66..805cff1 100644
|
|
--- a/arch/x86/kvm/emulate.c
|
|
+++ b/arch/x86/kvm/emulate.c
|
|
@@ -252,6 +252,7 @@ struct gprefix {
|
|
|
|
#define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
|
|
do { \
|
|
+ unsigned long _tmp; \
|
|
__asm__ __volatile__ ( \
|
|
_PRE_EFLAGS("0", "4", "2") \
|
|
_op _suffix " %"_x"3,%1; " \
|
|
@@ -266,8 +267,6 @@ struct gprefix {
|
|
/* Raw emulation: instruction has two explicit operands. */
|
|
#define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
|
|
do { \
|
|
- unsigned long _tmp; \
|
|
- \
|
|
switch ((ctxt)->dst.bytes) { \
|
|
case 2: \
|
|
____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
|
|
@@ -283,7 +282,6 @@ struct gprefix {
|
|
|
|
#define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
|
|
do { \
|
|
- unsigned long _tmp; \
|
|
switch ((ctxt)->dst.bytes) { \
|
|
case 1: \
|
|
____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
|
|
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
|
|
index 53454a6..4976d9d 100644
|
|
--- a/arch/x86/kvm/lapic.c
|
|
+++ b/arch/x86/kvm/lapic.c
|
|
@@ -54,7 +54,7 @@
|
|
#define APIC_BUS_CYCLE_NS 1
|
|
|
|
/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
|
|
-#define apic_debug(fmt, arg...)
|
|
+#define apic_debug(fmt, arg...) do {} while (0)
|
|
|
|
#define APIC_LVT_NUM 6
|
|
/* 14 is the version for Xeon and Pentium 8.4.8*/
|
|
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
|
|
index df5a703..63748a7 100644
|
|
--- a/arch/x86/kvm/paging_tmpl.h
|
|
+++ b/arch/x86/kvm/paging_tmpl.h
|
|
@@ -197,7 +197,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
|
|
if (unlikely(kvm_is_error_hva(host_addr)))
|
|
goto error;
|
|
|
|
- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
|
|
+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
|
|
if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
|
|
goto error;
|
|
|
|
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
|
|
index 86c74c0..c21103a 100644
|
|
--- a/arch/x86/kvm/svm.c
|
|
+++ b/arch/x86/kvm/svm.c
|
|
@@ -3507,7 +3507,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
|
|
int cpu = raw_smp_processor_id();
|
|
|
|
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
|
|
+
|
|
+ pax_open_kernel();
|
|
sd->tss_desc->type = 9; /* available 32/64-bit TSS */
|
|
+ pax_close_kernel();
|
|
+
|
|
load_TR_desc();
|
|
}
|
|
|
|
@@ -3887,6 +3891,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
|
|
#endif
|
|
#endif
|
|
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ __set_fs(current_thread_info()->addr_limit);
|
|
+#endif
|
|
+
|
|
reload_tss(vcpu);
|
|
|
|
local_irq_disable();
|
|
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
|
|
index 4ad0d71..e865242 100644
|
|
--- a/arch/x86/kvm/vmx.c
|
|
+++ b/arch/x86/kvm/vmx.c
|
|
@@ -1304,7 +1304,11 @@ static void reload_tss(void)
|
|
struct desc_struct *descs;
|
|
|
|
descs = (void *)gdt->address;
|
|
+
|
|
+ pax_open_kernel();
|
|
descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
|
|
+ pax_close_kernel();
|
|
+
|
|
load_TR_desc();
|
|
}
|
|
|
|
@@ -2633,8 +2637,11 @@ static __init int hardware_setup(void)
|
|
if (!cpu_has_vmx_flexpriority())
|
|
flexpriority_enabled = 0;
|
|
|
|
- if (!cpu_has_vmx_tpr_shadow())
|
|
- kvm_x86_ops->update_cr8_intercept = NULL;
|
|
+ if (!cpu_has_vmx_tpr_shadow()) {
|
|
+ pax_open_kernel();
|
|
+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
|
|
+ pax_close_kernel();
|
|
+ }
|
|
|
|
if (enable_ept && !cpu_has_vmx_ept_2m_page())
|
|
kvm_disable_largepages();
|
|
@@ -3655,7 +3662,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
|
|
vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
|
|
|
|
asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
|
|
- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
|
|
+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
|
|
|
|
rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
|
|
vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
|
|
@@ -6221,6 +6228,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
|
|
"jmp .Lkvm_vmx_return \n\t"
|
|
".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
|
|
".Lkvm_vmx_return: "
|
|
+
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
|
|
+ ".Lkvm_vmx_return2: "
|
|
+#endif
|
|
+
|
|
/* Save guest registers, load host registers, keep flags */
|
|
"mov %0, %c[wordsize](%%"R"sp) \n\t"
|
|
"pop %0 \n\t"
|
|
@@ -6269,6 +6282,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
|
|
#endif
|
|
[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
|
|
[wordsize]"i"(sizeof(ulong))
|
|
+
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ ,[cs]"i"(__KERNEL_CS)
|
|
+#endif
|
|
+
|
|
: "cc", "memory"
|
|
, R"ax", R"bx", R"di", R"si"
|
|
#ifdef CONFIG_X86_64
|
|
@@ -6297,7 +6315,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
|
|
}
|
|
}
|
|
|
|
- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
|
|
+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
|
|
+
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ loadsegment(fs, __KERNEL_PERCPU);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ __set_fs(current_thread_info()->addr_limit);
|
|
+#endif
|
|
+
|
|
vmx->loaded_vmcs->launched = 1;
|
|
|
|
vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
|
|
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
|
|
index 4ad2b7b..460537f 100644
|
|
--- a/arch/x86/kvm/x86.c
|
|
+++ b/arch/x86/kvm/x86.c
|
|
@@ -1380,8 +1380,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
|
|
{
|
|
struct kvm *kvm = vcpu->kvm;
|
|
int lm = is_long_mode(vcpu);
|
|
- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
|
|
- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
|
|
+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
|
|
+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
|
|
u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
|
|
: kvm->arch.xen_hvm_config.blob_size_32;
|
|
u32 page_num = data & ~PAGE_MASK;
|
|
@@ -2234,6 +2234,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
|
|
if (n < msr_list.nmsrs)
|
|
goto out;
|
|
r = -EFAULT;
|
|
+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
|
|
+ goto out;
|
|
if (copy_to_user(user_msr_list->indices, &msrs_to_save,
|
|
num_msrs_to_save * sizeof(u32)))
|
|
goto out;
|
|
@@ -2359,7 +2361,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
|
|
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
|
|
struct kvm_interrupt *irq)
|
|
{
|
|
- if (irq->irq < 0 || irq->irq >= 256)
|
|
+ if (irq->irq >= 256)
|
|
return -EINVAL;
|
|
if (irqchip_in_kernel(vcpu->kvm))
|
|
return -ENXIO;
|
|
@@ -4880,7 +4882,7 @@ static void kvm_set_mmio_spte_mask(void)
|
|
kvm_mmu_set_mmio_spte_mask(mask);
|
|
}
|
|
|
|
-int kvm_arch_init(void *opaque)
|
|
+int kvm_arch_init(const void *opaque)
|
|
{
|
|
int r;
|
|
struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
|
|
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
|
|
index 53272bd..9983358 100644
|
|
--- a/arch/x86/lguest/boot.c
|
|
+++ b/arch/x86/lguest/boot.c
|
|
@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
|
|
* Rebooting also tells the Host we're finished, but the RESTART flag tells the
|
|
* Launcher to reboot us.
|
|
*/
|
|
-static void lguest_restart(char *reason)
|
|
+static __noreturn void lguest_restart(char *reason)
|
|
{
|
|
hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
|
|
+ BUG();
|
|
}
|
|
|
|
/*G:050
|
|
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
|
|
index 00933d5..3a64af9 100644
|
|
--- a/arch/x86/lib/atomic64_386_32.S
|
|
+++ b/arch/x86/lib/atomic64_386_32.S
|
|
@@ -48,6 +48,10 @@ BEGIN(read)
|
|
movl (v), %eax
|
|
movl 4(v), %edx
|
|
RET_ENDP
|
|
+BEGIN(read_unchecked)
|
|
+ movl (v), %eax
|
|
+ movl 4(v), %edx
|
|
+RET_ENDP
|
|
#undef v
|
|
|
|
#define v %esi
|
|
@@ -55,6 +59,10 @@ BEGIN(set)
|
|
movl %ebx, (v)
|
|
movl %ecx, 4(v)
|
|
RET_ENDP
|
|
+BEGIN(set_unchecked)
|
|
+ movl %ebx, (v)
|
|
+ movl %ecx, 4(v)
|
|
+RET_ENDP
|
|
#undef v
|
|
|
|
#define v %esi
|
|
@@ -70,6 +78,20 @@ RET_ENDP
|
|
BEGIN(add)
|
|
addl %eax, (v)
|
|
adcl %edx, 4(v)
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ jno 0f
|
|
+ subl %eax, (v)
|
|
+ sbbl %edx, 4(v)
|
|
+ int $4
|
|
+0:
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+RET_ENDP
|
|
+BEGIN(add_unchecked)
|
|
+ addl %eax, (v)
|
|
+ adcl %edx, 4(v)
|
|
RET_ENDP
|
|
#undef v
|
|
|
|
@@ -77,6 +99,24 @@ RET_ENDP
|
|
BEGIN(add_return)
|
|
addl (v), %eax
|
|
adcl 4(v), %edx
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ into
|
|
+1234:
|
|
+ _ASM_EXTABLE(1234b, 2f)
|
|
+#endif
|
|
+
|
|
+ movl %eax, (v)
|
|
+ movl %edx, 4(v)
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+2:
|
|
+#endif
|
|
+
|
|
+RET_ENDP
|
|
+BEGIN(add_return_unchecked)
|
|
+ addl (v), %eax
|
|
+ adcl 4(v), %edx
|
|
movl %eax, (v)
|
|
movl %edx, 4(v)
|
|
RET_ENDP
|
|
@@ -86,6 +126,20 @@ RET_ENDP
|
|
BEGIN(sub)
|
|
subl %eax, (v)
|
|
sbbl %edx, 4(v)
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ jno 0f
|
|
+ addl %eax, (v)
|
|
+ adcl %edx, 4(v)
|
|
+ int $4
|
|
+0:
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+RET_ENDP
|
|
+BEGIN(sub_unchecked)
|
|
+ subl %eax, (v)
|
|
+ sbbl %edx, 4(v)
|
|
RET_ENDP
|
|
#undef v
|
|
|
|
@@ -96,6 +150,27 @@ BEGIN(sub_return)
|
|
sbbl $0, %edx
|
|
addl (v), %eax
|
|
adcl 4(v), %edx
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ into
|
|
+1234:
|
|
+ _ASM_EXTABLE(1234b, 2f)
|
|
+#endif
|
|
+
|
|
+ movl %eax, (v)
|
|
+ movl %edx, 4(v)
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+2:
|
|
+#endif
|
|
+
|
|
+RET_ENDP
|
|
+BEGIN(sub_return_unchecked)
|
|
+ negl %edx
|
|
+ negl %eax
|
|
+ sbbl $0, %edx
|
|
+ addl (v), %eax
|
|
+ adcl 4(v), %edx
|
|
movl %eax, (v)
|
|
movl %edx, 4(v)
|
|
RET_ENDP
|
|
@@ -105,6 +180,20 @@ RET_ENDP
|
|
BEGIN(inc)
|
|
addl $1, (v)
|
|
adcl $0, 4(v)
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ jno 0f
|
|
+ subl $1, (v)
|
|
+ sbbl $0, 4(v)
|
|
+ int $4
|
|
+0:
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+RET_ENDP
|
|
+BEGIN(inc_unchecked)
|
|
+ addl $1, (v)
|
|
+ adcl $0, 4(v)
|
|
RET_ENDP
|
|
#undef v
|
|
|
|
@@ -114,6 +203,26 @@ BEGIN(inc_return)
|
|
movl 4(v), %edx
|
|
addl $1, %eax
|
|
adcl $0, %edx
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ into
|
|
+1234:
|
|
+ _ASM_EXTABLE(1234b, 2f)
|
|
+#endif
|
|
+
|
|
+ movl %eax, (v)
|
|
+ movl %edx, 4(v)
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+2:
|
|
+#endif
|
|
+
|
|
+RET_ENDP
|
|
+BEGIN(inc_return_unchecked)
|
|
+ movl (v), %eax
|
|
+ movl 4(v), %edx
|
|
+ addl $1, %eax
|
|
+ adcl $0, %edx
|
|
movl %eax, (v)
|
|
movl %edx, 4(v)
|
|
RET_ENDP
|
|
@@ -123,6 +232,20 @@ RET_ENDP
|
|
BEGIN(dec)
|
|
subl $1, (v)
|
|
sbbl $0, 4(v)
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ jno 0f
|
|
+ addl $1, (v)
|
|
+ adcl $0, 4(v)
|
|
+ int $4
|
|
+0:
|
|
+ _ASM_EXTABLE(0b, 0b)
|
|
+#endif
|
|
+
|
|
+RET_ENDP
|
|
+BEGIN(dec_unchecked)
|
|
+ subl $1, (v)
|
|
+ sbbl $0, 4(v)
|
|
RET_ENDP
|
|
#undef v
|
|
|
|
@@ -132,6 +255,26 @@ BEGIN(dec_return)
|
|
movl 4(v), %edx
|
|
subl $1, %eax
|
|
sbbl $0, %edx
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ into
|
|
+1234:
|
|
+ _ASM_EXTABLE(1234b, 2f)
|
|
+#endif
|
|
+
|
|
+ movl %eax, (v)
|
|
+ movl %edx, 4(v)
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+2:
|
|
+#endif
|
|
+
|
|
+RET_ENDP
|
|
+BEGIN(dec_return_unchecked)
|
|
+ movl (v), %eax
|
|
+ movl 4(v), %edx
|
|
+ subl $1, %eax
|
|
+ sbbl $0, %edx
|
|
movl %eax, (v)
|
|
movl %edx, 4(v)
|
|
RET_ENDP
|
|
@@ -143,6 +286,13 @@ BEGIN(add_unless)
|
|
adcl %edx, %edi
|
|
addl (v), %eax
|
|
adcl 4(v), %edx
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ into
|
|
+1234:
|
|
+ _ASM_EXTABLE(1234b, 2f)
|
|
+#endif
|
|
+
|
|
cmpl %eax, %ecx
|
|
je 3f
|
|
1:
|
|
@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
|
|
1:
|
|
addl $1, %eax
|
|
adcl $0, %edx
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ into
|
|
+1234:
|
|
+ _ASM_EXTABLE(1234b, 2f)
|
|
+#endif
|
|
+
|
|
movl %eax, (v)
|
|
movl %edx, 4(v)
|
|
movl $1, %eax
|
|
@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
|
|
movl 4(v), %edx
|
|
subl $1, %eax
|
|
sbbl $0, %edx
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ into
|
|
+1234:
|
|
+ _ASM_EXTABLE(1234b, 1f)
|
|
+#endif
|
|
+
|
|
js 1f
|
|
movl %eax, (v)
|
|
movl %edx, 4(v)
|
|
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
|
|
index f5cc9eb..51fa319 100644
|
|
--- a/arch/x86/lib/atomic64_cx8_32.S
|
|
+++ b/arch/x86/lib/atomic64_cx8_32.S
|
|
@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
|
|
CFI_STARTPROC
|
|
|
|
read64 %ecx
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
ENDPROC(atomic64_read_cx8)
|
|
|
|
+ENTRY(atomic64_read_unchecked_cx8)
|
|
+ CFI_STARTPROC
|
|
+
|
|
+ read64 %ecx
|
|
+ pax_force_retaddr
|
|
+ ret
|
|
+ CFI_ENDPROC
|
|
+ENDPROC(atomic64_read_unchecked_cx8)
|
|
+
|
|
ENTRY(atomic64_set_cx8)
|
|
CFI_STARTPROC
|
|
|
|
@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
|
|
cmpxchg8b (%esi)
|
|
jne 1b
|
|
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
ENDPROC(atomic64_set_cx8)
|
|
|
|
+ENTRY(atomic64_set_unchecked_cx8)
|
|
+ CFI_STARTPROC
|
|
+
|
|
+1:
|
|
+/* we don't need LOCK_PREFIX since aligned 64-bit writes
|
|
+ * are atomic on 586 and newer */
|
|
+ cmpxchg8b (%esi)
|
|
+ jne 1b
|
|
+
|
|
+ pax_force_retaddr
|
|
+ ret
|
|
+ CFI_ENDPROC
|
|
+ENDPROC(atomic64_set_unchecked_cx8)
|
|
+
|
|
ENTRY(atomic64_xchg_cx8)
|
|
CFI_STARTPROC
|
|
|
|
@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
|
|
cmpxchg8b (%esi)
|
|
jne 1b
|
|
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
ENDPROC(atomic64_xchg_cx8)
|
|
|
|
-.macro addsub_return func ins insc
|
|
-ENTRY(atomic64_\func\()_return_cx8)
|
|
+.macro addsub_return func ins insc unchecked=""
|
|
+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
|
|
CFI_STARTPROC
|
|
SAVE ebp
|
|
SAVE ebx
|
|
@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
|
|
movl %edx, %ecx
|
|
\ins\()l %esi, %ebx
|
|
\insc\()l %edi, %ecx
|
|
+
|
|
+.ifb \unchecked
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ into
|
|
+2:
|
|
+ _ASM_EXTABLE(2b, 3f)
|
|
+#endif
|
|
+.endif
|
|
+
|
|
LOCK_PREFIX
|
|
cmpxchg8b (%ebp)
|
|
jne 1b
|
|
-
|
|
-10:
|
|
movl %ebx, %eax
|
|
movl %ecx, %edx
|
|
+
|
|
+.ifb \unchecked
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+3:
|
|
+#endif
|
|
+.endif
|
|
+
|
|
RESTORE edi
|
|
RESTORE esi
|
|
RESTORE ebx
|
|
RESTORE ebp
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
-ENDPROC(atomic64_\func\()_return_cx8)
|
|
+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
|
|
.endm
|
|
|
|
addsub_return add add adc
|
|
addsub_return sub sub sbb
|
|
+addsub_return add add adc _unchecked
|
|
+addsub_return sub sub sbb _unchecked
|
|
|
|
-.macro incdec_return func ins insc
|
|
-ENTRY(atomic64_\func\()_return_cx8)
|
|
+.macro incdec_return func ins insc unchecked=""
|
|
+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
|
|
CFI_STARTPROC
|
|
SAVE ebx
|
|
|
|
@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
|
|
movl %edx, %ecx
|
|
\ins\()l $1, %ebx
|
|
\insc\()l $0, %ecx
|
|
+
|
|
+.ifb \unchecked
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ into
|
|
+2:
|
|
+ _ASM_EXTABLE(2b, 3f)
|
|
+#endif
|
|
+.endif
|
|
+
|
|
LOCK_PREFIX
|
|
cmpxchg8b (%esi)
|
|
jne 1b
|
|
|
|
-10:
|
|
movl %ebx, %eax
|
|
movl %ecx, %edx
|
|
+
|
|
+.ifb \unchecked
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+3:
|
|
+#endif
|
|
+.endif
|
|
+
|
|
RESTORE ebx
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
-ENDPROC(atomic64_\func\()_return_cx8)
|
|
+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
|
|
.endm
|
|
|
|
incdec_return inc add adc
|
|
incdec_return dec sub sbb
|
|
+incdec_return inc add adc _unchecked
|
|
+incdec_return dec sub sbb _unchecked
|
|
|
|
ENTRY(atomic64_dec_if_positive_cx8)
|
|
CFI_STARTPROC
|
|
@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
|
|
movl %edx, %ecx
|
|
subl $1, %ebx
|
|
sbb $0, %ecx
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ into
|
|
+1234:
|
|
+ _ASM_EXTABLE(1234b, 2f)
|
|
+#endif
|
|
+
|
|
js 2f
|
|
LOCK_PREFIX
|
|
cmpxchg8b (%esi)
|
|
@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
|
|
movl %ebx, %eax
|
|
movl %ecx, %edx
|
|
RESTORE ebx
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
ENDPROC(atomic64_dec_if_positive_cx8)
|
|
@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
|
|
movl %edx, %ecx
|
|
addl %ebp, %ebx
|
|
adcl %edi, %ecx
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ into
|
|
+1234:
|
|
+ _ASM_EXTABLE(1234b, 3f)
|
|
+#endif
|
|
+
|
|
LOCK_PREFIX
|
|
cmpxchg8b (%esi)
|
|
jne 1b
|
|
@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
|
|
CFI_ADJUST_CFA_OFFSET -8
|
|
RESTORE ebx
|
|
RESTORE ebp
|
|
+ pax_force_retaddr
|
|
ret
|
|
4:
|
|
cmpl %edx, 4(%esp)
|
|
@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
|
|
xorl %ecx, %ecx
|
|
addl $1, %ebx
|
|
adcl %edx, %ecx
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ into
|
|
+1234:
|
|
+ _ASM_EXTABLE(1234b, 3f)
|
|
+#endif
|
|
+
|
|
LOCK_PREFIX
|
|
cmpxchg8b (%esi)
|
|
jne 1b
|
|
@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
|
|
movl $1, %eax
|
|
3:
|
|
RESTORE ebx
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
ENDPROC(atomic64_inc_not_zero_cx8)
|
|
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
|
|
index 78d16a5..fbcf666 100644
|
|
--- a/arch/x86/lib/checksum_32.S
|
|
+++ b/arch/x86/lib/checksum_32.S
|
|
@@ -28,7 +28,8 @@
|
|
#include <linux/linkage.h>
|
|
#include <asm/dwarf2.h>
|
|
#include <asm/errno.h>
|
|
-
|
|
+#include <asm/segment.h>
|
|
+
|
|
/*
|
|
* computes a partial checksum, e.g. for TCP/UDP fragments
|
|
*/
|
|
@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
|
|
|
|
#define ARGBASE 16
|
|
#define FP 12
|
|
-
|
|
-ENTRY(csum_partial_copy_generic)
|
|
+
|
|
+ENTRY(csum_partial_copy_generic_to_user)
|
|
CFI_STARTPROC
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ pushl_cfi %gs
|
|
+ popl_cfi %es
|
|
+ jmp csum_partial_copy_generic
|
|
+#endif
|
|
+
|
|
+ENTRY(csum_partial_copy_generic_from_user)
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ pushl_cfi %gs
|
|
+ popl_cfi %ds
|
|
+#endif
|
|
+
|
|
+ENTRY(csum_partial_copy_generic)
|
|
subl $4,%esp
|
|
CFI_ADJUST_CFA_OFFSET 4
|
|
pushl_cfi %edi
|
|
@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
|
|
jmp 4f
|
|
SRC(1: movw (%esi), %bx )
|
|
addl $2, %esi
|
|
-DST( movw %bx, (%edi) )
|
|
+DST( movw %bx, %es:(%edi) )
|
|
addl $2, %edi
|
|
addw %bx, %ax
|
|
adcl $0, %eax
|
|
@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
|
|
SRC(1: movl (%esi), %ebx )
|
|
SRC( movl 4(%esi), %edx )
|
|
adcl %ebx, %eax
|
|
-DST( movl %ebx, (%edi) )
|
|
+DST( movl %ebx, %es:(%edi) )
|
|
adcl %edx, %eax
|
|
-DST( movl %edx, 4(%edi) )
|
|
+DST( movl %edx, %es:4(%edi) )
|
|
|
|
SRC( movl 8(%esi), %ebx )
|
|
SRC( movl 12(%esi), %edx )
|
|
adcl %ebx, %eax
|
|
-DST( movl %ebx, 8(%edi) )
|
|
+DST( movl %ebx, %es:8(%edi) )
|
|
adcl %edx, %eax
|
|
-DST( movl %edx, 12(%edi) )
|
|
+DST( movl %edx, %es:12(%edi) )
|
|
|
|
SRC( movl 16(%esi), %ebx )
|
|
SRC( movl 20(%esi), %edx )
|
|
adcl %ebx, %eax
|
|
-DST( movl %ebx, 16(%edi) )
|
|
+DST( movl %ebx, %es:16(%edi) )
|
|
adcl %edx, %eax
|
|
-DST( movl %edx, 20(%edi) )
|
|
+DST( movl %edx, %es:20(%edi) )
|
|
|
|
SRC( movl 24(%esi), %ebx )
|
|
SRC( movl 28(%esi), %edx )
|
|
adcl %ebx, %eax
|
|
-DST( movl %ebx, 24(%edi) )
|
|
+DST( movl %ebx, %es:24(%edi) )
|
|
adcl %edx, %eax
|
|
-DST( movl %edx, 28(%edi) )
|
|
+DST( movl %edx, %es:28(%edi) )
|
|
|
|
lea 32(%esi), %esi
|
|
lea 32(%edi), %edi
|
|
@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
|
|
shrl $2, %edx # This clears CF
|
|
SRC(3: movl (%esi), %ebx )
|
|
adcl %ebx, %eax
|
|
-DST( movl %ebx, (%edi) )
|
|
+DST( movl %ebx, %es:(%edi) )
|
|
lea 4(%esi), %esi
|
|
lea 4(%edi), %edi
|
|
dec %edx
|
|
@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
|
|
jb 5f
|
|
SRC( movw (%esi), %cx )
|
|
leal 2(%esi), %esi
|
|
-DST( movw %cx, (%edi) )
|
|
+DST( movw %cx, %es:(%edi) )
|
|
leal 2(%edi), %edi
|
|
je 6f
|
|
shll $16,%ecx
|
|
SRC(5: movb (%esi), %cl )
|
|
-DST( movb %cl, (%edi) )
|
|
+DST( movb %cl, %es:(%edi) )
|
|
6: addl %ecx, %eax
|
|
adcl $0, %eax
|
|
7:
|
|
@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
|
|
|
|
6001:
|
|
movl ARGBASE+20(%esp), %ebx # src_err_ptr
|
|
- movl $-EFAULT, (%ebx)
|
|
+ movl $-EFAULT, %ss:(%ebx)
|
|
|
|
# zero the complete destination - computing the rest
|
|
# is too much work
|
|
@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
|
|
|
|
6002:
|
|
movl ARGBASE+24(%esp), %ebx # dst_err_ptr
|
|
- movl $-EFAULT,(%ebx)
|
|
+ movl $-EFAULT,%ss:(%ebx)
|
|
jmp 5000b
|
|
|
|
.previous
|
|
|
|
+ pushl_cfi %ss
|
|
+ popl_cfi %ds
|
|
+ pushl_cfi %ss
|
|
+ popl_cfi %es
|
|
popl_cfi %ebx
|
|
CFI_RESTORE ebx
|
|
popl_cfi %esi
|
|
@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
|
|
popl_cfi %ecx # equivalent to addl $4,%esp
|
|
ret
|
|
CFI_ENDPROC
|
|
-ENDPROC(csum_partial_copy_generic)
|
|
+ENDPROC(csum_partial_copy_generic_to_user)
|
|
|
|
#else
|
|
|
|
/* Version for PentiumII/PPro */
|
|
|
|
#define ROUND1(x) \
|
|
+ nop; nop; nop; \
|
|
SRC(movl x(%esi), %ebx ) ; \
|
|
addl %ebx, %eax ; \
|
|
- DST(movl %ebx, x(%edi) ) ;
|
|
+ DST(movl %ebx, %es:x(%edi)) ;
|
|
|
|
#define ROUND(x) \
|
|
+ nop; nop; nop; \
|
|
SRC(movl x(%esi), %ebx ) ; \
|
|
adcl %ebx, %eax ; \
|
|
- DST(movl %ebx, x(%edi) ) ;
|
|
+ DST(movl %ebx, %es:x(%edi)) ;
|
|
|
|
#define ARGBASE 12
|
|
-
|
|
-ENTRY(csum_partial_copy_generic)
|
|
+
|
|
+ENTRY(csum_partial_copy_generic_to_user)
|
|
CFI_STARTPROC
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ pushl_cfi %gs
|
|
+ popl_cfi %es
|
|
+ jmp csum_partial_copy_generic
|
|
+#endif
|
|
+
|
|
+ENTRY(csum_partial_copy_generic_from_user)
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ pushl_cfi %gs
|
|
+ popl_cfi %ds
|
|
+#endif
|
|
+
|
|
+ENTRY(csum_partial_copy_generic)
|
|
pushl_cfi %ebx
|
|
CFI_REL_OFFSET ebx, 0
|
|
pushl_cfi %edi
|
|
@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
|
|
subl %ebx, %edi
|
|
lea -1(%esi),%edx
|
|
andl $-32,%edx
|
|
- lea 3f(%ebx,%ebx), %ebx
|
|
+ lea 3f(%ebx,%ebx,2), %ebx
|
|
testl %esi, %esi
|
|
jmp *%ebx
|
|
1: addl $64,%esi
|
|
@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
|
|
jb 5f
|
|
SRC( movw (%esi), %dx )
|
|
leal 2(%esi), %esi
|
|
-DST( movw %dx, (%edi) )
|
|
+DST( movw %dx, %es:(%edi) )
|
|
leal 2(%edi), %edi
|
|
je 6f
|
|
shll $16,%edx
|
|
5:
|
|
SRC( movb (%esi), %dl )
|
|
-DST( movb %dl, (%edi) )
|
|
+DST( movb %dl, %es:(%edi) )
|
|
6: addl %edx, %eax
|
|
adcl $0, %eax
|
|
7:
|
|
.section .fixup, "ax"
|
|
6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
|
|
- movl $-EFAULT, (%ebx)
|
|
+ movl $-EFAULT, %ss:(%ebx)
|
|
# zero the complete destination (computing the rest is too much work)
|
|
movl ARGBASE+8(%esp),%edi # dst
|
|
movl ARGBASE+12(%esp),%ecx # len
|
|
@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
|
|
rep; stosb
|
|
jmp 7b
|
|
6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
|
|
- movl $-EFAULT, (%ebx)
|
|
+ movl $-EFAULT, %ss:(%ebx)
|
|
jmp 7b
|
|
.previous
|
|
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ pushl_cfi %ss
|
|
+ popl_cfi %ds
|
|
+ pushl_cfi %ss
|
|
+ popl_cfi %es
|
|
+#endif
|
|
+
|
|
popl_cfi %esi
|
|
CFI_RESTORE esi
|
|
popl_cfi %edi
|
|
@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
|
|
CFI_RESTORE ebx
|
|
ret
|
|
CFI_ENDPROC
|
|
-ENDPROC(csum_partial_copy_generic)
|
|
+ENDPROC(csum_partial_copy_generic_to_user)
|
|
|
|
#undef ROUND
|
|
#undef ROUND1
|
|
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
|
|
index f2145cf..cea889d 100644
|
|
--- a/arch/x86/lib/clear_page_64.S
|
|
+++ b/arch/x86/lib/clear_page_64.S
|
|
@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
|
|
movl $4096/8,%ecx
|
|
xorl %eax,%eax
|
|
rep stosq
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
ENDPROC(clear_page_c)
|
|
@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
|
|
movl $4096,%ecx
|
|
xorl %eax,%eax
|
|
rep stosb
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
ENDPROC(clear_page_c_e)
|
|
@@ -43,6 +45,7 @@ ENTRY(clear_page)
|
|
leaq 64(%rdi),%rdi
|
|
jnz .Lloop
|
|
nop
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
.Lclear_page_end:
|
|
@@ -58,7 +61,7 @@ ENDPROC(clear_page)
|
|
|
|
#include <asm/cpufeature.h>
|
|
|
|
- .section .altinstr_replacement,"ax"
|
|
+ .section .altinstr_replacement,"a"
|
|
1: .byte 0xeb /* jmp <disp8> */
|
|
.byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
|
|
2: .byte 0xeb /* jmp <disp8> */
|
|
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
|
|
index 1e572c5..2a162cd 100644
|
|
--- a/arch/x86/lib/cmpxchg16b_emu.S
|
|
+++ b/arch/x86/lib/cmpxchg16b_emu.S
|
|
@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
|
|
|
|
popf
|
|
mov $1, %al
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
not_same:
|
|
popf
|
|
xor %al,%al
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
CFI_ENDPROC
|
|
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
|
|
index 6b34d04..dccb07f 100644
|
|
--- a/arch/x86/lib/copy_page_64.S
|
|
+++ b/arch/x86/lib/copy_page_64.S
|
|
@@ -9,6 +9,7 @@ copy_page_c:
|
|
CFI_STARTPROC
|
|
movl $4096/8,%ecx
|
|
rep movsq
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
ENDPROC(copy_page_c)
|
|
@@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
|
|
|
|
ENTRY(copy_page)
|
|
CFI_STARTPROC
|
|
- subq $2*8,%rsp
|
|
- CFI_ADJUST_CFA_OFFSET 2*8
|
|
+ subq $3*8,%rsp
|
|
+ CFI_ADJUST_CFA_OFFSET 3*8
|
|
movq %rbx,(%rsp)
|
|
CFI_REL_OFFSET rbx, 0
|
|
movq %r12,1*8(%rsp)
|
|
CFI_REL_OFFSET r12, 1*8
|
|
+ movq %r13,2*8(%rsp)
|
|
+ CFI_REL_OFFSET r13, 2*8
|
|
|
|
movl $(4096/64)-5,%ecx
|
|
.p2align 4
|
|
@@ -37,7 +40,7 @@ ENTRY(copy_page)
|
|
movq 16 (%rsi), %rdx
|
|
movq 24 (%rsi), %r8
|
|
movq 32 (%rsi), %r9
|
|
- movq 40 (%rsi), %r10
|
|
+ movq 40 (%rsi), %r13
|
|
movq 48 (%rsi), %r11
|
|
movq 56 (%rsi), %r12
|
|
|
|
@@ -48,7 +51,7 @@ ENTRY(copy_page)
|
|
movq %rdx, 16 (%rdi)
|
|
movq %r8, 24 (%rdi)
|
|
movq %r9, 32 (%rdi)
|
|
- movq %r10, 40 (%rdi)
|
|
+ movq %r13, 40 (%rdi)
|
|
movq %r11, 48 (%rdi)
|
|
movq %r12, 56 (%rdi)
|
|
|
|
@@ -67,7 +70,7 @@ ENTRY(copy_page)
|
|
movq 16 (%rsi), %rdx
|
|
movq 24 (%rsi), %r8
|
|
movq 32 (%rsi), %r9
|
|
- movq 40 (%rsi), %r10
|
|
+ movq 40 (%rsi), %r13
|
|
movq 48 (%rsi), %r11
|
|
movq 56 (%rsi), %r12
|
|
|
|
@@ -76,7 +79,7 @@ ENTRY(copy_page)
|
|
movq %rdx, 16 (%rdi)
|
|
movq %r8, 24 (%rdi)
|
|
movq %r9, 32 (%rdi)
|
|
- movq %r10, 40 (%rdi)
|
|
+ movq %r13, 40 (%rdi)
|
|
movq %r11, 48 (%rdi)
|
|
movq %r12, 56 (%rdi)
|
|
|
|
@@ -89,8 +92,11 @@ ENTRY(copy_page)
|
|
CFI_RESTORE rbx
|
|
movq 1*8(%rsp),%r12
|
|
CFI_RESTORE r12
|
|
- addq $2*8,%rsp
|
|
- CFI_ADJUST_CFA_OFFSET -2*8
|
|
+ movq 2*8(%rsp),%r13
|
|
+ CFI_RESTORE r13
|
|
+ addq $3*8,%rsp
|
|
+ CFI_ADJUST_CFA_OFFSET -3*8
|
|
+ pax_force_retaddr
|
|
ret
|
|
.Lcopy_page_end:
|
|
CFI_ENDPROC
|
|
@@ -101,7 +107,7 @@ ENDPROC(copy_page)
|
|
|
|
#include <asm/cpufeature.h>
|
|
|
|
- .section .altinstr_replacement,"ax"
|
|
+ .section .altinstr_replacement,"a"
|
|
1: .byte 0xeb /* jmp <disp8> */
|
|
.byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
|
|
2:
|
|
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
|
|
index 0248402..821c786 100644
|
|
--- a/arch/x86/lib/copy_user_64.S
|
|
+++ b/arch/x86/lib/copy_user_64.S
|
|
@@ -16,6 +16,7 @@
|
|
#include <asm/thread_info.h>
|
|
#include <asm/cpufeature.h>
|
|
#include <asm/alternative-asm.h>
|
|
+#include <asm/pgtable.h>
|
|
|
|
/*
|
|
* By placing feature2 after feature1 in altinstructions section, we logically
|
|
@@ -29,7 +30,7 @@
|
|
.byte 0xe9 /* 32bit jump */
|
|
.long \orig-1f /* by default jump to orig */
|
|
1:
|
|
- .section .altinstr_replacement,"ax"
|
|
+ .section .altinstr_replacement,"a"
|
|
2: .byte 0xe9 /* near jump with 32bit immediate */
|
|
.long \alt1-1b /* offset */ /* or alternatively to alt1 */
|
|
3: .byte 0xe9 /* near jump with 32bit immediate */
|
|
@@ -71,47 +72,20 @@
|
|
#endif
|
|
.endm
|
|
|
|
-/* Standard copy_to_user with segment limit checking */
|
|
-ENTRY(_copy_to_user)
|
|
- CFI_STARTPROC
|
|
- GET_THREAD_INFO(%rax)
|
|
- movq %rdi,%rcx
|
|
- addq %rdx,%rcx
|
|
- jc bad_to_user
|
|
- cmpq TI_addr_limit(%rax),%rcx
|
|
- ja bad_to_user
|
|
- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
|
|
- copy_user_generic_unrolled,copy_user_generic_string, \
|
|
- copy_user_enhanced_fast_string
|
|
- CFI_ENDPROC
|
|
-ENDPROC(_copy_to_user)
|
|
-
|
|
-/* Standard copy_from_user with segment limit checking */
|
|
-ENTRY(_copy_from_user)
|
|
- CFI_STARTPROC
|
|
- GET_THREAD_INFO(%rax)
|
|
- movq %rsi,%rcx
|
|
- addq %rdx,%rcx
|
|
- jc bad_from_user
|
|
- cmpq TI_addr_limit(%rax),%rcx
|
|
- ja bad_from_user
|
|
- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
|
|
- copy_user_generic_unrolled,copy_user_generic_string, \
|
|
- copy_user_enhanced_fast_string
|
|
- CFI_ENDPROC
|
|
-ENDPROC(_copy_from_user)
|
|
-
|
|
.section .fixup,"ax"
|
|
/* must zero dest */
|
|
ENTRY(bad_from_user)
|
|
bad_from_user:
|
|
CFI_STARTPROC
|
|
+ testl %edx,%edx
|
|
+ js bad_to_user
|
|
movl %edx,%ecx
|
|
xorl %eax,%eax
|
|
rep
|
|
stosb
|
|
bad_to_user:
|
|
movl %edx,%eax
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
ENDPROC(bad_from_user)
|
|
@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
|
|
jz 17f
|
|
1: movq (%rsi),%r8
|
|
2: movq 1*8(%rsi),%r9
|
|
-3: movq 2*8(%rsi),%r10
|
|
+3: movq 2*8(%rsi),%rax
|
|
4: movq 3*8(%rsi),%r11
|
|
5: movq %r8,(%rdi)
|
|
6: movq %r9,1*8(%rdi)
|
|
-7: movq %r10,2*8(%rdi)
|
|
+7: movq %rax,2*8(%rdi)
|
|
8: movq %r11,3*8(%rdi)
|
|
9: movq 4*8(%rsi),%r8
|
|
10: movq 5*8(%rsi),%r9
|
|
-11: movq 6*8(%rsi),%r10
|
|
+11: movq 6*8(%rsi),%rax
|
|
12: movq 7*8(%rsi),%r11
|
|
13: movq %r8,4*8(%rdi)
|
|
14: movq %r9,5*8(%rdi)
|
|
-15: movq %r10,6*8(%rdi)
|
|
+15: movq %rax,6*8(%rdi)
|
|
16: movq %r11,7*8(%rdi)
|
|
leaq 64(%rsi),%rsi
|
|
leaq 64(%rdi),%rdi
|
|
@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
|
|
decl %ecx
|
|
jnz 21b
|
|
23: xor %eax,%eax
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
.section .fixup,"ax"
|
|
@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
|
|
3: rep
|
|
movsb
|
|
4: xorl %eax,%eax
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
.section .fixup,"ax"
|
|
@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
|
|
1: rep
|
|
movsb
|
|
2: xorl %eax,%eax
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
.section .fixup,"ax"
|
|
diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
|
|
index cb0c112..e3a6895 100644
|
|
--- a/arch/x86/lib/copy_user_nocache_64.S
|
|
+++ b/arch/x86/lib/copy_user_nocache_64.S
|
|
@@ -8,12 +8,14 @@
|
|
|
|
#include <linux/linkage.h>
|
|
#include <asm/dwarf2.h>
|
|
+#include <asm/alternative-asm.h>
|
|
|
|
#define FIX_ALIGNMENT 1
|
|
|
|
#include <asm/current.h>
|
|
#include <asm/asm-offsets.h>
|
|
#include <asm/thread_info.h>
|
|
+#include <asm/pgtable.h>
|
|
|
|
.macro ALIGN_DESTINATION
|
|
#ifdef FIX_ALIGNMENT
|
|
@@ -50,6 +52,15 @@
|
|
*/
|
|
ENTRY(__copy_user_nocache)
|
|
CFI_STARTPROC
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ mov $PAX_USER_SHADOW_BASE,%rcx
|
|
+ cmp %rcx,%rsi
|
|
+ jae 1f
|
|
+ add %rcx,%rsi
|
|
+1:
|
|
+#endif
|
|
+
|
|
cmpl $8,%edx
|
|
jb 20f /* less then 8 bytes, go to byte copy loop */
|
|
ALIGN_DESTINATION
|
|
@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
|
|
jz 17f
|
|
1: movq (%rsi),%r8
|
|
2: movq 1*8(%rsi),%r9
|
|
-3: movq 2*8(%rsi),%r10
|
|
+3: movq 2*8(%rsi),%rax
|
|
4: movq 3*8(%rsi),%r11
|
|
5: movnti %r8,(%rdi)
|
|
6: movnti %r9,1*8(%rdi)
|
|
-7: movnti %r10,2*8(%rdi)
|
|
+7: movnti %rax,2*8(%rdi)
|
|
8: movnti %r11,3*8(%rdi)
|
|
9: movq 4*8(%rsi),%r8
|
|
10: movq 5*8(%rsi),%r9
|
|
-11: movq 6*8(%rsi),%r10
|
|
+11: movq 6*8(%rsi),%rax
|
|
12: movq 7*8(%rsi),%r11
|
|
13: movnti %r8,4*8(%rdi)
|
|
14: movnti %r9,5*8(%rdi)
|
|
-15: movnti %r10,6*8(%rdi)
|
|
+15: movnti %rax,6*8(%rdi)
|
|
16: movnti %r11,7*8(%rdi)
|
|
leaq 64(%rsi),%rsi
|
|
leaq 64(%rdi),%rdi
|
|
@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
|
|
jnz 21b
|
|
23: xorl %eax,%eax
|
|
sfence
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
.section .fixup,"ax"
|
|
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
|
|
index fb903b7..c92b7f7 100644
|
|
--- a/arch/x86/lib/csum-copy_64.S
|
|
+++ b/arch/x86/lib/csum-copy_64.S
|
|
@@ -8,6 +8,7 @@
|
|
#include <linux/linkage.h>
|
|
#include <asm/dwarf2.h>
|
|
#include <asm/errno.h>
|
|
+#include <asm/alternative-asm.h>
|
|
|
|
/*
|
|
* Checksum copy with exception handling.
|
|
@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
|
|
CFI_RESTORE rbp
|
|
addq $7*8, %rsp
|
|
CFI_ADJUST_CFA_OFFSET -7*8
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
CFI_RESTORE_STATE
|
|
|
|
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
|
|
index 459b58a..9570bc7 100644
|
|
--- a/arch/x86/lib/csum-wrappers_64.c
|
|
+++ b/arch/x86/lib/csum-wrappers_64.c
|
|
@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
|
|
len -= 2;
|
|
}
|
|
}
|
|
- isum = csum_partial_copy_generic((__force const void *)src,
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
|
|
+ src += PAX_USER_SHADOW_BASE;
|
|
+#endif
|
|
+
|
|
+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
|
|
dst, len, isum, errp, NULL);
|
|
if (unlikely(*errp))
|
|
goto out_err;
|
|
@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
|
|
}
|
|
|
|
*errp = 0;
|
|
- return csum_partial_copy_generic(src, (void __force *)dst,
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
|
|
+ dst += PAX_USER_SHADOW_BASE;
|
|
+#endif
|
|
+
|
|
+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
|
|
len, isum, NULL, errp);
|
|
}
|
|
EXPORT_SYMBOL(csum_partial_copy_to_user);
|
|
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
|
|
index 51f1504..ddac4c1 100644
|
|
--- a/arch/x86/lib/getuser.S
|
|
+++ b/arch/x86/lib/getuser.S
|
|
@@ -33,15 +33,38 @@
|
|
#include <asm/asm-offsets.h>
|
|
#include <asm/thread_info.h>
|
|
#include <asm/asm.h>
|
|
+#include <asm/segment.h>
|
|
+#include <asm/pgtable.h>
|
|
+#include <asm/alternative-asm.h>
|
|
+
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+#define __copyuser_seg gs;
|
|
+#else
|
|
+#define __copyuser_seg
|
|
+#endif
|
|
|
|
.text
|
|
ENTRY(__get_user_1)
|
|
CFI_STARTPROC
|
|
+
|
|
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
GET_THREAD_INFO(%_ASM_DX)
|
|
cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
|
|
jae bad_get_user
|
|
-1: movzb (%_ASM_AX),%edx
|
|
+
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
|
|
+ cmp %_ASM_DX,%_ASM_AX
|
|
+ jae 1234f
|
|
+ add %_ASM_DX,%_ASM_AX
|
|
+1234:
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+
|
|
+1: __copyuser_seg movzb (%_ASM_AX),%edx
|
|
xor %eax,%eax
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
ENDPROC(__get_user_1)
|
|
@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
|
|
ENTRY(__get_user_2)
|
|
CFI_STARTPROC
|
|
add $1,%_ASM_AX
|
|
+
|
|
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
jc bad_get_user
|
|
GET_THREAD_INFO(%_ASM_DX)
|
|
cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
|
|
jae bad_get_user
|
|
-2: movzwl -1(%_ASM_AX),%edx
|
|
+
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
|
|
+ cmp %_ASM_DX,%_ASM_AX
|
|
+ jae 1234f
|
|
+ add %_ASM_DX,%_ASM_AX
|
|
+1234:
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+
|
|
+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
|
|
xor %eax,%eax
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
ENDPROC(__get_user_2)
|
|
@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
|
|
ENTRY(__get_user_4)
|
|
CFI_STARTPROC
|
|
add $3,%_ASM_AX
|
|
+
|
|
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
jc bad_get_user
|
|
GET_THREAD_INFO(%_ASM_DX)
|
|
cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
|
|
jae bad_get_user
|
|
-3: mov -3(%_ASM_AX),%edx
|
|
+
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
|
|
+ cmp %_ASM_DX,%_ASM_AX
|
|
+ jae 1234f
|
|
+ add %_ASM_DX,%_ASM_AX
|
|
+1234:
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+
|
|
+3: __copyuser_seg mov -3(%_ASM_AX),%edx
|
|
xor %eax,%eax
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
ENDPROC(__get_user_4)
|
|
@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
|
|
GET_THREAD_INFO(%_ASM_DX)
|
|
cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
|
|
jae bad_get_user
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
|
|
+ cmp %_ASM_DX,%_ASM_AX
|
|
+ jae 1234f
|
|
+ add %_ASM_DX,%_ASM_AX
|
|
+1234:
|
|
+#endif
|
|
+
|
|
4: movq -7(%_ASM_AX),%_ASM_DX
|
|
xor %eax,%eax
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
ENDPROC(__get_user_8)
|
|
@@ -91,6 +152,7 @@ bad_get_user:
|
|
CFI_STARTPROC
|
|
xor %edx,%edx
|
|
mov $(-EFAULT),%_ASM_AX
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
END(bad_get_user)
|
|
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
|
|
index b1e6c4b..21ae8fc 100644
|
|
--- a/arch/x86/lib/insn.c
|
|
+++ b/arch/x86/lib/insn.c
|
|
@@ -21,6 +21,11 @@
|
|
#include <linux/string.h>
|
|
#include <asm/inat.h>
|
|
#include <asm/insn.h>
|
|
+#ifdef __KERNEL__
|
|
+#include <asm/pgtable_types.h>
|
|
+#else
|
|
+#define ktla_ktva(addr) addr
|
|
+#endif
|
|
|
|
/* Verify next sizeof(t) bytes can be on the same instruction */
|
|
#define validate_next(t, insn, n) \
|
|
@@ -49,8 +54,8 @@
|
|
void insn_init(struct insn *insn, const void *kaddr, int x86_64)
|
|
{
|
|
memset(insn, 0, sizeof(*insn));
|
|
- insn->kaddr = kaddr;
|
|
- insn->next_byte = kaddr;
|
|
+ insn->kaddr = ktla_ktva(kaddr);
|
|
+ insn->next_byte = ktla_ktva(kaddr);
|
|
insn->x86_64 = x86_64 ? 1 : 0;
|
|
insn->opnd_bytes = 4;
|
|
if (x86_64)
|
|
diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
|
|
index 05a95e7..326f2fa 100644
|
|
--- a/arch/x86/lib/iomap_copy_64.S
|
|
+++ b/arch/x86/lib/iomap_copy_64.S
|
|
@@ -17,6 +17,7 @@
|
|
|
|
#include <linux/linkage.h>
|
|
#include <asm/dwarf2.h>
|
|
+#include <asm/alternative-asm.h>
|
|
|
|
/*
|
|
* override generic version in lib/iomap_copy.c
|
|
@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
|
|
CFI_STARTPROC
|
|
movl %edx,%ecx
|
|
rep movsd
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
ENDPROC(__iowrite32_copy)
|
|
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
|
|
index 1c273be..da9cc0ec 100644
|
|
--- a/arch/x86/lib/memcpy_64.S
|
|
+++ b/arch/x86/lib/memcpy_64.S
|
|
@@ -33,6 +33,7 @@
|
|
rep movsq
|
|
movl %edx, %ecx
|
|
rep movsb
|
|
+ pax_force_retaddr
|
|
ret
|
|
.Lmemcpy_e:
|
|
.previous
|
|
@@ -49,6 +50,7 @@
|
|
movq %rdi, %rax
|
|
movq %rdx, %rcx
|
|
rep movsb
|
|
+ pax_force_retaddr
|
|
ret
|
|
.Lmemcpy_e_e:
|
|
.previous
|
|
@@ -76,13 +78,13 @@ ENTRY(memcpy)
|
|
*/
|
|
movq 0*8(%rsi), %r8
|
|
movq 1*8(%rsi), %r9
|
|
- movq 2*8(%rsi), %r10
|
|
+ movq 2*8(%rsi), %rcx
|
|
movq 3*8(%rsi), %r11
|
|
leaq 4*8(%rsi), %rsi
|
|
|
|
movq %r8, 0*8(%rdi)
|
|
movq %r9, 1*8(%rdi)
|
|
- movq %r10, 2*8(%rdi)
|
|
+ movq %rcx, 2*8(%rdi)
|
|
movq %r11, 3*8(%rdi)
|
|
leaq 4*8(%rdi), %rdi
|
|
jae .Lcopy_forward_loop
|
|
@@ -105,12 +107,12 @@ ENTRY(memcpy)
|
|
subq $0x20, %rdx
|
|
movq -1*8(%rsi), %r8
|
|
movq -2*8(%rsi), %r9
|
|
- movq -3*8(%rsi), %r10
|
|
+ movq -3*8(%rsi), %rcx
|
|
movq -4*8(%rsi), %r11
|
|
leaq -4*8(%rsi), %rsi
|
|
movq %r8, -1*8(%rdi)
|
|
movq %r9, -2*8(%rdi)
|
|
- movq %r10, -3*8(%rdi)
|
|
+ movq %rcx, -3*8(%rdi)
|
|
movq %r11, -4*8(%rdi)
|
|
leaq -4*8(%rdi), %rdi
|
|
jae .Lcopy_backward_loop
|
|
@@ -130,12 +132,13 @@ ENTRY(memcpy)
|
|
*/
|
|
movq 0*8(%rsi), %r8
|
|
movq 1*8(%rsi), %r9
|
|
- movq -2*8(%rsi, %rdx), %r10
|
|
+ movq -2*8(%rsi, %rdx), %rcx
|
|
movq -1*8(%rsi, %rdx), %r11
|
|
movq %r8, 0*8(%rdi)
|
|
movq %r9, 1*8(%rdi)
|
|
- movq %r10, -2*8(%rdi, %rdx)
|
|
+ movq %rcx, -2*8(%rdi, %rdx)
|
|
movq %r11, -1*8(%rdi, %rdx)
|
|
+ pax_force_retaddr
|
|
retq
|
|
.p2align 4
|
|
.Lless_16bytes:
|
|
@@ -148,6 +151,7 @@ ENTRY(memcpy)
|
|
movq -1*8(%rsi, %rdx), %r9
|
|
movq %r8, 0*8(%rdi)
|
|
movq %r9, -1*8(%rdi, %rdx)
|
|
+ pax_force_retaddr
|
|
retq
|
|
.p2align 4
|
|
.Lless_8bytes:
|
|
@@ -161,6 +165,7 @@ ENTRY(memcpy)
|
|
movl -4(%rsi, %rdx), %r8d
|
|
movl %ecx, (%rdi)
|
|
movl %r8d, -4(%rdi, %rdx)
|
|
+ pax_force_retaddr
|
|
retq
|
|
.p2align 4
|
|
.Lless_3bytes:
|
|
@@ -179,6 +184,7 @@ ENTRY(memcpy)
|
|
movb %cl, (%rdi)
|
|
|
|
.Lend:
|
|
+ pax_force_retaddr
|
|
retq
|
|
CFI_ENDPROC
|
|
ENDPROC(memcpy)
|
|
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
|
|
index ee16461..c39c199 100644
|
|
--- a/arch/x86/lib/memmove_64.S
|
|
+++ b/arch/x86/lib/memmove_64.S
|
|
@@ -61,13 +61,13 @@ ENTRY(memmove)
|
|
5:
|
|
sub $0x20, %rdx
|
|
movq 0*8(%rsi), %r11
|
|
- movq 1*8(%rsi), %r10
|
|
+ movq 1*8(%rsi), %rcx
|
|
movq 2*8(%rsi), %r9
|
|
movq 3*8(%rsi), %r8
|
|
leaq 4*8(%rsi), %rsi
|
|
|
|
movq %r11, 0*8(%rdi)
|
|
- movq %r10, 1*8(%rdi)
|
|
+ movq %rcx, 1*8(%rdi)
|
|
movq %r9, 2*8(%rdi)
|
|
movq %r8, 3*8(%rdi)
|
|
leaq 4*8(%rdi), %rdi
|
|
@@ -81,10 +81,10 @@ ENTRY(memmove)
|
|
4:
|
|
movq %rdx, %rcx
|
|
movq -8(%rsi, %rdx), %r11
|
|
- lea -8(%rdi, %rdx), %r10
|
|
+ lea -8(%rdi, %rdx), %r9
|
|
shrq $3, %rcx
|
|
rep movsq
|
|
- movq %r11, (%r10)
|
|
+ movq %r11, (%r9)
|
|
jmp 13f
|
|
.Lmemmove_end_forward:
|
|
|
|
@@ -95,14 +95,14 @@ ENTRY(memmove)
|
|
7:
|
|
movq %rdx, %rcx
|
|
movq (%rsi), %r11
|
|
- movq %rdi, %r10
|
|
+ movq %rdi, %r9
|
|
leaq -8(%rsi, %rdx), %rsi
|
|
leaq -8(%rdi, %rdx), %rdi
|
|
shrq $3, %rcx
|
|
std
|
|
rep movsq
|
|
cld
|
|
- movq %r11, (%r10)
|
|
+ movq %r11, (%r9)
|
|
jmp 13f
|
|
|
|
/*
|
|
@@ -127,13 +127,13 @@ ENTRY(memmove)
|
|
8:
|
|
subq $0x20, %rdx
|
|
movq -1*8(%rsi), %r11
|
|
- movq -2*8(%rsi), %r10
|
|
+ movq -2*8(%rsi), %rcx
|
|
movq -3*8(%rsi), %r9
|
|
movq -4*8(%rsi), %r8
|
|
leaq -4*8(%rsi), %rsi
|
|
|
|
movq %r11, -1*8(%rdi)
|
|
- movq %r10, -2*8(%rdi)
|
|
+ movq %rcx, -2*8(%rdi)
|
|
movq %r9, -3*8(%rdi)
|
|
movq %r8, -4*8(%rdi)
|
|
leaq -4*8(%rdi), %rdi
|
|
@@ -151,11 +151,11 @@ ENTRY(memmove)
|
|
* Move data from 16 bytes to 31 bytes.
|
|
*/
|
|
movq 0*8(%rsi), %r11
|
|
- movq 1*8(%rsi), %r10
|
|
+ movq 1*8(%rsi), %rcx
|
|
movq -2*8(%rsi, %rdx), %r9
|
|
movq -1*8(%rsi, %rdx), %r8
|
|
movq %r11, 0*8(%rdi)
|
|
- movq %r10, 1*8(%rdi)
|
|
+ movq %rcx, 1*8(%rdi)
|
|
movq %r9, -2*8(%rdi, %rdx)
|
|
movq %r8, -1*8(%rdi, %rdx)
|
|
jmp 13f
|
|
@@ -167,9 +167,9 @@ ENTRY(memmove)
|
|
* Move data from 8 bytes to 15 bytes.
|
|
*/
|
|
movq 0*8(%rsi), %r11
|
|
- movq -1*8(%rsi, %rdx), %r10
|
|
+ movq -1*8(%rsi, %rdx), %r9
|
|
movq %r11, 0*8(%rdi)
|
|
- movq %r10, -1*8(%rdi, %rdx)
|
|
+ movq %r9, -1*8(%rdi, %rdx)
|
|
jmp 13f
|
|
10:
|
|
cmpq $4, %rdx
|
|
@@ -178,9 +178,9 @@ ENTRY(memmove)
|
|
* Move data from 4 bytes to 7 bytes.
|
|
*/
|
|
movl (%rsi), %r11d
|
|
- movl -4(%rsi, %rdx), %r10d
|
|
+ movl -4(%rsi, %rdx), %r9d
|
|
movl %r11d, (%rdi)
|
|
- movl %r10d, -4(%rdi, %rdx)
|
|
+ movl %r9d, -4(%rdi, %rdx)
|
|
jmp 13f
|
|
11:
|
|
cmp $2, %rdx
|
|
@@ -189,9 +189,9 @@ ENTRY(memmove)
|
|
* Move data from 2 bytes to 3 bytes.
|
|
*/
|
|
movw (%rsi), %r11w
|
|
- movw -2(%rsi, %rdx), %r10w
|
|
+ movw -2(%rsi, %rdx), %r9w
|
|
movw %r11w, (%rdi)
|
|
- movw %r10w, -2(%rdi, %rdx)
|
|
+ movw %r9w, -2(%rdi, %rdx)
|
|
jmp 13f
|
|
12:
|
|
cmp $1, %rdx
|
|
@@ -202,6 +202,7 @@ ENTRY(memmove)
|
|
movb (%rsi), %r11b
|
|
movb %r11b, (%rdi)
|
|
13:
|
|
+ pax_force_retaddr
|
|
retq
|
|
CFI_ENDPROC
|
|
|
|
@@ -210,6 +211,7 @@ ENTRY(memmove)
|
|
/* Forward moving data. */
|
|
movq %rdx, %rcx
|
|
rep movsb
|
|
+ pax_force_retaddr
|
|
retq
|
|
.Lmemmove_end_forward_efs:
|
|
.previous
|
|
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
|
|
index 2dcb380..963660a 100644
|
|
--- a/arch/x86/lib/memset_64.S
|
|
+++ b/arch/x86/lib/memset_64.S
|
|
@@ -30,6 +30,7 @@
|
|
movl %edx,%ecx
|
|
rep stosb
|
|
movq %r9,%rax
|
|
+ pax_force_retaddr
|
|
ret
|
|
.Lmemset_e:
|
|
.previous
|
|
@@ -52,6 +53,7 @@
|
|
movq %rdx,%rcx
|
|
rep stosb
|
|
movq %r9,%rax
|
|
+ pax_force_retaddr
|
|
ret
|
|
.Lmemset_e_e:
|
|
.previous
|
|
@@ -59,7 +61,7 @@
|
|
ENTRY(memset)
|
|
ENTRY(__memset)
|
|
CFI_STARTPROC
|
|
- movq %rdi,%r10
|
|
+ movq %rdi,%r11
|
|
|
|
/* expand byte value */
|
|
movzbl %sil,%ecx
|
|
@@ -117,7 +119,8 @@ ENTRY(__memset)
|
|
jnz .Lloop_1
|
|
|
|
.Lende:
|
|
- movq %r10,%rax
|
|
+ movq %r11,%rax
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
CFI_RESTORE_STATE
|
|
diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
|
|
index c9f2d9b..e7fd2c0 100644
|
|
--- a/arch/x86/lib/mmx_32.c
|
|
+++ b/arch/x86/lib/mmx_32.c
|
|
@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
|
|
{
|
|
void *p;
|
|
int i;
|
|
+ unsigned long cr0;
|
|
|
|
if (unlikely(in_interrupt()))
|
|
return __memcpy(to, from, len);
|
|
@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
|
|
kernel_fpu_begin();
|
|
|
|
__asm__ __volatile__ (
|
|
- "1: prefetch (%0)\n" /* This set is 28 bytes */
|
|
- " prefetch 64(%0)\n"
|
|
- " prefetch 128(%0)\n"
|
|
- " prefetch 192(%0)\n"
|
|
- " prefetch 256(%0)\n"
|
|
+ "1: prefetch (%1)\n" /* This set is 28 bytes */
|
|
+ " prefetch 64(%1)\n"
|
|
+ " prefetch 128(%1)\n"
|
|
+ " prefetch 192(%1)\n"
|
|
+ " prefetch 256(%1)\n"
|
|
"2: \n"
|
|
".section .fixup, \"ax\"\n"
|
|
- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
|
|
+ "3: \n"
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ " movl %%cr0, %0\n"
|
|
+ " movl %0, %%eax\n"
|
|
+ " andl $0xFFFEFFFF, %%eax\n"
|
|
+ " movl %%eax, %%cr0\n"
|
|
+#endif
|
|
+
|
|
+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ " movl %0, %%cr0\n"
|
|
+#endif
|
|
+
|
|
" jmp 2b\n"
|
|
".previous\n"
|
|
_ASM_EXTABLE(1b, 3b)
|
|
- : : "r" (from));
|
|
+ : "=&r" (cr0) : "r" (from) : "ax");
|
|
|
|
for ( ; i > 5; i--) {
|
|
__asm__ __volatile__ (
|
|
- "1: prefetch 320(%0)\n"
|
|
- "2: movq (%0), %%mm0\n"
|
|
- " movq 8(%0), %%mm1\n"
|
|
- " movq 16(%0), %%mm2\n"
|
|
- " movq 24(%0), %%mm3\n"
|
|
- " movq %%mm0, (%1)\n"
|
|
- " movq %%mm1, 8(%1)\n"
|
|
- " movq %%mm2, 16(%1)\n"
|
|
- " movq %%mm3, 24(%1)\n"
|
|
- " movq 32(%0), %%mm0\n"
|
|
- " movq 40(%0), %%mm1\n"
|
|
- " movq 48(%0), %%mm2\n"
|
|
- " movq 56(%0), %%mm3\n"
|
|
- " movq %%mm0, 32(%1)\n"
|
|
- " movq %%mm1, 40(%1)\n"
|
|
- " movq %%mm2, 48(%1)\n"
|
|
- " movq %%mm3, 56(%1)\n"
|
|
+ "1: prefetch 320(%1)\n"
|
|
+ "2: movq (%1), %%mm0\n"
|
|
+ " movq 8(%1), %%mm1\n"
|
|
+ " movq 16(%1), %%mm2\n"
|
|
+ " movq 24(%1), %%mm3\n"
|
|
+ " movq %%mm0, (%2)\n"
|
|
+ " movq %%mm1, 8(%2)\n"
|
|
+ " movq %%mm2, 16(%2)\n"
|
|
+ " movq %%mm3, 24(%2)\n"
|
|
+ " movq 32(%1), %%mm0\n"
|
|
+ " movq 40(%1), %%mm1\n"
|
|
+ " movq 48(%1), %%mm2\n"
|
|
+ " movq 56(%1), %%mm3\n"
|
|
+ " movq %%mm0, 32(%2)\n"
|
|
+ " movq %%mm1, 40(%2)\n"
|
|
+ " movq %%mm2, 48(%2)\n"
|
|
+ " movq %%mm3, 56(%2)\n"
|
|
".section .fixup, \"ax\"\n"
|
|
- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
|
|
+ "3:\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ " movl %%cr0, %0\n"
|
|
+ " movl %0, %%eax\n"
|
|
+ " andl $0xFFFEFFFF, %%eax\n"
|
|
+ " movl %%eax, %%cr0\n"
|
|
+#endif
|
|
+
|
|
+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ " movl %0, %%cr0\n"
|
|
+#endif
|
|
+
|
|
" jmp 2b\n"
|
|
".previous\n"
|
|
_ASM_EXTABLE(1b, 3b)
|
|
- : : "r" (from), "r" (to) : "memory");
|
|
+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
|
|
|
|
from += 64;
|
|
to += 64;
|
|
@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
|
|
static void fast_copy_page(void *to, void *from)
|
|
{
|
|
int i;
|
|
+ unsigned long cr0;
|
|
|
|
kernel_fpu_begin();
|
|
|
|
@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
|
|
* but that is for later. -AV
|
|
*/
|
|
__asm__ __volatile__(
|
|
- "1: prefetch (%0)\n"
|
|
- " prefetch 64(%0)\n"
|
|
- " prefetch 128(%0)\n"
|
|
- " prefetch 192(%0)\n"
|
|
- " prefetch 256(%0)\n"
|
|
+ "1: prefetch (%1)\n"
|
|
+ " prefetch 64(%1)\n"
|
|
+ " prefetch 128(%1)\n"
|
|
+ " prefetch 192(%1)\n"
|
|
+ " prefetch 256(%1)\n"
|
|
"2: \n"
|
|
".section .fixup, \"ax\"\n"
|
|
- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
|
|
+ "3: \n"
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ " movl %%cr0, %0\n"
|
|
+ " movl %0, %%eax\n"
|
|
+ " andl $0xFFFEFFFF, %%eax\n"
|
|
+ " movl %%eax, %%cr0\n"
|
|
+#endif
|
|
+
|
|
+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ " movl %0, %%cr0\n"
|
|
+#endif
|
|
+
|
|
" jmp 2b\n"
|
|
".previous\n"
|
|
- _ASM_EXTABLE(1b, 3b) : : "r" (from));
|
|
+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
|
|
|
|
for (i = 0; i < (4096-320)/64; i++) {
|
|
__asm__ __volatile__ (
|
|
- "1: prefetch 320(%0)\n"
|
|
- "2: movq (%0), %%mm0\n"
|
|
- " movntq %%mm0, (%1)\n"
|
|
- " movq 8(%0), %%mm1\n"
|
|
- " movntq %%mm1, 8(%1)\n"
|
|
- " movq 16(%0), %%mm2\n"
|
|
- " movntq %%mm2, 16(%1)\n"
|
|
- " movq 24(%0), %%mm3\n"
|
|
- " movntq %%mm3, 24(%1)\n"
|
|
- " movq 32(%0), %%mm4\n"
|
|
- " movntq %%mm4, 32(%1)\n"
|
|
- " movq 40(%0), %%mm5\n"
|
|
- " movntq %%mm5, 40(%1)\n"
|
|
- " movq 48(%0), %%mm6\n"
|
|
- " movntq %%mm6, 48(%1)\n"
|
|
- " movq 56(%0), %%mm7\n"
|
|
- " movntq %%mm7, 56(%1)\n"
|
|
+ "1: prefetch 320(%1)\n"
|
|
+ "2: movq (%1), %%mm0\n"
|
|
+ " movntq %%mm0, (%2)\n"
|
|
+ " movq 8(%1), %%mm1\n"
|
|
+ " movntq %%mm1, 8(%2)\n"
|
|
+ " movq 16(%1), %%mm2\n"
|
|
+ " movntq %%mm2, 16(%2)\n"
|
|
+ " movq 24(%1), %%mm3\n"
|
|
+ " movntq %%mm3, 24(%2)\n"
|
|
+ " movq 32(%1), %%mm4\n"
|
|
+ " movntq %%mm4, 32(%2)\n"
|
|
+ " movq 40(%1), %%mm5\n"
|
|
+ " movntq %%mm5, 40(%2)\n"
|
|
+ " movq 48(%1), %%mm6\n"
|
|
+ " movntq %%mm6, 48(%2)\n"
|
|
+ " movq 56(%1), %%mm7\n"
|
|
+ " movntq %%mm7, 56(%2)\n"
|
|
".section .fixup, \"ax\"\n"
|
|
- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
|
|
+ "3:\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ " movl %%cr0, %0\n"
|
|
+ " movl %0, %%eax\n"
|
|
+ " andl $0xFFFEFFFF, %%eax\n"
|
|
+ " movl %%eax, %%cr0\n"
|
|
+#endif
|
|
+
|
|
+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ " movl %0, %%cr0\n"
|
|
+#endif
|
|
+
|
|
" jmp 2b\n"
|
|
".previous\n"
|
|
- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
|
|
+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
|
|
|
|
from += 64;
|
|
to += 64;
|
|
@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
|
|
static void fast_copy_page(void *to, void *from)
|
|
{
|
|
int i;
|
|
+ unsigned long cr0;
|
|
|
|
kernel_fpu_begin();
|
|
|
|
__asm__ __volatile__ (
|
|
- "1: prefetch (%0)\n"
|
|
- " prefetch 64(%0)\n"
|
|
- " prefetch 128(%0)\n"
|
|
- " prefetch 192(%0)\n"
|
|
- " prefetch 256(%0)\n"
|
|
+ "1: prefetch (%1)\n"
|
|
+ " prefetch 64(%1)\n"
|
|
+ " prefetch 128(%1)\n"
|
|
+ " prefetch 192(%1)\n"
|
|
+ " prefetch 256(%1)\n"
|
|
"2: \n"
|
|
".section .fixup, \"ax\"\n"
|
|
- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
|
|
+ "3: \n"
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ " movl %%cr0, %0\n"
|
|
+ " movl %0, %%eax\n"
|
|
+ " andl $0xFFFEFFFF, %%eax\n"
|
|
+ " movl %%eax, %%cr0\n"
|
|
+#endif
|
|
+
|
|
+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ " movl %0, %%cr0\n"
|
|
+#endif
|
|
+
|
|
" jmp 2b\n"
|
|
".previous\n"
|
|
- _ASM_EXTABLE(1b, 3b) : : "r" (from));
|
|
+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
|
|
|
|
for (i = 0; i < 4096/64; i++) {
|
|
__asm__ __volatile__ (
|
|
- "1: prefetch 320(%0)\n"
|
|
- "2: movq (%0), %%mm0\n"
|
|
- " movq 8(%0), %%mm1\n"
|
|
- " movq 16(%0), %%mm2\n"
|
|
- " movq 24(%0), %%mm3\n"
|
|
- " movq %%mm0, (%1)\n"
|
|
- " movq %%mm1, 8(%1)\n"
|
|
- " movq %%mm2, 16(%1)\n"
|
|
- " movq %%mm3, 24(%1)\n"
|
|
- " movq 32(%0), %%mm0\n"
|
|
- " movq 40(%0), %%mm1\n"
|
|
- " movq 48(%0), %%mm2\n"
|
|
- " movq 56(%0), %%mm3\n"
|
|
- " movq %%mm0, 32(%1)\n"
|
|
- " movq %%mm1, 40(%1)\n"
|
|
- " movq %%mm2, 48(%1)\n"
|
|
- " movq %%mm3, 56(%1)\n"
|
|
+ "1: prefetch 320(%1)\n"
|
|
+ "2: movq (%1), %%mm0\n"
|
|
+ " movq 8(%1), %%mm1\n"
|
|
+ " movq 16(%1), %%mm2\n"
|
|
+ " movq 24(%1), %%mm3\n"
|
|
+ " movq %%mm0, (%2)\n"
|
|
+ " movq %%mm1, 8(%2)\n"
|
|
+ " movq %%mm2, 16(%2)\n"
|
|
+ " movq %%mm3, 24(%2)\n"
|
|
+ " movq 32(%1), %%mm0\n"
|
|
+ " movq 40(%1), %%mm1\n"
|
|
+ " movq 48(%1), %%mm2\n"
|
|
+ " movq 56(%1), %%mm3\n"
|
|
+ " movq %%mm0, 32(%2)\n"
|
|
+ " movq %%mm1, 40(%2)\n"
|
|
+ " movq %%mm2, 48(%2)\n"
|
|
+ " movq %%mm3, 56(%2)\n"
|
|
".section .fixup, \"ax\"\n"
|
|
- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
|
|
+ "3:\n"
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ " movl %%cr0, %0\n"
|
|
+ " movl %0, %%eax\n"
|
|
+ " andl $0xFFFEFFFF, %%eax\n"
|
|
+ " movl %%eax, %%cr0\n"
|
|
+#endif
|
|
+
|
|
+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ " movl %0, %%cr0\n"
|
|
+#endif
|
|
+
|
|
" jmp 2b\n"
|
|
".previous\n"
|
|
_ASM_EXTABLE(1b, 3b)
|
|
- : : "r" (from), "r" (to) : "memory");
|
|
+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
|
|
|
|
from += 64;
|
|
to += 64;
|
|
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
|
|
index 69fa106..adda88b 100644
|
|
--- a/arch/x86/lib/msr-reg.S
|
|
+++ b/arch/x86/lib/msr-reg.S
|
|
@@ -3,6 +3,7 @@
|
|
#include <asm/dwarf2.h>
|
|
#include <asm/asm.h>
|
|
#include <asm/msr.h>
|
|
+#include <asm/alternative-asm.h>
|
|
|
|
#ifdef CONFIG_X86_64
|
|
/*
|
|
@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
|
|
CFI_STARTPROC
|
|
pushq_cfi %rbx
|
|
pushq_cfi %rbp
|
|
- movq %rdi, %r10 /* Save pointer */
|
|
+ movq %rdi, %r9 /* Save pointer */
|
|
xorl %r11d, %r11d /* Return value */
|
|
movl (%rdi), %eax
|
|
movl 4(%rdi), %ecx
|
|
@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
|
|
movl 28(%rdi), %edi
|
|
CFI_REMEMBER_STATE
|
|
1: \op
|
|
-2: movl %eax, (%r10)
|
|
+2: movl %eax, (%r9)
|
|
movl %r11d, %eax /* Return value */
|
|
- movl %ecx, 4(%r10)
|
|
- movl %edx, 8(%r10)
|
|
- movl %ebx, 12(%r10)
|
|
- movl %ebp, 20(%r10)
|
|
- movl %esi, 24(%r10)
|
|
- movl %edi, 28(%r10)
|
|
+ movl %ecx, 4(%r9)
|
|
+ movl %edx, 8(%r9)
|
|
+ movl %ebx, 12(%r9)
|
|
+ movl %ebp, 20(%r9)
|
|
+ movl %esi, 24(%r9)
|
|
+ movl %edi, 28(%r9)
|
|
popq_cfi %rbp
|
|
popq_cfi %rbx
|
|
+ pax_force_retaddr
|
|
ret
|
|
3:
|
|
CFI_RESTORE_STATE
|
|
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
|
|
index 36b0d15..d381858 100644
|
|
--- a/arch/x86/lib/putuser.S
|
|
+++ b/arch/x86/lib/putuser.S
|
|
@@ -15,7 +15,9 @@
|
|
#include <asm/thread_info.h>
|
|
#include <asm/errno.h>
|
|
#include <asm/asm.h>
|
|
-
|
|
+#include <asm/segment.h>
|
|
+#include <asm/pgtable.h>
|
|
+#include <asm/alternative-asm.h>
|
|
|
|
/*
|
|
* __put_user_X
|
|
@@ -29,52 +31,119 @@
|
|
* as they get called from within inline assembly.
|
|
*/
|
|
|
|
-#define ENTER CFI_STARTPROC ; \
|
|
- GET_THREAD_INFO(%_ASM_BX)
|
|
-#define EXIT ret ; \
|
|
+#define ENTER CFI_STARTPROC
|
|
+#define EXIT pax_force_retaddr; ret ; \
|
|
CFI_ENDPROC
|
|
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+#define _DEST %_ASM_CX,%_ASM_BX
|
|
+#else
|
|
+#define _DEST %_ASM_CX
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+#define __copyuser_seg gs;
|
|
+#else
|
|
+#define __copyuser_seg
|
|
+#endif
|
|
+
|
|
.text
|
|
ENTRY(__put_user_1)
|
|
ENTER
|
|
+
|
|
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ GET_THREAD_INFO(%_ASM_BX)
|
|
cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
|
|
jae bad_put_user
|
|
-1: movb %al,(%_ASM_CX)
|
|
+
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
|
|
+ cmp %_ASM_BX,%_ASM_CX
|
|
+ jb 1234f
|
|
+ xor %ebx,%ebx
|
|
+1234:
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+
|
|
+1: __copyuser_seg movb %al,(_DEST)
|
|
xor %eax,%eax
|
|
EXIT
|
|
ENDPROC(__put_user_1)
|
|
|
|
ENTRY(__put_user_2)
|
|
ENTER
|
|
+
|
|
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ GET_THREAD_INFO(%_ASM_BX)
|
|
mov TI_addr_limit(%_ASM_BX),%_ASM_BX
|
|
sub $1,%_ASM_BX
|
|
cmp %_ASM_BX,%_ASM_CX
|
|
jae bad_put_user
|
|
-2: movw %ax,(%_ASM_CX)
|
|
+
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
|
|
+ cmp %_ASM_BX,%_ASM_CX
|
|
+ jb 1234f
|
|
+ xor %ebx,%ebx
|
|
+1234:
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+
|
|
+2: __copyuser_seg movw %ax,(_DEST)
|
|
xor %eax,%eax
|
|
EXIT
|
|
ENDPROC(__put_user_2)
|
|
|
|
ENTRY(__put_user_4)
|
|
ENTER
|
|
+
|
|
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ GET_THREAD_INFO(%_ASM_BX)
|
|
mov TI_addr_limit(%_ASM_BX),%_ASM_BX
|
|
sub $3,%_ASM_BX
|
|
cmp %_ASM_BX,%_ASM_CX
|
|
jae bad_put_user
|
|
-3: movl %eax,(%_ASM_CX)
|
|
+
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
|
|
+ cmp %_ASM_BX,%_ASM_CX
|
|
+ jb 1234f
|
|
+ xor %ebx,%ebx
|
|
+1234:
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+
|
|
+3: __copyuser_seg movl %eax,(_DEST)
|
|
xor %eax,%eax
|
|
EXIT
|
|
ENDPROC(__put_user_4)
|
|
|
|
ENTRY(__put_user_8)
|
|
ENTER
|
|
+
|
|
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ GET_THREAD_INFO(%_ASM_BX)
|
|
mov TI_addr_limit(%_ASM_BX),%_ASM_BX
|
|
sub $7,%_ASM_BX
|
|
cmp %_ASM_BX,%_ASM_CX
|
|
jae bad_put_user
|
|
-4: mov %_ASM_AX,(%_ASM_CX)
|
|
+
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
|
|
+ cmp %_ASM_BX,%_ASM_CX
|
|
+ jb 1234f
|
|
+ xor %ebx,%ebx
|
|
+1234:
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+
|
|
+4: __copyuser_seg mov %_ASM_AX,(_DEST)
|
|
#ifdef CONFIG_X86_32
|
|
-5: movl %edx,4(%_ASM_CX)
|
|
+5: __copyuser_seg movl %edx,4(_DEST)
|
|
#endif
|
|
xor %eax,%eax
|
|
EXIT
|
|
diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
|
|
index 1cad221..de671ee 100644
|
|
--- a/arch/x86/lib/rwlock.S
|
|
+++ b/arch/x86/lib/rwlock.S
|
|
@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
|
|
FRAME
|
|
0: LOCK_PREFIX
|
|
WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ jno 1234f
|
|
+ LOCK_PREFIX
|
|
+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
|
|
+ int $4
|
|
+1234:
|
|
+ _ASM_EXTABLE(1234b, 1234b)
|
|
+#endif
|
|
+
|
|
1: rep; nop
|
|
cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
|
|
jne 1b
|
|
LOCK_PREFIX
|
|
WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ jno 1234f
|
|
+ LOCK_PREFIX
|
|
+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
|
|
+ int $4
|
|
+1234:
|
|
+ _ASM_EXTABLE(1234b, 1234b)
|
|
+#endif
|
|
+
|
|
jnz 0b
|
|
ENDFRAME
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
END(__write_lock_failed)
|
|
@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
|
|
FRAME
|
|
0: LOCK_PREFIX
|
|
READ_LOCK_SIZE(inc) (%__lock_ptr)
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ jno 1234f
|
|
+ LOCK_PREFIX
|
|
+ READ_LOCK_SIZE(dec) (%__lock_ptr)
|
|
+ int $4
|
|
+1234:
|
|
+ _ASM_EXTABLE(1234b, 1234b)
|
|
+#endif
|
|
+
|
|
1: rep; nop
|
|
READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
|
|
js 1b
|
|
LOCK_PREFIX
|
|
READ_LOCK_SIZE(dec) (%__lock_ptr)
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+ jno 1234f
|
|
+ LOCK_PREFIX
|
|
+ READ_LOCK_SIZE(inc) (%__lock_ptr)
|
|
+ int $4
|
|
+1234:
|
|
+ _ASM_EXTABLE(1234b, 1234b)
|
|
+#endif
|
|
+
|
|
js 0b
|
|
ENDFRAME
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
END(__read_lock_failed)
|
|
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
|
|
index 5dff5f0..cadebf4 100644
|
|
--- a/arch/x86/lib/rwsem.S
|
|
+++ b/arch/x86/lib/rwsem.S
|
|
@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
|
|
__ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
|
|
CFI_RESTORE __ASM_REG(dx)
|
|
restore_common_regs
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
ENDPROC(call_rwsem_down_read_failed)
|
|
@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
|
|
movq %rax,%rdi
|
|
call rwsem_down_write_failed
|
|
restore_common_regs
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
ENDPROC(call_rwsem_down_write_failed)
|
|
@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
|
|
movq %rax,%rdi
|
|
call rwsem_wake
|
|
restore_common_regs
|
|
-1: ret
|
|
+1: pax_force_retaddr
|
|
+ ret
|
|
CFI_ENDPROC
|
|
ENDPROC(call_rwsem_wake)
|
|
|
|
@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
|
|
__ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
|
|
CFI_RESTORE __ASM_REG(dx)
|
|
restore_common_regs
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
ENDPROC(call_rwsem_downgrade_wake)
|
|
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
|
|
index a63efd6..ccecad8 100644
|
|
--- a/arch/x86/lib/thunk_64.S
|
|
+++ b/arch/x86/lib/thunk_64.S
|
|
@@ -8,6 +8,7 @@
|
|
#include <linux/linkage.h>
|
|
#include <asm/dwarf2.h>
|
|
#include <asm/calling.h>
|
|
+#include <asm/alternative-asm.h>
|
|
|
|
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
|
|
.macro THUNK name, func, put_ret_addr_in_rdi=0
|
|
@@ -41,5 +42,6 @@
|
|
SAVE_ARGS
|
|
restore:
|
|
RESTORE_ARGS
|
|
+ pax_force_retaddr
|
|
ret
|
|
CFI_ENDPROC
|
|
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
|
|
index ef2a6a5..dc7f3dd 100644
|
|
--- a/arch/x86/lib/usercopy_32.c
|
|
+++ b/arch/x86/lib/usercopy_32.c
|
|
@@ -41,10 +41,12 @@ do { \
|
|
int __d0; \
|
|
might_fault(); \
|
|
__asm__ __volatile__( \
|
|
+ __COPYUSER_SET_ES \
|
|
"0: rep; stosl\n" \
|
|
" movl %2,%0\n" \
|
|
"1: rep; stosb\n" \
|
|
"2:\n" \
|
|
+ __COPYUSER_RESTORE_ES \
|
|
".section .fixup,\"ax\"\n" \
|
|
"3: lea 0(%2,%0,4),%0\n" \
|
|
" jmp 2b\n" \
|
|
@@ -113,6 +115,7 @@ long strnlen_user(const char __user *s, long n)
|
|
might_fault();
|
|
|
|
__asm__ __volatile__(
|
|
+ __COPYUSER_SET_ES
|
|
" testl %0, %0\n"
|
|
" jz 3f\n"
|
|
" andl %0,%%ecx\n"
|
|
@@ -121,6 +124,7 @@ long strnlen_user(const char __user *s, long n)
|
|
" subl %%ecx,%0\n"
|
|
" addl %0,%%eax\n"
|
|
"1:\n"
|
|
+ __COPYUSER_RESTORE_ES
|
|
".section .fixup,\"ax\"\n"
|
|
"2: xorl %%eax,%%eax\n"
|
|
" jmp 1b\n"
|
|
@@ -140,7 +144,7 @@ EXPORT_SYMBOL(strnlen_user);
|
|
|
|
#ifdef CONFIG_X86_INTEL_USERCOPY
|
|
static unsigned long
|
|
-__copy_user_intel(void __user *to, const void *from, unsigned long size)
|
|
+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
|
|
{
|
|
int d0, d1;
|
|
__asm__ __volatile__(
|
|
@@ -152,36 +156,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
|
|
" .align 2,0x90\n"
|
|
"3: movl 0(%4), %%eax\n"
|
|
"4: movl 4(%4), %%edx\n"
|
|
- "5: movl %%eax, 0(%3)\n"
|
|
- "6: movl %%edx, 4(%3)\n"
|
|
+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
|
|
+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
|
|
"7: movl 8(%4), %%eax\n"
|
|
"8: movl 12(%4),%%edx\n"
|
|
- "9: movl %%eax, 8(%3)\n"
|
|
- "10: movl %%edx, 12(%3)\n"
|
|
+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
|
|
+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
|
|
"11: movl 16(%4), %%eax\n"
|
|
"12: movl 20(%4), %%edx\n"
|
|
- "13: movl %%eax, 16(%3)\n"
|
|
- "14: movl %%edx, 20(%3)\n"
|
|
+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
|
|
+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
|
|
"15: movl 24(%4), %%eax\n"
|
|
"16: movl 28(%4), %%edx\n"
|
|
- "17: movl %%eax, 24(%3)\n"
|
|
- "18: movl %%edx, 28(%3)\n"
|
|
+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
|
|
+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
|
|
"19: movl 32(%4), %%eax\n"
|
|
"20: movl 36(%4), %%edx\n"
|
|
- "21: movl %%eax, 32(%3)\n"
|
|
- "22: movl %%edx, 36(%3)\n"
|
|
+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
|
|
+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
|
|
"23: movl 40(%4), %%eax\n"
|
|
"24: movl 44(%4), %%edx\n"
|
|
- "25: movl %%eax, 40(%3)\n"
|
|
- "26: movl %%edx, 44(%3)\n"
|
|
+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
|
|
+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
|
|
"27: movl 48(%4), %%eax\n"
|
|
"28: movl 52(%4), %%edx\n"
|
|
- "29: movl %%eax, 48(%3)\n"
|
|
- "30: movl %%edx, 52(%3)\n"
|
|
+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
|
|
+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
|
|
"31: movl 56(%4), %%eax\n"
|
|
"32: movl 60(%4), %%edx\n"
|
|
- "33: movl %%eax, 56(%3)\n"
|
|
- "34: movl %%edx, 60(%3)\n"
|
|
+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
|
|
+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
|
|
" addl $-64, %0\n"
|
|
" addl $64, %4\n"
|
|
" addl $64, %3\n"
|
|
@@ -191,10 +195,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
|
|
" shrl $2, %0\n"
|
|
" andl $3, %%eax\n"
|
|
" cld\n"
|
|
+ __COPYUSER_SET_ES
|
|
"99: rep; movsl\n"
|
|
"36: movl %%eax, %0\n"
|
|
"37: rep; movsb\n"
|
|
"100:\n"
|
|
+ __COPYUSER_RESTORE_ES
|
|
".section .fixup,\"ax\"\n"
|
|
"101: lea 0(%%eax,%0,4),%0\n"
|
|
" jmp 100b\n"
|
|
@@ -247,46 +253,153 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
|
|
}
|
|
|
|
static unsigned long
|
|
+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
|
|
+{
|
|
+ int d0, d1;
|
|
+ __asm__ __volatile__(
|
|
+ " .align 2,0x90\n"
|
|
+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
|
|
+ " cmpl $67, %0\n"
|
|
+ " jbe 3f\n"
|
|
+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
|
|
+ " .align 2,0x90\n"
|
|
+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
|
|
+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
|
|
+ "5: movl %%eax, 0(%3)\n"
|
|
+ "6: movl %%edx, 4(%3)\n"
|
|
+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
|
|
+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
|
|
+ "9: movl %%eax, 8(%3)\n"
|
|
+ "10: movl %%edx, 12(%3)\n"
|
|
+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
|
|
+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
|
|
+ "13: movl %%eax, 16(%3)\n"
|
|
+ "14: movl %%edx, 20(%3)\n"
|
|
+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
|
|
+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
|
|
+ "17: movl %%eax, 24(%3)\n"
|
|
+ "18: movl %%edx, 28(%3)\n"
|
|
+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
|
|
+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
|
|
+ "21: movl %%eax, 32(%3)\n"
|
|
+ "22: movl %%edx, 36(%3)\n"
|
|
+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
|
|
+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
|
|
+ "25: movl %%eax, 40(%3)\n"
|
|
+ "26: movl %%edx, 44(%3)\n"
|
|
+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
|
|
+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
|
|
+ "29: movl %%eax, 48(%3)\n"
|
|
+ "30: movl %%edx, 52(%3)\n"
|
|
+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
|
|
+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
|
|
+ "33: movl %%eax, 56(%3)\n"
|
|
+ "34: movl %%edx, 60(%3)\n"
|
|
+ " addl $-64, %0\n"
|
|
+ " addl $64, %4\n"
|
|
+ " addl $64, %3\n"
|
|
+ " cmpl $63, %0\n"
|
|
+ " ja 1b\n"
|
|
+ "35: movl %0, %%eax\n"
|
|
+ " shrl $2, %0\n"
|
|
+ " andl $3, %%eax\n"
|
|
+ " cld\n"
|
|
+ "99: rep; "__copyuser_seg" movsl\n"
|
|
+ "36: movl %%eax, %0\n"
|
|
+ "37: rep; "__copyuser_seg" movsb\n"
|
|
+ "100:\n"
|
|
+ ".section .fixup,\"ax\"\n"
|
|
+ "101: lea 0(%%eax,%0,4),%0\n"
|
|
+ " jmp 100b\n"
|
|
+ ".previous\n"
|
|
+ ".section __ex_table,\"a\"\n"
|
|
+ " .align 4\n"
|
|
+ " .long 1b,100b\n"
|
|
+ " .long 2b,100b\n"
|
|
+ " .long 3b,100b\n"
|
|
+ " .long 4b,100b\n"
|
|
+ " .long 5b,100b\n"
|
|
+ " .long 6b,100b\n"
|
|
+ " .long 7b,100b\n"
|
|
+ " .long 8b,100b\n"
|
|
+ " .long 9b,100b\n"
|
|
+ " .long 10b,100b\n"
|
|
+ " .long 11b,100b\n"
|
|
+ " .long 12b,100b\n"
|
|
+ " .long 13b,100b\n"
|
|
+ " .long 14b,100b\n"
|
|
+ " .long 15b,100b\n"
|
|
+ " .long 16b,100b\n"
|
|
+ " .long 17b,100b\n"
|
|
+ " .long 18b,100b\n"
|
|
+ " .long 19b,100b\n"
|
|
+ " .long 20b,100b\n"
|
|
+ " .long 21b,100b\n"
|
|
+ " .long 22b,100b\n"
|
|
+ " .long 23b,100b\n"
|
|
+ " .long 24b,100b\n"
|
|
+ " .long 25b,100b\n"
|
|
+ " .long 26b,100b\n"
|
|
+ " .long 27b,100b\n"
|
|
+ " .long 28b,100b\n"
|
|
+ " .long 29b,100b\n"
|
|
+ " .long 30b,100b\n"
|
|
+ " .long 31b,100b\n"
|
|
+ " .long 32b,100b\n"
|
|
+ " .long 33b,100b\n"
|
|
+ " .long 34b,100b\n"
|
|
+ " .long 35b,100b\n"
|
|
+ " .long 36b,100b\n"
|
|
+ " .long 37b,100b\n"
|
|
+ " .long 99b,101b\n"
|
|
+ ".previous"
|
|
+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
|
|
+ : "1"(to), "2"(from), "0"(size)
|
|
+ : "eax", "edx", "memory");
|
|
+ return size;
|
|
+}
|
|
+
|
|
+static unsigned long __size_overflow(3)
|
|
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
|
|
{
|
|
int d0, d1;
|
|
__asm__ __volatile__(
|
|
" .align 2,0x90\n"
|
|
- "0: movl 32(%4), %%eax\n"
|
|
+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
|
|
" cmpl $67, %0\n"
|
|
" jbe 2f\n"
|
|
- "1: movl 64(%4), %%eax\n"
|
|
+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
|
|
" .align 2,0x90\n"
|
|
- "2: movl 0(%4), %%eax\n"
|
|
- "21: movl 4(%4), %%edx\n"
|
|
+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
|
|
+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
|
|
" movl %%eax, 0(%3)\n"
|
|
" movl %%edx, 4(%3)\n"
|
|
- "3: movl 8(%4), %%eax\n"
|
|
- "31: movl 12(%4),%%edx\n"
|
|
+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
|
|
+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
|
|
" movl %%eax, 8(%3)\n"
|
|
" movl %%edx, 12(%3)\n"
|
|
- "4: movl 16(%4), %%eax\n"
|
|
- "41: movl 20(%4), %%edx\n"
|
|
+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
|
|
+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
|
|
" movl %%eax, 16(%3)\n"
|
|
" movl %%edx, 20(%3)\n"
|
|
- "10: movl 24(%4), %%eax\n"
|
|
- "51: movl 28(%4), %%edx\n"
|
|
+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
|
|
+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
|
|
" movl %%eax, 24(%3)\n"
|
|
" movl %%edx, 28(%3)\n"
|
|
- "11: movl 32(%4), %%eax\n"
|
|
- "61: movl 36(%4), %%edx\n"
|
|
+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
|
|
+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
|
|
" movl %%eax, 32(%3)\n"
|
|
" movl %%edx, 36(%3)\n"
|
|
- "12: movl 40(%4), %%eax\n"
|
|
- "71: movl 44(%4), %%edx\n"
|
|
+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
|
|
+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
|
|
" movl %%eax, 40(%3)\n"
|
|
" movl %%edx, 44(%3)\n"
|
|
- "13: movl 48(%4), %%eax\n"
|
|
- "81: movl 52(%4), %%edx\n"
|
|
+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
|
|
+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
|
|
" movl %%eax, 48(%3)\n"
|
|
" movl %%edx, 52(%3)\n"
|
|
- "14: movl 56(%4), %%eax\n"
|
|
- "91: movl 60(%4), %%edx\n"
|
|
+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
|
|
+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
|
|
" movl %%eax, 56(%3)\n"
|
|
" movl %%edx, 60(%3)\n"
|
|
" addl $-64, %0\n"
|
|
@@ -298,9 +411,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
|
|
" shrl $2, %0\n"
|
|
" andl $3, %%eax\n"
|
|
" cld\n"
|
|
- "6: rep; movsl\n"
|
|
+ "6: rep; "__copyuser_seg" movsl\n"
|
|
" movl %%eax,%0\n"
|
|
- "7: rep; movsb\n"
|
|
+ "7: rep; "__copyuser_seg" movsb\n"
|
|
"8:\n"
|
|
".section .fixup,\"ax\"\n"
|
|
"9: lea 0(%%eax,%0,4),%0\n"
|
|
@@ -346,48 +459,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
|
|
* hyoshiok@miraclelinux.com
|
|
*/
|
|
|
|
-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
|
|
+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
|
|
const void __user *from, unsigned long size)
|
|
{
|
|
int d0, d1;
|
|
|
|
__asm__ __volatile__(
|
|
" .align 2,0x90\n"
|
|
- "0: movl 32(%4), %%eax\n"
|
|
+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
|
|
" cmpl $67, %0\n"
|
|
" jbe 2f\n"
|
|
- "1: movl 64(%4), %%eax\n"
|
|
+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
|
|
" .align 2,0x90\n"
|
|
- "2: movl 0(%4), %%eax\n"
|
|
- "21: movl 4(%4), %%edx\n"
|
|
+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
|
|
+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
|
|
" movnti %%eax, 0(%3)\n"
|
|
" movnti %%edx, 4(%3)\n"
|
|
- "3: movl 8(%4), %%eax\n"
|
|
- "31: movl 12(%4),%%edx\n"
|
|
+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
|
|
+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
|
|
" movnti %%eax, 8(%3)\n"
|
|
" movnti %%edx, 12(%3)\n"
|
|
- "4: movl 16(%4), %%eax\n"
|
|
- "41: movl 20(%4), %%edx\n"
|
|
+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
|
|
+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
|
|
" movnti %%eax, 16(%3)\n"
|
|
" movnti %%edx, 20(%3)\n"
|
|
- "10: movl 24(%4), %%eax\n"
|
|
- "51: movl 28(%4), %%edx\n"
|
|
+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
|
|
+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
|
|
" movnti %%eax, 24(%3)\n"
|
|
" movnti %%edx, 28(%3)\n"
|
|
- "11: movl 32(%4), %%eax\n"
|
|
- "61: movl 36(%4), %%edx\n"
|
|
+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
|
|
+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
|
|
" movnti %%eax, 32(%3)\n"
|
|
" movnti %%edx, 36(%3)\n"
|
|
- "12: movl 40(%4), %%eax\n"
|
|
- "71: movl 44(%4), %%edx\n"
|
|
+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
|
|
+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
|
|
" movnti %%eax, 40(%3)\n"
|
|
" movnti %%edx, 44(%3)\n"
|
|
- "13: movl 48(%4), %%eax\n"
|
|
- "81: movl 52(%4), %%edx\n"
|
|
+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
|
|
+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
|
|
" movnti %%eax, 48(%3)\n"
|
|
" movnti %%edx, 52(%3)\n"
|
|
- "14: movl 56(%4), %%eax\n"
|
|
- "91: movl 60(%4), %%edx\n"
|
|
+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
|
|
+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
|
|
" movnti %%eax, 56(%3)\n"
|
|
" movnti %%edx, 60(%3)\n"
|
|
" addl $-64, %0\n"
|
|
@@ -400,9 +513,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
|
|
" shrl $2, %0\n"
|
|
" andl $3, %%eax\n"
|
|
" cld\n"
|
|
- "6: rep; movsl\n"
|
|
+ "6: rep; "__copyuser_seg" movsl\n"
|
|
" movl %%eax,%0\n"
|
|
- "7: rep; movsb\n"
|
|
+ "7: rep; "__copyuser_seg" movsb\n"
|
|
"8:\n"
|
|
".section .fixup,\"ax\"\n"
|
|
"9: lea 0(%%eax,%0,4),%0\n"
|
|
@@ -443,48 +556,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
|
|
return size;
|
|
}
|
|
|
|
-static unsigned long __copy_user_intel_nocache(void *to,
|
|
+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
|
|
const void __user *from, unsigned long size)
|
|
{
|
|
int d0, d1;
|
|
|
|
__asm__ __volatile__(
|
|
" .align 2,0x90\n"
|
|
- "0: movl 32(%4), %%eax\n"
|
|
+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
|
|
" cmpl $67, %0\n"
|
|
" jbe 2f\n"
|
|
- "1: movl 64(%4), %%eax\n"
|
|
+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
|
|
" .align 2,0x90\n"
|
|
- "2: movl 0(%4), %%eax\n"
|
|
- "21: movl 4(%4), %%edx\n"
|
|
+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
|
|
+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
|
|
" movnti %%eax, 0(%3)\n"
|
|
" movnti %%edx, 4(%3)\n"
|
|
- "3: movl 8(%4), %%eax\n"
|
|
- "31: movl 12(%4),%%edx\n"
|
|
+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
|
|
+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
|
|
" movnti %%eax, 8(%3)\n"
|
|
" movnti %%edx, 12(%3)\n"
|
|
- "4: movl 16(%4), %%eax\n"
|
|
- "41: movl 20(%4), %%edx\n"
|
|
+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
|
|
+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
|
|
" movnti %%eax, 16(%3)\n"
|
|
" movnti %%edx, 20(%3)\n"
|
|
- "10: movl 24(%4), %%eax\n"
|
|
- "51: movl 28(%4), %%edx\n"
|
|
+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
|
|
+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
|
|
" movnti %%eax, 24(%3)\n"
|
|
" movnti %%edx, 28(%3)\n"
|
|
- "11: movl 32(%4), %%eax\n"
|
|
- "61: movl 36(%4), %%edx\n"
|
|
+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
|
|
+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
|
|
" movnti %%eax, 32(%3)\n"
|
|
" movnti %%edx, 36(%3)\n"
|
|
- "12: movl 40(%4), %%eax\n"
|
|
- "71: movl 44(%4), %%edx\n"
|
|
+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
|
|
+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
|
|
" movnti %%eax, 40(%3)\n"
|
|
" movnti %%edx, 44(%3)\n"
|
|
- "13: movl 48(%4), %%eax\n"
|
|
- "81: movl 52(%4), %%edx\n"
|
|
+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
|
|
+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
|
|
" movnti %%eax, 48(%3)\n"
|
|
" movnti %%edx, 52(%3)\n"
|
|
- "14: movl 56(%4), %%eax\n"
|
|
- "91: movl 60(%4), %%edx\n"
|
|
+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
|
|
+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
|
|
" movnti %%eax, 56(%3)\n"
|
|
" movnti %%edx, 60(%3)\n"
|
|
" addl $-64, %0\n"
|
|
@@ -497,9 +610,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
|
|
" shrl $2, %0\n"
|
|
" andl $3, %%eax\n"
|
|
" cld\n"
|
|
- "6: rep; movsl\n"
|
|
+ "6: rep; "__copyuser_seg" movsl\n"
|
|
" movl %%eax,%0\n"
|
|
- "7: rep; movsb\n"
|
|
+ "7: rep; "__copyuser_seg" movsb\n"
|
|
"8:\n"
|
|
".section .fixup,\"ax\"\n"
|
|
"9: lea 0(%%eax,%0,4),%0\n"
|
|
@@ -542,32 +655,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
|
|
*/
|
|
unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
|
|
unsigned long size);
|
|
-unsigned long __copy_user_intel(void __user *to, const void *from,
|
|
+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
|
|
+ unsigned long size);
|
|
+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
|
|
unsigned long size);
|
|
unsigned long __copy_user_zeroing_intel_nocache(void *to,
|
|
const void __user *from, unsigned long size);
|
|
#endif /* CONFIG_X86_INTEL_USERCOPY */
|
|
|
|
/* Generic arbitrary sized copy. */
|
|
-#define __copy_user(to, from, size) \
|
|
+#define __copy_user(to, from, size, prefix, set, restore) \
|
|
do { \
|
|
int __d0, __d1, __d2; \
|
|
__asm__ __volatile__( \
|
|
+ set \
|
|
" cmp $7,%0\n" \
|
|
" jbe 1f\n" \
|
|
" movl %1,%0\n" \
|
|
" negl %0\n" \
|
|
" andl $7,%0\n" \
|
|
" subl %0,%3\n" \
|
|
- "4: rep; movsb\n" \
|
|
+ "4: rep; "prefix"movsb\n" \
|
|
" movl %3,%0\n" \
|
|
" shrl $2,%0\n" \
|
|
" andl $3,%3\n" \
|
|
" .align 2,0x90\n" \
|
|
- "0: rep; movsl\n" \
|
|
+ "0: rep; "prefix"movsl\n" \
|
|
" movl %3,%0\n" \
|
|
- "1: rep; movsb\n" \
|
|
+ "1: rep; "prefix"movsb\n" \
|
|
"2:\n" \
|
|
+ restore \
|
|
".section .fixup,\"ax\"\n" \
|
|
"5: addl %3,%0\n" \
|
|
" jmp 2b\n" \
|
|
@@ -595,14 +712,14 @@ do { \
|
|
" negl %0\n" \
|
|
" andl $7,%0\n" \
|
|
" subl %0,%3\n" \
|
|
- "4: rep; movsb\n" \
|
|
+ "4: rep; "__copyuser_seg"movsb\n" \
|
|
" movl %3,%0\n" \
|
|
" shrl $2,%0\n" \
|
|
" andl $3,%3\n" \
|
|
" .align 2,0x90\n" \
|
|
- "0: rep; movsl\n" \
|
|
+ "0: rep; "__copyuser_seg"movsl\n" \
|
|
" movl %3,%0\n" \
|
|
- "1: rep; movsb\n" \
|
|
+ "1: rep; "__copyuser_seg"movsb\n" \
|
|
"2:\n" \
|
|
".section .fixup,\"ax\"\n" \
|
|
"5: addl %3,%0\n" \
|
|
@@ -688,9 +805,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
|
|
}
|
|
#endif
|
|
if (movsl_is_ok(to, from, n))
|
|
- __copy_user(to, from, n);
|
|
+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
|
|
else
|
|
- n = __copy_user_intel(to, from, n);
|
|
+ n = __generic_copy_to_user_intel(to, from, n);
|
|
return n;
|
|
}
|
|
EXPORT_SYMBOL(__copy_to_user_ll);
|
|
@@ -710,10 +827,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
|
|
unsigned long n)
|
|
{
|
|
if (movsl_is_ok(to, from, n))
|
|
- __copy_user(to, from, n);
|
|
+ __copy_user(to, from, n, __copyuser_seg, "", "");
|
|
else
|
|
- n = __copy_user_intel((void __user *)to,
|
|
- (const void *)from, n);
|
|
+ n = __generic_copy_from_user_intel(to, from, n);
|
|
return n;
|
|
}
|
|
EXPORT_SYMBOL(__copy_from_user_ll_nozero);
|
|
@@ -740,65 +856,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
|
|
if (n > 64 && cpu_has_xmm2)
|
|
n = __copy_user_intel_nocache(to, from, n);
|
|
else
|
|
- __copy_user(to, from, n);
|
|
+ __copy_user(to, from, n, __copyuser_seg, "", "");
|
|
#else
|
|
- __copy_user(to, from, n);
|
|
+ __copy_user(to, from, n, __copyuser_seg, "", "");
|
|
#endif
|
|
return n;
|
|
}
|
|
EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
|
|
|
|
-/**
|
|
- * copy_to_user: - Copy a block of data into user space.
|
|
- * @to: Destination address, in user space.
|
|
- * @from: Source address, in kernel space.
|
|
- * @n: Number of bytes to copy.
|
|
- *
|
|
- * Context: User context only. This function may sleep.
|
|
- *
|
|
- * Copy data from kernel space to user space.
|
|
- *
|
|
- * Returns number of bytes that could not be copied.
|
|
- * On success, this will be zero.
|
|
- */
|
|
-unsigned long
|
|
-copy_to_user(void __user *to, const void *from, unsigned long n)
|
|
+void copy_from_user_overflow(void)
|
|
{
|
|
- if (access_ok(VERIFY_WRITE, to, n))
|
|
- n = __copy_to_user(to, from, n);
|
|
- return n;
|
|
+ WARN(1, "Buffer overflow detected!\n");
|
|
}
|
|
-EXPORT_SYMBOL(copy_to_user);
|
|
+EXPORT_SYMBOL(copy_from_user_overflow);
|
|
|
|
-/**
|
|
- * copy_from_user: - Copy a block of data from user space.
|
|
- * @to: Destination address, in kernel space.
|
|
- * @from: Source address, in user space.
|
|
- * @n: Number of bytes to copy.
|
|
- *
|
|
- * Context: User context only. This function may sleep.
|
|
- *
|
|
- * Copy data from user space to kernel space.
|
|
- *
|
|
- * Returns number of bytes that could not be copied.
|
|
- * On success, this will be zero.
|
|
- *
|
|
- * If some data could not be copied, this function will pad the copied
|
|
- * data to the requested size using zero bytes.
|
|
- */
|
|
-unsigned long
|
|
-_copy_from_user(void *to, const void __user *from, unsigned long n)
|
|
+void copy_to_user_overflow(void)
|
|
{
|
|
- if (access_ok(VERIFY_READ, from, n))
|
|
- n = __copy_from_user(to, from, n);
|
|
- else
|
|
- memset(to, 0, n);
|
|
- return n;
|
|
+ WARN(1, "Buffer overflow detected!\n");
|
|
}
|
|
-EXPORT_SYMBOL(_copy_from_user);
|
|
+EXPORT_SYMBOL(copy_to_user_overflow);
|
|
|
|
-void copy_from_user_overflow(void)
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+void __set_fs(mm_segment_t x)
|
|
{
|
|
- WARN(1, "Buffer overflow detected!\n");
|
|
+ switch (x.seg) {
|
|
+ case 0:
|
|
+ loadsegment(gs, 0);
|
|
+ break;
|
|
+ case TASK_SIZE_MAX:
|
|
+ loadsegment(gs, __USER_DS);
|
|
+ break;
|
|
+ case -1UL:
|
|
+ loadsegment(gs, __KERNEL_DS);
|
|
+ break;
|
|
+ default:
|
|
+ BUG();
|
|
+ }
|
|
+ return;
|
|
}
|
|
-EXPORT_SYMBOL(copy_from_user_overflow);
|
|
+EXPORT_SYMBOL(__set_fs);
|
|
+
|
|
+void set_fs(mm_segment_t x)
|
|
+{
|
|
+ current_thread_info()->addr_limit = x;
|
|
+ __set_fs(x);
|
|
+}
|
|
+EXPORT_SYMBOL(set_fs);
|
|
+#endif
|
|
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
|
|
index d70b9a5..a9cd769 100644
|
|
--- a/arch/x86/lib/usercopy_64.c
|
|
+++ b/arch/x86/lib/usercopy_64.c
|
|
@@ -16,6 +16,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
|
|
{
|
|
long __d0;
|
|
might_fault();
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
|
|
+ addr += PAX_USER_SHADOW_BASE;
|
|
+#endif
|
|
+
|
|
/* no memory constraint because it doesn't change any memory gcc knows
|
|
about */
|
|
asm volatile(
|
|
@@ -100,12 +106,20 @@ long strlen_user(const char __user *s)
|
|
}
|
|
EXPORT_SYMBOL(strlen_user);
|
|
|
|
-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
|
|
+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
|
|
{
|
|
- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
|
|
- return copy_user_generic((__force void *)to, (__force void *)from, len);
|
|
- }
|
|
- return len;
|
|
+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_UDEREF
|
|
+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
|
|
+ to += PAX_USER_SHADOW_BASE;
|
|
+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
|
|
+ from += PAX_USER_SHADOW_BASE;
|
|
+#endif
|
|
+
|
|
+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
|
|
+ }
|
|
+ return len;
|
|
}
|
|
EXPORT_SYMBOL(copy_in_user);
|
|
|
|
@@ -115,7 +129,7 @@ EXPORT_SYMBOL(copy_in_user);
|
|
* it is not necessary to optimize tail handling.
|
|
*/
|
|
unsigned long
|
|
-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
|
|
+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
|
|
{
|
|
char c;
|
|
unsigned zero_len;
|
|
@@ -132,3 +146,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
|
|
break;
|
|
return len;
|
|
}
|
|
+
|
|
+void copy_from_user_overflow(void)
|
|
+{
|
|
+ WARN(1, "Buffer overflow detected!\n");
|
|
+}
|
|
+EXPORT_SYMBOL(copy_from_user_overflow);
|
|
+
|
|
+void copy_to_user_overflow(void)
|
|
+{
|
|
+ WARN(1, "Buffer overflow detected!\n");
|
|
+}
|
|
+EXPORT_SYMBOL(copy_to_user_overflow);
|
|
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
|
|
index 1fb85db..8b3540b 100644
|
|
--- a/arch/x86/mm/extable.c
|
|
+++ b/arch/x86/mm/extable.c
|
|
@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
|
|
const struct exception_table_entry *fixup;
|
|
|
|
#ifdef CONFIG_PNPBIOS
|
|
- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
|
|
+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
|
|
extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
|
|
extern u32 pnp_bios_is_utter_crap;
|
|
pnp_bios_is_utter_crap = 1;
|
|
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
|
|
index b723f2e..e16e973 100644
|
|
--- a/arch/x86/mm/fault.c
|
|
+++ b/arch/x86/mm/fault.c
|
|
@@ -13,11 +13,18 @@
|
|
#include <linux/perf_event.h> /* perf_sw_event */
|
|
#include <linux/hugetlb.h> /* hstate_index_to_shift */
|
|
#include <linux/prefetch.h> /* prefetchw */
|
|
+#include <linux/unistd.h>
|
|
+#include <linux/compiler.h>
|
|
|
|
#include <asm/traps.h> /* dotraplinkage, ... */
|
|
#include <asm/pgalloc.h> /* pgd_*(), ... */
|
|
#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
|
|
#include <asm/fixmap.h> /* VSYSCALL_START */
|
|
+#include <asm/tlbflush.h>
|
|
+
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+#include <asm/stacktrace.h>
|
|
+#endif
|
|
|
|
/*
|
|
* Page fault error code bits:
|
|
@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
|
|
int ret = 0;
|
|
|
|
/* kprobe_running() needs smp_processor_id() */
|
|
- if (kprobes_built_in() && !user_mode_vm(regs)) {
|
|
+ if (kprobes_built_in() && !user_mode(regs)) {
|
|
preempt_disable();
|
|
if (kprobe_running() && kprobe_fault_handler(regs, 14))
|
|
ret = 1;
|
|
@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
|
|
return !instr_lo || (instr_lo>>1) == 1;
|
|
case 0x00:
|
|
/* Prefetch instruction is 0x0F0D or 0x0F18 */
|
|
- if (probe_kernel_address(instr, opcode))
|
|
+ if (user_mode(regs)) {
|
|
+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
|
|
+ return 0;
|
|
+ } else if (probe_kernel_address(instr, opcode))
|
|
return 0;
|
|
|
|
*prefetch = (instr_lo == 0xF) &&
|
|
@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
|
|
while (instr < max_instr) {
|
|
unsigned char opcode;
|
|
|
|
- if (probe_kernel_address(instr, opcode))
|
|
+ if (user_mode(regs)) {
|
|
+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
|
|
+ break;
|
|
+ } else if (probe_kernel_address(instr, opcode))
|
|
break;
|
|
|
|
instr++;
|
|
@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
|
|
force_sig_info(si_signo, &info, tsk);
|
|
}
|
|
|
|
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
|
|
+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUTRAMP
|
|
+static int pax_handle_fetch_fault(struct pt_regs *regs);
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
|
|
+{
|
|
+ pgd_t *pgd;
|
|
+ pud_t *pud;
|
|
+ pmd_t *pmd;
|
|
+
|
|
+ pgd = pgd_offset(mm, address);
|
|
+ if (!pgd_present(*pgd))
|
|
+ return NULL;
|
|
+ pud = pud_offset(pgd, address);
|
|
+ if (!pud_present(*pud))
|
|
+ return NULL;
|
|
+ pmd = pmd_offset(pud, address);
|
|
+ if (!pmd_present(*pmd))
|
|
+ return NULL;
|
|
+ return pmd;
|
|
+}
|
|
+#endif
|
|
+
|
|
DEFINE_SPINLOCK(pgd_lock);
|
|
LIST_HEAD(pgd_list);
|
|
|
|
@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
|
|
for (address = VMALLOC_START & PMD_MASK;
|
|
address >= TASK_SIZE && address < FIXADDR_TOP;
|
|
address += PMD_SIZE) {
|
|
+
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+ unsigned long cpu;
|
|
+#else
|
|
struct page *page;
|
|
+#endif
|
|
|
|
spin_lock(&pgd_lock);
|
|
+
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
|
|
+ pgd_t *pgd = get_cpu_pgd(cpu);
|
|
+ pmd_t *ret;
|
|
+#else
|
|
list_for_each_entry(page, &pgd_list, lru) {
|
|
+ pgd_t *pgd = page_address(page);
|
|
spinlock_t *pgt_lock;
|
|
pmd_t *ret;
|
|
|
|
@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
|
|
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
|
|
|
|
spin_lock(pgt_lock);
|
|
- ret = vmalloc_sync_one(page_address(page), address);
|
|
+#endif
|
|
+
|
|
+ ret = vmalloc_sync_one(pgd, address);
|
|
+
|
|
+#ifndef CONFIG_PAX_PER_CPU_PGD
|
|
spin_unlock(pgt_lock);
|
|
+#endif
|
|
|
|
if (!ret)
|
|
break;
|
|
@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
|
|
* an interrupt in the middle of a task switch..
|
|
*/
|
|
pgd_paddr = read_cr3();
|
|
+
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
|
|
+#endif
|
|
+
|
|
pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
|
|
if (!pmd_k)
|
|
return -1;
|
|
@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
|
|
* happen within a race in page table update. In the later
|
|
* case just flush:
|
|
*/
|
|
+
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
|
|
+ pgd = pgd_offset_cpu(smp_processor_id(), address);
|
|
+#else
|
|
pgd = pgd_offset(current->active_mm, address);
|
|
+#endif
|
|
+
|
|
pgd_ref = pgd_offset_k(address);
|
|
if (pgd_none(*pgd_ref))
|
|
return -1;
|
|
@@ -542,7 +612,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
|
|
static int is_errata100(struct pt_regs *regs, unsigned long address)
|
|
{
|
|
#ifdef CONFIG_X86_64
|
|
- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
|
|
+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
|
|
return 1;
|
|
#endif
|
|
return 0;
|
|
@@ -569,7 +639,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
|
|
}
|
|
|
|
static const char nx_warning[] = KERN_CRIT
|
|
-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
|
|
+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
|
|
|
|
static void
|
|
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
|
|
@@ -578,15 +648,21 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
|
|
if (!oops_may_print())
|
|
return;
|
|
|
|
- if (error_code & PF_INSTR) {
|
|
+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
|
|
unsigned int level;
|
|
|
|
pte_t *pte = lookup_address(address, &level);
|
|
|
|
if (pte && pte_present(*pte) && !pte_exec(*pte))
|
|
- printk(nx_warning, current_uid());
|
|
+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ if (init_mm.start_code <= address && address < init_mm.end_code)
|
|
+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
|
|
+ current->comm, task_pid_nr(current), current_uid(), current_euid());
|
|
+#endif
|
|
+
|
|
printk(KERN_ALERT "BUG: unable to handle kernel ");
|
|
if (address < PAGE_SIZE)
|
|
printk(KERN_CONT "NULL pointer dereference");
|
|
@@ -848,7 +924,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
|
|
if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
|
|
printk(KERN_ERR
|
|
"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
|
|
- tsk->comm, tsk->pid, address);
|
|
+ tsk->comm, task_pid_nr(tsk), address);
|
|
code = BUS_MCEERR_AR;
|
|
}
|
|
#endif
|
|
@@ -906,6 +982,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
|
|
return 1;
|
|
}
|
|
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
|
|
+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
|
|
+{
|
|
+ pte_t *pte;
|
|
+ pmd_t *pmd;
|
|
+ spinlock_t *ptl;
|
|
+ unsigned char pte_mask;
|
|
+
|
|
+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
|
|
+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
|
|
+ return 0;
|
|
+
|
|
+ /* PaX: it's our fault, let's handle it if we can */
|
|
+
|
|
+ /* PaX: take a look at read faults before acquiring any locks */
|
|
+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
|
|
+ /* instruction fetch attempt from a protected page in user mode */
|
|
+ up_read(&mm->mmap_sem);
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUTRAMP
|
|
+ switch (pax_handle_fetch_fault(regs)) {
|
|
+ case 2:
|
|
+ return 1;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
|
|
+ do_group_exit(SIGKILL);
|
|
+ }
|
|
+
|
|
+ pmd = pax_get_pmd(mm, address);
|
|
+ if (unlikely(!pmd))
|
|
+ return 0;
|
|
+
|
|
+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
|
|
+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
|
|
+ pte_unmap_unlock(pte, ptl);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
|
|
+ /* write attempt to a protected page in user mode */
|
|
+ pte_unmap_unlock(pte, ptl);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+#ifdef CONFIG_SMP
|
|
+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
|
|
+#else
|
|
+ if (likely(address > get_limit(regs->cs)))
|
|
+#endif
|
|
+ {
|
|
+ set_pte(pte, pte_mkread(*pte));
|
|
+ __flush_tlb_one(address);
|
|
+ pte_unmap_unlock(pte, ptl);
|
|
+ up_read(&mm->mmap_sem);
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
|
|
+
|
|
+ /*
|
|
+ * PaX: fill DTLB with user rights and retry
|
|
+ */
|
|
+ __asm__ __volatile__ (
|
|
+ "orb %2,(%1)\n"
|
|
+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
|
|
+/*
|
|
+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
|
|
+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
|
|
+ * page fault when examined during a TLB load attempt. this is true not only
|
|
+ * for PTEs holding a non-present entry but also present entries that will
|
|
+ * raise a page fault (such as those set up by PaX, or the copy-on-write
|
|
+ * mechanism). in effect it means that we do *not* need to flush the TLBs
|
|
+ * for our target pages since their PTEs are simply not in the TLBs at all.
|
|
+
|
|
+ * the best thing in omitting it is that we gain around 15-20% speed in the
|
|
+ * fast path of the page fault handler and can get rid of tracing since we
|
|
+ * can no longer flush unintended entries.
|
|
+ */
|
|
+ "invlpg (%0)\n"
|
|
+#endif
|
|
+ __copyuser_seg"testb $0,(%0)\n"
|
|
+ "xorb %3,(%1)\n"
|
|
+ :
|
|
+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
|
|
+ : "memory", "cc");
|
|
+ pte_unmap_unlock(pte, ptl);
|
|
+ up_read(&mm->mmap_sem);
|
|
+ return 1;
|
|
+}
|
|
+#endif
|
|
+
|
|
/*
|
|
* Handle a spurious fault caused by a stale TLB entry.
|
|
*
|
|
@@ -978,6 +1147,9 @@ int show_unhandled_signals = 1;
|
|
static inline int
|
|
access_error(unsigned long error_code, struct vm_area_struct *vma)
|
|
{
|
|
+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
|
|
+ return 1;
|
|
+
|
|
if (error_code & PF_WRITE) {
|
|
/* write, present and write, not present: */
|
|
if (unlikely(!(vma->vm_flags & VM_WRITE)))
|
|
@@ -1011,19 +1183,34 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
|
|
{
|
|
struct vm_area_struct *vma;
|
|
struct task_struct *tsk;
|
|
- unsigned long address;
|
|
struct mm_struct *mm;
|
|
int fault;
|
|
int write = error_code & PF_WRITE;
|
|
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
|
|
(write ? FAULT_FLAG_WRITE : 0);
|
|
|
|
+ /* Get the faulting address: */
|
|
+ unsigned long address = read_cr2();
|
|
+
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
|
|
+ if (!search_exception_tables(regs->ip)) {
|
|
+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
|
|
+ bad_area_nosemaphore(regs, error_code, address);
|
|
+ return;
|
|
+ }
|
|
+ if (address < PAX_USER_SHADOW_BASE) {
|
|
+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
|
|
+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
|
|
+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
|
|
+ } else
|
|
+ address -= PAX_USER_SHADOW_BASE;
|
|
+ }
|
|
+#endif
|
|
+
|
|
tsk = current;
|
|
mm = tsk->mm;
|
|
|
|
- /* Get the faulting address: */
|
|
- address = read_cr2();
|
|
-
|
|
/*
|
|
* Detect and handle instructions that would cause a page fault for
|
|
* both a tracked kernel page and a userspace page.
|
|
@@ -1083,7 +1270,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
|
|
* User-mode registers count as a user access even for any
|
|
* potential system fault or CPU buglet:
|
|
*/
|
|
- if (user_mode_vm(regs)) {
|
|
+ if (user_mode(regs)) {
|
|
local_irq_enable();
|
|
error_code |= PF_USER;
|
|
} else {
|
|
@@ -1138,6 +1325,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
|
|
might_sleep();
|
|
}
|
|
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
|
|
+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
|
|
+ return;
|
|
+#endif
|
|
+
|
|
vma = find_vma(mm, address);
|
|
if (unlikely(!vma)) {
|
|
bad_area(regs, error_code, address);
|
|
@@ -1149,18 +1341,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
|
|
bad_area(regs, error_code, address);
|
|
return;
|
|
}
|
|
- if (error_code & PF_USER) {
|
|
- /*
|
|
- * Accessing the stack below %sp is always a bug.
|
|
- * The large cushion allows instructions like enter
|
|
- * and pusha to work. ("enter $65535, $31" pushes
|
|
- * 32 pointers and then decrements %sp by 65535.)
|
|
- */
|
|
- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
|
|
- bad_area(regs, error_code, address);
|
|
- return;
|
|
- }
|
|
+ /*
|
|
+ * Accessing the stack below %sp is always a bug.
|
|
+ * The large cushion allows instructions like enter
|
|
+ * and pusha to work. ("enter $65535, $31" pushes
|
|
+ * 32 pointers and then decrements %sp by 65535.)
|
|
+ */
|
|
+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
|
|
+ bad_area(regs, error_code, address);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
|
|
+ bad_area(regs, error_code, address);
|
|
+ return;
|
|
}
|
|
+#endif
|
|
+
|
|
if (unlikely(expand_stack(vma, address))) {
|
|
bad_area(regs, error_code, address);
|
|
return;
|
|
@@ -1215,3 +1413,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
|
|
|
|
up_read(&mm->mmap_sem);
|
|
}
|
|
+
|
|
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
|
|
+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
|
|
+{
|
|
+ struct mm_struct *mm = current->mm;
|
|
+ unsigned long ip = regs->ip;
|
|
+
|
|
+ if (v8086_mode(regs))
|
|
+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
|
|
+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
|
|
+ return true;
|
|
+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
|
|
+ return true;
|
|
+ return false;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
|
|
+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
|
|
+ return true;
|
|
+ return false;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ return false;
|
|
+}
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUTRAMP
|
|
+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
|
|
+{
|
|
+ int err;
|
|
+
|
|
+ do { /* PaX: libffi trampoline emulation */
|
|
+ unsigned char mov, jmp;
|
|
+ unsigned int addr1, addr2;
|
|
+
|
|
+#ifdef CONFIG_X86_64
|
|
+ if ((regs->ip + 9) >> 32)
|
|
+ break;
|
|
+#endif
|
|
+
|
|
+ err = get_user(mov, (unsigned char __user *)regs->ip);
|
|
+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
|
|
+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
|
|
+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if (mov == 0xB8 && jmp == 0xE9) {
|
|
+ regs->ax = addr1;
|
|
+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+ do { /* PaX: gcc trampoline emulation #1 */
|
|
+ unsigned char mov1, mov2;
|
|
+ unsigned short jmp;
|
|
+ unsigned int addr1, addr2;
|
|
+
|
|
+#ifdef CONFIG_X86_64
|
|
+ if ((regs->ip + 11) >> 32)
|
|
+ break;
|
|
+#endif
|
|
+
|
|
+ err = get_user(mov1, (unsigned char __user *)regs->ip);
|
|
+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
|
|
+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
|
|
+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
|
|
+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
|
|
+ regs->cx = addr1;
|
|
+ regs->ax = addr2;
|
|
+ regs->ip = addr2;
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+ do { /* PaX: gcc trampoline emulation #2 */
|
|
+ unsigned char mov, jmp;
|
|
+ unsigned int addr1, addr2;
|
|
+
|
|
+#ifdef CONFIG_X86_64
|
|
+ if ((regs->ip + 9) >> 32)
|
|
+ break;
|
|
+#endif
|
|
+
|
|
+ err = get_user(mov, (unsigned char __user *)regs->ip);
|
|
+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
|
|
+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
|
|
+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if (mov == 0xB9 && jmp == 0xE9) {
|
|
+ regs->cx = addr1;
|
|
+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+ return 1; /* PaX in action */
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_X86_64
|
|
+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
|
|
+{
|
|
+ int err;
|
|
+
|
|
+ do { /* PaX: libffi trampoline emulation */
|
|
+ unsigned short mov1, mov2, jmp1;
|
|
+ unsigned char stcclc, jmp2;
|
|
+ unsigned long addr1, addr2;
|
|
+
|
|
+ err = get_user(mov1, (unsigned short __user *)regs->ip);
|
|
+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
|
|
+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
|
|
+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
|
|
+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
|
|
+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
|
|
+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
|
|
+ regs->r11 = addr1;
|
|
+ regs->r10 = addr2;
|
|
+ if (stcclc == 0xF8)
|
|
+ regs->flags &= ~X86_EFLAGS_CF;
|
|
+ else
|
|
+ regs->flags |= X86_EFLAGS_CF;
|
|
+ regs->ip = addr1;
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+ do { /* PaX: gcc trampoline emulation #1 */
|
|
+ unsigned short mov1, mov2, jmp1;
|
|
+ unsigned char jmp2;
|
|
+ unsigned int addr1;
|
|
+ unsigned long addr2;
|
|
+
|
|
+ err = get_user(mov1, (unsigned short __user *)regs->ip);
|
|
+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
|
|
+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
|
|
+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
|
|
+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
|
|
+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
|
|
+ regs->r11 = addr1;
|
|
+ regs->r10 = addr2;
|
|
+ regs->ip = addr1;
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+ do { /* PaX: gcc trampoline emulation #2 */
|
|
+ unsigned short mov1, mov2, jmp1;
|
|
+ unsigned char jmp2;
|
|
+ unsigned long addr1, addr2;
|
|
+
|
|
+ err = get_user(mov1, (unsigned short __user *)regs->ip);
|
|
+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
|
|
+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
|
|
+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
|
|
+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
|
|
+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
|
|
+
|
|
+ if (err)
|
|
+ break;
|
|
+
|
|
+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
|
|
+ regs->r11 = addr1;
|
|
+ regs->r10 = addr2;
|
|
+ regs->ip = addr1;
|
|
+ return 2;
|
|
+ }
|
|
+ } while (0);
|
|
+
|
|
+ return 1; /* PaX in action */
|
|
+}
|
|
+#endif
|
|
+
|
|
+/*
|
|
+ * PaX: decide what to do with offenders (regs->ip = fault address)
|
|
+ *
|
|
+ * returns 1 when task should be killed
|
|
+ * 2 when gcc trampoline was detected
|
|
+ */
|
|
+static int pax_handle_fetch_fault(struct pt_regs *regs)
|
|
+{
|
|
+ if (v8086_mode(regs))
|
|
+ return 1;
|
|
+
|
|
+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
|
|
+ return 1;
|
|
+
|
|
+#ifdef CONFIG_X86_32
|
|
+ return pax_handle_fetch_fault_32(regs);
|
|
+#else
|
|
+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
|
|
+ return pax_handle_fetch_fault_32(regs);
|
|
+ else
|
|
+ return pax_handle_fetch_fault_64(regs);
|
|
+#endif
|
|
+}
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
|
|
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
|
|
+{
|
|
+ long i;
|
|
+
|
|
+ printk(KERN_ERR "PAX: bytes at PC: ");
|
|
+ for (i = 0; i < 20; i++) {
|
|
+ unsigned char c;
|
|
+ if (get_user(c, (unsigned char __force_user *)pc+i))
|
|
+ printk(KERN_CONT "?? ");
|
|
+ else
|
|
+ printk(KERN_CONT "%02x ", c);
|
|
+ }
|
|
+ printk("\n");
|
|
+
|
|
+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
|
|
+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
|
|
+ unsigned long c;
|
|
+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
|
|
+#ifdef CONFIG_X86_32
|
|
+ printk(KERN_CONT "???????? ");
|
|
+#else
|
|
+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
|
|
+ printk(KERN_CONT "???????? ???????? ");
|
|
+ else
|
|
+ printk(KERN_CONT "???????????????? ");
|
|
+#endif
|
|
+ } else {
|
|
+#ifdef CONFIG_X86_64
|
|
+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
|
|
+ printk(KERN_CONT "%08x ", (unsigned int)c);
|
|
+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
|
|
+ } else
|
|
+#endif
|
|
+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
|
|
+ }
|
|
+ }
|
|
+ printk("\n");
|
|
+}
|
|
+#endif
|
|
+
|
|
+/**
|
|
+ * probe_kernel_write(): safely attempt to write to a location
|
|
+ * @dst: address to write to
|
|
+ * @src: pointer to the data that shall be written
|
|
+ * @size: size of the data chunk
|
|
+ *
|
|
+ * Safely write to address @dst from the buffer at @src. If a kernel fault
|
|
+ * happens, handle that and return -EFAULT.
|
|
+ */
|
|
+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
|
|
+{
|
|
+ long ret;
|
|
+ mm_segment_t old_fs = get_fs();
|
|
+
|
|
+ set_fs(KERNEL_DS);
|
|
+ pagefault_disable();
|
|
+ pax_open_kernel();
|
|
+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
|
|
+ pax_close_kernel();
|
|
+ pagefault_enable();
|
|
+ set_fs(old_fs);
|
|
+
|
|
+ return ret ? -EFAULT : 0;
|
|
+}
|
|
diff --git a/arch/x86/mm/fault.c.rej b/arch/x86/mm/fault.c.rej
|
|
new file mode 100644
|
|
index 0000000..51b534c
|
|
--- /dev/null
|
|
+++ b/arch/x86/mm/fault.c.rej
|
|
@@ -0,0 +1,23 @@
|
|
+--- arch/x86/mm/fault.c 2012-05-21 11:32:57.831927679 +0200
|
|
++++ arch/x86/mm/fault.c 2012-05-26 01:07:07.428801082 +0200
|
|
+@@ -824,6 +900,21 @@ __bad_area_nosemaphore(struct pt_regs *r
|
|
+ }
|
|
+ #endif
|
|
+
|
|
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
|
|
++ if (pax_is_fetch_fault(regs, error_code, address)) {
|
|
++
|
|
++#ifdef CONFIG_PAX_EMUTRAMP
|
|
++ switch (pax_handle_fetch_fault(regs)) {
|
|
++ case 2:
|
|
++ return;
|
|
++ }
|
|
++#endif
|
|
++
|
|
++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
|
|
++ do_group_exit(SIGKILL);
|
|
++ }
|
|
++#endif
|
|
++
|
|
+ if (unlikely(show_unhandled_signals))
|
|
+ show_signal_msg(regs, error_code, address, tsk);
|
|
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
|
|
index dd74e46..7d26398 100644
|
|
--- a/arch/x86/mm/gup.c
|
|
+++ b/arch/x86/mm/gup.c
|
|
@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
|
|
addr = start;
|
|
len = (unsigned long) nr_pages << PAGE_SHIFT;
|
|
end = start + len;
|
|
- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
|
|
+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
|
|
(void __user *)start, len)))
|
|
return 0;
|
|
|
|
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
|
|
index 6f31ee5..8ee4164 100644
|
|
--- a/arch/x86/mm/highmem_32.c
|
|
+++ b/arch/x86/mm/highmem_32.c
|
|
@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
|
|
idx = type + KM_TYPE_NR*smp_processor_id();
|
|
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
|
|
BUG_ON(!pte_none(*(kmap_pte-idx)));
|
|
+
|
|
+ pax_open_kernel();
|
|
set_pte(kmap_pte-idx, mk_pte(page, prot));
|
|
+ pax_close_kernel();
|
|
+
|
|
arch_flush_lazy_mmu_mode();
|
|
|
|
return (void *)vaddr;
|
|
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
|
|
index b91e485..d00e7c9 100644
|
|
--- a/arch/x86/mm/hugetlbpage.c
|
|
+++ b/arch/x86/mm/hugetlbpage.c
|
|
@@ -277,13 +277,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
|
|
struct hstate *h = hstate_file(file);
|
|
struct mm_struct *mm = current->mm;
|
|
struct vm_area_struct *vma;
|
|
- unsigned long start_addr;
|
|
+ unsigned long start_addr, pax_task_size = TASK_SIZE;
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
|
|
+ pax_task_size = SEGMEXEC_TASK_SIZE;
|
|
+#endif
|
|
+
|
|
+ pax_task_size -= PAGE_SIZE;
|
|
|
|
if (len > mm->cached_hole_size) {
|
|
- start_addr = mm->free_area_cache;
|
|
+ start_addr = mm->free_area_cache;
|
|
} else {
|
|
- start_addr = TASK_UNMAPPED_BASE;
|
|
- mm->cached_hole_size = 0;
|
|
+ start_addr = mm->mmap_base;
|
|
+ mm->cached_hole_size = 0;
|
|
}
|
|
|
|
full_search:
|
|
@@ -291,26 +298,27 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
|
|
|
|
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
|
|
/* At this point: (!vma || addr < vma->vm_end). */
|
|
- if (TASK_SIZE - len < addr) {
|
|
+ if (pax_task_size - len < addr) {
|
|
/*
|
|
* Start a new search - just in case we missed
|
|
* some holes.
|
|
*/
|
|
- if (start_addr != TASK_UNMAPPED_BASE) {
|
|
- start_addr = TASK_UNMAPPED_BASE;
|
|
+ if (start_addr != mm->mmap_base) {
|
|
+ start_addr = mm->mmap_base;
|
|
mm->cached_hole_size = 0;
|
|
goto full_search;
|
|
}
|
|
return -ENOMEM;
|
|
}
|
|
- if (!vma || addr + len <= vma->vm_start) {
|
|
- mm->free_area_cache = addr + len;
|
|
- return addr;
|
|
- }
|
|
+ if (check_heap_stack_gap(vma, addr, len))
|
|
+ break;
|
|
if (addr + mm->cached_hole_size < vma->vm_start)
|
|
mm->cached_hole_size = vma->vm_start - addr;
|
|
addr = ALIGN(vma->vm_end, huge_page_size(h));
|
|
}
|
|
+
|
|
+ mm->free_area_cache = addr + len;
|
|
+ return addr;
|
|
}
|
|
|
|
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
|
|
@@ -321,9 +329,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
|
|
struct mm_struct *mm = current->mm;
|
|
struct vm_area_struct *vma;
|
|
unsigned long base = mm->mmap_base;
|
|
- unsigned long addr = addr0;
|
|
+ unsigned long addr;
|
|
unsigned long largest_hole = mm->cached_hole_size;
|
|
- unsigned long start_addr;
|
|
|
|
/* don't allow allocations above current base */
|
|
if (mm->free_area_cache > base)
|
|
@@ -333,16 +340,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
|
|
largest_hole = 0;
|
|
mm->free_area_cache = base;
|
|
}
|
|
-try_again:
|
|
- start_addr = mm->free_area_cache;
|
|
|
|
/* make sure it can fit in the remaining address space */
|
|
if (mm->free_area_cache < len)
|
|
goto fail;
|
|
|
|
/* either no address requested or can't fit in requested address hole */
|
|
- addr = (mm->free_area_cache - len) & huge_page_mask(h);
|
|
+ addr = mm->free_area_cache - len;
|
|
do {
|
|
+ addr &= huge_page_mask(h);
|
|
/*
|
|
* Lookup failure means no vma is above this address,
|
|
* i.e. return with success:
|
|
@@ -351,10 +357,10 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
|
|
if (!vma)
|
|
return addr;
|
|
|
|
- if (addr + len <= vma->vm_start) {
|
|
+ if (check_heap_stack_gap(vma, addr, len)) {
|
|
/* remember the address as a hint for next time */
|
|
- mm->cached_hole_size = largest_hole;
|
|
- return (mm->free_area_cache = addr);
|
|
+ mm->cached_hole_size = largest_hole;
|
|
+ return (mm->free_area_cache = addr);
|
|
} else if (mm->free_area_cache == vma->vm_end) {
|
|
/* pull free_area_cache down to the first hole */
|
|
mm->free_area_cache = vma->vm_start;
|
|
@@ -363,29 +369,34 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
|
|
|
|
/* remember the largest hole we saw so far */
|
|
if (addr + largest_hole < vma->vm_start)
|
|
- largest_hole = vma->vm_start - addr;
|
|
+ largest_hole = vma->vm_start - addr;
|
|
|
|
/* try just below the current vma->vm_start */
|
|
- addr = (vma->vm_start - len) & huge_page_mask(h);
|
|
- } while (len <= vma->vm_start);
|
|
+ addr = skip_heap_stack_gap(vma, len);
|
|
+ } while (!IS_ERR_VALUE(addr));
|
|
|
|
fail:
|
|
/*
|
|
- * if hint left us with no space for the requested
|
|
- * mapping then try again:
|
|
- */
|
|
- if (start_addr != base) {
|
|
- mm->free_area_cache = base;
|
|
- largest_hole = 0;
|
|
- goto try_again;
|
|
- }
|
|
- /*
|
|
* A failed mmap() very likely causes application failure,
|
|
* so fall back to the bottom-up function here. This scenario
|
|
* can happen with large stack limits and large mmap()
|
|
* allocations.
|
|
*/
|
|
- mm->free_area_cache = TASK_UNMAPPED_BASE;
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
|
|
+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
|
|
+ else
|
|
+#endif
|
|
+
|
|
+ mm->mmap_base = TASK_UNMAPPED_BASE;
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
+ mm->mmap_base += mm->delta_mmap;
|
|
+#endif
|
|
+
|
|
+ mm->free_area_cache = mm->mmap_base;
|
|
mm->cached_hole_size = ~0UL;
|
|
addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
|
|
len, pgoff, flags);
|
|
@@ -393,6 +404,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
|
|
/*
|
|
* Restore the topdown base:
|
|
*/
|
|
+ mm->mmap_base = base;
|
|
mm->free_area_cache = base;
|
|
mm->cached_hole_size = ~0UL;
|
|
|
|
@@ -406,10 +418,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
|
|
struct hstate *h = hstate_file(file);
|
|
struct mm_struct *mm = current->mm;
|
|
struct vm_area_struct *vma;
|
|
+ unsigned long pax_task_size = TASK_SIZE;
|
|
|
|
if (len & ~huge_page_mask(h))
|
|
return -EINVAL;
|
|
- if (len > TASK_SIZE)
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
|
|
+ pax_task_size = SEGMEXEC_TASK_SIZE;
|
|
+#endif
|
|
+
|
|
+ pax_task_size -= PAGE_SIZE;
|
|
+
|
|
+ if (len > pax_task_size)
|
|
return -ENOMEM;
|
|
|
|
if (flags & MAP_FIXED) {
|
|
@@ -421,8 +442,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
|
|
if (addr) {
|
|
addr = ALIGN(addr, huge_page_size(h));
|
|
vma = find_vma(mm, addr);
|
|
- if (TASK_SIZE - len >= addr &&
|
|
- (!vma || addr + len <= vma->vm_start))
|
|
+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
|
|
return addr;
|
|
}
|
|
if (mm->get_unmapped_area == arch_get_unmapped_area)
|
|
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
|
|
index 71d37f5..250a08c 100644
|
|
--- a/arch/x86/mm/init.c
|
|
+++ b/arch/x86/mm/init.c
|
|
@@ -16,6 +16,7 @@
|
|
#include <asm/tlb.h>
|
|
#include <asm/proto.h>
|
|
#include <asm/dma.h> /* for MAX_DMA_PFN */
|
|
+#include <asm/desc.h>
|
|
|
|
unsigned long __initdata pgt_buf_start;
|
|
unsigned long __meminitdata pgt_buf_end;
|
|
@@ -327,7 +328,13 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
|
|
*/
|
|
int devmem_is_allowed(unsigned long pagenr)
|
|
{
|
|
- if (pagenr <= 256)
|
|
+ if (!pagenr)
|
|
+ return 1;
|
|
+#ifdef CONFIG_VM86
|
|
+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
|
|
+ return 1;
|
|
+#endif
|
|
+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
|
|
return 1;
|
|
if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
|
|
return 0;
|
|
@@ -387,6 +394,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
|
|
|
|
void free_initmem(void)
|
|
{
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+#ifdef CONFIG_X86_32
|
|
+ /* PaX: limit KERNEL_CS to actual size */
|
|
+ unsigned long addr, limit;
|
|
+ struct desc_struct d;
|
|
+ int cpu;
|
|
+
|
|
+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
|
|
+ limit = (limit - 1UL) >> PAGE_SHIFT;
|
|
+
|
|
+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
|
|
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
|
|
+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
|
|
+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
|
|
+ }
|
|
+
|
|
+ /* PaX: make KERNEL_CS read-only */
|
|
+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
|
|
+ if (!paravirt_enabled())
|
|
+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
|
|
+/*
|
|
+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
|
|
+ pgd = pgd_offset_k(addr);
|
|
+ pud = pud_offset(pgd, addr);
|
|
+ pmd = pmd_offset(pud, addr);
|
|
+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
|
|
+ }
|
|
+*/
|
|
+#ifdef CONFIG_X86_PAE
|
|
+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
|
|
+/*
|
|
+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
|
|
+ pgd = pgd_offset_k(addr);
|
|
+ pud = pud_offset(pgd, addr);
|
|
+ pmd = pmd_offset(pud, addr);
|
|
+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
|
|
+ }
|
|
+*/
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_MODULES
|
|
+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
|
|
+#endif
|
|
+
|
|
+#else
|
|
+ pgd_t *pgd;
|
|
+ pud_t *pud;
|
|
+ pmd_t *pmd;
|
|
+ unsigned long addr, end;
|
|
+
|
|
+ /* PaX: make kernel code/rodata read-only, rest non-executable */
|
|
+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
|
|
+ pgd = pgd_offset_k(addr);
|
|
+ pud = pud_offset(pgd, addr);
|
|
+ pmd = pmd_offset(pud, addr);
|
|
+ if (!pmd_present(*pmd))
|
|
+ continue;
|
|
+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
|
|
+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
|
|
+ else
|
|
+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
|
|
+ }
|
|
+
|
|
+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
|
|
+ end = addr + KERNEL_IMAGE_SIZE;
|
|
+ for (; addr < end; addr += PMD_SIZE) {
|
|
+ pgd = pgd_offset_k(addr);
|
|
+ pud = pud_offset(pgd, addr);
|
|
+ pmd = pmd_offset(pud, addr);
|
|
+ if (!pmd_present(*pmd))
|
|
+ continue;
|
|
+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
|
|
+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ flush_tlb_all();
|
|
+#endif
|
|
+
|
|
free_init_pages("unused kernel memory",
|
|
(unsigned long)(&__init_begin),
|
|
(unsigned long)(&__init_end));
|
|
diff --git a/arch/x86/mm/init.c.rej b/arch/x86/mm/init.c.rej
|
|
new file mode 100644
|
|
index 0000000..24e81e8
|
|
--- /dev/null
|
|
+++ b/arch/x86/mm/init.c.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- arch/x86/mm/init.c 2012-05-21 11:32:57.855927681 +0200
|
|
++++ arch/x86/mm/init.c 2012-05-21 12:10:09.756048901 +0200
|
|
+@@ -33,7 +34,7 @@ int direct_gbpages
|
|
+ static void __init find_early_table_space(unsigned long end, int use_pse,
|
|
+ int use_gbpages)
|
|
+ {
|
|
+- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
|
|
++ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
|
|
+ phys_addr_t base;
|
|
+
|
|
+ puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
|
|
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
|
|
index 575d86f..4987469 100644
|
|
--- a/arch/x86/mm/init_32.c
|
|
+++ b/arch/x86/mm/init_32.c
|
|
@@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
|
|
}
|
|
|
|
/*
|
|
- * Creates a middle page table and puts a pointer to it in the
|
|
- * given global directory entry. This only returns the gd entry
|
|
- * in non-PAE compilation mode, since the middle layer is folded.
|
|
- */
|
|
-static pmd_t * __init one_md_table_init(pgd_t *pgd)
|
|
-{
|
|
- pud_t *pud;
|
|
- pmd_t *pmd_table;
|
|
-
|
|
-#ifdef CONFIG_X86_PAE
|
|
- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
|
|
- if (after_bootmem)
|
|
- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
|
|
- else
|
|
- pmd_table = (pmd_t *)alloc_low_page();
|
|
- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
|
|
- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
|
|
- pud = pud_offset(pgd, 0);
|
|
- BUG_ON(pmd_table != pmd_offset(pud, 0));
|
|
-
|
|
- return pmd_table;
|
|
- }
|
|
-#endif
|
|
- pud = pud_offset(pgd, 0);
|
|
- pmd_table = pmd_offset(pud, 0);
|
|
-
|
|
- return pmd_table;
|
|
-}
|
|
-
|
|
-/*
|
|
* Create a page table and place a pointer to it in a middle page
|
|
* directory entry:
|
|
*/
|
|
@@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
|
|
page_table = (pte_t *)alloc_low_page();
|
|
|
|
paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
|
|
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
|
|
+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
|
|
+#else
|
|
set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
|
|
+#endif
|
|
BUG_ON(page_table != pte_offset_kernel(pmd, 0));
|
|
}
|
|
|
|
return pte_offset_kernel(pmd, 0);
|
|
}
|
|
|
|
+static pmd_t * __init one_md_table_init(pgd_t *pgd)
|
|
+{
|
|
+ pud_t *pud;
|
|
+ pmd_t *pmd_table;
|
|
+
|
|
+ pud = pud_offset(pgd, 0);
|
|
+ pmd_table = pmd_offset(pud, 0);
|
|
+
|
|
+ return pmd_table;
|
|
+}
|
|
+
|
|
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
|
|
{
|
|
int pgd_idx = pgd_index(vaddr);
|
|
@@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
|
|
int pgd_idx, pmd_idx;
|
|
unsigned long vaddr;
|
|
pgd_t *pgd;
|
|
+ pud_t *pud;
|
|
pmd_t *pmd;
|
|
pte_t *pte = NULL;
|
|
|
|
@@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
|
|
pgd = pgd_base + pgd_idx;
|
|
|
|
for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
|
|
- pmd = one_md_table_init(pgd);
|
|
- pmd = pmd + pmd_index(vaddr);
|
|
+ pud = pud_offset(pgd, vaddr);
|
|
+ pmd = pmd_offset(pud, vaddr);
|
|
+
|
|
+#ifdef CONFIG_X86_PAE
|
|
+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
|
|
+#endif
|
|
+
|
|
for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
|
|
pmd++, pmd_idx++) {
|
|
pte = page_table_kmap_check(one_page_table_init(pmd),
|
|
@@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
|
|
}
|
|
}
|
|
|
|
-static inline int is_kernel_text(unsigned long addr)
|
|
+static inline int is_kernel_text(unsigned long start, unsigned long end)
|
|
{
|
|
- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
|
|
- return 1;
|
|
- return 0;
|
|
+ if ((start > ktla_ktva((unsigned long)_etext) ||
|
|
+ end <= ktla_ktva((unsigned long)_stext)) &&
|
|
+ (start > ktla_ktva((unsigned long)_einittext) ||
|
|
+ end <= ktla_ktva((unsigned long)_sinittext)) &&
|
|
+
|
|
+#ifdef CONFIG_ACPI_SLEEP
|
|
+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
|
|
+#endif
|
|
+
|
|
+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
|
|
+ return 0;
|
|
+ return 1;
|
|
}
|
|
|
|
/*
|
|
@@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
|
|
unsigned long last_map_addr = end;
|
|
unsigned long start_pfn, end_pfn;
|
|
pgd_t *pgd_base = swapper_pg_dir;
|
|
- int pgd_idx, pmd_idx, pte_ofs;
|
|
+ unsigned int pgd_idx, pmd_idx, pte_ofs;
|
|
unsigned long pfn;
|
|
pgd_t *pgd;
|
|
+ pud_t *pud;
|
|
pmd_t *pmd;
|
|
pte_t *pte;
|
|
unsigned pages_2m, pages_4k;
|
|
@@ -280,8 +281,13 @@ kernel_physical_mapping_init(unsigned long start,
|
|
pfn = start_pfn;
|
|
pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
|
|
pgd = pgd_base + pgd_idx;
|
|
- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
|
|
- pmd = one_md_table_init(pgd);
|
|
+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
|
|
+ pud = pud_offset(pgd, 0);
|
|
+ pmd = pmd_offset(pud, 0);
|
|
+
|
|
+#ifdef CONFIG_X86_PAE
|
|
+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
|
|
+#endif
|
|
|
|
if (pfn >= end_pfn)
|
|
continue;
|
|
@@ -293,14 +299,13 @@ kernel_physical_mapping_init(unsigned long start,
|
|
#endif
|
|
for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
|
|
pmd++, pmd_idx++) {
|
|
- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
|
|
+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
|
|
|
|
/*
|
|
* Map with big pages if possible, otherwise
|
|
* create normal page tables:
|
|
*/
|
|
if (use_pse) {
|
|
- unsigned int addr2;
|
|
pgprot_t prot = PAGE_KERNEL_LARGE;
|
|
/*
|
|
* first pass will use the same initial
|
|
@@ -310,11 +315,7 @@ kernel_physical_mapping_init(unsigned long start,
|
|
__pgprot(PTE_IDENT_ATTR |
|
|
_PAGE_PSE);
|
|
|
|
- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
|
|
- PAGE_OFFSET + PAGE_SIZE-1;
|
|
-
|
|
- if (is_kernel_text(addr) ||
|
|
- is_kernel_text(addr2))
|
|
+ if (is_kernel_text(address, address + PMD_SIZE))
|
|
prot = PAGE_KERNEL_LARGE_EXEC;
|
|
|
|
pages_2m++;
|
|
@@ -331,7 +332,7 @@ kernel_physical_mapping_init(unsigned long start,
|
|
pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
|
|
pte += pte_ofs;
|
|
for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
|
|
- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
|
|
+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
|
|
pgprot_t prot = PAGE_KERNEL;
|
|
/*
|
|
* first pass will use the same initial
|
|
@@ -339,7 +340,7 @@ kernel_physical_mapping_init(unsigned long start,
|
|
*/
|
|
pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
|
|
|
|
- if (is_kernel_text(addr))
|
|
+ if (is_kernel_text(address, address + PAGE_SIZE))
|
|
prot = PAGE_KERNEL_EXEC;
|
|
|
|
pages_4k++;
|
|
@@ -465,7 +466,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
|
|
|
|
pud = pud_offset(pgd, va);
|
|
pmd = pmd_offset(pud, va);
|
|
- if (!pmd_present(*pmd))
|
|
+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
|
|
break;
|
|
|
|
pte = pte_offset_kernel(pmd, va);
|
|
@@ -517,12 +518,10 @@ void __init early_ioremap_page_table_range_init(void)
|
|
|
|
static void __init pagetable_init(void)
|
|
{
|
|
- pgd_t *pgd_base = swapper_pg_dir;
|
|
-
|
|
- permanent_kmaps_init(pgd_base);
|
|
+ permanent_kmaps_init(swapper_pg_dir);
|
|
}
|
|
|
|
-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
|
|
+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
|
|
EXPORT_SYMBOL_GPL(__supported_pte_mask);
|
|
|
|
/* user-defined highmem size */
|
|
@@ -734,6 +733,12 @@ void __init mem_init(void)
|
|
|
|
pci_iommu_alloc();
|
|
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
|
|
+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
|
|
+ KERNEL_PGD_PTRS);
|
|
+#endif
|
|
+
|
|
#ifdef CONFIG_FLATMEM
|
|
BUG_ON(!mem_map);
|
|
#endif
|
|
@@ -760,7 +765,7 @@ void __init mem_init(void)
|
|
reservedpages++;
|
|
|
|
codesize = (unsigned long) &_etext - (unsigned long) &_text;
|
|
- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
|
|
+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
|
|
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
|
|
|
|
printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
|
|
@@ -801,10 +806,10 @@ void __init mem_init(void)
|
|
((unsigned long)&__init_end -
|
|
(unsigned long)&__init_begin) >> 10,
|
|
|
|
- (unsigned long)&_etext, (unsigned long)&_edata,
|
|
- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
|
|
+ (unsigned long)&_sdata, (unsigned long)&_edata,
|
|
+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
|
|
|
|
- (unsigned long)&_text, (unsigned long)&_etext,
|
|
+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
|
|
((unsigned long)&_etext - (unsigned long)&_text) >> 10);
|
|
|
|
/*
|
|
@@ -882,6 +887,7 @@ void set_kernel_text_rw(void)
|
|
if (!kernel_set_to_readonly)
|
|
return;
|
|
|
|
+ start = ktla_ktva(start);
|
|
pr_debug("Set kernel text: %lx - %lx for read write\n",
|
|
start, start+size);
|
|
|
|
@@ -896,6 +902,7 @@ void set_kernel_text_ro(void)
|
|
if (!kernel_set_to_readonly)
|
|
return;
|
|
|
|
+ start = ktla_ktva(start);
|
|
pr_debug("Set kernel text: %lx - %lx for read only\n",
|
|
start, start+size);
|
|
|
|
@@ -924,6 +931,7 @@ void mark_rodata_ro(void)
|
|
unsigned long start = PFN_ALIGN(_text);
|
|
unsigned long size = PFN_ALIGN(_etext) - start;
|
|
|
|
+ start = ktla_ktva(start);
|
|
set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
|
|
printk(KERN_INFO "Write protecting the kernel text: %luk\n",
|
|
size >> 10);
|
|
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
|
|
index faf7a68..5616b27 100644
|
|
--- a/arch/x86/mm/init_64.c
|
|
+++ b/arch/x86/mm/init_64.c
|
|
@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
|
|
* around without checking the pgd every time.
|
|
*/
|
|
|
|
-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
|
|
+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
|
|
EXPORT_SYMBOL_GPL(__supported_pte_mask);
|
|
|
|
int force_personality32;
|
|
@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
|
|
|
|
for (address = start; address <= end; address += PGDIR_SIZE) {
|
|
const pgd_t *pgd_ref = pgd_offset_k(address);
|
|
+
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+ unsigned long cpu;
|
|
+#else
|
|
struct page *page;
|
|
+#endif
|
|
|
|
if (pgd_none(*pgd_ref))
|
|
continue;
|
|
|
|
spin_lock(&pgd_lock);
|
|
+
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
|
|
+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
|
|
+#else
|
|
list_for_each_entry(page, &pgd_list, lru) {
|
|
pgd_t *pgd;
|
|
spinlock_t *pgt_lock;
|
|
@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
|
|
/* the pgt_lock only for Xen */
|
|
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
|
|
spin_lock(pgt_lock);
|
|
+#endif
|
|
|
|
if (pgd_none(*pgd))
|
|
set_pgd(pgd, *pgd_ref);
|
|
@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
|
|
BUG_ON(pgd_page_vaddr(*pgd)
|
|
!= pgd_page_vaddr(*pgd_ref));
|
|
|
|
+#ifndef CONFIG_PAX_PER_CPU_PGD
|
|
spin_unlock(pgt_lock);
|
|
+#endif
|
|
+
|
|
}
|
|
spin_unlock(&pgd_lock);
|
|
}
|
|
@@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
|
|
{
|
|
if (pgd_none(*pgd)) {
|
|
pud_t *pud = (pud_t *)spp_getpage();
|
|
- pgd_populate(&init_mm, pgd, pud);
|
|
+ pgd_populate_kernel(&init_mm, pgd, pud);
|
|
if (pud != pud_offset(pgd, 0))
|
|
printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
|
|
pud, pud_offset(pgd, 0));
|
|
@@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
|
|
{
|
|
if (pud_none(*pud)) {
|
|
pmd_t *pmd = (pmd_t *) spp_getpage();
|
|
- pud_populate(&init_mm, pud, pmd);
|
|
+ pud_populate_kernel(&init_mm, pud, pmd);
|
|
if (pmd != pmd_offset(pud, 0))
|
|
printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
|
|
pmd, pmd_offset(pud, 0));
|
|
@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
|
|
pmd = fill_pmd(pud, vaddr);
|
|
pte = fill_pte(pmd, vaddr);
|
|
|
|
+ pax_open_kernel();
|
|
set_pte(pte, new_pte);
|
|
+ pax_close_kernel();
|
|
|
|
/*
|
|
* It's enough to flush this one mapping.
|
|
@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
|
|
pgd = pgd_offset_k((unsigned long)__va(phys));
|
|
if (pgd_none(*pgd)) {
|
|
pud = (pud_t *) spp_getpage();
|
|
- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
|
|
- _PAGE_USER));
|
|
+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
|
|
}
|
|
pud = pud_offset(pgd, (unsigned long)__va(phys));
|
|
if (pud_none(*pud)) {
|
|
pmd = (pmd_t *) spp_getpage();
|
|
- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
|
|
- _PAGE_USER));
|
|
+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
|
|
}
|
|
pmd = pmd_offset(pud, phys);
|
|
BUG_ON(!pmd_none(*pmd));
|
|
@@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
|
|
if (pfn >= pgt_buf_top)
|
|
panic("alloc_low_page: ran out of memory");
|
|
|
|
- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
|
|
+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
|
|
clear_page(adr);
|
|
*phys = pfn * PAGE_SIZE;
|
|
return adr;
|
|
@@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
|
|
|
|
phys = __pa(virt);
|
|
left = phys & (PAGE_SIZE - 1);
|
|
- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
|
|
+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
|
|
adr = (void *)(((unsigned long)adr) | left);
|
|
|
|
return adr;
|
|
@@ -545,7 +559,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
|
|
unmap_low_page(pmd);
|
|
|
|
spin_lock(&init_mm.page_table_lock);
|
|
- pud_populate(&init_mm, pud, __va(pmd_phys));
|
|
+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
|
|
spin_unlock(&init_mm.page_table_lock);
|
|
}
|
|
__flush_tlb_all();
|
|
@@ -591,7 +605,7 @@ kernel_physical_mapping_init(unsigned long start,
|
|
unmap_low_page(pud);
|
|
|
|
spin_lock(&init_mm.page_table_lock);
|
|
- pgd_populate(&init_mm, pgd, __va(pud_phys));
|
|
+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
|
|
spin_unlock(&init_mm.page_table_lock);
|
|
pgd_changed = true;
|
|
}
|
|
@@ -683,6 +697,12 @@ void __init mem_init(void)
|
|
|
|
pci_iommu_alloc();
|
|
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
|
|
+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
|
|
+ KERNEL_PGD_PTRS);
|
|
+#endif
|
|
+
|
|
/* clear_bss() already clear the empty_zero_page */
|
|
|
|
reservedpages = 0;
|
|
@@ -846,8 +866,8 @@ int kern_addr_valid(unsigned long addr)
|
|
static struct vm_area_struct gate_vma = {
|
|
.vm_start = VSYSCALL_START,
|
|
.vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
|
|
- .vm_page_prot = PAGE_READONLY_EXEC,
|
|
- .vm_flags = VM_READ | VM_EXEC
|
|
+ .vm_page_prot = PAGE_READONLY,
|
|
+ .vm_flags = VM_READ
|
|
};
|
|
|
|
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
|
|
@@ -881,7 +901,7 @@ int in_gate_area_no_mm(unsigned long addr)
|
|
|
|
const char *arch_vma_name(struct vm_area_struct *vma)
|
|
{
|
|
- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
|
|
+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
|
|
return "[vdso]";
|
|
if (vma == &gate_vma)
|
|
return "[vsyscall]";
|
|
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
|
|
index 7b179b49..6bd1777 100644
|
|
--- a/arch/x86/mm/iomap_32.c
|
|
+++ b/arch/x86/mm/iomap_32.c
|
|
@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
|
|
type = kmap_atomic_idx_push();
|
|
idx = type + KM_TYPE_NR * smp_processor_id();
|
|
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
|
|
+
|
|
+ pax_open_kernel();
|
|
set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
|
|
+ pax_close_kernel();
|
|
+
|
|
arch_flush_lazy_mmu_mode();
|
|
|
|
return (void *)vaddr;
|
|
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
|
|
index dec49d3..a043b25 100644
|
|
--- a/arch/x86/mm/ioremap.c
|
|
+++ b/arch/x86/mm/ioremap.c
|
|
@@ -327,6 +327,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
|
|
|
|
/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
|
|
if (page_is_ram(start >> PAGE_SHIFT))
|
|
+#ifdef CONFIG_HIGHMEM
|
|
+ if ((start >> PAGE_SHIFT) < max_low_pfn)
|
|
+#endif
|
|
return __va(phys);
|
|
|
|
addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
|
|
@@ -356,7 +359,7 @@ static int __init early_ioremap_debug_setup(char *str)
|
|
early_param("early_ioremap_debug", early_ioremap_debug_setup);
|
|
|
|
static __initdata int after_paging_init;
|
|
-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
|
|
+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
|
|
|
|
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
|
|
{
|
|
@@ -393,8 +396,7 @@ void __init early_ioremap_init(void)
|
|
slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
|
|
|
|
pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
|
|
- memset(bm_pte, 0, sizeof(bm_pte));
|
|
- pmd_populate_kernel(&init_mm, pmd, bm_pte);
|
|
+ pmd_populate_user(&init_mm, pmd, bm_pte);
|
|
|
|
/*
|
|
* The boot-ioremap range spans multiple pmds, for which
|
|
diff --git a/arch/x86/mm/ioremap.c.rej b/arch/x86/mm/ioremap.c.rej
|
|
new file mode 100644
|
|
index 0000000..08d957c
|
|
--- /dev/null
|
|
+++ b/arch/x86/mm/ioremap.c.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- arch/x86/mm/ioremap.c 2011-07-22 04:17:23.000000000 +0200
|
|
++++ arch/x86/mm/ioremap.c 2012-05-21 12:10:09.760048901 +0200
|
|
+@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(re
|
|
+ for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
|
|
+ int is_ram = page_is_ram(pfn);
|
|
+
|
|
+- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
|
|
++ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
|
|
+ return NULL;
|
|
+ WARN_ON_ONCE(is_ram);
|
|
+ }
|
|
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
|
|
index d87dd6d..bf3fa66 100644
|
|
--- a/arch/x86/mm/kmemcheck/kmemcheck.c
|
|
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
|
|
@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
|
|
* memory (e.g. tracked pages)? For now, we need this to avoid
|
|
* invoking kmemcheck for PnP BIOS calls.
|
|
*/
|
|
- if (regs->flags & X86_VM_MASK)
|
|
+ if (v8086_mode(regs))
|
|
return false;
|
|
- if (regs->cs != __KERNEL_CS)
|
|
+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
|
|
return false;
|
|
|
|
pte = kmemcheck_pte_lookup(address);
|
|
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
|
|
index 75f9e5d..f8b50a1 100644
|
|
--- a/arch/x86/mm/mmap.c
|
|
+++ b/arch/x86/mm/mmap.c
|
|
@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
|
|
* Leave an at least ~128 MB hole with possible stack randomization.
|
|
*/
|
|
#define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
|
|
-#define MAX_GAP (TASK_SIZE/6*5)
|
|
+#define MAX_GAP (pax_task_size/6*5)
|
|
|
|
static int mmap_is_legacy(void)
|
|
{
|
|
@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
|
|
return rnd << PAGE_SHIFT;
|
|
}
|
|
|
|
-static unsigned long mmap_base(void)
|
|
+static unsigned long mmap_base(struct mm_struct *mm)
|
|
{
|
|
unsigned long gap = rlimit(RLIMIT_STACK);
|
|
+ unsigned long pax_task_size = TASK_SIZE;
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
|
|
+ pax_task_size = SEGMEXEC_TASK_SIZE;
|
|
+#endif
|
|
|
|
if (gap < MIN_GAP)
|
|
gap = MIN_GAP;
|
|
else if (gap > MAX_GAP)
|
|
gap = MAX_GAP;
|
|
|
|
- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
|
|
+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
|
|
}
|
|
|
|
/*
|
|
* Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
|
|
* does, but not when emulating X86_32
|
|
*/
|
|
-static unsigned long mmap_legacy_base(void)
|
|
+static unsigned long mmap_legacy_base(struct mm_struct *mm)
|
|
{
|
|
- if (mmap_is_ia32())
|
|
+ if (mmap_is_ia32()) {
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
|
|
+ return SEGMEXEC_TASK_UNMAPPED_BASE;
|
|
+ else
|
|
+#endif
|
|
+
|
|
return TASK_UNMAPPED_BASE;
|
|
- else
|
|
+ } else
|
|
return TASK_UNMAPPED_BASE + mmap_rnd();
|
|
}
|
|
|
|
diff --git a/arch/x86/mm/mmap.c.rej b/arch/x86/mm/mmap.c.rej
|
|
new file mode 100644
|
|
index 0000000..1a9121a
|
|
--- /dev/null
|
|
+++ b/arch/x86/mm/mmap.c.rej
|
|
@@ -0,0 +1,28 @@
|
|
+--- arch/x86/mm/mmap.c 2012-03-19 10:38:56.692049991 +0100
|
|
++++ arch/x86/mm/mmap.c 2012-05-21 12:10:09.764048901 +0200
|
|
+@@ -126,11 +139,23 @@ static unsigned long mmap_legacy_base(vo
|
|
+ void arch_pick_mmap_layout(struct mm_struct *mm)
|
|
+ {
|
|
+ if (mmap_is_legacy()) {
|
|
+- mm->mmap_base = mmap_legacy_base();
|
|
++ mm->mmap_base = mmap_legacy_base(mm);
|
|
++
|
|
++#ifdef CONFIG_PAX_RANDMMAP
|
|
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
++ mm->mmap_base += mm->delta_mmap;
|
|
++#endif
|
|
++
|
|
+ mm->get_unmapped_area = arch_get_unmapped_area;
|
|
+ mm->unmap_area = arch_unmap_area;
|
|
+ } else {
|
|
+- mm->mmap_base = mmap_base();
|
|
++ mm->mmap_base = mmap_base(mm);
|
|
++
|
|
++#ifdef CONFIG_PAX_RANDMMAP
|
|
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
|
|
++#endif
|
|
++
|
|
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
|
|
+ mm->unmap_area = arch_unmap_area_topdown;
|
|
+ }
|
|
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
|
|
index dc0b727..dc9d71a 100644
|
|
--- a/arch/x86/mm/mmio-mod.c
|
|
+++ b/arch/x86/mm/mmio-mod.c
|
|
@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
|
|
break;
|
|
default:
|
|
{
|
|
- unsigned char *ip = (unsigned char *)instptr;
|
|
+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
|
|
my_trace->opcode = MMIO_UNKNOWN_OP;
|
|
my_trace->width = 0;
|
|
my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
|
|
@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
|
|
static void ioremap_trace_core(resource_size_t offset, unsigned long size,
|
|
void __iomem *addr)
|
|
{
|
|
- static atomic_t next_id;
|
|
+ static atomic_unchecked_t next_id;
|
|
struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
|
|
/* These are page-unaligned. */
|
|
struct mmiotrace_map map = {
|
|
@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
|
|
.private = trace
|
|
},
|
|
.phys = offset,
|
|
- .id = atomic_inc_return(&next_id)
|
|
+ .id = atomic_inc_return_unchecked(&next_id)
|
|
};
|
|
map.map_id = trace->id;
|
|
|
|
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
|
|
index b008656..773eac2 100644
|
|
--- a/arch/x86/mm/pageattr-test.c
|
|
+++ b/arch/x86/mm/pageattr-test.c
|
|
@@ -36,7 +36,7 @@ enum {
|
|
|
|
static int pte_testbit(pte_t pte)
|
|
{
|
|
- return pte_flags(pte) & _PAGE_UNUSED1;
|
|
+ return pte_flags(pte) & _PAGE_CPA_TEST;
|
|
}
|
|
|
|
struct split_state {
|
|
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
|
|
index e1ebde3..b1e1db38 100644
|
|
--- a/arch/x86/mm/pageattr.c
|
|
+++ b/arch/x86/mm/pageattr.c
|
|
@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
|
|
*/
|
|
#ifdef CONFIG_PCI_BIOS
|
|
if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
|
|
- pgprot_val(forbidden) |= _PAGE_NX;
|
|
+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
|
|
#endif
|
|
|
|
/*
|
|
@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
|
|
* Does not cover __inittext since that is gone later on. On
|
|
* 64bit we do not enforce !NX on the low mapping
|
|
*/
|
|
- if (within(address, (unsigned long)_text, (unsigned long)_etext))
|
|
- pgprot_val(forbidden) |= _PAGE_NX;
|
|
+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
|
|
+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
|
|
|
|
+#ifdef CONFIG_DEBUG_RODATA
|
|
/*
|
|
* The .rodata section needs to be read-only. Using the pfn
|
|
* catches all aliases.
|
|
@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
|
|
if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
|
|
__pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
|
|
pgprot_val(forbidden) |= _PAGE_RW;
|
|
+#endif
|
|
|
|
#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
|
|
/*
|
|
@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
|
|
}
|
|
#endif
|
|
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
|
|
+ pgprot_val(forbidden) |= _PAGE_RW;
|
|
+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
|
|
+ }
|
|
+#endif
|
|
+
|
|
prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
|
|
|
|
return prot;
|
|
@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
|
|
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
|
|
{
|
|
/* change init_mm */
|
|
+ pax_open_kernel();
|
|
set_pte_atomic(kpte, pte);
|
|
+
|
|
#ifdef CONFIG_X86_32
|
|
if (!SHARED_KERNEL_PMD) {
|
|
+
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+ unsigned long cpu;
|
|
+#else
|
|
struct page *page;
|
|
+#endif
|
|
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
|
|
+ pgd_t *pgd = get_cpu_pgd(cpu);
|
|
+#else
|
|
list_for_each_entry(page, &pgd_list, lru) {
|
|
- pgd_t *pgd;
|
|
+ pgd_t *pgd = (pgd_t *)page_address(page);
|
|
+#endif
|
|
+
|
|
pud_t *pud;
|
|
pmd_t *pmd;
|
|
|
|
- pgd = (pgd_t *)page_address(page) + pgd_index(address);
|
|
+ pgd += pgd_index(address);
|
|
pud = pud_offset(pgd, address);
|
|
pmd = pmd_offset(pud, address);
|
|
set_pte_atomic((pte_t *)pmd, pte);
|
|
}
|
|
}
|
|
#endif
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
static int
|
|
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
|
|
index f6ff57b..481690f 100644
|
|
--- a/arch/x86/mm/pat.c
|
|
+++ b/arch/x86/mm/pat.c
|
|
@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
|
|
|
|
if (!entry) {
|
|
printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
|
|
- current->comm, current->pid, start, end);
|
|
+ current->comm, task_pid_nr(current), start, end);
|
|
return -EINVAL;
|
|
}
|
|
|
|
@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
|
|
while (cursor < to) {
|
|
if (!devmem_is_allowed(pfn)) {
|
|
printk(KERN_INFO
|
|
- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
|
|
- current->comm, from, to);
|
|
+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
|
|
+ current->comm, from, to, cursor);
|
|
return 0;
|
|
}
|
|
cursor += PAGE_SIZE;
|
|
@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
|
|
printk(KERN_INFO
|
|
"%s:%d ioremap_change_attr failed %s "
|
|
"for %Lx-%Lx\n",
|
|
- current->comm, current->pid,
|
|
+ current->comm, task_pid_nr(current),
|
|
cattr_name(flags),
|
|
base, (unsigned long long)(base + size));
|
|
return -EINVAL;
|
|
@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
|
|
if (want_flags != flags) {
|
|
printk(KERN_WARNING
|
|
"%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
|
|
- current->comm, current->pid,
|
|
+ current->comm, task_pid_nr(current),
|
|
cattr_name(want_flags),
|
|
(unsigned long long)paddr,
|
|
(unsigned long long)(paddr + size),
|
|
@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
|
|
free_memtype(paddr, paddr + size);
|
|
printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
|
|
" for %Lx-%Lx, got %s\n",
|
|
- current->comm, current->pid,
|
|
+ current->comm, task_pid_nr(current),
|
|
cattr_name(want_flags),
|
|
(unsigned long long)paddr,
|
|
(unsigned long long)(paddr + size),
|
|
diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
|
|
index 9f0614d..92ae64a 100644
|
|
--- a/arch/x86/mm/pf_in.c
|
|
+++ b/arch/x86/mm/pf_in.c
|
|
@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
|
|
int i;
|
|
enum reason_type rv = OTHERS;
|
|
|
|
- p = (unsigned char *)ins_addr;
|
|
+ p = (unsigned char *)ktla_ktva(ins_addr);
|
|
p += skip_prefix(p, &prf);
|
|
p += get_opcode(p, &opcode);
|
|
|
|
@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
|
|
struct prefix_bits prf;
|
|
int i;
|
|
|
|
- p = (unsigned char *)ins_addr;
|
|
+ p = (unsigned char *)ktla_ktva(ins_addr);
|
|
p += skip_prefix(p, &prf);
|
|
p += get_opcode(p, &opcode);
|
|
|
|
@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
|
|
struct prefix_bits prf;
|
|
int i;
|
|
|
|
- p = (unsigned char *)ins_addr;
|
|
+ p = (unsigned char *)ktla_ktva(ins_addr);
|
|
p += skip_prefix(p, &prf);
|
|
p += get_opcode(p, &opcode);
|
|
|
|
@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
|
|
struct prefix_bits prf;
|
|
int i;
|
|
|
|
- p = (unsigned char *)ins_addr;
|
|
+ p = (unsigned char *)ktla_ktva(ins_addr);
|
|
p += skip_prefix(p, &prf);
|
|
p += get_opcode(p, &opcode);
|
|
for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
|
|
@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
|
|
struct prefix_bits prf;
|
|
int i;
|
|
|
|
- p = (unsigned char *)ins_addr;
|
|
+ p = (unsigned char *)ktla_ktva(ins_addr);
|
|
p += skip_prefix(p, &prf);
|
|
p += get_opcode(p, &opcode);
|
|
for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
|
|
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
|
|
index 8573b83..d89f15e 100644
|
|
--- a/arch/x86/mm/pgtable.c
|
|
+++ b/arch/x86/mm/pgtable.c
|
|
@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
|
|
list_del(&page->lru);
|
|
}
|
|
|
|
-#define UNSHARED_PTRS_PER_PGD \
|
|
- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
|
|
|
|
+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
|
|
+{
|
|
+ unsigned int count = USER_PGD_PTRS;
|
|
+
|
|
+ while (count--)
|
|
+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
|
|
+}
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
|
|
+{
|
|
+ unsigned int count = USER_PGD_PTRS;
|
|
+
|
|
+ while (count--) {
|
|
+ pgd_t pgd;
|
|
+
|
|
+#ifdef CONFIG_X86_64
|
|
+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
|
|
+#else
|
|
+ pgd = *src++;
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
|
|
+#endif
|
|
|
|
+ *dst++ = pgd;
|
|
+ }
|
|
+
|
|
+}
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_X86_64
|
|
+#define pxd_t pud_t
|
|
+#define pyd_t pgd_t
|
|
+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
|
|
+#define pxd_free(mm, pud) pud_free((mm), (pud))
|
|
+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
|
|
+#define pyd_offset(mm, address) pgd_offset((mm), (address))
|
|
+#define PYD_SIZE PGDIR_SIZE
|
|
+#else
|
|
+#define pxd_t pmd_t
|
|
+#define pyd_t pud_t
|
|
+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
|
|
+#define pxd_free(mm, pud) pmd_free((mm), (pud))
|
|
+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
|
|
+#define pyd_offset(mm, address) pud_offset((mm), (address))
|
|
+#define PYD_SIZE PUD_SIZE
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
|
|
+static inline void pgd_dtor(pgd_t *pgd) {}
|
|
+#else
|
|
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
|
|
{
|
|
BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
|
|
@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
|
|
pgd_list_del(pgd);
|
|
spin_unlock(&pgd_lock);
|
|
}
|
|
+#endif
|
|
|
|
/*
|
|
* List of all pgd's needed for non-PAE so it can invalidate entries
|
|
@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
|
|
* -- wli
|
|
*/
|
|
|
|
-#ifdef CONFIG_X86_PAE
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
|
|
/*
|
|
* In PAE mode, we need to do a cr3 reload (=tlb flush) when
|
|
* updating the top-level pagetable entries to guarantee the
|
|
@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
|
|
* not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
|
|
* and initialize the kernel pmds here.
|
|
*/
|
|
-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
|
|
+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
|
|
|
|
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
|
|
{
|
|
@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
|
|
*/
|
|
flush_tlb_mm(mm);
|
|
}
|
|
+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
|
|
+#define PREALLOCATED_PXDS USER_PGD_PTRS
|
|
#else /* !CONFIG_X86_PAE */
|
|
|
|
/* No need to prepopulate any pagetable entries in non-PAE modes. */
|
|
-#define PREALLOCATED_PMDS 0
|
|
+#define PREALLOCATED_PXDS 0
|
|
|
|
#endif /* CONFIG_X86_PAE */
|
|
|
|
-static void free_pmds(pmd_t *pmds[])
|
|
+static void free_pxds(pxd_t *pxds[])
|
|
{
|
|
int i;
|
|
|
|
- for(i = 0; i < PREALLOCATED_PMDS; i++)
|
|
- if (pmds[i])
|
|
- free_page((unsigned long)pmds[i]);
|
|
+ for(i = 0; i < PREALLOCATED_PXDS; i++)
|
|
+ if (pxds[i])
|
|
+ free_page((unsigned long)pxds[i]);
|
|
}
|
|
|
|
-static int preallocate_pmds(pmd_t *pmds[])
|
|
+static int preallocate_pxds(pxd_t *pxds[])
|
|
{
|
|
int i;
|
|
bool failed = false;
|
|
|
|
- for(i = 0; i < PREALLOCATED_PMDS; i++) {
|
|
- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
|
|
- if (pmd == NULL)
|
|
+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
|
|
+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
|
|
+ if (pxd == NULL)
|
|
failed = true;
|
|
- pmds[i] = pmd;
|
|
+ pxds[i] = pxd;
|
|
}
|
|
|
|
if (failed) {
|
|
- free_pmds(pmds);
|
|
+ free_pxds(pxds);
|
|
return -ENOMEM;
|
|
}
|
|
|
|
@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
|
|
* preallocate which never got a corresponding vma will need to be
|
|
* freed manually.
|
|
*/
|
|
-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
|
|
+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
|
|
{
|
|
int i;
|
|
|
|
- for(i = 0; i < PREALLOCATED_PMDS; i++) {
|
|
+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
|
|
pgd_t pgd = pgdp[i];
|
|
|
|
if (pgd_val(pgd) != 0) {
|
|
- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
|
|
+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
|
|
|
|
- pgdp[i] = native_make_pgd(0);
|
|
+ set_pgd(pgdp + i, native_make_pgd(0));
|
|
|
|
- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
|
|
- pmd_free(mm, pmd);
|
|
+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
|
|
+ pxd_free(mm, pxd);
|
|
}
|
|
}
|
|
}
|
|
|
|
-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
|
|
+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
|
|
{
|
|
- pud_t *pud;
|
|
+ pyd_t *pyd;
|
|
unsigned long addr;
|
|
int i;
|
|
|
|
- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
|
|
+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
|
|
return;
|
|
|
|
- pud = pud_offset(pgd, 0);
|
|
+#ifdef CONFIG_X86_64
|
|
+ pyd = pyd_offset(mm, 0L);
|
|
+#else
|
|
+ pyd = pyd_offset(pgd, 0L);
|
|
+#endif
|
|
|
|
- for (addr = i = 0; i < PREALLOCATED_PMDS;
|
|
- i++, pud++, addr += PUD_SIZE) {
|
|
- pmd_t *pmd = pmds[i];
|
|
+ for (addr = i = 0; i < PREALLOCATED_PXDS;
|
|
+ i++, pyd++, addr += PYD_SIZE) {
|
|
+ pxd_t *pxd = pxds[i];
|
|
|
|
if (i >= KERNEL_PGD_BOUNDARY)
|
|
- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
|
|
- sizeof(pmd_t) * PTRS_PER_PMD);
|
|
+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
|
|
+ sizeof(pxd_t) * PTRS_PER_PMD);
|
|
|
|
- pud_populate(mm, pud, pmd);
|
|
+ pyd_populate(mm, pyd, pxd);
|
|
}
|
|
}
|
|
|
|
pgd_t *pgd_alloc(struct mm_struct *mm)
|
|
{
|
|
pgd_t *pgd;
|
|
- pmd_t *pmds[PREALLOCATED_PMDS];
|
|
+ pxd_t *pxds[PREALLOCATED_PXDS];
|
|
|
|
pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
|
|
|
|
@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
|
|
|
|
mm->pgd = pgd;
|
|
|
|
- if (preallocate_pmds(pmds) != 0)
|
|
+ if (preallocate_pxds(pxds) != 0)
|
|
goto out_free_pgd;
|
|
|
|
if (paravirt_pgd_alloc(mm) != 0)
|
|
- goto out_free_pmds;
|
|
+ goto out_free_pxds;
|
|
|
|
/*
|
|
* Make sure that pre-populating the pmds is atomic with
|
|
@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
|
|
spin_lock(&pgd_lock);
|
|
|
|
pgd_ctor(mm, pgd);
|
|
- pgd_prepopulate_pmd(mm, pgd, pmds);
|
|
+ pgd_prepopulate_pxd(mm, pgd, pxds);
|
|
|
|
spin_unlock(&pgd_lock);
|
|
|
|
return pgd;
|
|
|
|
-out_free_pmds:
|
|
- free_pmds(pmds);
|
|
+out_free_pxds:
|
|
+ free_pxds(pxds);
|
|
out_free_pgd:
|
|
free_page((unsigned long)pgd);
|
|
out:
|
|
@@ -295,7 +356,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
|
|
|
|
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
|
|
{
|
|
- pgd_mop_up_pmds(mm, pgd);
|
|
+ pgd_mop_up_pxds(mm, pgd);
|
|
pgd_dtor(pgd);
|
|
paravirt_pgd_free(mm, pgd);
|
|
free_page((unsigned long)pgd);
|
|
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
|
|
index a69bcb8..19068ab 100644
|
|
--- a/arch/x86/mm/pgtable_32.c
|
|
+++ b/arch/x86/mm/pgtable_32.c
|
|
@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
|
|
return;
|
|
}
|
|
pte = pte_offset_kernel(pmd, vaddr);
|
|
+
|
|
+ pax_open_kernel();
|
|
if (pte_val(pteval))
|
|
set_pte_at(&init_mm, vaddr, pte, pteval);
|
|
else
|
|
pte_clear(&init_mm, vaddr, pte);
|
|
+ pax_close_kernel();
|
|
|
|
/*
|
|
* It's enough to flush this one mapping.
|
|
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
|
|
index 410531d..0f16030 100644
|
|
--- a/arch/x86/mm/setup_nx.c
|
|
+++ b/arch/x86/mm/setup_nx.c
|
|
@@ -5,8 +5,10 @@
|
|
#include <asm/pgtable.h>
|
|
#include <asm/proto.h>
|
|
|
|
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
|
|
static int disable_nx __cpuinitdata;
|
|
|
|
+#ifndef CONFIG_PAX_PAGEEXEC
|
|
/*
|
|
* noexec = on|off
|
|
*
|
|
@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
|
|
return 0;
|
|
}
|
|
early_param("noexec", noexec_setup);
|
|
+#endif
|
|
+
|
|
+#endif
|
|
|
|
void __cpuinit x86_configure_nx(void)
|
|
{
|
|
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
|
|
if (cpu_has_nx && !disable_nx)
|
|
__supported_pte_mask |= _PAGE_NX;
|
|
else
|
|
+#endif
|
|
__supported_pte_mask &= ~_PAGE_NX;
|
|
}
|
|
|
|
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
|
|
index d6c0418..06a0ad5 100644
|
|
--- a/arch/x86/mm/tlb.c
|
|
+++ b/arch/x86/mm/tlb.c
|
|
@@ -65,7 +65,11 @@ void leave_mm(int cpu)
|
|
BUG();
|
|
cpumask_clear_cpu(cpu,
|
|
mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
|
|
+
|
|
+#ifndef CONFIG_PAX_PER_CPU_PGD
|
|
load_cr3(swapper_pg_dir);
|
|
+#endif
|
|
+
|
|
}
|
|
EXPORT_SYMBOL_GPL(leave_mm);
|
|
|
|
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
|
|
index 0149575..f746de8 100644
|
|
--- a/arch/x86/net/bpf_jit.S
|
|
+++ b/arch/x86/net/bpf_jit.S
|
|
@@ -9,6 +9,7 @@
|
|
*/
|
|
#include <linux/linkage.h>
|
|
#include <asm/dwarf2.h>
|
|
+#include <asm/alternative-asm.h>
|
|
|
|
/*
|
|
* Calling convention :
|
|
@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
|
|
jle bpf_slow_path_word
|
|
mov (SKBDATA,%rsi),%eax
|
|
bswap %eax /* ntohl() */
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
sk_load_half:
|
|
@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
|
|
jle bpf_slow_path_half
|
|
movzwl (SKBDATA,%rsi),%eax
|
|
rol $8,%ax # ntohs()
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
sk_load_byte:
|
|
@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
|
|
cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
|
|
jle bpf_slow_path_byte
|
|
movzbl (SKBDATA,%rsi),%eax
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
/**
|
|
@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
|
|
movzbl (SKBDATA,%rsi),%ebx
|
|
and $15,%bl
|
|
shl $2,%bl
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
/* rsi contains offset and can be scratched */
|
|
@@ -109,6 +114,7 @@ bpf_slow_path_word:
|
|
js bpf_error
|
|
mov -12(%rbp),%eax
|
|
bswap %eax
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
bpf_slow_path_half:
|
|
@@ -117,12 +123,14 @@ bpf_slow_path_half:
|
|
mov -12(%rbp),%ax
|
|
rol $8,%ax
|
|
movzwl %ax,%eax
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
bpf_slow_path_byte:
|
|
bpf_slow_path_common(1)
|
|
js bpf_error
|
|
movzbl -12(%rbp),%eax
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
bpf_slow_path_byte_msh:
|
|
@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
|
|
and $15,%al
|
|
shl $2,%al
|
|
xchg %eax,%ebx
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
#define sk_negative_common(SIZE) \
|
|
@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
|
|
sk_negative_common(4)
|
|
mov (%rax), %eax
|
|
bswap %eax
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
bpf_slow_path_half_neg:
|
|
@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
|
|
mov (%rax),%ax
|
|
rol $8,%ax
|
|
movzwl %ax,%eax
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
bpf_slow_path_byte_neg:
|
|
@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
|
|
.globl sk_load_byte_negative_offset
|
|
sk_negative_common(1)
|
|
movzbl (%rax), %eax
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
bpf_slow_path_byte_msh_neg:
|
|
@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
|
|
and $15,%al
|
|
shl $2,%al
|
|
xchg %eax,%ebx
|
|
+ pax_force_retaddr
|
|
ret
|
|
|
|
bpf_error:
|
|
@@ -197,4 +210,5 @@ bpf_error:
|
|
xor %eax,%eax
|
|
mov -8(%rbp),%rbx
|
|
leaveq
|
|
+ pax_force_retaddr
|
|
ret
|
|
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
|
|
index 95f9934..3840a93 100644
|
|
--- a/arch/x86/net/bpf_jit_comp.c
|
|
+++ b/arch/x86/net/bpf_jit_comp.c
|
|
@@ -120,6 +120,11 @@ static inline void bpf_flush_icache(void *start, void *end)
|
|
set_fs(old_fs);
|
|
}
|
|
|
|
+struct bpf_jit_work {
|
|
+ struct work_struct work;
|
|
+ void *image;
|
|
+};
|
|
+
|
|
#define CHOOSE_LOAD_FUNC(K, func) \
|
|
((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
|
|
|
|
@@ -146,6 +151,10 @@ void bpf_jit_compile(struct sk_filter *fp)
|
|
if (addrs == NULL)
|
|
return;
|
|
|
|
+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
|
|
+ if (!fp->work)
|
|
+ goto out;
|
|
+
|
|
/* Before first pass, make a rough estimation of addrs[]
|
|
* each bpf instruction is translated to less than 64 bytes
|
|
*/
|
|
@@ -594,17 +603,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
|
|
break;
|
|
default:
|
|
/* hmm, too complex filter, give up with jit compiler */
|
|
- goto out;
|
|
+ goto error;
|
|
}
|
|
ilen = prog - temp;
|
|
if (image) {
|
|
if (unlikely(proglen + ilen > oldproglen)) {
|
|
pr_err("bpb_jit_compile fatal error\n");
|
|
- kfree(addrs);
|
|
- module_free(NULL, image);
|
|
- return;
|
|
+ module_free_exec(NULL, image);
|
|
+ goto error;
|
|
}
|
|
+ pax_open_kernel();
|
|
memcpy(image + proglen, temp, ilen);
|
|
+ pax_close_kernel();
|
|
}
|
|
proglen += ilen;
|
|
addrs[i] = proglen;
|
|
@@ -625,11 +635,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
|
|
break;
|
|
}
|
|
if (proglen == oldproglen) {
|
|
- image = module_alloc(max_t(unsigned int,
|
|
- proglen,
|
|
- sizeof(struct work_struct)));
|
|
+ image = module_alloc_exec(proglen);
|
|
if (!image)
|
|
- goto out;
|
|
+ goto error;
|
|
}
|
|
oldproglen = proglen;
|
|
}
|
|
@@ -645,7 +653,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
|
|
bpf_flush_icache(image, image + proglen);
|
|
|
|
fp->bpf_func = (void *)image;
|
|
- }
|
|
+ } else
|
|
+error:
|
|
+ kfree(fp->work);
|
|
+
|
|
out:
|
|
kfree(addrs);
|
|
return;
|
|
@@ -653,18 +664,20 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
|
|
|
|
static void jit_free_defer(struct work_struct *arg)
|
|
{
|
|
- module_free(NULL, arg);
|
|
+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
|
|
+ kfree(arg);
|
|
}
|
|
|
|
/* run from softirq, we must use a work_struct to call
|
|
- * module_free() from process context
|
|
+ * module_free_exec() from process context
|
|
*/
|
|
void bpf_jit_free(struct sk_filter *fp)
|
|
{
|
|
if (fp->bpf_func != sk_run_filter) {
|
|
- struct work_struct *work = (struct work_struct *)fp->bpf_func;
|
|
+ struct work_struct *work = &fp->work->work;
|
|
|
|
INIT_WORK(work, jit_free_defer);
|
|
+ fp->work->image = fp->bpf_func;
|
|
schedule_work(work);
|
|
}
|
|
}
|
|
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
|
|
index d6aa6e8..266395a 100644
|
|
--- a/arch/x86/oprofile/backtrace.c
|
|
+++ b/arch/x86/oprofile/backtrace.c
|
|
@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
|
|
struct stack_frame_ia32 *fp;
|
|
unsigned long bytes;
|
|
|
|
- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
|
|
+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
|
|
if (bytes != sizeof(bufhead))
|
|
return NULL;
|
|
|
|
- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
|
|
+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
|
|
|
|
oprofile_add_trace(bufhead[0].return_address);
|
|
|
|
@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
|
|
struct stack_frame bufhead[2];
|
|
unsigned long bytes;
|
|
|
|
- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
|
|
+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
|
|
if (bytes != sizeof(bufhead))
|
|
return NULL;
|
|
|
|
@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
|
|
{
|
|
struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
|
|
|
|
- if (!user_mode_vm(regs)) {
|
|
+ if (!user_mode(regs)) {
|
|
unsigned long stack = kernel_stack_pointer(regs);
|
|
if (depth)
|
|
dump_trace(NULL, regs, (unsigned long *)stack, 0,
|
|
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
|
|
index 831971e..dd8ca6f 100644
|
|
--- a/arch/x86/pci/i386.c
|
|
+++ b/arch/x86/pci/i386.c
|
|
@@ -57,7 +57,7 @@ static struct pcibios_fwaddrmap *pcibios_fwaddrmap_lookup(struct pci_dev *dev)
|
|
{
|
|
struct pcibios_fwaddrmap *map;
|
|
|
|
- WARN_ON(!spin_is_locked(&pcibios_fwaddrmap_lock));
|
|
+ WARN_ON_SMP(!spin_is_locked(&pcibios_fwaddrmap_lock));
|
|
|
|
list_for_each_entry(map, &pcibios_fwaddrmappings, list)
|
|
if (map->dev == dev)
|
|
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
|
|
index 140942f..8a5cc55 100644
|
|
--- a/arch/x86/pci/mrst.c
|
|
+++ b/arch/x86/pci/mrst.c
|
|
@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
|
|
printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
|
|
pci_mmcfg_late_init();
|
|
pcibios_enable_irq = mrst_pci_irq_enable;
|
|
- pci_root_ops = pci_mrst_ops;
|
|
+ pax_open_kernel();
|
|
+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
|
|
+ pax_close_kernel();
|
|
pci_soc_mode = 1;
|
|
/* Continue with standard init */
|
|
return 1;
|
|
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
|
|
index da8fe05..7ee6704 100644
|
|
--- a/arch/x86/pci/pcbios.c
|
|
+++ b/arch/x86/pci/pcbios.c
|
|
@@ -79,50 +79,93 @@ union bios32 {
|
|
static struct {
|
|
unsigned long address;
|
|
unsigned short segment;
|
|
-} bios32_indirect = { 0, __KERNEL_CS };
|
|
+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
|
|
|
|
/*
|
|
* Returns the entry point for the given service, NULL on error
|
|
*/
|
|
|
|
-static unsigned long bios32_service(unsigned long service)
|
|
+static unsigned long __devinit bios32_service(unsigned long service)
|
|
{
|
|
unsigned char return_code; /* %al */
|
|
unsigned long address; /* %ebx */
|
|
unsigned long length; /* %ecx */
|
|
unsigned long entry; /* %edx */
|
|
unsigned long flags;
|
|
+ struct desc_struct d, *gdt;
|
|
|
|
local_irq_save(flags);
|
|
- __asm__("lcall *(%%edi); cld"
|
|
+
|
|
+ gdt = get_cpu_gdt_table(smp_processor_id());
|
|
+
|
|
+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
|
|
+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
|
|
+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
|
|
+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
|
|
+
|
|
+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
|
|
: "=a" (return_code),
|
|
"=b" (address),
|
|
"=c" (length),
|
|
"=d" (entry)
|
|
: "0" (service),
|
|
"1" (0),
|
|
- "D" (&bios32_indirect));
|
|
+ "D" (&bios32_indirect),
|
|
+ "r"(__PCIBIOS_DS)
|
|
+ : "memory");
|
|
+
|
|
+ pax_open_kernel();
|
|
+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
|
|
+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
|
|
+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
|
|
+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
|
|
+ pax_close_kernel();
|
|
+
|
|
local_irq_restore(flags);
|
|
|
|
switch (return_code) {
|
|
- case 0:
|
|
- return address + entry;
|
|
- case 0x80: /* Not present */
|
|
- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
|
|
- return 0;
|
|
- default: /* Shouldn't happen */
|
|
- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
|
|
- service, return_code);
|
|
+ case 0: {
|
|
+ int cpu;
|
|
+ unsigned char flags;
|
|
+
|
|
+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
|
|
+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
|
|
+ printk(KERN_WARNING "bios32_service: not valid\n");
|
|
return 0;
|
|
+ }
|
|
+ address = address + PAGE_OFFSET;
|
|
+ length += 16UL; /* some BIOSs underreport this... */
|
|
+ flags = 4;
|
|
+ if (length >= 64*1024*1024) {
|
|
+ length >>= PAGE_SHIFT;
|
|
+ flags |= 8;
|
|
+ }
|
|
+
|
|
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
|
|
+ gdt = get_cpu_gdt_table(cpu);
|
|
+ pack_descriptor(&d, address, length, 0x9b, flags);
|
|
+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
|
|
+ pack_descriptor(&d, address, length, 0x93, flags);
|
|
+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
|
|
+ }
|
|
+ return entry;
|
|
+ }
|
|
+ case 0x80: /* Not present */
|
|
+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
|
|
+ return 0;
|
|
+ default: /* Shouldn't happen */
|
|
+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
|
|
+ service, return_code);
|
|
+ return 0;
|
|
}
|
|
}
|
|
|
|
static struct {
|
|
unsigned long address;
|
|
unsigned short segment;
|
|
-} pci_indirect = { 0, __KERNEL_CS };
|
|
+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
|
|
|
|
-static int pci_bios_present;
|
|
+static int pci_bios_present __read_only;
|
|
|
|
static int __devinit check_pcibios(void)
|
|
{
|
|
@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
|
|
unsigned long flags, pcibios_entry;
|
|
|
|
if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
|
|
- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
|
|
+ pci_indirect.address = pcibios_entry;
|
|
|
|
local_irq_save(flags);
|
|
- __asm__(
|
|
- "lcall *(%%edi); cld\n\t"
|
|
+ __asm__("movw %w6, %%ds\n\t"
|
|
+ "lcall *%%ss:(%%edi); cld\n\t"
|
|
+ "push %%ss\n\t"
|
|
+ "pop %%ds\n\t"
|
|
"jc 1f\n\t"
|
|
"xor %%ah, %%ah\n"
|
|
"1:"
|
|
@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
|
|
"=b" (ebx),
|
|
"=c" (ecx)
|
|
: "1" (PCIBIOS_PCI_BIOS_PRESENT),
|
|
- "D" (&pci_indirect)
|
|
+ "D" (&pci_indirect),
|
|
+ "r" (__PCIBIOS_DS)
|
|
: "memory");
|
|
local_irq_restore(flags);
|
|
|
|
@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
|
|
|
|
switch (len) {
|
|
case 1:
|
|
- __asm__("lcall *(%%esi); cld\n\t"
|
|
+ __asm__("movw %w6, %%ds\n\t"
|
|
+ "lcall *%%ss:(%%esi); cld\n\t"
|
|
+ "push %%ss\n\t"
|
|
+ "pop %%ds\n\t"
|
|
"jc 1f\n\t"
|
|
"xor %%ah, %%ah\n"
|
|
"1:"
|
|
@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
|
|
: "1" (PCIBIOS_READ_CONFIG_BYTE),
|
|
"b" (bx),
|
|
"D" ((long)reg),
|
|
- "S" (&pci_indirect));
|
|
+ "S" (&pci_indirect),
|
|
+ "r" (__PCIBIOS_DS));
|
|
/*
|
|
* Zero-extend the result beyond 8 bits, do not trust the
|
|
* BIOS having done it:
|
|
@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
|
|
*value &= 0xff;
|
|
break;
|
|
case 2:
|
|
- __asm__("lcall *(%%esi); cld\n\t"
|
|
+ __asm__("movw %w6, %%ds\n\t"
|
|
+ "lcall *%%ss:(%%esi); cld\n\t"
|
|
+ "push %%ss\n\t"
|
|
+ "pop %%ds\n\t"
|
|
"jc 1f\n\t"
|
|
"xor %%ah, %%ah\n"
|
|
"1:"
|
|
@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
|
|
: "1" (PCIBIOS_READ_CONFIG_WORD),
|
|
"b" (bx),
|
|
"D" ((long)reg),
|
|
- "S" (&pci_indirect));
|
|
+ "S" (&pci_indirect),
|
|
+ "r" (__PCIBIOS_DS));
|
|
/*
|
|
* Zero-extend the result beyond 16 bits, do not trust the
|
|
* BIOS having done it:
|
|
@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
|
|
*value &= 0xffff;
|
|
break;
|
|
case 4:
|
|
- __asm__("lcall *(%%esi); cld\n\t"
|
|
+ __asm__("movw %w6, %%ds\n\t"
|
|
+ "lcall *%%ss:(%%esi); cld\n\t"
|
|
+ "push %%ss\n\t"
|
|
+ "pop %%ds\n\t"
|
|
"jc 1f\n\t"
|
|
"xor %%ah, %%ah\n"
|
|
"1:"
|
|
@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
|
|
: "1" (PCIBIOS_READ_CONFIG_DWORD),
|
|
"b" (bx),
|
|
"D" ((long)reg),
|
|
- "S" (&pci_indirect));
|
|
+ "S" (&pci_indirect),
|
|
+ "r" (__PCIBIOS_DS));
|
|
break;
|
|
}
|
|
|
|
@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
|
|
|
|
switch (len) {
|
|
case 1:
|
|
- __asm__("lcall *(%%esi); cld\n\t"
|
|
+ __asm__("movw %w6, %%ds\n\t"
|
|
+ "lcall *%%ss:(%%esi); cld\n\t"
|
|
+ "push %%ss\n\t"
|
|
+ "pop %%ds\n\t"
|
|
"jc 1f\n\t"
|
|
"xor %%ah, %%ah\n"
|
|
"1:"
|
|
@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
|
|
"c" (value),
|
|
"b" (bx),
|
|
"D" ((long)reg),
|
|
- "S" (&pci_indirect));
|
|
+ "S" (&pci_indirect),
|
|
+ "r" (__PCIBIOS_DS));
|
|
break;
|
|
case 2:
|
|
- __asm__("lcall *(%%esi); cld\n\t"
|
|
+ __asm__("movw %w6, %%ds\n\t"
|
|
+ "lcall *%%ss:(%%esi); cld\n\t"
|
|
+ "push %%ss\n\t"
|
|
+ "pop %%ds\n\t"
|
|
"jc 1f\n\t"
|
|
"xor %%ah, %%ah\n"
|
|
"1:"
|
|
@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
|
|
"c" (value),
|
|
"b" (bx),
|
|
"D" ((long)reg),
|
|
- "S" (&pci_indirect));
|
|
+ "S" (&pci_indirect),
|
|
+ "r" (__PCIBIOS_DS));
|
|
break;
|
|
case 4:
|
|
- __asm__("lcall *(%%esi); cld\n\t"
|
|
+ __asm__("movw %w6, %%ds\n\t"
|
|
+ "lcall *%%ss:(%%esi); cld\n\t"
|
|
+ "push %%ss\n\t"
|
|
+ "pop %%ds\n\t"
|
|
"jc 1f\n\t"
|
|
"xor %%ah, %%ah\n"
|
|
"1:"
|
|
@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
|
|
"c" (value),
|
|
"b" (bx),
|
|
"D" ((long)reg),
|
|
- "S" (&pci_indirect));
|
|
+ "S" (&pci_indirect),
|
|
+ "r" (__PCIBIOS_DS));
|
|
break;
|
|
}
|
|
|
|
@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
|
|
|
|
DBG("PCI: Fetching IRQ routing table... ");
|
|
__asm__("push %%es\n\t"
|
|
+ "movw %w8, %%ds\n\t"
|
|
"push %%ds\n\t"
|
|
"pop %%es\n\t"
|
|
- "lcall *(%%esi); cld\n\t"
|
|
+ "lcall *%%ss:(%%esi); cld\n\t"
|
|
"pop %%es\n\t"
|
|
+ "push %%ss\n\t"
|
|
+ "pop %%ds\n"
|
|
"jc 1f\n\t"
|
|
"xor %%ah, %%ah\n"
|
|
"1:"
|
|
@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
|
|
"1" (0),
|
|
"D" ((long) &opt),
|
|
"S" (&pci_indirect),
|
|
- "m" (opt)
|
|
+ "m" (opt),
|
|
+ "r" (__PCIBIOS_DS)
|
|
: "memory");
|
|
DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
|
|
if (ret & 0xff00)
|
|
@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
|
|
{
|
|
int ret;
|
|
|
|
- __asm__("lcall *(%%esi); cld\n\t"
|
|
+ __asm__("movw %w5, %%ds\n\t"
|
|
+ "lcall *%%ss:(%%esi); cld\n\t"
|
|
+ "push %%ss\n\t"
|
|
+ "pop %%ds\n"
|
|
"jc 1f\n\t"
|
|
"xor %%ah, %%ah\n"
|
|
"1:"
|
|
@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
|
|
: "0" (PCIBIOS_SET_PCI_HW_INT),
|
|
"b" ((dev->bus->number << 8) | dev->devfn),
|
|
"c" ((irq << 8) | (pin + 10)),
|
|
- "S" (&pci_indirect));
|
|
+ "S" (&pci_indirect),
|
|
+ "r" (__PCIBIOS_DS));
|
|
return !(ret & 0xff00);
|
|
}
|
|
EXPORT_SYMBOL(pcibios_set_irq_routing);
|
|
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
|
|
index 40e4469..1ab536e 100644
|
|
--- a/arch/x86/platform/efi/efi_32.c
|
|
+++ b/arch/x86/platform/efi/efi_32.c
|
|
@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
|
|
{
|
|
struct desc_ptr gdt_descr;
|
|
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ struct desc_struct d;
|
|
+#endif
|
|
+
|
|
local_irq_save(efi_rt_eflags);
|
|
|
|
load_cr3(initial_page_table);
|
|
__flush_tlb_all();
|
|
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
|
|
+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
|
|
+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
|
|
+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
|
|
+#endif
|
|
+
|
|
gdt_descr.address = __pa(get_cpu_gdt_table(0));
|
|
gdt_descr.size = GDT_SIZE - 1;
|
|
load_gdt(&gdt_descr);
|
|
@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
|
|
{
|
|
struct desc_ptr gdt_descr;
|
|
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ struct desc_struct d;
|
|
+
|
|
+ memset(&d, 0, sizeof d);
|
|
+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
|
|
+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
|
|
+#endif
|
|
+
|
|
gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
|
|
gdt_descr.size = GDT_SIZE - 1;
|
|
load_gdt(&gdt_descr);
|
|
diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
|
|
index fbe66e6..eae5e38 100644
|
|
--- a/arch/x86/platform/efi/efi_stub_32.S
|
|
+++ b/arch/x86/platform/efi/efi_stub_32.S
|
|
@@ -6,7 +6,9 @@
|
|
*/
|
|
|
|
#include <linux/linkage.h>
|
|
+#include <linux/init.h>
|
|
#include <asm/page_types.h>
|
|
+#include <asm/segment.h>
|
|
|
|
/*
|
|
* efi_call_phys(void *, ...) is a function with variable parameters.
|
|
@@ -20,7 +22,7 @@
|
|
* service functions will comply with gcc calling convention, too.
|
|
*/
|
|
|
|
-.text
|
|
+__INIT
|
|
ENTRY(efi_call_phys)
|
|
/*
|
|
* 0. The function can only be called in Linux kernel. So CS has been
|
|
@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
|
|
* The mapping of lower virtual memory has been created in prelog and
|
|
* epilog.
|
|
*/
|
|
- movl $1f, %edx
|
|
- subl $__PAGE_OFFSET, %edx
|
|
- jmp *%edx
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ movl $(__KERNEXEC_EFI_DS), %edx
|
|
+ mov %edx, %ds
|
|
+ mov %edx, %es
|
|
+ mov %edx, %ss
|
|
+ addl $2f,(1f)
|
|
+ ljmp *(1f)
|
|
+
|
|
+__INITDATA
|
|
+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
|
|
+.previous
|
|
+
|
|
+2:
|
|
+ subl $2b,(1b)
|
|
+#else
|
|
+ jmp 1f-__PAGE_OFFSET
|
|
1:
|
|
+#endif
|
|
|
|
/*
|
|
* 2. Now on the top of stack is the return
|
|
@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
|
|
* parameter 2, ..., param n. To make things easy, we save the return
|
|
* address of efi_call_phys in a global variable.
|
|
*/
|
|
- popl %edx
|
|
- movl %edx, saved_return_addr
|
|
- /* get the function pointer into ECX*/
|
|
- popl %ecx
|
|
- movl %ecx, efi_rt_function_ptr
|
|
- movl $2f, %edx
|
|
- subl $__PAGE_OFFSET, %edx
|
|
- pushl %edx
|
|
+ popl (saved_return_addr)
|
|
+ popl (efi_rt_function_ptr)
|
|
|
|
/*
|
|
* 3. Clear PG bit in %CR0.
|
|
@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
|
|
/*
|
|
* 5. Call the physical function.
|
|
*/
|
|
- jmp *%ecx
|
|
+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
|
|
|
|
-2:
|
|
/*
|
|
* 6. After EFI runtime service returns, control will return to
|
|
* following instruction. We'd better readjust stack pointer first.
|
|
@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
|
|
movl %cr0, %edx
|
|
orl $0x80000000, %edx
|
|
movl %edx, %cr0
|
|
- jmp 1f
|
|
-1:
|
|
+
|
|
/*
|
|
* 8. Now restore the virtual mode from flat mode by
|
|
* adding EIP with PAGE_OFFSET.
|
|
*/
|
|
- movl $1f, %edx
|
|
- jmp *%edx
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ movl $(__KERNEL_DS), %edx
|
|
+ mov %edx, %ds
|
|
+ mov %edx, %es
|
|
+ mov %edx, %ss
|
|
+ ljmp $(__KERNEL_CS),$1f
|
|
+#else
|
|
+ jmp 1f+__PAGE_OFFSET
|
|
+#endif
|
|
1:
|
|
|
|
/*
|
|
* 9. Balance the stack. And because EAX contain the return value,
|
|
* we'd better not clobber it.
|
|
*/
|
|
- leal efi_rt_function_ptr, %edx
|
|
- movl (%edx), %ecx
|
|
- pushl %ecx
|
|
+ pushl (efi_rt_function_ptr)
|
|
|
|
/*
|
|
- * 10. Push the saved return address onto the stack and return.
|
|
+ * 10. Return to the saved return address.
|
|
*/
|
|
- leal saved_return_addr, %edx
|
|
- movl (%edx), %ecx
|
|
- pushl %ecx
|
|
- ret
|
|
+ jmpl *(saved_return_addr)
|
|
ENDPROC(efi_call_phys)
|
|
.previous
|
|
|
|
-.data
|
|
+__INITDATA
|
|
saved_return_addr:
|
|
.long 0
|
|
efi_rt_function_ptr:
|
|
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
|
|
index 4c07cca..2c8427d 100644
|
|
--- a/arch/x86/platform/efi/efi_stub_64.S
|
|
+++ b/arch/x86/platform/efi/efi_stub_64.S
|
|
@@ -7,6 +7,7 @@
|
|
*/
|
|
|
|
#include <linux/linkage.h>
|
|
+#include <asm/alternative-asm.h>
|
|
|
|
#define SAVE_XMM \
|
|
mov %rsp, %rax; \
|
|
@@ -40,6 +41,7 @@ ENTRY(efi_call0)
|
|
call *%rdi
|
|
addq $32, %rsp
|
|
RESTORE_XMM
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
ENDPROC(efi_call0)
|
|
|
|
@@ -50,6 +52,7 @@ ENTRY(efi_call1)
|
|
call *%rdi
|
|
addq $32, %rsp
|
|
RESTORE_XMM
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
ENDPROC(efi_call1)
|
|
|
|
@@ -60,6 +63,7 @@ ENTRY(efi_call2)
|
|
call *%rdi
|
|
addq $32, %rsp
|
|
RESTORE_XMM
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
ENDPROC(efi_call2)
|
|
|
|
@@ -71,6 +75,7 @@ ENTRY(efi_call3)
|
|
call *%rdi
|
|
addq $32, %rsp
|
|
RESTORE_XMM
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
ENDPROC(efi_call3)
|
|
|
|
@@ -83,6 +88,7 @@ ENTRY(efi_call4)
|
|
call *%rdi
|
|
addq $32, %rsp
|
|
RESTORE_XMM
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
ENDPROC(efi_call4)
|
|
|
|
@@ -96,6 +102,7 @@ ENTRY(efi_call5)
|
|
call *%rdi
|
|
addq $48, %rsp
|
|
RESTORE_XMM
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
ENDPROC(efi_call5)
|
|
|
|
@@ -112,5 +119,6 @@ ENTRY(efi_call6)
|
|
call *%rdi
|
|
addq $48, %rsp
|
|
RESTORE_XMM
|
|
+ pax_force_retaddr 0, 1
|
|
ret
|
|
ENDPROC(efi_call6)
|
|
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
|
|
index e31bcd8..f12dc46 100644
|
|
--- a/arch/x86/platform/mrst/mrst.c
|
|
+++ b/arch/x86/platform/mrst/mrst.c
|
|
@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
|
|
EXPORT_SYMBOL_GPL(sfi_mrtc_array);
|
|
int sfi_mrtc_num;
|
|
|
|
-static void mrst_power_off(void)
|
|
+static __noreturn void mrst_power_off(void)
|
|
{
|
|
+ BUG();
|
|
}
|
|
|
|
-static void mrst_reboot(void)
|
|
+static __noreturn void mrst_reboot(void)
|
|
{
|
|
intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
|
|
+ BUG();
|
|
}
|
|
|
|
/* parse all the mtimer info to a static mtimer array */
|
|
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
|
|
index fcbaac60..9f52376 100644
|
|
--- a/arch/x86/power/cpu.c
|
|
+++ b/arch/x86/power/cpu.c
|
|
@@ -133,7 +133,7 @@ static void do_fpu_end(void)
|
|
static void fix_processor_context(void)
|
|
{
|
|
int cpu = smp_processor_id();
|
|
- struct tss_struct *t = &per_cpu(init_tss, cpu);
|
|
+ struct tss_struct *t = init_tss + cpu;
|
|
|
|
set_tss_desc(cpu, t); /*
|
|
* This just modifies memory; should not be
|
|
@@ -143,7 +143,9 @@ static void fix_processor_context(void)
|
|
*/
|
|
|
|
#ifdef CONFIG_X86_64
|
|
+ pax_open_kernel();
|
|
get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
|
|
+ pax_close_kernel();
|
|
|
|
syscall_init(); /* This sets MSR_*STAR and related */
|
|
#endif
|
|
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
|
|
index b685296..4ac6aaa 100644
|
|
--- a/arch/x86/tools/relocs.c
|
|
+++ b/arch/x86/tools/relocs.c
|
|
@@ -12,10 +12,13 @@
|
|
#include <regex.h>
|
|
#include <tools/le_byteshift.h>
|
|
|
|
+#include "../../../include/generated/autoconf.h"
|
|
+
|
|
static void die(char *fmt, ...);
|
|
|
|
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
|
|
static Elf32_Ehdr ehdr;
|
|
+static Elf32_Phdr *phdr;
|
|
static unsigned long reloc_count, reloc_idx;
|
|
static unsigned long *relocs;
|
|
static unsigned long reloc16_count, reloc16_idx;
|
|
@@ -323,9 +326,39 @@ static void read_ehdr(FILE *fp)
|
|
}
|
|
}
|
|
|
|
+static void read_phdrs(FILE *fp)
|
|
+{
|
|
+ unsigned int i;
|
|
+
|
|
+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
|
|
+ if (!phdr) {
|
|
+ die("Unable to allocate %d program headers\n",
|
|
+ ehdr.e_phnum);
|
|
+ }
|
|
+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
|
|
+ die("Seek to %d failed: %s\n",
|
|
+ ehdr.e_phoff, strerror(errno));
|
|
+ }
|
|
+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
|
|
+ die("Cannot read ELF program headers: %s\n",
|
|
+ strerror(errno));
|
|
+ }
|
|
+ for(i = 0; i < ehdr.e_phnum; i++) {
|
|
+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
|
|
+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
|
|
+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
|
|
+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
|
|
+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
|
|
+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
|
|
+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
|
|
+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
|
|
+ }
|
|
+
|
|
+}
|
|
+
|
|
static void read_shdrs(FILE *fp)
|
|
{
|
|
- int i;
|
|
+ unsigned int i;
|
|
Elf32_Shdr shdr;
|
|
|
|
secs = calloc(ehdr.e_shnum, sizeof(struct section));
|
|
@@ -360,7 +393,7 @@ static void read_shdrs(FILE *fp)
|
|
|
|
static void read_strtabs(FILE *fp)
|
|
{
|
|
- int i;
|
|
+ unsigned int i;
|
|
for (i = 0; i < ehdr.e_shnum; i++) {
|
|
struct section *sec = &secs[i];
|
|
if (sec->shdr.sh_type != SHT_STRTAB) {
|
|
@@ -385,7 +418,7 @@ static void read_strtabs(FILE *fp)
|
|
|
|
static void read_symtabs(FILE *fp)
|
|
{
|
|
- int i,j;
|
|
+ unsigned int i,j;
|
|
for (i = 0; i < ehdr.e_shnum; i++) {
|
|
struct section *sec = &secs[i];
|
|
if (sec->shdr.sh_type != SHT_SYMTAB) {
|
|
@@ -418,7 +451,9 @@ static void read_symtabs(FILE *fp)
|
|
|
|
static void read_relocs(FILE *fp)
|
|
{
|
|
- int i,j;
|
|
+ unsigned int i,j;
|
|
+ uint32_t base;
|
|
+
|
|
for (i = 0; i < ehdr.e_shnum; i++) {
|
|
struct section *sec = &secs[i];
|
|
if (sec->shdr.sh_type != SHT_REL) {
|
|
@@ -438,9 +473,22 @@ static void read_relocs(FILE *fp)
|
|
die("Cannot read symbol table: %s\n",
|
|
strerror(errno));
|
|
}
|
|
+ base = 0;
|
|
+
|
|
+#ifdef CONFIG_X86_32
|
|
+ for (j = 0; j < ehdr.e_phnum; j++) {
|
|
+ if (phdr[j].p_type != PT_LOAD )
|
|
+ continue;
|
|
+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
|
|
+ continue;
|
|
+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
|
|
+ break;
|
|
+ }
|
|
+#endif
|
|
+
|
|
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
|
|
Elf32_Rel *rel = &sec->reltab[j];
|
|
- rel->r_offset = elf32_to_cpu(rel->r_offset);
|
|
+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
|
|
rel->r_info = elf32_to_cpu(rel->r_info);
|
|
}
|
|
}
|
|
@@ -449,13 +497,13 @@ static void read_relocs(FILE *fp)
|
|
|
|
static void print_absolute_symbols(void)
|
|
{
|
|
- int i;
|
|
+ unsigned int i;
|
|
printf("Absolute symbols\n");
|
|
printf(" Num: Value Size Type Bind Visibility Name\n");
|
|
for (i = 0; i < ehdr.e_shnum; i++) {
|
|
struct section *sec = &secs[i];
|
|
char *sym_strtab;
|
|
- int j;
|
|
+ unsigned int j;
|
|
|
|
if (sec->shdr.sh_type != SHT_SYMTAB) {
|
|
continue;
|
|
@@ -482,14 +530,14 @@ static void print_absolute_symbols(void)
|
|
|
|
static void print_absolute_relocs(void)
|
|
{
|
|
- int i, printed = 0;
|
|
+ unsigned int i, printed = 0;
|
|
|
|
for (i = 0; i < ehdr.e_shnum; i++) {
|
|
struct section *sec = &secs[i];
|
|
struct section *sec_applies, *sec_symtab;
|
|
char *sym_strtab;
|
|
Elf32_Sym *sh_symtab;
|
|
- int j;
|
|
+ unsigned int j;
|
|
if (sec->shdr.sh_type != SHT_REL) {
|
|
continue;
|
|
}
|
|
@@ -551,13 +599,13 @@ static void print_absolute_relocs(void)
|
|
static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
|
|
int use_real_mode)
|
|
{
|
|
- int i;
|
|
+ unsigned int i;
|
|
/* Walk through the relocations */
|
|
for (i = 0; i < ehdr.e_shnum; i++) {
|
|
char *sym_strtab;
|
|
Elf32_Sym *sh_symtab;
|
|
struct section *sec_applies, *sec_symtab;
|
|
- int j;
|
|
+ unsigned int j;
|
|
struct section *sec = &secs[i];
|
|
|
|
if (sec->shdr.sh_type != SHT_REL) {
|
|
@@ -581,6 +629,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
|
|
sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
|
|
r_type = ELF32_R_TYPE(rel->r_info);
|
|
|
|
+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
|
|
+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
|
|
+ continue;
|
|
+
|
|
+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
|
|
+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
|
|
+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
|
|
+ continue;
|
|
+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
|
|
+ continue;
|
|
+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
|
|
+ continue;
|
|
+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
|
|
+ continue;
|
|
+#endif
|
|
+
|
|
shn_abs = sym->st_shndx == SHN_ABS;
|
|
|
|
switch (r_type) {
|
|
@@ -674,7 +738,7 @@ static int write32(unsigned int v, FILE *f)
|
|
|
|
static void emit_relocs(int as_text, int use_real_mode)
|
|
{
|
|
- int i;
|
|
+ unsigned int i;
|
|
/* Count how many relocations I have and allocate space for them. */
|
|
reloc_count = 0;
|
|
walk_relocs(count_reloc, use_real_mode);
|
|
@@ -801,6 +865,7 @@ int main(int argc, char **argv)
|
|
fname, strerror(errno));
|
|
}
|
|
read_ehdr(fp);
|
|
+ read_phdrs(fp);
|
|
read_shdrs(fp);
|
|
read_strtabs(fp);
|
|
read_symtabs(fp);
|
|
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
|
|
index fd14be1..e3c79c0 100644
|
|
--- a/arch/x86/vdso/Makefile
|
|
+++ b/arch/x86/vdso/Makefile
|
|
@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
|
|
-Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
|
|
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
|
|
|
|
-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
|
|
+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
|
|
GCOV_PROFILE := n
|
|
|
|
#
|
|
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
|
|
index 66e6d93..587f435 100644
|
|
--- a/arch/x86/vdso/vdso32-setup.c
|
|
+++ b/arch/x86/vdso/vdso32-setup.c
|
|
@@ -25,6 +25,7 @@
|
|
#include <asm/tlbflush.h>
|
|
#include <asm/vdso.h>
|
|
#include <asm/proto.h>
|
|
+#include <asm/mman.h>
|
|
|
|
enum {
|
|
VDSO_DISABLED = 0,
|
|
@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
|
|
void enable_sep_cpu(void)
|
|
{
|
|
int cpu = get_cpu();
|
|
- struct tss_struct *tss = &per_cpu(init_tss, cpu);
|
|
+ struct tss_struct *tss = init_tss + cpu;
|
|
|
|
if (!boot_cpu_has(X86_FEATURE_SEP)) {
|
|
put_cpu();
|
|
@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
|
|
gate_vma.vm_start = FIXADDR_USER_START;
|
|
gate_vma.vm_end = FIXADDR_USER_END;
|
|
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
|
|
- gate_vma.vm_page_prot = __P101;
|
|
+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
|
|
|
|
return 0;
|
|
}
|
|
@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
|
if (compat)
|
|
addr = VDSO_HIGH_BASE;
|
|
else {
|
|
- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
|
|
+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
|
|
if (IS_ERR_VALUE(addr)) {
|
|
ret = addr;
|
|
goto up_fail;
|
|
}
|
|
}
|
|
|
|
- current->mm->context.vdso = (void *)addr;
|
|
+ current->mm->context.vdso = addr;
|
|
|
|
if (compat_uses_vma || !compat) {
|
|
/*
|
|
@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
|
}
|
|
|
|
current_thread_info()->sysenter_return =
|
|
- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
|
|
+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
|
|
|
|
up_fail:
|
|
if (ret)
|
|
- current->mm->context.vdso = NULL;
|
|
+ current->mm->context.vdso = 0;
|
|
|
|
up_write(&mm->mmap_sem);
|
|
|
|
@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
|
|
|
|
const char *arch_vma_name(struct vm_area_struct *vma)
|
|
{
|
|
- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
|
|
+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
|
|
return "[vdso]";
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
|
|
+ return "[vdso]";
|
|
+#endif
|
|
+
|
|
return NULL;
|
|
}
|
|
|
|
@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
|
|
* Check to see if the corresponding task was created in compat vdso
|
|
* mode.
|
|
*/
|
|
- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
|
|
+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
|
|
return &gate_vma;
|
|
return NULL;
|
|
}
|
|
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
|
|
index 47aa108..8434622 100644
|
|
--- a/arch/x86/vdso/vma.c
|
|
+++ b/arch/x86/vdso/vma.c
|
|
@@ -16,8 +16,6 @@
|
|
#include <asm/vdso.h>
|
|
#include <asm/page.h>
|
|
|
|
-unsigned int __read_mostly vdso_enabled = 1;
|
|
-
|
|
extern char vdso_start[], vdso_end[];
|
|
extern unsigned short vdso_sync_cpuid;
|
|
|
|
@@ -157,7 +155,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
|
|
* unaligned here as a result of stack start randomization.
|
|
*/
|
|
addr = PAGE_ALIGN(addr);
|
|
- addr = align_addr(addr, NULL, ALIGN_VDSO);
|
|
|
|
return addr;
|
|
}
|
|
@@ -170,30 +167,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
|
|
unsigned size)
|
|
{
|
|
struct mm_struct *mm = current->mm;
|
|
- unsigned long addr;
|
|
+ unsigned long addr = 0;
|
|
int ret;
|
|
|
|
- if (!vdso_enabled)
|
|
- return 0;
|
|
-
|
|
down_write(&mm->mmap_sem);
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
|
|
+#endif
|
|
+
|
|
addr = vdso_addr(mm->start_stack, size);
|
|
+ addr = align_addr(addr, NULL, ALIGN_VDSO);
|
|
addr = get_unmapped_area(NULL, addr, size, 0, 0);
|
|
if (IS_ERR_VALUE(addr)) {
|
|
ret = addr;
|
|
goto up_fail;
|
|
}
|
|
|
|
- current->mm->context.vdso = (void *)addr;
|
|
+ mm->context.vdso = addr;
|
|
|
|
ret = install_special_mapping(mm, addr, size,
|
|
VM_READ|VM_EXEC|
|
|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
|
|
pages);
|
|
- if (ret) {
|
|
- current->mm->context.vdso = NULL;
|
|
- goto up_fail;
|
|
- }
|
|
+ if (ret)
|
|
+ mm->context.vdso = 0;
|
|
|
|
up_fail:
|
|
up_write(&mm->mmap_sem);
|
|
@@ -213,10 +211,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
|
|
vdsox32_size);
|
|
}
|
|
#endif
|
|
-
|
|
-static __init int vdso_setup(char *s)
|
|
-{
|
|
- vdso_enabled = simple_strtoul(s, NULL, 0);
|
|
- return 0;
|
|
-}
|
|
-__setup("vdso=", vdso_setup);
|
|
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
|
|
index 9598038..0925f0a 100644
|
|
--- a/arch/x86/xen/enlighten.c
|
|
+++ b/arch/x86/xen/enlighten.c
|
|
@@ -96,8 +96,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
|
|
|
|
struct shared_info xen_dummy_shared_info;
|
|
|
|
-void *xen_initial_gdt;
|
|
-
|
|
RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
|
|
__read_mostly int xen_have_vector_callback;
|
|
EXPORT_SYMBOL_GPL(xen_have_vector_callback);
|
|
@@ -1197,30 +1195,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
|
|
#endif
|
|
};
|
|
|
|
-static void xen_reboot(int reason)
|
|
+static __noreturn void xen_reboot(int reason)
|
|
{
|
|
struct sched_shutdown r = { .reason = reason };
|
|
|
|
- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
|
|
- BUG();
|
|
+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
|
|
+ BUG();
|
|
}
|
|
|
|
-static void xen_restart(char *msg)
|
|
+static __noreturn void xen_restart(char *msg)
|
|
{
|
|
xen_reboot(SHUTDOWN_reboot);
|
|
}
|
|
|
|
-static void xen_emergency_restart(void)
|
|
+static __noreturn void xen_emergency_restart(void)
|
|
{
|
|
xen_reboot(SHUTDOWN_reboot);
|
|
}
|
|
|
|
-static void xen_machine_halt(void)
|
|
+static __noreturn void xen_machine_halt(void)
|
|
{
|
|
xen_reboot(SHUTDOWN_poweroff);
|
|
}
|
|
|
|
-static void xen_machine_power_off(void)
|
|
+static __noreturn void xen_machine_power_off(void)
|
|
{
|
|
if (pm_power_off)
|
|
pm_power_off();
|
|
@@ -1323,7 +1321,17 @@ asmlinkage void __init xen_start_kernel(void)
|
|
__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
|
|
|
|
/* Work out if we support NX */
|
|
- x86_configure_nx();
|
|
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
|
|
+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
|
|
+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
|
|
+ unsigned l, h;
|
|
+
|
|
+ __supported_pte_mask |= _PAGE_NX;
|
|
+ rdmsr(MSR_EFER, l, h);
|
|
+ l |= EFER_NX;
|
|
+ wrmsr(MSR_EFER, l, h);
|
|
+ }
|
|
+#endif
|
|
|
|
xen_setup_features();
|
|
|
|
@@ -1354,13 +1362,6 @@ asmlinkage void __init xen_start_kernel(void)
|
|
|
|
machine_ops = xen_machine_ops;
|
|
|
|
- /*
|
|
- * The only reliable way to retain the initial address of the
|
|
- * percpu gdt_page is to remember it here, so we can go and
|
|
- * mark it RW later, when the initial percpu area is freed.
|
|
- */
|
|
- xen_initial_gdt = &per_cpu(gdt_page, 0);
|
|
-
|
|
xen_smp_init();
|
|
|
|
#ifdef CONFIG_ACPI_NUMA
|
|
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
|
|
index cf7fe36..a7f8b58 100644
|
|
--- a/arch/x86/xen/mmu.c
|
|
+++ b/arch/x86/xen/mmu.c
|
|
@@ -1757,6 +1757,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
|
|
convert_pfn_mfn(init_level4_pgt);
|
|
convert_pfn_mfn(level3_ident_pgt);
|
|
convert_pfn_mfn(level3_kernel_pgt);
|
|
+ convert_pfn_mfn(level3_vmalloc_start_pgt);
|
|
+ convert_pfn_mfn(level3_vmalloc_end_pgt);
|
|
+ convert_pfn_mfn(level3_vmemmap_pgt);
|
|
|
|
l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
|
|
l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
|
|
@@ -1775,7 +1778,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
|
|
set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
|
|
set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
|
|
set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
|
|
+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
|
|
+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
|
|
+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
|
|
set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
|
|
+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
|
|
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
|
|
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
|
|
|
|
@@ -1983,6 +1990,7 @@ static void __init xen_post_allocator_init(void)
|
|
pv_mmu_ops.set_pud = xen_set_pud;
|
|
#if PAGETABLE_LEVELS == 4
|
|
pv_mmu_ops.set_pgd = xen_set_pgd;
|
|
+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
|
|
#endif
|
|
|
|
/* This will work as long as patching hasn't happened yet
|
|
@@ -2064,6 +2072,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
|
|
.pud_val = PV_CALLEE_SAVE(xen_pud_val),
|
|
.make_pud = PV_CALLEE_SAVE(xen_make_pud),
|
|
.set_pgd = xen_set_pgd_hyper,
|
|
+ .set_pgd_batched = xen_set_pgd_hyper,
|
|
|
|
.alloc_pud = xen_alloc_pmd_init,
|
|
.release_pud = xen_release_pmd_init,
|
|
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
|
|
index a7f5806..31939c3 100644
|
|
--- a/arch/x86/xen/smp.c
|
|
+++ b/arch/x86/xen/smp.c
|
|
@@ -215,11 +215,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
|
|
{
|
|
BUG_ON(smp_processor_id() != 0);
|
|
native_smp_prepare_boot_cpu();
|
|
-
|
|
- /* We've switched to the "real" per-cpu gdt, so make sure the
|
|
- old memory can be recycled */
|
|
- make_lowmem_page_readwrite(xen_initial_gdt);
|
|
-
|
|
xen_filter_cpu_maps();
|
|
xen_setup_vcpu_info_placement();
|
|
}
|
|
@@ -296,12 +291,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
|
|
gdt = get_cpu_gdt_table(cpu);
|
|
|
|
ctxt->flags = VGCF_IN_KERNEL;
|
|
- ctxt->user_regs.ds = __USER_DS;
|
|
- ctxt->user_regs.es = __USER_DS;
|
|
+ ctxt->user_regs.ds = __KERNEL_DS;
|
|
+ ctxt->user_regs.es = __KERNEL_DS;
|
|
ctxt->user_regs.ss = __KERNEL_DS;
|
|
#ifdef CONFIG_X86_32
|
|
ctxt->user_regs.fs = __KERNEL_PERCPU;
|
|
- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
|
|
+ savesegment(gs, ctxt->user_regs.gs);
|
|
#else
|
|
ctxt->gs_base_kernel = per_cpu_offset(cpu);
|
|
#endif
|
|
@@ -352,13 +347,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
|
|
int rc;
|
|
|
|
per_cpu(current_task, cpu) = idle;
|
|
+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
|
|
#ifdef CONFIG_X86_32
|
|
irq_ctx_init(cpu);
|
|
#else
|
|
clear_tsk_thread_flag(idle, TIF_FORK);
|
|
- per_cpu(kernel_stack, cpu) =
|
|
- (unsigned long)task_stack_page(idle) -
|
|
- KERNEL_STACK_OFFSET + THREAD_SIZE;
|
|
+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
|
|
#endif
|
|
xen_setup_runstate_info(cpu);
|
|
xen_setup_timer(cpu);
|
|
diff --git a/arch/x86/xen/xen-asm_32.S.rej b/arch/x86/xen/xen-asm_32.S.rej
|
|
new file mode 100644
|
|
index 0000000..b3a8af9
|
|
--- /dev/null
|
|
+++ b/arch/x86/xen/xen-asm_32.S.rej
|
|
@@ -0,0 +1,23 @@
|
|
+--- arch/x86/xen/xen-asm_32.S 2011-10-24 12:48:26.419091764 +0200
|
|
++++ arch/x86/xen/xen-asm_32.S 2012-05-21 12:10:09.824048905 +0200
|
|
+@@ -83,14 +83,14 @@ ENTRY(xen_iret)
|
|
+ ESP_OFFSET=4 # bytes pushed onto stack
|
|
+
|
|
+ /*
|
|
+- * Store vcpu_info pointer for easy access. Do it this way to
|
|
+- * avoid having to reload %fs
|
|
++ * Store vcpu_info pointer for easy access.
|
|
+ */
|
|
+ #ifdef CONFIG_SMP
|
|
+- GET_THREAD_INFO(%eax)
|
|
+- movl TI_cpu(%eax), %eax
|
|
+- movl __per_cpu_offset(,%eax,4), %eax
|
|
+- mov xen_vcpu(%eax), %eax
|
|
++ push %fs
|
|
++ mov $(__KERNEL_PERCPU), %eax
|
|
++ mov %eax, %fs
|
|
++ mov PER_CPU_VAR(xen_vcpu), %eax
|
|
++ pop %fs
|
|
+ #else
|
|
+ movl xen_vcpu, %eax
|
|
+ #endif
|
|
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
|
|
index aaa7291..3f77960 100644
|
|
--- a/arch/x86/xen/xen-head.S
|
|
+++ b/arch/x86/xen/xen-head.S
|
|
@@ -19,6 +19,17 @@ ENTRY(startup_xen)
|
|
#ifdef CONFIG_X86_32
|
|
mov %esi,xen_start_info
|
|
mov $init_thread_union+THREAD_SIZE,%esp
|
|
+#ifdef CONFIG_SMP
|
|
+ movl $cpu_gdt_table,%edi
|
|
+ movl $__per_cpu_load,%eax
|
|
+ movw %ax,__KERNEL_PERCPU + 2(%edi)
|
|
+ rorl $16,%eax
|
|
+ movb %al,__KERNEL_PERCPU + 4(%edi)
|
|
+ movb %ah,__KERNEL_PERCPU + 7(%edi)
|
|
+ movl $__per_cpu_end - 1,%eax
|
|
+ subl $__per_cpu_start,%eax
|
|
+ movw %ax,__KERNEL_PERCPU + 0(%edi)
|
|
+#endif
|
|
#else
|
|
mov %rsi,xen_start_info
|
|
mov $init_thread_union+THREAD_SIZE,%rsp
|
|
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
|
|
index b095739..8c17bcd 100644
|
|
--- a/arch/x86/xen/xen-ops.h
|
|
+++ b/arch/x86/xen/xen-ops.h
|
|
@@ -10,8 +10,6 @@
|
|
extern const char xen_hypervisor_callback[];
|
|
extern const char xen_failsafe_callback[];
|
|
|
|
-extern void *xen_initial_gdt;
|
|
-
|
|
struct trap_info;
|
|
void xen_copy_trap_info(struct trap_info *traps);
|
|
|
|
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
|
|
index 58916af..9cb880b 100644
|
|
--- a/block/blk-iopoll.c
|
|
+++ b/block/blk-iopoll.c
|
|
@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
|
|
}
|
|
EXPORT_SYMBOL(blk_iopoll_complete);
|
|
|
|
-static void blk_iopoll_softirq(struct softirq_action *h)
|
|
+static void blk_iopoll_softirq(void)
|
|
{
|
|
struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
|
|
int rearm = 0, budget = blk_iopoll_budget;
|
|
diff --git a/block/blk-map.c b/block/blk-map.c
|
|
index 623e1cd..ca1e109 100644
|
|
--- a/block/blk-map.c
|
|
+++ b/block/blk-map.c
|
|
@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
|
|
if (!len || !kbuf)
|
|
return -EINVAL;
|
|
|
|
- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
|
|
+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
|
|
if (do_copy)
|
|
bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
|
|
else
|
|
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
|
|
index 467c8de..4bddc6d 100644
|
|
--- a/block/blk-softirq.c
|
|
+++ b/block/blk-softirq.c
|
|
@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
|
|
* Softirq action handler - move entries to local list and loop over them
|
|
* while passing them to the queue registered handler.
|
|
*/
|
|
-static void blk_done_softirq(struct softirq_action *h)
|
|
+static void blk_done_softirq(void)
|
|
{
|
|
struct list_head *cpu_list, local_list;
|
|
|
|
diff --git a/block/bsg.c b/block/bsg.c
|
|
index ff64ae3..593560c 100644
|
|
--- a/block/bsg.c
|
|
+++ b/block/bsg.c
|
|
@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
|
|
struct sg_io_v4 *hdr, struct bsg_device *bd,
|
|
fmode_t has_write_perm)
|
|
{
|
|
+ unsigned char tmpcmd[sizeof(rq->__cmd)];
|
|
+ unsigned char *cmdptr;
|
|
+
|
|
if (hdr->request_len > BLK_MAX_CDB) {
|
|
rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
|
|
if (!rq->cmd)
|
|
return -ENOMEM;
|
|
- }
|
|
+ cmdptr = rq->cmd;
|
|
+ } else
|
|
+ cmdptr = tmpcmd;
|
|
|
|
- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
|
|
+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
|
|
hdr->request_len))
|
|
return -EFAULT;
|
|
|
|
+ if (cmdptr != rq->cmd)
|
|
+ memcpy(rq->cmd, cmdptr, hdr->request_len);
|
|
+
|
|
if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
|
|
if (blk_verify_command(rq->cmd, has_write_perm))
|
|
return -EPERM;
|
|
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
|
|
index 7c668c8..db3521c 100644
|
|
--- a/block/compat_ioctl.c
|
|
+++ b/block/compat_ioctl.c
|
|
@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
|
|
err |= __get_user(f->spec1, &uf->spec1);
|
|
err |= __get_user(f->fmt_gap, &uf->fmt_gap);
|
|
err |= __get_user(name, &uf->name);
|
|
- f->name = compat_ptr(name);
|
|
+ f->name = (void __force_kernel *)compat_ptr(name);
|
|
if (err) {
|
|
err = -EFAULT;
|
|
goto out;
|
|
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
|
|
index 6296b40..417c00f 100644
|
|
--- a/block/partitions/efi.c
|
|
+++ b/block/partitions/efi.c
|
|
@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
|
|
if (!gpt)
|
|
return NULL;
|
|
|
|
- count = le32_to_cpu(gpt->num_partition_entries) *
|
|
- le32_to_cpu(gpt->sizeof_partition_entry);
|
|
- if (!count)
|
|
+ if (!le32_to_cpu(gpt->num_partition_entries))
|
|
return NULL;
|
|
- pte = kzalloc(count, GFP_KERNEL);
|
|
+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
|
|
if (!pte)
|
|
return NULL;
|
|
|
|
+ count = le32_to_cpu(gpt->num_partition_entries) *
|
|
+ le32_to_cpu(gpt->sizeof_partition_entry);
|
|
if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
|
|
(u8 *) pte,
|
|
count) < count) {
|
|
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
|
|
index f1c00c9..13f2998 100644
|
|
--- a/block/scsi_ioctl.c
|
|
+++ b/block/scsi_ioctl.c
|
|
@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
|
|
static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
|
|
struct sg_io_hdr *hdr, fmode_t mode)
|
|
{
|
|
- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
|
|
+ unsigned char tmpcmd[sizeof(rq->__cmd)];
|
|
+ unsigned char *cmdptr;
|
|
+
|
|
+ if (rq->cmd != rq->__cmd)
|
|
+ cmdptr = rq->cmd;
|
|
+ else
|
|
+ cmdptr = tmpcmd;
|
|
+
|
|
+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
|
|
return -EFAULT;
|
|
+
|
|
+ if (cmdptr != rq->cmd)
|
|
+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
|
|
+
|
|
if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
|
|
return -EPERM;
|
|
|
|
@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
|
|
int err;
|
|
unsigned int in_len, out_len, bytes, opcode, cmdlen;
|
|
char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
|
|
+ unsigned char tmpcmd[sizeof(rq->__cmd)];
|
|
+ unsigned char *cmdptr;
|
|
|
|
if (!sic)
|
|
return -EINVAL;
|
|
@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
|
|
*/
|
|
err = -EFAULT;
|
|
rq->cmd_len = cmdlen;
|
|
- if (copy_from_user(rq->cmd, sic->data, cmdlen))
|
|
+
|
|
+ if (rq->cmd != rq->__cmd)
|
|
+ cmdptr = rq->cmd;
|
|
+ else
|
|
+ cmdptr = tmpcmd;
|
|
+
|
|
+ if (copy_from_user(cmdptr, sic->data, cmdlen))
|
|
goto error;
|
|
|
|
+ if (rq->cmd != cmdptr)
|
|
+ memcpy(rq->cmd, cmdptr, cmdlen);
|
|
+
|
|
if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
|
|
goto error;
|
|
|
|
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
|
|
index 7bdd61b..afec999 100644
|
|
--- a/crypto/cryptd.c
|
|
+++ b/crypto/cryptd.c
|
|
@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
|
|
|
|
struct cryptd_blkcipher_request_ctx {
|
|
crypto_completion_t complete;
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct cryptd_hash_ctx {
|
|
struct crypto_shash *child;
|
|
@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
|
|
|
|
struct cryptd_aead_request_ctx {
|
|
crypto_completion_t complete;
|
|
-};
|
|
+} __no_const;
|
|
|
|
static void cryptd_queue_worker(struct work_struct *work);
|
|
|
|
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
|
|
index e6defd8..c26a225 100644
|
|
--- a/drivers/acpi/apei/cper.c
|
|
+++ b/drivers/acpi/apei/cper.c
|
|
@@ -38,12 +38,12 @@
|
|
*/
|
|
u64 cper_next_record_id(void)
|
|
{
|
|
- static atomic64_t seq;
|
|
+ static atomic64_unchecked_t seq;
|
|
|
|
- if (!atomic64_read(&seq))
|
|
- atomic64_set(&seq, ((u64)get_seconds()) << 32);
|
|
+ if (!atomic64_read_unchecked(&seq))
|
|
+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
|
|
|
|
- return atomic64_inc_return(&seq);
|
|
+ return atomic64_inc_return_unchecked(&seq);
|
|
}
|
|
EXPORT_SYMBOL_GPL(cper_next_record_id);
|
|
|
|
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
|
|
index 7586544..636a2f0 100644
|
|
--- a/drivers/acpi/ec_sys.c
|
|
+++ b/drivers/acpi/ec_sys.c
|
|
@@ -12,6 +12,7 @@
|
|
#include <linux/acpi.h>
|
|
#include <linux/debugfs.h>
|
|
#include <linux/module.h>
|
|
+#include <linux/uaccess.h>
|
|
#include "internal.h"
|
|
|
|
MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
|
|
@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
|
|
* struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
|
|
*/
|
|
unsigned int size = EC_SPACE_SIZE;
|
|
- u8 *data = (u8 *) buf;
|
|
+ u8 data;
|
|
loff_t init_off = *off;
|
|
int err = 0;
|
|
|
|
@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
|
|
size = count;
|
|
|
|
while (size) {
|
|
- err = ec_read(*off, &data[*off - init_off]);
|
|
+ err = ec_read(*off, &data);
|
|
if (err)
|
|
return err;
|
|
+ if (put_user(data, &buf[*off - init_off]))
|
|
+ return -EFAULT;
|
|
*off += 1;
|
|
size--;
|
|
}
|
|
@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
|
|
|
|
unsigned int size = count;
|
|
loff_t init_off = *off;
|
|
- u8 *data = (u8 *) buf;
|
|
int err = 0;
|
|
|
|
if (*off >= EC_SPACE_SIZE)
|
|
@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
|
|
}
|
|
|
|
while (size) {
|
|
- u8 byte_write = data[*off - init_off];
|
|
+ u8 byte_write;
|
|
+ if (get_user(byte_write, &buf[*off - init_off]))
|
|
+ return -EFAULT;
|
|
err = ec_write(*off, byte_write);
|
|
if (err)
|
|
return err;
|
|
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
|
|
index 251c7b62..000462d 100644
|
|
--- a/drivers/acpi/proc.c
|
|
+++ b/drivers/acpi/proc.c
|
|
@@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
|
|
size_t count, loff_t * ppos)
|
|
{
|
|
struct list_head *node, *next;
|
|
- char strbuf[5];
|
|
- char str[5] = "";
|
|
- unsigned int len = count;
|
|
+ char strbuf[5] = {0};
|
|
|
|
- if (len > 4)
|
|
- len = 4;
|
|
- if (len < 0)
|
|
+ if (count > 4)
|
|
+ count = 4;
|
|
+ if (copy_from_user(strbuf, buffer, count))
|
|
return -EFAULT;
|
|
-
|
|
- if (copy_from_user(strbuf, buffer, len))
|
|
- return -EFAULT;
|
|
- strbuf[len] = '\0';
|
|
- sscanf(strbuf, "%s", str);
|
|
+ strbuf[count] = '\0';
|
|
|
|
mutex_lock(&acpi_device_lock);
|
|
list_for_each_safe(node, next, &acpi_wakeup_device_list) {
|
|
@@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
|
|
if (!dev->wakeup.flags.valid)
|
|
continue;
|
|
|
|
- if (!strncmp(dev->pnp.bus_id, str, 4)) {
|
|
+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
|
|
if (device_can_wakeup(&dev->dev)) {
|
|
bool enable = !device_may_wakeup(&dev->dev);
|
|
device_set_wakeup_enable(&dev->dev, enable);
|
|
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
|
|
index 4a2c131..5a69017 100644
|
|
--- a/drivers/acpi/processor_driver.c
|
|
+++ b/drivers/acpi/processor_driver.c
|
|
@@ -557,7 +557,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
|
|
return 0;
|
|
#endif
|
|
|
|
- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
|
|
+ BUG_ON(pr->id >= nr_cpu_ids);
|
|
|
|
/*
|
|
* Buggy BIOS check
|
|
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
|
|
index 0a6767b..0798077 100644
|
|
--- a/drivers/ata/libata-core.c
|
|
+++ b/drivers/ata/libata-core.c
|
|
@@ -4772,7 +4772,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
|
|
struct ata_port *ap;
|
|
unsigned int tag;
|
|
|
|
- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
|
|
+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
|
|
ap = qc->ap;
|
|
|
|
qc->flags = 0;
|
|
@@ -4788,7 +4788,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
|
|
struct ata_port *ap;
|
|
struct ata_link *link;
|
|
|
|
- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
|
|
+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
|
|
WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
|
|
ap = qc->ap;
|
|
link = qc->dev->link;
|
|
@@ -5852,6 +5852,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
|
|
return;
|
|
|
|
spin_lock(&lock);
|
|
+ pax_open_kernel();
|
|
|
|
for (cur = ops->inherits; cur; cur = cur->inherits) {
|
|
void **inherit = (void **)cur;
|
|
@@ -5865,8 +5866,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
|
|
if (IS_ERR(*pp))
|
|
*pp = NULL;
|
|
|
|
- ops->inherits = NULL;
|
|
+ *(struct ata_port_operations **)&ops->inherits = NULL;
|
|
|
|
+ pax_close_kernel();
|
|
spin_unlock(&lock);
|
|
}
|
|
|
|
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
|
|
index 3239517..343b5f6 100644
|
|
--- a/drivers/ata/pata_arasan_cf.c
|
|
+++ b/drivers/ata/pata_arasan_cf.c
|
|
@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
|
|
/* Handle platform specific quirks */
|
|
if (pdata->quirk) {
|
|
if (pdata->quirk & CF_BROKEN_PIO) {
|
|
- ap->ops->set_piomode = NULL;
|
|
+ pax_open_kernel();
|
|
+ *(void **)&ap->ops->set_piomode = NULL;
|
|
+ pax_close_kernel();
|
|
ap->pio_mask = 0;
|
|
}
|
|
if (pdata->quirk & CF_BROKEN_MWDMA)
|
|
diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
|
|
index f9b983a..887b9d8 100644
|
|
--- a/drivers/atm/adummy.c
|
|
+++ b/drivers/atm/adummy.c
|
|
@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
|
|
vcc->pop(vcc, skb);
|
|
else
|
|
dev_kfree_skb_any(skb);
|
|
- atomic_inc(&vcc->stats->tx);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
|
|
index 89b30f3..7964211d4 100644
|
|
--- a/drivers/atm/ambassador.c
|
|
+++ b/drivers/atm/ambassador.c
|
|
@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
|
|
PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
|
|
|
|
// VC layer stats
|
|
- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
|
|
+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
|
|
|
|
// free the descriptor
|
|
kfree (tx_descr);
|
|
@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
|
|
dump_skb ("<<<", vc, skb);
|
|
|
|
// VC layer stats
|
|
- atomic_inc(&atm_vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&atm_vcc->stats->rx);
|
|
__net_timestamp(skb);
|
|
// end of our responsibility
|
|
atm_vcc->push (atm_vcc, skb);
|
|
@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
|
|
} else {
|
|
PRINTK (KERN_INFO, "dropped over-size frame");
|
|
// should we count this?
|
|
- atomic_inc(&atm_vcc->stats->rx_drop);
|
|
+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
|
|
}
|
|
|
|
} else {
|
|
@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
|
|
}
|
|
|
|
if (check_area (skb->data, skb->len)) {
|
|
- atomic_inc(&atm_vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
|
|
return -ENOMEM; // ?
|
|
}
|
|
|
|
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
|
|
index b22d71c..d6e1049 100644
|
|
--- a/drivers/atm/atmtcp.c
|
|
+++ b/drivers/atm/atmtcp.c
|
|
@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
|
|
if (vcc->pop) vcc->pop(vcc,skb);
|
|
else dev_kfree_skb(skb);
|
|
if (dev_data) return 0;
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
return -ENOLINK;
|
|
}
|
|
size = skb->len+sizeof(struct atmtcp_hdr);
|
|
@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
|
|
if (!new_skb) {
|
|
if (vcc->pop) vcc->pop(vcc,skb);
|
|
else dev_kfree_skb(skb);
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
return -ENOBUFS;
|
|
}
|
|
hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
|
|
@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
|
|
if (vcc->pop) vcc->pop(vcc,skb);
|
|
else dev_kfree_skb(skb);
|
|
out_vcc->push(out_vcc,new_skb);
|
|
- atomic_inc(&vcc->stats->tx);
|
|
- atomic_inc(&out_vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx);
|
|
+ atomic_inc_unchecked(&out_vcc->stats->rx);
|
|
return 0;
|
|
}
|
|
|
|
@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
|
|
out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
|
|
read_unlock(&vcc_sklist_lock);
|
|
if (!out_vcc) {
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
goto done;
|
|
}
|
|
skb_pull(skb,sizeof(struct atmtcp_hdr));
|
|
@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
|
|
__net_timestamp(new_skb);
|
|
skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
|
|
out_vcc->push(out_vcc,new_skb);
|
|
- atomic_inc(&vcc->stats->tx);
|
|
- atomic_inc(&out_vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx);
|
|
+ atomic_inc_unchecked(&out_vcc->stats->rx);
|
|
done:
|
|
if (vcc->pop) vcc->pop(vcc,skb);
|
|
else dev_kfree_skb(skb);
|
|
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
|
|
index 2059ee4..faf51c7 100644
|
|
--- a/drivers/atm/eni.c
|
|
+++ b/drivers/atm/eni.c
|
|
@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
|
|
DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
|
|
vcc->dev->number);
|
|
length = 0;
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
}
|
|
else {
|
|
length = ATM_CELL_SIZE-1; /* no HEC */
|
|
@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
|
|
size);
|
|
}
|
|
eff = length = 0;
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
}
|
|
else {
|
|
size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
|
|
@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
|
|
"(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
|
|
vcc->dev->number,vcc->vci,length,size << 2,descr);
|
|
length = eff = 0;
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
}
|
|
}
|
|
skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
|
|
@@ -767,7 +767,7 @@ rx_dequeued++;
|
|
vcc->push(vcc,skb);
|
|
pushed++;
|
|
}
|
|
- atomic_inc(&vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx);
|
|
}
|
|
wake_up(&eni_dev->rx_wait);
|
|
}
|
|
@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
|
|
PCI_DMA_TODEVICE);
|
|
if (vcc->pop) vcc->pop(vcc,skb);
|
|
else dev_kfree_skb_irq(skb);
|
|
- atomic_inc(&vcc->stats->tx);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx);
|
|
wake_up(&eni_dev->tx_wait);
|
|
dma_complete++;
|
|
}
|
|
@@ -1567,7 +1567,7 @@ tx_complete++;
|
|
/*--------------------------------- entries ---------------------------------*/
|
|
|
|
|
|
-static const char *media_name[] __devinitdata = {
|
|
+static const char *media_name[] __devinitconst = {
|
|
"MMF", "SMF", "MMF", "03?", /* 0- 3 */
|
|
"UTP", "05?", "06?", "07?", /* 4- 7 */
|
|
"TAXI","09?", "10?", "11?", /* 8-11 */
|
|
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
|
|
index 86fed1b..6dc47217 100644
|
|
--- a/drivers/atm/firestream.c
|
|
+++ b/drivers/atm/firestream.c
|
|
@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
|
|
}
|
|
}
|
|
|
|
- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
|
|
+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
|
|
|
|
fs_dprintk (FS_DEBUG_TXMEM, "i");
|
|
fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
|
|
@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
|
|
#endif
|
|
skb_put (skb, qe->p1 & 0xffff);
|
|
ATM_SKB(skb)->vcc = atm_vcc;
|
|
- atomic_inc(&atm_vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&atm_vcc->stats->rx);
|
|
__net_timestamp(skb);
|
|
fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
|
|
atm_vcc->push (atm_vcc, skb);
|
|
@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
|
|
kfree (pe);
|
|
}
|
|
if (atm_vcc)
|
|
- atomic_inc(&atm_vcc->stats->rx_drop);
|
|
+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
|
|
break;
|
|
case 0x1f: /* Reassembly abort: no buffers. */
|
|
/* Silently increment error counter. */
|
|
if (atm_vcc)
|
|
- atomic_inc(&atm_vcc->stats->rx_drop);
|
|
+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
|
|
break;
|
|
default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
|
|
printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
|
|
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
|
|
index 361f5ae..7fc552d 100644
|
|
--- a/drivers/atm/fore200e.c
|
|
+++ b/drivers/atm/fore200e.c
|
|
@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
|
|
#endif
|
|
/* check error condition */
|
|
if (*entry->status & STATUS_ERROR)
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
else
|
|
- atomic_inc(&vcc->stats->tx);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx);
|
|
}
|
|
}
|
|
|
|
@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
|
|
if (skb == NULL) {
|
|
DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
|
|
|
|
- atomic_inc(&vcc->stats->rx_drop);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
|
|
return -ENOMEM;
|
|
}
|
|
|
|
@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
|
|
|
|
dev_kfree_skb_any(skb);
|
|
|
|
- atomic_inc(&vcc->stats->rx_drop);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
|
|
return -ENOMEM;
|
|
}
|
|
|
|
ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
|
|
|
|
vcc->push(vcc, skb);
|
|
- atomic_inc(&vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx);
|
|
|
|
ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
|
|
|
|
@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
|
|
DPRINTK(2, "damaged PDU on %d.%d.%d\n",
|
|
fore200e->atm_dev->number,
|
|
entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
}
|
|
}
|
|
|
|
@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
|
|
goto retry_here;
|
|
}
|
|
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
|
|
fore200e->tx_sat++;
|
|
DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
|
|
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
|
|
index b182c2f..1c6fa8a 100644
|
|
--- a/drivers/atm/he.c
|
|
+++ b/drivers/atm/he.c
|
|
@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
|
|
|
|
if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
|
|
hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
|
|
- atomic_inc(&vcc->stats->rx_drop);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
|
|
goto return_host_buffers;
|
|
}
|
|
|
|
@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
|
|
RBRQ_LEN_ERR(he_dev->rbrq_head)
|
|
? "LEN_ERR" : "",
|
|
vcc->vpi, vcc->vci);
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
goto return_host_buffers;
|
|
}
|
|
|
|
@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
|
|
vcc->push(vcc, skb);
|
|
spin_lock(&he_dev->global_lock);
|
|
|
|
- atomic_inc(&vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx);
|
|
|
|
return_host_buffers:
|
|
++pdus_assembled;
|
|
@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
|
|
tpd->vcc->pop(tpd->vcc, tpd->skb);
|
|
else
|
|
dev_kfree_skb_any(tpd->skb);
|
|
- atomic_inc(&tpd->vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
|
|
}
|
|
pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
|
|
return;
|
|
@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
|
|
vcc->pop(vcc, skb);
|
|
else
|
|
dev_kfree_skb_any(skb);
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
return -EINVAL;
|
|
}
|
|
|
|
@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
|
|
vcc->pop(vcc, skb);
|
|
else
|
|
dev_kfree_skb_any(skb);
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
return -EINVAL;
|
|
}
|
|
#endif
|
|
@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
|
|
vcc->pop(vcc, skb);
|
|
else
|
|
dev_kfree_skb_any(skb);
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
spin_unlock_irqrestore(&he_dev->global_lock, flags);
|
|
return -ENOMEM;
|
|
}
|
|
@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
|
|
vcc->pop(vcc, skb);
|
|
else
|
|
dev_kfree_skb_any(skb);
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
spin_unlock_irqrestore(&he_dev->global_lock, flags);
|
|
return -ENOMEM;
|
|
}
|
|
@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
|
|
__enqueue_tpd(he_dev, tpd, cid);
|
|
spin_unlock_irqrestore(&he_dev->global_lock, flags);
|
|
|
|
- atomic_inc(&vcc->stats->tx);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
|
|
index 75fd691..2d20b14 100644
|
|
--- a/drivers/atm/horizon.c
|
|
+++ b/drivers/atm/horizon.c
|
|
@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
|
|
{
|
|
struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
|
|
// VC layer stats
|
|
- atomic_inc(&vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx);
|
|
__net_timestamp(skb);
|
|
// end of our responsibility
|
|
vcc->push (vcc, skb);
|
|
@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
|
|
dev->tx_iovec = NULL;
|
|
|
|
// VC layer stats
|
|
- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
|
|
+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
|
|
|
|
// free the skb
|
|
hrz_kfree_skb (skb);
|
|
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
|
|
index 81845fa..a4367d7 100644
|
|
--- a/drivers/atm/idt77252.c
|
|
+++ b/drivers/atm/idt77252.c
|
|
@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
|
|
else
|
|
dev_kfree_skb(skb);
|
|
|
|
- atomic_inc(&vcc->stats->tx);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx);
|
|
}
|
|
|
|
atomic_dec(&scq->used);
|
|
@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
|
|
if ((sb = dev_alloc_skb(64)) == NULL) {
|
|
printk("%s: Can't allocate buffers for aal0.\n",
|
|
card->name);
|
|
- atomic_add(i, &vcc->stats->rx_drop);
|
|
+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
|
|
break;
|
|
}
|
|
if (!atm_charge(vcc, sb->truesize)) {
|
|
RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
|
|
card->name);
|
|
- atomic_add(i - 1, &vcc->stats->rx_drop);
|
|
+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
|
|
dev_kfree_skb(sb);
|
|
break;
|
|
}
|
|
@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
|
|
ATM_SKB(sb)->vcc = vcc;
|
|
__net_timestamp(sb);
|
|
vcc->push(vcc, sb);
|
|
- atomic_inc(&vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx);
|
|
|
|
cell += ATM_CELL_PAYLOAD;
|
|
}
|
|
@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
|
|
"(CDC: %08x)\n",
|
|
card->name, len, rpp->len, readl(SAR_REG_CDC));
|
|
recycle_rx_pool_skb(card, rpp);
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
return;
|
|
}
|
|
if (stat & SAR_RSQE_CRC) {
|
|
RXPRINTK("%s: AAL5 CRC error.\n", card->name);
|
|
recycle_rx_pool_skb(card, rpp);
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
return;
|
|
}
|
|
if (skb_queue_len(&rpp->queue) > 1) {
|
|
@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
|
|
RXPRINTK("%s: Can't alloc RX skb.\n",
|
|
card->name);
|
|
recycle_rx_pool_skb(card, rpp);
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
return;
|
|
}
|
|
if (!atm_charge(vcc, skb->truesize)) {
|
|
@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
|
|
__net_timestamp(skb);
|
|
|
|
vcc->push(vcc, skb);
|
|
- atomic_inc(&vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx);
|
|
|
|
return;
|
|
}
|
|
@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
|
|
__net_timestamp(skb);
|
|
|
|
vcc->push(vcc, skb);
|
|
- atomic_inc(&vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx);
|
|
|
|
if (skb->truesize > SAR_FB_SIZE_3)
|
|
add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
|
|
@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
|
|
if (vcc->qos.aal != ATM_AAL0) {
|
|
RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
|
|
card->name, vpi, vci);
|
|
- atomic_inc(&vcc->stats->rx_drop);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
|
|
goto drop;
|
|
}
|
|
|
|
if ((sb = dev_alloc_skb(64)) == NULL) {
|
|
printk("%s: Can't allocate buffers for AAL0.\n",
|
|
card->name);
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
goto drop;
|
|
}
|
|
|
|
@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
|
|
ATM_SKB(sb)->vcc = vcc;
|
|
__net_timestamp(sb);
|
|
vcc->push(vcc, sb);
|
|
- atomic_inc(&vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx);
|
|
|
|
drop:
|
|
skb_pull(queue, 64);
|
|
@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
|
|
|
|
if (vc == NULL) {
|
|
printk("%s: NULL connection in send().\n", card->name);
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
dev_kfree_skb(skb);
|
|
return -EINVAL;
|
|
}
|
|
if (!test_bit(VCF_TX, &vc->flags)) {
|
|
printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
dev_kfree_skb(skb);
|
|
return -EINVAL;
|
|
}
|
|
@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
|
|
break;
|
|
default:
|
|
printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
dev_kfree_skb(skb);
|
|
return -EINVAL;
|
|
}
|
|
|
|
if (skb_shinfo(skb)->nr_frags != 0) {
|
|
printk("%s: No scatter-gather yet.\n", card->name);
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
dev_kfree_skb(skb);
|
|
return -EINVAL;
|
|
}
|
|
@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
|
|
|
|
err = queue_skb(card, vc, skb, oam);
|
|
if (err) {
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
dev_kfree_skb(skb);
|
|
return err;
|
|
}
|
|
@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
|
|
skb = dev_alloc_skb(64);
|
|
if (!skb) {
|
|
printk("%s: Out of memory in send_oam().\n", card->name);
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
return -ENOMEM;
|
|
}
|
|
atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
|
|
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
|
|
index d438601..8b9849574 100644
|
|
--- a/drivers/atm/iphase.c
|
|
+++ b/drivers/atm/iphase.c
|
|
@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
|
|
status = (u_short) (buf_desc_ptr->desc_mode);
|
|
if (status & (RX_CER | RX_PTE | RX_OFL))
|
|
{
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
IF_ERR(printk("IA: bad packet, dropping it");)
|
|
if (status & RX_CER) {
|
|
IF_ERR(printk(" cause: packet CRC error\n");)
|
|
@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
|
|
len = dma_addr - buf_addr;
|
|
if (len > iadev->rx_buf_sz) {
|
|
printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
goto out_free_desc;
|
|
}
|
|
|
|
@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
|
|
ia_vcc = INPH_IA_VCC(vcc);
|
|
if (ia_vcc == NULL)
|
|
{
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
atm_return(vcc, skb->truesize);
|
|
dev_kfree_skb_any(skb);
|
|
goto INCR_DLE;
|
|
@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
|
|
if ((length > iadev->rx_buf_sz) || (length >
|
|
(skb->len - sizeof(struct cpcs_trailer))))
|
|
{
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
|
|
length, skb->len);)
|
|
atm_return(vcc, skb->truesize);
|
|
@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
|
|
|
|
IF_RX(printk("rx_dle_intr: skb push");)
|
|
vcc->push(vcc,skb);
|
|
- atomic_inc(&vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx);
|
|
iadev->rx_pkt_cnt++;
|
|
}
|
|
INCR_DLE:
|
|
@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
|
|
{
|
|
struct k_sonet_stats *stats;
|
|
stats = &PRIV(_ia_dev[board])->sonet_stats;
|
|
- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
|
|
- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
|
|
- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
|
|
- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
|
|
- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
|
|
- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
|
|
- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
|
|
- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
|
|
- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
|
|
+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
|
|
+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
|
|
+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
|
|
+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
|
|
+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
|
|
+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
|
|
+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
|
|
+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
|
|
+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
|
|
}
|
|
ia_cmds.status = 0;
|
|
break;
|
|
@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
|
|
if ((desc == 0) || (desc > iadev->num_tx_desc))
|
|
{
|
|
IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
|
|
- atomic_inc(&vcc->stats->tx);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx);
|
|
if (vcc->pop)
|
|
vcc->pop(vcc, skb);
|
|
else
|
|
@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
|
|
ATM_DESC(skb) = vcc->vci;
|
|
skb_queue_tail(&iadev->tx_dma_q, skb);
|
|
|
|
- atomic_inc(&vcc->stats->tx);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx);
|
|
iadev->tx_pkt_cnt++;
|
|
/* Increment transaction counter */
|
|
writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
|
|
|
|
#if 0
|
|
/* add flow control logic */
|
|
- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
|
|
+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
|
|
if (iavcc->vc_desc_cnt > 10) {
|
|
vcc->tx_quota = vcc->tx_quota * 3 / 4;
|
|
printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
|
|
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
|
|
index 68c7588..7036683 100644
|
|
--- a/drivers/atm/lanai.c
|
|
+++ b/drivers/atm/lanai.c
|
|
@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
|
|
vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
|
|
lanai_endtx(lanai, lvcc);
|
|
lanai_free_skb(lvcc->tx.atmvcc, skb);
|
|
- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
|
|
+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
|
|
}
|
|
|
|
/* Try to fill the buffer - don't call unless there is backlog */
|
|
@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
|
|
ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
|
|
__net_timestamp(skb);
|
|
lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
|
|
- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
|
|
+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
|
|
out:
|
|
lvcc->rx.buf.ptr = end;
|
|
cardvcc_write(lvcc, endptr, vcc_rxreadptr);
|
|
@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
|
|
DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
|
|
"vcc %d\n", lanai->number, (unsigned int) s, vci);
|
|
lanai->stats.service_rxnotaal5++;
|
|
- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
|
|
return 0;
|
|
}
|
|
if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
|
|
@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
|
|
int bytes;
|
|
read_unlock(&vcc_sklist_lock);
|
|
DPRINTK("got trashed rx pdu on vci %d\n", vci);
|
|
- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
|
|
lvcc->stats.x.aal5.service_trash++;
|
|
bytes = (SERVICE_GET_END(s) * 16) -
|
|
(((unsigned long) lvcc->rx.buf.ptr) -
|
|
@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
|
|
}
|
|
if (s & SERVICE_STREAM) {
|
|
read_unlock(&vcc_sklist_lock);
|
|
- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
|
|
lvcc->stats.x.aal5.service_stream++;
|
|
printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
|
|
"PDU on VCI %d!\n", lanai->number, vci);
|
|
@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
|
|
return 0;
|
|
}
|
|
DPRINTK("got rx crc error on vci %d\n", vci);
|
|
- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
|
|
lvcc->stats.x.aal5.service_rxcrc++;
|
|
lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
|
|
cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
|
|
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
|
|
index 1c70c45..300718d 100644
|
|
--- a/drivers/atm/nicstar.c
|
|
+++ b/drivers/atm/nicstar.c
|
|
@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
|
|
if ((vc = (vc_map *) vcc->dev_data) == NULL) {
|
|
printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
|
|
card->index);
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
dev_kfree_skb_any(skb);
|
|
return -EINVAL;
|
|
}
|
|
@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
|
|
if (!vc->tx) {
|
|
printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
|
|
card->index);
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
dev_kfree_skb_any(skb);
|
|
return -EINVAL;
|
|
}
|
|
@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
|
|
if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
|
|
printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
|
|
card->index);
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
dev_kfree_skb_any(skb);
|
|
return -EINVAL;
|
|
}
|
|
|
|
if (skb_shinfo(skb)->nr_frags != 0) {
|
|
printk("nicstar%d: No scatter-gather yet.\n", card->index);
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
dev_kfree_skb_any(skb);
|
|
return -EINVAL;
|
|
}
|
|
@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
|
|
}
|
|
|
|
if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
|
|
- atomic_inc(&vcc->stats->tx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx_err);
|
|
dev_kfree_skb_any(skb);
|
|
return -EIO;
|
|
}
|
|
- atomic_inc(&vcc->stats->tx);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx);
|
|
|
|
return 0;
|
|
}
|
|
@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
|
|
printk
|
|
("nicstar%d: Can't allocate buffers for aal0.\n",
|
|
card->index);
|
|
- atomic_add(i, &vcc->stats->rx_drop);
|
|
+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
|
|
break;
|
|
}
|
|
if (!atm_charge(vcc, sb->truesize)) {
|
|
RXPRINTK
|
|
("nicstar%d: atm_charge() dropped aal0 packets.\n",
|
|
card->index);
|
|
- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
|
|
+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
|
|
dev_kfree_skb_any(sb);
|
|
break;
|
|
}
|
|
@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
|
|
ATM_SKB(sb)->vcc = vcc;
|
|
__net_timestamp(sb);
|
|
vcc->push(vcc, sb);
|
|
- atomic_inc(&vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx);
|
|
cell += ATM_CELL_PAYLOAD;
|
|
}
|
|
|
|
@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
|
|
if (iovb == NULL) {
|
|
printk("nicstar%d: Out of iovec buffers.\n",
|
|
card->index);
|
|
- atomic_inc(&vcc->stats->rx_drop);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
|
|
recycle_rx_buf(card, skb);
|
|
return;
|
|
}
|
|
@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
|
|
small or large buffer itself. */
|
|
} else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
|
|
printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
|
|
NS_MAX_IOVECS);
|
|
NS_PRV_IOVCNT(iovb) = 0;
|
|
@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
|
|
("nicstar%d: Expected a small buffer, and this is not one.\n",
|
|
card->index);
|
|
which_list(card, skb);
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
recycle_rx_buf(card, skb);
|
|
vc->rx_iov = NULL;
|
|
recycle_iov_buf(card, iovb);
|
|
@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
|
|
("nicstar%d: Expected a large buffer, and this is not one.\n",
|
|
card->index);
|
|
which_list(card, skb);
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
|
|
NS_PRV_IOVCNT(iovb));
|
|
vc->rx_iov = NULL;
|
|
@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
|
|
printk(" - PDU size mismatch.\n");
|
|
else
|
|
printk(".\n");
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
|
|
NS_PRV_IOVCNT(iovb));
|
|
vc->rx_iov = NULL;
|
|
@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
|
|
/* skb points to a small buffer */
|
|
if (!atm_charge(vcc, skb->truesize)) {
|
|
push_rxbufs(card, skb);
|
|
- atomic_inc(&vcc->stats->rx_drop);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
|
|
} else {
|
|
skb_put(skb, len);
|
|
dequeue_sm_buf(card, skb);
|
|
@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
|
|
ATM_SKB(skb)->vcc = vcc;
|
|
__net_timestamp(skb);
|
|
vcc->push(vcc, skb);
|
|
- atomic_inc(&vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx);
|
|
}
|
|
} else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
|
|
struct sk_buff *sb;
|
|
@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
|
|
if (len <= NS_SMBUFSIZE) {
|
|
if (!atm_charge(vcc, sb->truesize)) {
|
|
push_rxbufs(card, sb);
|
|
- atomic_inc(&vcc->stats->rx_drop);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
|
|
} else {
|
|
skb_put(sb, len);
|
|
dequeue_sm_buf(card, sb);
|
|
@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
|
|
ATM_SKB(sb)->vcc = vcc;
|
|
__net_timestamp(sb);
|
|
vcc->push(vcc, sb);
|
|
- atomic_inc(&vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx);
|
|
}
|
|
|
|
push_rxbufs(card, skb);
|
|
@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
|
|
|
|
if (!atm_charge(vcc, skb->truesize)) {
|
|
push_rxbufs(card, skb);
|
|
- atomic_inc(&vcc->stats->rx_drop);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
|
|
} else {
|
|
dequeue_lg_buf(card, skb);
|
|
#ifdef NS_USE_DESTRUCTORS
|
|
@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
|
|
ATM_SKB(skb)->vcc = vcc;
|
|
__net_timestamp(skb);
|
|
vcc->push(vcc, skb);
|
|
- atomic_inc(&vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx);
|
|
}
|
|
|
|
push_rxbufs(card, sb);
|
|
@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
|
|
printk
|
|
("nicstar%d: Out of huge buffers.\n",
|
|
card->index);
|
|
- atomic_inc(&vcc->stats->rx_drop);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
|
|
recycle_iovec_rx_bufs(card,
|
|
(struct iovec *)
|
|
iovb->data,
|
|
@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
|
|
card->hbpool.count++;
|
|
} else
|
|
dev_kfree_skb_any(hb);
|
|
- atomic_inc(&vcc->stats->rx_drop);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
|
|
} else {
|
|
/* Copy the small buffer to the huge buffer */
|
|
sb = (struct sk_buff *)iov->iov_base;
|
|
@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
|
|
#endif /* NS_USE_DESTRUCTORS */
|
|
__net_timestamp(hb);
|
|
vcc->push(vcc, hb);
|
|
- atomic_inc(&vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx);
|
|
}
|
|
}
|
|
|
|
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
|
|
index 1853a45..cf2426d 100644
|
|
--- a/drivers/atm/solos-pci.c
|
|
+++ b/drivers/atm/solos-pci.c
|
|
@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
|
|
}
|
|
atm_charge(vcc, skb->truesize);
|
|
vcc->push(vcc, skb);
|
|
- atomic_inc(&vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx);
|
|
break;
|
|
|
|
case PKT_STATUS:
|
|
@@ -1010,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_card *card)
|
|
vcc = SKB_CB(oldskb)->vcc;
|
|
|
|
if (vcc) {
|
|
- atomic_inc(&vcc->stats->tx);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx);
|
|
solos_pop(vcc, oldskb);
|
|
} else
|
|
dev_kfree_skb_irq(oldskb);
|
|
diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
|
|
index 0215934..ce9f5b1 100644
|
|
--- a/drivers/atm/suni.c
|
|
+++ b/drivers/atm/suni.c
|
|
@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
|
|
|
|
|
|
#define ADD_LIMITED(s,v) \
|
|
- atomic_add((v),&stats->s); \
|
|
- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
|
|
+ atomic_add_unchecked((v),&stats->s); \
|
|
+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
|
|
|
|
|
|
static void suni_hz(unsigned long from_timer)
|
|
diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
|
|
index 5120a96..e2572bd 100644
|
|
--- a/drivers/atm/uPD98402.c
|
|
+++ b/drivers/atm/uPD98402.c
|
|
@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
|
|
struct sonet_stats tmp;
|
|
int error = 0;
|
|
|
|
- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
|
|
+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
|
|
sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
|
|
if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
|
|
if (zero && !error) {
|
|
@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
|
|
|
|
|
|
#define ADD_LIMITED(s,v) \
|
|
- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
|
|
- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
|
|
- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
|
|
+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
|
|
+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
|
|
+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
|
|
|
|
|
|
static void stat_event(struct atm_dev *dev)
|
|
@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
|
|
if (reason & uPD98402_INT_PFM) stat_event(dev);
|
|
if (reason & uPD98402_INT_PCO) {
|
|
(void) GET(PCOCR); /* clear interrupt cause */
|
|
- atomic_add(GET(HECCT),
|
|
+ atomic_add_unchecked(GET(HECCT),
|
|
&PRIV(dev)->sonet_stats.uncorr_hcs);
|
|
}
|
|
if ((reason & uPD98402_INT_RFO) &&
|
|
@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
|
|
PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
|
|
uPD98402_INT_LOS),PIMR); /* enable them */
|
|
(void) fetch_stats(dev,NULL,1); /* clear kernel counters */
|
|
- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
|
|
- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
|
|
- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
|
|
+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
|
|
+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
|
|
+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
|
|
index abe4e20..83c4727 100644
|
|
--- a/drivers/atm/zatm.c
|
|
+++ b/drivers/atm/zatm.c
|
|
@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
|
|
}
|
|
if (!size) {
|
|
dev_kfree_skb_irq(skb);
|
|
- if (vcc) atomic_inc(&vcc->stats->rx_err);
|
|
+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
continue;
|
|
}
|
|
if (!atm_charge(vcc,skb->truesize)) {
|
|
@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
|
|
skb->len = size;
|
|
ATM_SKB(skb)->vcc = vcc;
|
|
vcc->push(vcc,skb);
|
|
- atomic_inc(&vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx);
|
|
}
|
|
zout(pos & 0xffff,MTA(mbx));
|
|
#if 0 /* probably a stupid idea */
|
|
@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
|
|
skb_queue_head(&zatm_vcc->backlog,skb);
|
|
break;
|
|
}
|
|
- atomic_inc(&vcc->stats->tx);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx);
|
|
wake_up(&zatm_vcc->tx_wait);
|
|
}
|
|
|
|
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
|
|
index 8493536..31adee0 100644
|
|
--- a/drivers/base/devtmpfs.c
|
|
+++ b/drivers/base/devtmpfs.c
|
|
@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
|
|
if (!thread)
|
|
return 0;
|
|
|
|
- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
|
|
+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
|
|
if (err)
|
|
printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
|
|
else
|
|
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
|
|
index 975046e..d87d93e 100644
|
|
--- a/drivers/base/power/wakeup.c
|
|
+++ b/drivers/base/power/wakeup.c
|
|
@@ -30,14 +30,14 @@ bool events_check_enabled __read_mostly;
|
|
* They need to be modified together atomically, so it's better to use one
|
|
* atomic variable to hold them both.
|
|
*/
|
|
-static atomic_t combined_event_count = ATOMIC_INIT(0);
|
|
+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
|
|
|
|
#define IN_PROGRESS_BITS (sizeof(int) * 4)
|
|
#define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
|
|
|
|
static void split_counters(unsigned int *cnt, unsigned int *inpr)
|
|
{
|
|
- unsigned int comb = atomic_read(&combined_event_count);
|
|
+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
|
|
|
|
*cnt = (comb >> IN_PROGRESS_BITS);
|
|
*inpr = comb & MAX_IN_PROGRESS;
|
|
diff --git a/drivers/base/power/wakeup.c.rej b/drivers/base/power/wakeup.c.rej
|
|
new file mode 100644
|
|
index 0000000..f0caa9b
|
|
--- /dev/null
|
|
+++ b/drivers/base/power/wakeup.c.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- drivers/base/power/wakeup.c 2012-05-21 11:32:59.075927747 +0200
|
|
++++ drivers/base/power/wakeup.c 2012-05-21 12:10:09.948048911 +0200
|
|
+@@ -475,7 +475,7 @@ static void wakeup_source_deactivate(str
|
|
+ * Increment the counter of registered wakeup events and decrement the
|
|
+ * couter of wakeup events in progress simultaneously.
|
|
+ */
|
|
+- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
|
|
++ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
|
|
+ }
|
|
+
|
|
+ /**
|
|
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
|
|
index d7ad865..61ddf2c 100644
|
|
--- a/drivers/block/cciss.c
|
|
+++ b/drivers/block/cciss.c
|
|
@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
|
|
while (!list_empty(&h->reqQ)) {
|
|
c = list_entry(h->reqQ.next, CommandList_struct, list);
|
|
/* can't do anything if fifo is full */
|
|
- if ((h->access.fifo_full(h))) {
|
|
+ if ((h->access->fifo_full(h))) {
|
|
dev_warn(&h->pdev->dev, "fifo full\n");
|
|
break;
|
|
}
|
|
@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
|
|
h->Qdepth--;
|
|
|
|
/* Tell the controller execute command */
|
|
- h->access.submit_command(h, c);
|
|
+ h->access->submit_command(h, c);
|
|
|
|
/* Put job onto the completed Q */
|
|
addQ(&h->cmpQ, c);
|
|
@@ -3444,17 +3444,17 @@ static void do_cciss_request(struct request_queue *q)
|
|
|
|
static inline unsigned long get_next_completion(ctlr_info_t *h)
|
|
{
|
|
- return h->access.command_completed(h);
|
|
+ return h->access->command_completed(h);
|
|
}
|
|
|
|
static inline int interrupt_pending(ctlr_info_t *h)
|
|
{
|
|
- return h->access.intr_pending(h);
|
|
+ return h->access->intr_pending(h);
|
|
}
|
|
|
|
static inline long interrupt_not_for_us(ctlr_info_t *h)
|
|
{
|
|
- return ((h->access.intr_pending(h) == 0) ||
|
|
+ return ((h->access->intr_pending(h) == 0) ||
|
|
(h->interrupts_enabled == 0));
|
|
}
|
|
|
|
@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
|
|
u32 a;
|
|
|
|
if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
|
|
- return h->access.command_completed(h);
|
|
+ return h->access->command_completed(h);
|
|
|
|
if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
|
|
a = *(h->reply_pool_head); /* Next cmd in ring buffer */
|
|
@@ -4045,7 +4045,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
|
|
trans_support & CFGTBL_Trans_use_short_tags);
|
|
|
|
/* Change the access methods to the performant access methods */
|
|
- h->access = SA5_performant_access;
|
|
+ h->access = &SA5_performant_access;
|
|
h->transMethod = CFGTBL_Trans_Performant;
|
|
|
|
return;
|
|
@@ -4317,7 +4317,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
|
|
if (prod_index < 0)
|
|
return -ENODEV;
|
|
h->product_name = products[prod_index].product_name;
|
|
- h->access = *(products[prod_index].access);
|
|
+ h->access = products[prod_index].access;
|
|
|
|
if (cciss_board_disabled(h)) {
|
|
dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
|
|
@@ -5042,7 +5042,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
|
|
}
|
|
|
|
/* make sure the board interrupts are off */
|
|
- h->access.set_intr_mask(h, CCISS_INTR_OFF);
|
|
+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
|
|
rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
|
|
if (rc)
|
|
goto clean2;
|
|
@@ -5094,7 +5094,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
|
|
* fake ones to scoop up any residual completions.
|
|
*/
|
|
spin_lock_irqsave(&h->lock, flags);
|
|
- h->access.set_intr_mask(h, CCISS_INTR_OFF);
|
|
+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
|
|
spin_unlock_irqrestore(&h->lock, flags);
|
|
free_irq(h->intr[h->intr_mode], h);
|
|
rc = cciss_request_irq(h, cciss_msix_discard_completions,
|
|
@@ -5114,9 +5114,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
|
|
dev_info(&h->pdev->dev, "Board READY.\n");
|
|
dev_info(&h->pdev->dev,
|
|
"Waiting for stale completions to drain.\n");
|
|
- h->access.set_intr_mask(h, CCISS_INTR_ON);
|
|
+ h->access->set_intr_mask(h, CCISS_INTR_ON);
|
|
msleep(10000);
|
|
- h->access.set_intr_mask(h, CCISS_INTR_OFF);
|
|
+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
|
|
|
|
rc = controller_reset_failed(h->cfgtable);
|
|
if (rc)
|
|
@@ -5139,7 +5139,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
|
|
cciss_scsi_setup(h);
|
|
|
|
/* Turn the interrupts on so we can service requests */
|
|
- h->access.set_intr_mask(h, CCISS_INTR_ON);
|
|
+ h->access->set_intr_mask(h, CCISS_INTR_ON);
|
|
|
|
/* Get the firmware version */
|
|
inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
|
|
@@ -5212,7 +5212,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
|
|
kfree(flush_buf);
|
|
if (return_code != IO_OK)
|
|
dev_warn(&h->pdev->dev, "Error flushing cache\n");
|
|
- h->access.set_intr_mask(h, CCISS_INTR_OFF);
|
|
+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
|
|
free_irq(h->intr[h->intr_mode], h);
|
|
}
|
|
|
|
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
|
|
index 7fda30e..eb5dfe0 100644
|
|
--- a/drivers/block/cciss.h
|
|
+++ b/drivers/block/cciss.h
|
|
@@ -101,7 +101,7 @@ struct ctlr_info
|
|
/* information about each logical volume */
|
|
drive_info_struct *drv[CISS_MAX_LUN];
|
|
|
|
- struct access_method access;
|
|
+ struct access_method *access;
|
|
|
|
/* queue and queue Info */
|
|
struct list_head reqQ;
|
|
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
|
|
index 504bc16..e13b631 100644
|
|
--- a/drivers/block/cpqarray.c
|
|
+++ b/drivers/block/cpqarray.c
|
|
@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
|
|
if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
|
|
goto Enomem4;
|
|
}
|
|
- hba[i]->access.set_intr_mask(hba[i], 0);
|
|
+ hba[i]->access->set_intr_mask(hba[i], 0);
|
|
if (request_irq(hba[i]->intr, do_ida_intr,
|
|
IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
|
|
{
|
|
@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
|
|
add_timer(&hba[i]->timer);
|
|
|
|
/* Enable IRQ now that spinlock and rate limit timer are set up */
|
|
- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
|
|
+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
|
|
|
|
for(j=0; j<NWD; j++) {
|
|
struct gendisk *disk = ida_gendisk[i][j];
|
|
@@ -694,7 +694,7 @@ DBGINFO(
|
|
for(i=0; i<NR_PRODUCTS; i++) {
|
|
if (board_id == products[i].board_id) {
|
|
c->product_name = products[i].product_name;
|
|
- c->access = *(products[i].access);
|
|
+ c->access = products[i].access;
|
|
break;
|
|
}
|
|
}
|
|
@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
|
|
hba[ctlr]->intr = intr;
|
|
sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
|
|
hba[ctlr]->product_name = products[j].product_name;
|
|
- hba[ctlr]->access = *(products[j].access);
|
|
+ hba[ctlr]->access = products[j].access;
|
|
hba[ctlr]->ctlr = ctlr;
|
|
hba[ctlr]->board_id = board_id;
|
|
hba[ctlr]->pci_dev = NULL; /* not PCI */
|
|
@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
|
|
|
|
while((c = h->reqQ) != NULL) {
|
|
/* Can't do anything if we're busy */
|
|
- if (h->access.fifo_full(h) == 0)
|
|
+ if (h->access->fifo_full(h) == 0)
|
|
return;
|
|
|
|
/* Get the first entry from the request Q */
|
|
@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
|
|
h->Qdepth--;
|
|
|
|
/* Tell the controller to do our bidding */
|
|
- h->access.submit_command(h, c);
|
|
+ h->access->submit_command(h, c);
|
|
|
|
/* Get onto the completion Q */
|
|
addQ(&h->cmpQ, c);
|
|
@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
|
|
unsigned long flags;
|
|
__u32 a,a1;
|
|
|
|
- istat = h->access.intr_pending(h);
|
|
+ istat = h->access->intr_pending(h);
|
|
/* Is this interrupt for us? */
|
|
if (istat == 0)
|
|
return IRQ_NONE;
|
|
@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
|
|
*/
|
|
spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
|
|
if (istat & FIFO_NOT_EMPTY) {
|
|
- while((a = h->access.command_completed(h))) {
|
|
+ while((a = h->access->command_completed(h))) {
|
|
a1 = a; a &= ~3;
|
|
if ((c = h->cmpQ) == NULL)
|
|
{
|
|
@@ -1450,11 +1450,11 @@ static int sendcmd(
|
|
/*
|
|
* Disable interrupt
|
|
*/
|
|
- info_p->access.set_intr_mask(info_p, 0);
|
|
+ info_p->access->set_intr_mask(info_p, 0);
|
|
/* Make sure there is room in the command FIFO */
|
|
/* Actually it should be completely empty at this time. */
|
|
for (i = 200000; i > 0; i--) {
|
|
- temp = info_p->access.fifo_full(info_p);
|
|
+ temp = info_p->access->fifo_full(info_p);
|
|
if (temp != 0) {
|
|
break;
|
|
}
|
|
@@ -1467,7 +1467,7 @@ DBG(
|
|
/*
|
|
* Send the cmd
|
|
*/
|
|
- info_p->access.submit_command(info_p, c);
|
|
+ info_p->access->submit_command(info_p, c);
|
|
complete = pollcomplete(ctlr);
|
|
|
|
pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
|
|
@@ -1550,9 +1550,9 @@ static int revalidate_allvol(ctlr_info_t *host)
|
|
* we check the new geometry. Then turn interrupts back on when
|
|
* we're done.
|
|
*/
|
|
- host->access.set_intr_mask(host, 0);
|
|
+ host->access->set_intr_mask(host, 0);
|
|
getgeometry(ctlr);
|
|
- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
|
|
+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
|
|
|
|
for(i=0; i<NWD; i++) {
|
|
struct gendisk *disk = ida_gendisk[ctlr][i];
|
|
@@ -1592,7 +1592,7 @@ static int pollcomplete(int ctlr)
|
|
/* Wait (up to 2 seconds) for a command to complete */
|
|
|
|
for (i = 200000; i > 0; i--) {
|
|
- done = hba[ctlr]->access.command_completed(hba[ctlr]);
|
|
+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
|
|
if (done == 0) {
|
|
udelay(10); /* a short fixed delay */
|
|
} else
|
|
diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
|
|
index be73e9d..7fbf140 100644
|
|
--- a/drivers/block/cpqarray.h
|
|
+++ b/drivers/block/cpqarray.h
|
|
@@ -99,7 +99,7 @@ struct ctlr_info {
|
|
drv_info_t drv[NWD];
|
|
struct proc_dir_entry *proc;
|
|
|
|
- struct access_method access;
|
|
+ struct access_method *access;
|
|
|
|
cmdlist_t *reqQ;
|
|
cmdlist_t *cmpQ;
|
|
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
|
|
index 8d68056..e67050f 100644
|
|
--- a/drivers/block/drbd/drbd_int.h
|
|
+++ b/drivers/block/drbd/drbd_int.h
|
|
@@ -736,7 +736,7 @@ struct drbd_request;
|
|
struct drbd_epoch {
|
|
struct list_head list;
|
|
unsigned int barrier_nr;
|
|
- atomic_t epoch_size; /* increased on every request added. */
|
|
+ atomic_unchecked_t epoch_size; /* increased on every request added. */
|
|
atomic_t active; /* increased on every req. added, and dec on every finished. */
|
|
unsigned long flags;
|
|
};
|
|
@@ -1108,7 +1108,7 @@ struct drbd_conf {
|
|
void *int_dig_in;
|
|
void *int_dig_vv;
|
|
wait_queue_head_t seq_wait;
|
|
- atomic_t packet_seq;
|
|
+ atomic_unchecked_t packet_seq;
|
|
unsigned int peer_seq;
|
|
spinlock_t peer_seq_lock;
|
|
unsigned int minor;
|
|
@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
|
|
|
|
static inline void drbd_tcp_cork(struct socket *sock)
|
|
{
|
|
- int __user val = 1;
|
|
+ int val = 1;
|
|
(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
|
|
- (char __user *)&val, sizeof(val));
|
|
+ (char __force_user *)&val, sizeof(val));
|
|
}
|
|
|
|
static inline void drbd_tcp_uncork(struct socket *sock)
|
|
{
|
|
- int __user val = 0;
|
|
+ int val = 0;
|
|
(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
|
|
- (char __user *)&val, sizeof(val));
|
|
+ (char __force_user *)&val, sizeof(val));
|
|
}
|
|
|
|
static inline void drbd_tcp_nodelay(struct socket *sock)
|
|
{
|
|
- int __user val = 1;
|
|
+ int val = 1;
|
|
(void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
|
|
- (char __user *)&val, sizeof(val));
|
|
+ (char __force_user *)&val, sizeof(val));
|
|
}
|
|
|
|
static inline void drbd_tcp_quickack(struct socket *sock)
|
|
{
|
|
- int __user val = 2;
|
|
+ int val = 2;
|
|
(void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
|
|
- (char __user *)&val, sizeof(val));
|
|
+ (char __force_user *)&val, sizeof(val));
|
|
}
|
|
|
|
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
|
|
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
|
|
index 211fc44..c5116f1 100644
|
|
--- a/drivers/block/drbd/drbd_main.c
|
|
+++ b/drivers/block/drbd/drbd_main.c
|
|
@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
|
|
p.sector = sector;
|
|
p.block_id = block_id;
|
|
p.blksize = blksize;
|
|
- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
|
|
+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
|
|
|
|
if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
|
|
return false;
|
|
@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
|
|
p.sector = cpu_to_be64(req->sector);
|
|
p.block_id = (unsigned long)req;
|
|
p.seq_num = cpu_to_be32(req->seq_num =
|
|
- atomic_add_return(1, &mdev->packet_seq));
|
|
+ atomic_add_return_unchecked(1, &mdev->packet_seq));
|
|
|
|
dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
|
|
|
|
@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
|
|
atomic_set(&mdev->unacked_cnt, 0);
|
|
atomic_set(&mdev->local_cnt, 0);
|
|
atomic_set(&mdev->net_cnt, 0);
|
|
- atomic_set(&mdev->packet_seq, 0);
|
|
+ atomic_set_unchecked(&mdev->packet_seq, 0);
|
|
atomic_set(&mdev->pp_in_use, 0);
|
|
atomic_set(&mdev->pp_in_use_by_net, 0);
|
|
atomic_set(&mdev->rs_sect_in, 0);
|
|
@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
|
|
mdev->receiver.t_state);
|
|
|
|
/* no need to lock it, I'm the only thread alive */
|
|
- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
|
|
- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
|
|
+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
|
|
+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
|
|
mdev->al_writ_cnt =
|
|
mdev->bm_writ_cnt =
|
|
mdev->read_cnt =
|
|
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
|
|
index 946166e..356b39a 100644
|
|
--- a/drivers/block/drbd/drbd_nl.c
|
|
+++ b/drivers/block/drbd/drbd_nl.c
|
|
@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
|
|
module_put(THIS_MODULE);
|
|
}
|
|
|
|
-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
|
|
+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
|
|
|
|
static unsigned short *
|
|
__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
|
|
@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
|
|
cn_reply->id.idx = CN_IDX_DRBD;
|
|
cn_reply->id.val = CN_VAL_DRBD;
|
|
|
|
- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
|
|
+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
|
|
cn_reply->ack = 0; /* not used here. */
|
|
cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
|
|
(int)((char *)tl - (char *)reply->tag_list);
|
|
@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
|
|
cn_reply->id.idx = CN_IDX_DRBD;
|
|
cn_reply->id.val = CN_VAL_DRBD;
|
|
|
|
- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
|
|
+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
|
|
cn_reply->ack = 0; /* not used here. */
|
|
cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
|
|
(int)((char *)tl - (char *)reply->tag_list);
|
|
@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
|
|
cn_reply->id.idx = CN_IDX_DRBD;
|
|
cn_reply->id.val = CN_VAL_DRBD;
|
|
|
|
- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
|
|
+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
|
|
cn_reply->ack = 0; // not used here.
|
|
cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
|
|
(int)((char*)tl - (char*)reply->tag_list);
|
|
@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
|
|
cn_reply->id.idx = CN_IDX_DRBD;
|
|
cn_reply->id.val = CN_VAL_DRBD;
|
|
|
|
- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
|
|
+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
|
|
cn_reply->ack = 0; /* not used here. */
|
|
cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
|
|
(int)((char *)tl - (char *)reply->tag_list);
|
|
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
|
|
index 13cbdd3..26b88df 100644
|
|
--- a/drivers/block/drbd/drbd_receiver.c
|
|
+++ b/drivers/block/drbd/drbd_receiver.c
|
|
@@ -894,7 +894,7 @@ static int drbd_connect(struct drbd_conf *mdev)
|
|
sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
|
|
sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
|
|
|
|
- atomic_set(&mdev->packet_seq, 0);
|
|
+ atomic_set_unchecked(&mdev->packet_seq, 0);
|
|
mdev->peer_seq = 0;
|
|
|
|
drbd_thread_start(&mdev->asender);
|
|
@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
|
|
do {
|
|
next_epoch = NULL;
|
|
|
|
- epoch_size = atomic_read(&epoch->epoch_size);
|
|
+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
|
|
|
|
switch (ev & ~EV_CLEANUP) {
|
|
case EV_PUT:
|
|
@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
|
|
rv = FE_DESTROYED;
|
|
} else {
|
|
epoch->flags = 0;
|
|
- atomic_set(&epoch->epoch_size, 0);
|
|
+ atomic_set_unchecked(&epoch->epoch_size, 0);
|
|
/* atomic_set(&epoch->active, 0); is already zero */
|
|
if (rv == FE_STILL_LIVE)
|
|
rv = FE_RECYCLED;
|
|
@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
|
|
drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
|
|
drbd_flush(mdev);
|
|
|
|
- if (atomic_read(&mdev->current_epoch->epoch_size)) {
|
|
+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
|
|
epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
|
|
if (epoch)
|
|
break;
|
|
}
|
|
|
|
epoch = mdev->current_epoch;
|
|
- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
|
|
+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
|
|
|
|
D_ASSERT(atomic_read(&epoch->active) == 0);
|
|
D_ASSERT(epoch->flags == 0);
|
|
@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
|
|
}
|
|
|
|
epoch->flags = 0;
|
|
- atomic_set(&epoch->epoch_size, 0);
|
|
+ atomic_set_unchecked(&epoch->epoch_size, 0);
|
|
atomic_set(&epoch->active, 0);
|
|
|
|
spin_lock(&mdev->epoch_lock);
|
|
- if (atomic_read(&mdev->current_epoch->epoch_size)) {
|
|
+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
|
|
list_add(&epoch->list, &mdev->current_epoch->list);
|
|
mdev->current_epoch = epoch;
|
|
mdev->epochs++;
|
|
@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
|
|
spin_unlock(&mdev->peer_seq_lock);
|
|
|
|
drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
|
|
- atomic_inc(&mdev->current_epoch->epoch_size);
|
|
+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
|
|
return drbd_drain_block(mdev, data_size);
|
|
}
|
|
|
|
@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
|
|
|
|
spin_lock(&mdev->epoch_lock);
|
|
e->epoch = mdev->current_epoch;
|
|
- atomic_inc(&e->epoch->epoch_size);
|
|
+ atomic_inc_unchecked(&e->epoch->epoch_size);
|
|
atomic_inc(&e->epoch->active);
|
|
spin_unlock(&mdev->epoch_lock);
|
|
|
|
@@ -3884,7 +3884,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
|
|
D_ASSERT(list_empty(&mdev->done_ee));
|
|
|
|
/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
|
|
- atomic_set(&mdev->current_epoch->epoch_size, 0);
|
|
+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
|
|
D_ASSERT(list_empty(&mdev->current_epoch->list));
|
|
}
|
|
|
|
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
|
|
index 462fd18..521eb98 100644
|
|
--- a/drivers/block/loop.c
|
|
+++ b/drivers/block/loop.c
|
|
@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
|
|
mm_segment_t old_fs = get_fs();
|
|
|
|
set_fs(get_ds());
|
|
- bw = file->f_op->write(file, buf, len, &pos);
|
|
+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
|
|
set_fs(old_fs);
|
|
if (likely(bw == len))
|
|
return 0;
|
|
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
|
|
index 2e04433..22afc64 100644
|
|
--- a/drivers/char/agp/frontend.c
|
|
+++ b/drivers/char/agp/frontend.c
|
|
@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
|
|
if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
|
|
return -EFAULT;
|
|
|
|
- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
|
|
+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
|
|
return -EFAULT;
|
|
|
|
client = agp_find_client_by_pid(reserve.pid);
|
|
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
|
|
index 0ff5c2e..445c2b7 100644
|
|
--- a/drivers/char/hpet.c
|
|
+++ b/drivers/char/hpet.c
|
|
@@ -559,7 +559,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
|
|
}
|
|
|
|
static int
|
|
-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
|
|
+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
|
|
struct hpet_info *info)
|
|
{
|
|
struct hpet_timer __iomem *timer;
|
|
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
|
|
index 2c29942..604c5ba 100644
|
|
--- a/drivers/char/ipmi/ipmi_msghandler.c
|
|
+++ b/drivers/char/ipmi/ipmi_msghandler.c
|
|
@@ -420,7 +420,7 @@ struct ipmi_smi {
|
|
struct proc_dir_entry *proc_dir;
|
|
char proc_dir_name[10];
|
|
|
|
- atomic_t stats[IPMI_NUM_STATS];
|
|
+ atomic_unchecked_t stats[IPMI_NUM_STATS];
|
|
|
|
/*
|
|
* run_to_completion duplicate of smb_info, smi_info
|
|
@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
|
|
|
|
|
|
#define ipmi_inc_stat(intf, stat) \
|
|
- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
|
|
+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
|
|
#define ipmi_get_stat(intf, stat) \
|
|
- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
|
|
+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
|
|
|
|
static int is_lan_addr(struct ipmi_addr *addr)
|
|
{
|
|
@@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
|
|
INIT_LIST_HEAD(&intf->cmd_rcvrs);
|
|
init_waitqueue_head(&intf->waitq);
|
|
for (i = 0; i < IPMI_NUM_STATS; i++)
|
|
- atomic_set(&intf->stats[i], 0);
|
|
+ atomic_set_unchecked(&intf->stats[i], 0);
|
|
|
|
intf->proc_dir = NULL;
|
|
|
|
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
|
|
index e539949..55225d8 100644
|
|
--- a/drivers/char/ipmi/ipmi_si_intf.c
|
|
+++ b/drivers/char/ipmi/ipmi_si_intf.c
|
|
@@ -278,7 +278,7 @@ struct smi_info {
|
|
unsigned char slave_addr;
|
|
|
|
/* Counters and things for the proc filesystem. */
|
|
- atomic_t stats[SI_NUM_STATS];
|
|
+ atomic_unchecked_t stats[SI_NUM_STATS];
|
|
|
|
struct task_struct *thread;
|
|
|
|
@@ -287,9 +287,9 @@ struct smi_info {
|
|
};
|
|
|
|
#define smi_inc_stat(smi, stat) \
|
|
- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
|
|
+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
|
|
#define smi_get_stat(smi, stat) \
|
|
- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
|
|
+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
|
|
|
|
#define SI_MAX_PARMS 4
|
|
|
|
@@ -3219,7 +3219,7 @@ static int try_smi_init(struct smi_info *new_smi)
|
|
atomic_set(&new_smi->req_events, 0);
|
|
new_smi->run_to_completion = 0;
|
|
for (i = 0; i < SI_NUM_STATS; i++)
|
|
- atomic_set(&new_smi->stats[i], 0);
|
|
+ atomic_set_unchecked(&new_smi->stats[i], 0);
|
|
|
|
new_smi->interrupt_disabled = 1;
|
|
atomic_set(&new_smi->stop_operation, 0);
|
|
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
|
|
index 47ff7e4..0c7d340 100644
|
|
--- a/drivers/char/mbcs.c
|
|
+++ b/drivers/char/mbcs.c
|
|
@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
|
|
return 0;
|
|
}
|
|
|
|
-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
|
|
+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
|
|
{
|
|
.part_num = MBCS_PART_NUM,
|
|
.mfg_num = MBCS_MFG_NUM,
|
|
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
|
|
index 67e19b6..52ed8d4 100644
|
|
--- a/drivers/char/mem.c
|
|
+++ b/drivers/char/mem.c
|
|
@@ -121,6 +121,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
|
|
|
|
while (count > 0) {
|
|
unsigned long remaining;
|
|
+ char *temp;
|
|
|
|
sz = size_inside_page(p, count);
|
|
|
|
@@ -136,7 +137,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
|
|
if (!ptr)
|
|
return -EFAULT;
|
|
|
|
- remaining = copy_to_user(buf, ptr, sz);
|
|
+#ifdef CONFIG_PAX_USERCOPY
|
|
+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
|
|
+ if (!temp) {
|
|
+ unxlate_dev_mem_ptr(p, ptr);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+ memcpy(temp, ptr, sz);
|
|
+#else
|
|
+ temp = ptr;
|
|
+#endif
|
|
+
|
|
+ remaining = copy_to_user(buf, temp, sz);
|
|
+
|
|
+#ifdef CONFIG_PAX_USERCOPY
|
|
+ kfree(temp);
|
|
+#endif
|
|
+
|
|
unxlate_dev_mem_ptr(p, ptr);
|
|
if (remaining)
|
|
return -EFAULT;
|
|
@@ -403,9 +420,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
|
|
size_t count, loff_t *ppos)
|
|
{
|
|
unsigned long p = *ppos;
|
|
- ssize_t low_count, read, sz;
|
|
+ ssize_t low_count, read, sz, err = 0;
|
|
char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
|
|
- int err = 0;
|
|
|
|
read = 0;
|
|
if (p < (unsigned long) high_memory) {
|
|
@@ -427,6 +443,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
|
|
}
|
|
#endif
|
|
while (low_count > 0) {
|
|
+ char *temp;
|
|
+
|
|
sz = size_inside_page(p, low_count);
|
|
|
|
/*
|
|
@@ -436,7 +454,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
|
|
*/
|
|
kbuf = xlate_dev_kmem_ptr((char *)p);
|
|
|
|
- if (copy_to_user(buf, kbuf, sz))
|
|
+#ifdef CONFIG_PAX_USERCOPY
|
|
+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
|
|
+ if (!temp)
|
|
+ return -ENOMEM;
|
|
+ memcpy(temp, kbuf, sz);
|
|
+#else
|
|
+ temp = kbuf;
|
|
+#endif
|
|
+
|
|
+ err = copy_to_user(buf, temp, sz);
|
|
+
|
|
+#ifdef CONFIG_PAX_USERCOPY
|
|
+ kfree(temp);
|
|
+#endif
|
|
+
|
|
+ if (err)
|
|
return -EFAULT;
|
|
buf += sz;
|
|
p += sz;
|
|
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
|
|
index 9df78e2..01ba9ae 100644
|
|
--- a/drivers/char/nvram.c
|
|
+++ b/drivers/char/nvram.c
|
|
@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
|
|
|
|
spin_unlock_irq(&rtc_lock);
|
|
|
|
- if (copy_to_user(buf, contents, tmp - contents))
|
|
+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
|
|
return -EFAULT;
|
|
|
|
*ppos = i;
|
|
diff --git a/drivers/char/random.c b/drivers/char/random.c
|
|
index 85172fa..e54ef05 100644
|
|
--- a/drivers/char/random.c
|
|
+++ b/drivers/char/random.c
|
|
@@ -799,6 +799,17 @@ void add_disk_randomness(struct gendisk *disk)
|
|
}
|
|
#endif
|
|
|
|
+#ifdef CONFIG_PAX_LATENT_ENTROPY
|
|
+u64 latent_entropy;
|
|
+
|
|
+__init void transfer_latent_entropy(void)
|
|
+{
|
|
+ mix_pool_bytes(&input_pool, &latent_entropy, sizeof(latent_entropy));
|
|
+ mix_pool_bytes(&nonblocking_pool, &latent_entropy, sizeof(latent_entropy));
|
|
+// printk(KERN_INFO "PAX: transferring latent entropy: %16llx\n", latent_entropy);
|
|
+}
|
|
+#endif
|
|
+
|
|
/*********************************************************************
|
|
*
|
|
* Entropy extraction routines
|
|
@@ -1016,7 +1027,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
|
|
|
|
extract_buf(r, tmp);
|
|
i = min_t(int, nbytes, EXTRACT_SIZE);
|
|
- if (copy_to_user(buf, tmp, i)) {
|
|
+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
|
|
ret = -EFAULT;
|
|
break;
|
|
}
|
|
@@ -1350,7 +1361,7 @@ EXPORT_SYMBOL(generate_random_uuid);
|
|
#include <linux/sysctl.h>
|
|
|
|
static int min_read_thresh = 8, min_write_thresh;
|
|
-static int max_read_thresh = INPUT_POOL_WORDS * 32;
|
|
+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
|
|
static int max_write_thresh = INPUT_POOL_WORDS * 32;
|
|
static char sysctl_bootid[16];
|
|
|
|
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
|
|
index 45713f0..8286d21 100644
|
|
--- a/drivers/char/sonypi.c
|
|
+++ b/drivers/char/sonypi.c
|
|
@@ -54,6 +54,7 @@
|
|
|
|
#include <asm/uaccess.h>
|
|
#include <asm/io.h>
|
|
+#include <asm/local.h>
|
|
|
|
#include <linux/sonypi.h>
|
|
|
|
@@ -490,7 +491,7 @@ static struct sonypi_device {
|
|
spinlock_t fifo_lock;
|
|
wait_queue_head_t fifo_proc_list;
|
|
struct fasync_struct *fifo_async;
|
|
- int open_count;
|
|
+ local_t open_count;
|
|
int model;
|
|
struct input_dev *input_jog_dev;
|
|
struct input_dev *input_key_dev;
|
|
@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
|
|
static int sonypi_misc_release(struct inode *inode, struct file *file)
|
|
{
|
|
mutex_lock(&sonypi_device.lock);
|
|
- sonypi_device.open_count--;
|
|
+ local_dec(&sonypi_device.open_count);
|
|
mutex_unlock(&sonypi_device.lock);
|
|
return 0;
|
|
}
|
|
@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
|
|
{
|
|
mutex_lock(&sonypi_device.lock);
|
|
/* Flush input queue on first open */
|
|
- if (!sonypi_device.open_count)
|
|
+ if (!local_read(&sonypi_device.open_count))
|
|
kfifo_reset(&sonypi_device.fifo);
|
|
- sonypi_device.open_count++;
|
|
+ local_inc(&sonypi_device.open_count);
|
|
mutex_unlock(&sonypi_device.lock);
|
|
|
|
return 0;
|
|
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
|
|
index 27f8ddf..980dc73 100644
|
|
--- a/drivers/char/tpm/tpm.c
|
|
+++ b/drivers/char/tpm/tpm.c
|
|
@@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
|
|
chip->vendor.req_complete_val)
|
|
goto out_recv;
|
|
|
|
- if ((status == chip->vendor.req_canceled)) {
|
|
+ if (status == chip->vendor.req_canceled) {
|
|
dev_err(chip->dev, "Operation Canceled\n");
|
|
rc = -ECANCELED;
|
|
goto out;
|
|
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
|
|
index 0636520..169c1d0 100644
|
|
--- a/drivers/char/tpm/tpm_bios.c
|
|
+++ b/drivers/char/tpm/tpm_bios.c
|
|
@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
|
|
event = addr;
|
|
|
|
if ((event->event_type == 0 && event->event_size == 0) ||
|
|
- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
|
|
+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
|
|
return NULL;
|
|
|
|
return addr;
|
|
@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
|
|
return NULL;
|
|
|
|
if ((event->event_type == 0 && event->event_size == 0) ||
|
|
- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
|
|
+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
|
|
return NULL;
|
|
|
|
(*pos)++;
|
|
@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
|
|
int i;
|
|
|
|
for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
|
|
- seq_putc(m, data[i]);
|
|
+ if (!seq_putc(m, data[i]))
|
|
+ return -EFAULT;
|
|
|
|
return 0;
|
|
}
|
|
@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
|
|
log->bios_event_log_end = log->bios_event_log + len;
|
|
|
|
virt = acpi_os_map_memory(start, len);
|
|
+ if (!virt) {
|
|
+ kfree(log->bios_event_log);
|
|
+ log->bios_event_log = NULL;
|
|
+ return -EFAULT;
|
|
+ }
|
|
|
|
- memcpy(log->bios_event_log, virt, len);
|
|
+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
|
|
|
|
acpi_os_unmap_memory(virt, len);
|
|
return 0;
|
|
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
|
|
index 96f4a50..72de9a9 100644
|
|
--- a/drivers/char/virtio_console.c
|
|
+++ b/drivers/char/virtio_console.c
|
|
@@ -571,7 +571,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
|
|
if (to_user) {
|
|
ssize_t ret;
|
|
|
|
- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
|
|
+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
|
|
if (ret)
|
|
return -EFAULT;
|
|
} else {
|
|
@@ -674,7 +674,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
|
|
if (!port_has_data(port) && !port->host_connected)
|
|
return 0;
|
|
|
|
- return fill_readbuf(port, ubuf, count, true);
|
|
+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
|
|
}
|
|
|
|
static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
|
|
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
|
|
index 686ac03..8f6ed61 100644
|
|
--- a/drivers/edac/edac_pci_sysfs.c
|
|
+++ b/drivers/edac/edac_pci_sysfs.c
|
|
@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
|
|
static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
|
|
static int edac_pci_poll_msec = 1000; /* one second workq period */
|
|
|
|
-static atomic_t pci_parity_count = ATOMIC_INIT(0);
|
|
-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
|
|
+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
|
|
+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
|
|
|
|
static struct kobject *edac_pci_top_main_kobj;
|
|
static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
|
|
@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
|
|
edac_printk(KERN_CRIT, EDAC_PCI,
|
|
"Signaled System Error on %s\n",
|
|
pci_name(dev));
|
|
- atomic_inc(&pci_nonparity_count);
|
|
+ atomic_inc_unchecked(&pci_nonparity_count);
|
|
}
|
|
|
|
if (status & (PCI_STATUS_PARITY)) {
|
|
@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
|
|
"Master Data Parity Error on %s\n",
|
|
pci_name(dev));
|
|
|
|
- atomic_inc(&pci_parity_count);
|
|
+ atomic_inc_unchecked(&pci_parity_count);
|
|
}
|
|
|
|
if (status & (PCI_STATUS_DETECTED_PARITY)) {
|
|
@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
|
|
"Detected Parity Error on %s\n",
|
|
pci_name(dev));
|
|
|
|
- atomic_inc(&pci_parity_count);
|
|
+ atomic_inc_unchecked(&pci_parity_count);
|
|
}
|
|
}
|
|
|
|
@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
|
|
edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
|
|
"Signaled System Error on %s\n",
|
|
pci_name(dev));
|
|
- atomic_inc(&pci_nonparity_count);
|
|
+ atomic_inc_unchecked(&pci_nonparity_count);
|
|
}
|
|
|
|
if (status & (PCI_STATUS_PARITY)) {
|
|
@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
|
|
"Master Data Parity Error on "
|
|
"%s\n", pci_name(dev));
|
|
|
|
- atomic_inc(&pci_parity_count);
|
|
+ atomic_inc_unchecked(&pci_parity_count);
|
|
}
|
|
|
|
if (status & (PCI_STATUS_DETECTED_PARITY)) {
|
|
@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
|
|
"Detected Parity Error on %s\n",
|
|
pci_name(dev));
|
|
|
|
- atomic_inc(&pci_parity_count);
|
|
+ atomic_inc_unchecked(&pci_parity_count);
|
|
}
|
|
}
|
|
}
|
|
@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
|
|
if (!check_pci_errors)
|
|
return;
|
|
|
|
- before_count = atomic_read(&pci_parity_count);
|
|
+ before_count = atomic_read_unchecked(&pci_parity_count);
|
|
|
|
/* scan all PCI devices looking for a Parity Error on devices and
|
|
* bridges.
|
|
@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
|
|
/* Only if operator has selected panic on PCI Error */
|
|
if (edac_pci_get_panic_on_pe()) {
|
|
/* If the count is different 'after' from 'before' */
|
|
- if (before_count != atomic_read(&pci_parity_count))
|
|
+ if (before_count != atomic_read_unchecked(&pci_parity_count))
|
|
panic("EDAC: PCI Parity Error");
|
|
}
|
|
}
|
|
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
|
|
index c6074c5..88a9e2e 100644
|
|
--- a/drivers/edac/mce_amd.h
|
|
+++ b/drivers/edac/mce_amd.h
|
|
@@ -82,7 +82,7 @@ extern const char * const ii_msgs[];
|
|
struct amd_decoder_ops {
|
|
bool (*dc_mce)(u16, u8);
|
|
bool (*ic_mce)(u16, u8);
|
|
-};
|
|
+} __no_const;
|
|
|
|
void amd_report_gart_errors(bool);
|
|
void amd_register_ecc_decoder(void (*f)(int, struct mce *));
|
|
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
|
|
index cc595eb..4ec702a 100644
|
|
--- a/drivers/firewire/core-card.c
|
|
+++ b/drivers/firewire/core-card.c
|
|
@@ -679,7 +679,7 @@ void fw_card_release(struct kref *kref)
|
|
|
|
void fw_core_remove_card(struct fw_card *card)
|
|
{
|
|
- struct fw_card_driver dummy_driver = dummy_driver_template;
|
|
+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
|
|
|
|
card->driver->update_phy_reg(card, 4,
|
|
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
|
|
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
|
|
index b449572..7e5e7eb 100644
|
|
--- a/drivers/firewire/core-cdev.c
|
|
+++ b/drivers/firewire/core-cdev.c
|
|
@@ -1344,8 +1344,7 @@ static int init_iso_resource(struct client *client,
|
|
int ret;
|
|
|
|
if ((request->channels == 0 && request->bandwidth == 0) ||
|
|
- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
|
|
- request->bandwidth < 0)
|
|
+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
|
|
return -EINVAL;
|
|
|
|
r = kmalloc(sizeof(*r), GFP_KERNEL);
|
|
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
|
|
index dea2dcc..a4fb978 100644
|
|
--- a/drivers/firewire/core-transaction.c
|
|
+++ b/drivers/firewire/core-transaction.c
|
|
@@ -37,6 +37,7 @@
|
|
#include <linux/timer.h>
|
|
#include <linux/types.h>
|
|
#include <linux/workqueue.h>
|
|
+#include <linux/sched.h>
|
|
|
|
#include <asm/byteorder.h>
|
|
|
|
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
|
|
index 9047f55..e47c7ff 100644
|
|
--- a/drivers/firewire/core.h
|
|
+++ b/drivers/firewire/core.h
|
|
@@ -110,6 +110,7 @@ struct fw_card_driver {
|
|
|
|
int (*stop_iso)(struct fw_iso_context *ctx);
|
|
};
|
|
+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
|
|
|
|
void fw_card_initialize(struct fw_card *card,
|
|
const struct fw_card_driver *driver, struct device *device);
|
|
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
|
|
index 4cd392d..4b629e1 100644
|
|
--- a/drivers/firmware/dmi_scan.c
|
|
+++ b/drivers/firmware/dmi_scan.c
|
|
@@ -490,11 +490,6 @@ void __init dmi_scan_machine(void)
|
|
}
|
|
}
|
|
else {
|
|
- /*
|
|
- * no iounmap() for that ioremap(); it would be a no-op, but
|
|
- * it's so early in setup that sucker gets confused into doing
|
|
- * what it shouldn't if we actually call it.
|
|
- */
|
|
p = dmi_ioremap(0xF0000, 0x10000);
|
|
if (p == NULL)
|
|
goto error;
|
|
@@ -769,7 +764,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
|
|
if (buf == NULL)
|
|
return -1;
|
|
|
|
- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
|
|
+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
|
|
|
|
iounmap(buf);
|
|
return 0;
|
|
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
|
|
index 82d5c20..44a7177 100644
|
|
--- a/drivers/gpio/gpio-vr41xx.c
|
|
+++ b/drivers/gpio/gpio-vr41xx.c
|
|
@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
|
|
printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
|
|
maskl, pendl, maskh, pendh);
|
|
|
|
- atomic_inc(&irq_err_count);
|
|
+ atomic_inc_unchecked(&irq_err_count);
|
|
|
|
return -EINVAL;
|
|
}
|
|
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
|
|
index b3abf70..d9ed8cae 100644
|
|
--- a/drivers/gpu/drm/drm_crtc_helper.c
|
|
+++ b/drivers/gpu/drm/drm_crtc_helper.c
|
|
@@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
|
|
struct drm_crtc *tmp;
|
|
int crtc_mask = 1;
|
|
|
|
- WARN(!crtc, "checking null crtc?\n");
|
|
+ BUG_ON(!crtc);
|
|
|
|
dev = crtc->dev;
|
|
|
|
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
|
|
index e9f1ef5..082c9d0 100644
|
|
--- a/drivers/gpu/drm/drm_drv.c
|
|
+++ b/drivers/gpu/drm/drm_drv.c
|
|
@@ -316,7 +316,7 @@ module_exit(drm_core_exit);
|
|
/**
|
|
* Copy and IOCTL return string to user space
|
|
*/
|
|
-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
|
|
+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
|
|
{
|
|
int len;
|
|
|
|
@@ -399,7 +399,7 @@ long drm_ioctl(struct file *filp,
|
|
return -ENODEV;
|
|
|
|
atomic_inc(&dev->ioctl_count);
|
|
- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
|
|
+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
|
|
++file_priv->ioctl_count;
|
|
|
|
DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
|
|
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
|
|
index b90abff..b09cbb5 100644
|
|
--- a/drivers/gpu/drm/drm_fops.c
|
|
+++ b/drivers/gpu/drm/drm_fops.c
|
|
@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
|
|
}
|
|
|
|
for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
|
|
- atomic_set(&dev->counts[i], 0);
|
|
+ atomic_set_unchecked(&dev->counts[i], 0);
|
|
|
|
dev->sigdata.lock = NULL;
|
|
|
|
@@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
|
|
|
|
mutex_lock(&drm_global_mutex);
|
|
|
|
- DRM_DEBUG("open_count = %d\n", dev->open_count);
|
|
+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
|
|
|
|
if (dev->driver->preclose)
|
|
dev->driver->preclose(dev, file_priv);
|
|
@@ -494,10 +494,10 @@ int drm_release(struct inode *inode, struct file *filp)
|
|
* Begin inline drm_release
|
|
*/
|
|
|
|
- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
|
|
+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
|
|
task_pid_nr(current),
|
|
(long)old_encode_dev(file_priv->minor->device),
|
|
- dev->open_count);
|
|
+ local_read(&dev->open_count));
|
|
|
|
/* Release any auth tokens that might point to this file_priv,
|
|
(do that under the drm_global_mutex) */
|
|
@@ -587,8 +587,8 @@ int drm_release(struct inode *inode, struct file *filp)
|
|
* End inline drm_release
|
|
*/
|
|
|
|
- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
|
|
- if (!--dev->open_count) {
|
|
+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
|
|
+ if (local_dec_and_test(&dev->open_count)) {
|
|
if (atomic_read(&dev->ioctl_count)) {
|
|
DRM_ERROR("Device busy: %d\n",
|
|
atomic_read(&dev->ioctl_count));
|
|
diff --git a/drivers/gpu/drm/drm_fops.c.rej b/drivers/gpu/drm/drm_fops.c.rej
|
|
new file mode 100644
|
|
index 0000000..c38ea83
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/drm_fops.c.rej
|
|
@@ -0,0 +1,13 @@
|
|
+--- drivers/gpu/drm/drm_fops.c 2012-05-21 11:33:00.935927848 +0200
|
|
++++ drivers/gpu/drm/drm_fops.c 2012-05-21 12:10:10.048048917 +0200
|
|
+@@ -138,8 +138,8 @@ int drm_open(struct inode *inode, struct
|
|
+
|
|
+ retcode = drm_open_helper(inode, filp, dev);
|
|
+ if (!retcode) {
|
|
+- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
|
|
+- if (!dev->open_count++)
|
|
++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
|
|
++ if (local_inc_return(&dev->open_count) == 1)
|
|
+ retcode = drm_setup(dev);
|
|
+ }
|
|
+ if (!retcode) {
|
|
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
|
|
index c87dc96..326055d 100644
|
|
--- a/drivers/gpu/drm/drm_global.c
|
|
+++ b/drivers/gpu/drm/drm_global.c
|
|
@@ -36,7 +36,7 @@
|
|
struct drm_global_item {
|
|
struct mutex mutex;
|
|
void *object;
|
|
- int refcount;
|
|
+ atomic_t refcount;
|
|
};
|
|
|
|
static struct drm_global_item glob[DRM_GLOBAL_NUM];
|
|
@@ -49,7 +49,7 @@ void drm_global_init(void)
|
|
struct drm_global_item *item = &glob[i];
|
|
mutex_init(&item->mutex);
|
|
item->object = NULL;
|
|
- item->refcount = 0;
|
|
+ atomic_set(&item->refcount, 0);
|
|
}
|
|
}
|
|
|
|
@@ -59,7 +59,7 @@ void drm_global_release(void)
|
|
for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
|
|
struct drm_global_item *item = &glob[i];
|
|
BUG_ON(item->object != NULL);
|
|
- BUG_ON(item->refcount != 0);
|
|
+ BUG_ON(atomic_read(&item->refcount) != 0);
|
|
}
|
|
}
|
|
|
|
@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
|
|
void *object;
|
|
|
|
mutex_lock(&item->mutex);
|
|
- if (item->refcount == 0) {
|
|
+ if (atomic_read(&item->refcount) == 0) {
|
|
item->object = kzalloc(ref->size, GFP_KERNEL);
|
|
if (unlikely(item->object == NULL)) {
|
|
ret = -ENOMEM;
|
|
@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
|
|
goto out_err;
|
|
|
|
}
|
|
- ++item->refcount;
|
|
+ atomic_inc(&item->refcount);
|
|
ref->object = item->object;
|
|
object = item->object;
|
|
mutex_unlock(&item->mutex);
|
|
@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
|
|
struct drm_global_item *item = &glob[ref->global_type];
|
|
|
|
mutex_lock(&item->mutex);
|
|
- BUG_ON(item->refcount == 0);
|
|
+ BUG_ON(atomic_read(&item->refcount) == 0);
|
|
BUG_ON(ref->object != item->object);
|
|
- if (--item->refcount == 0) {
|
|
+ if (atomic_dec_and_test(&item->refcount)) {
|
|
ref->release(ref);
|
|
item->object = NULL;
|
|
}
|
|
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
|
|
index ab1162d..6555447cb 100644
|
|
--- a/drivers/gpu/drm/drm_info.c
|
|
+++ b/drivers/gpu/drm/drm_info.c
|
|
@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
|
|
struct drm_local_map *map;
|
|
struct drm_map_list *r_list;
|
|
|
|
- /* Hardcoded from _DRM_FRAME_BUFFER,
|
|
- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
|
|
- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
|
|
- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
|
|
+ static const char * const types[] = {
|
|
+ [_DRM_FRAME_BUFFER] = "FB",
|
|
+ [_DRM_REGISTERS] = "REG",
|
|
+ [_DRM_SHM] = "SHM",
|
|
+ [_DRM_AGP] = "AGP",
|
|
+ [_DRM_SCATTER_GATHER] = "SG",
|
|
+ [_DRM_CONSISTENT] = "PCI",
|
|
+ [_DRM_GEM] = "GEM" };
|
|
const char *type;
|
|
int i;
|
|
|
|
@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
|
|
map = r_list->map;
|
|
if (!map)
|
|
continue;
|
|
- if (map->type < 0 || map->type > 5)
|
|
+ if (map->type >= ARRAY_SIZE(types))
|
|
type = "??";
|
|
else
|
|
type = types[map->type];
|
|
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
|
|
index 637fcc3..e890b33 100644
|
|
--- a/drivers/gpu/drm/drm_ioc32.c
|
|
+++ b/drivers/gpu/drm/drm_ioc32.c
|
|
@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
|
|
request = compat_alloc_user_space(nbytes);
|
|
if (!access_ok(VERIFY_WRITE, request, nbytes))
|
|
return -EFAULT;
|
|
- list = (struct drm_buf_desc *) (request + 1);
|
|
+ list = (struct drm_buf_desc __user *) (request + 1);
|
|
|
|
if (__put_user(count, &request->count)
|
|
|| __put_user(list, &request->list))
|
|
@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
|
|
request = compat_alloc_user_space(nbytes);
|
|
if (!access_ok(VERIFY_WRITE, request, nbytes))
|
|
return -EFAULT;
|
|
- list = (struct drm_buf_pub *) (request + 1);
|
|
+ list = (struct drm_buf_pub __user *) (request + 1);
|
|
|
|
if (__put_user(count, &request->count)
|
|
|| __put_user(list, &request->list))
|
|
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
|
|
index cf85155..f2665cb 100644
|
|
--- a/drivers/gpu/drm/drm_ioctl.c
|
|
+++ b/drivers/gpu/drm/drm_ioctl.c
|
|
@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
|
|
stats->data[i].value =
|
|
(file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
|
|
else
|
|
- stats->data[i].value = atomic_read(&dev->counts[i]);
|
|
+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
|
|
stats->data[i].type = dev->types[i];
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
|
|
index c79c713..2048588 100644
|
|
--- a/drivers/gpu/drm/drm_lock.c
|
|
+++ b/drivers/gpu/drm/drm_lock.c
|
|
@@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
|
|
if (drm_lock_take(&master->lock, lock->context)) {
|
|
master->lock.file_priv = file_priv;
|
|
master->lock.lock_time = jiffies;
|
|
- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
|
|
+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
|
|
break; /* Got lock */
|
|
}
|
|
|
|
@@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
|
|
return -EINVAL;
|
|
}
|
|
|
|
- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
|
|
+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
|
|
|
|
if (drm_lock_free(&master->lock, lock->context)) {
|
|
/* FIXME: Should really bail out here. */
|
|
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
|
|
index aa454f8..6d38580 100644
|
|
--- a/drivers/gpu/drm/drm_stub.c
|
|
+++ b/drivers/gpu/drm/drm_stub.c
|
|
@@ -512,7 +512,7 @@ void drm_unplug_dev(struct drm_device *dev)
|
|
|
|
drm_device_set_unplugged(dev);
|
|
|
|
- if (dev->open_count == 0) {
|
|
+ if (local_read(&dev->open_count) == 0) {
|
|
drm_put_dev(dev);
|
|
}
|
|
mutex_unlock(&drm_global_mutex);
|
|
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
|
|
index f920fb5..001c52d 100644
|
|
--- a/drivers/gpu/drm/i810/i810_dma.c
|
|
+++ b/drivers/gpu/drm/i810/i810_dma.c
|
|
@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
|
|
dma->buflist[vertex->idx],
|
|
vertex->discard, vertex->used);
|
|
|
|
- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
|
|
- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
|
|
+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
|
|
+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
|
|
sarea_priv->last_enqueue = dev_priv->counter - 1;
|
|
sarea_priv->last_dispatch = (int)hw_status[5];
|
|
|
|
@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
|
|
i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
|
|
mc->last_render);
|
|
|
|
- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
|
|
- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
|
|
+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
|
|
+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
|
|
sarea_priv->last_enqueue = dev_priv->counter - 1;
|
|
sarea_priv->last_dispatch = (int)hw_status[5];
|
|
|
|
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
|
|
index c9339f4..f5e1b9d 100644
|
|
--- a/drivers/gpu/drm/i810/i810_drv.h
|
|
+++ b/drivers/gpu/drm/i810/i810_drv.h
|
|
@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
|
|
int page_flipping;
|
|
|
|
wait_queue_head_t irq_queue;
|
|
- atomic_t irq_received;
|
|
- atomic_t irq_emitted;
|
|
+ atomic_unchecked_t irq_received;
|
|
+ atomic_unchecked_t irq_emitted;
|
|
|
|
int front_offset;
|
|
} drm_i810_private_t;
|
|
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
|
|
index 39f8111..16f3e5a 100644
|
|
--- a/drivers/gpu/drm/i915/i915_debugfs.c
|
|
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
|
|
@@ -501,7 +501,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
|
|
I915_READ(GTIMR));
|
|
}
|
|
seq_printf(m, "Interrupts received: %d\n",
|
|
- atomic_read(&dev_priv->irq_received));
|
|
+ atomic_read_unchecked(&dev_priv->irq_received));
|
|
for (i = 0; i < I915_NUM_RINGS; i++) {
|
|
if (IS_GEN6(dev) || IS_GEN7(dev)) {
|
|
seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
|
|
@@ -1315,7 +1315,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
|
|
return ret;
|
|
|
|
if (opregion->header)
|
|
- seq_write(m, opregion->header, OPREGION_SIZE);
|
|
+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
|
|
|
|
mutex_unlock(&dev->struct_mutex);
|
|
|
|
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
|
|
index 38c0a47..e61f898 100644
|
|
--- a/drivers/gpu/drm/i915/i915_dma.c
|
|
+++ b/drivers/gpu/drm/i915/i915_dma.c
|
|
@@ -1178,7 +1178,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
|
|
bool can_switch;
|
|
|
|
spin_lock(&dev->count_lock);
|
|
- can_switch = (dev->open_count == 0);
|
|
+ can_switch = (local_read(&dev->open_count) == 0);
|
|
spin_unlock(&dev->count_lock);
|
|
return can_switch;
|
|
}
|
|
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
|
|
index a8f00d0..65410e5 100644
|
|
--- a/drivers/gpu/drm/i915/i915_drv.h
|
|
+++ b/drivers/gpu/drm/i915/i915_drv.h
|
|
@@ -240,7 +240,7 @@ struct drm_i915_display_funcs {
|
|
/* render clock increase/decrease */
|
|
/* display clock increase/decrease */
|
|
/* pll clock increase/decrease */
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct intel_device_info {
|
|
u8 gen;
|
|
@@ -353,7 +353,7 @@ typedef struct drm_i915_private {
|
|
int current_page;
|
|
int page_flipping;
|
|
|
|
- atomic_t irq_received;
|
|
+ atomic_unchecked_t irq_received;
|
|
|
|
/* protects the irq masks */
|
|
spinlock_t irq_lock;
|
|
@@ -942,7 +942,7 @@ struct drm_i915_gem_object {
|
|
* will be page flipped away on the next vblank. When it
|
|
* reaches 0, dev_priv->pending_flip_queue will be woken up.
|
|
*/
|
|
- atomic_t pending_flip;
|
|
+ atomic_unchecked_t pending_flip;
|
|
};
|
|
|
|
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
|
|
@@ -1366,7 +1366,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
|
|
extern void intel_teardown_gmbus(struct drm_device *dev);
|
|
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
|
|
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
|
|
-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
|
|
+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
|
|
{
|
|
return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
|
|
}
|
|
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
|
|
index e97ed61..de46c99 100644
|
|
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
|
|
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
|
|
@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
|
|
i915_gem_clflush_object(obj);
|
|
|
|
if (obj->base.pending_write_domain)
|
|
- cd->flips |= atomic_read(&obj->pending_flip);
|
|
+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
|
|
|
|
/* The actual obj->write_domain will be updated with
|
|
* pending_write_domain after we emit the accumulated flush for all
|
|
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c.rej b/drivers/gpu/drm/i915/i915_gem_execbuffer.c.rej
|
|
new file mode 100644
|
|
index 0000000..b147b40
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c.rej
|
|
@@ -0,0 +1,14 @@
|
|
+--- drivers/gpu/drm/i915/i915_gem_execbuffer.c 2012-05-21 11:33:01.339927870 +0200
|
|
++++ drivers/gpu/drm/i915/i915_gem_execbuffer.c 2012-05-21 12:10:10.076048918 +0200
|
|
+@@ -933,9 +933,9 @@ i915_gem_check_execbuffer(struct drm_i91
|
|
+
|
|
+ static int
|
|
+ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
|
|
+- int count)
|
|
++ unsigned int count)
|
|
+ {
|
|
+- int i;
|
|
++ unsigned int i;
|
|
+
|
|
+ for (i = 0; i < count; i++) {
|
|
+ char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
|
|
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
|
|
index fc6f32a..35d7caf 100644
|
|
--- a/drivers/gpu/drm/i915/i915_irq.c
|
|
+++ b/drivers/gpu/drm/i915/i915_irq.c
|
|
@@ -496,7 +496,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
|
|
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
|
|
struct drm_i915_master_private *master_priv;
|
|
|
|
- atomic_inc(&dev_priv->irq_received);
|
|
+ atomic_inc_unchecked(&dev_priv->irq_received);
|
|
|
|
/* disable master interrupt before clearing iir */
|
|
de_ier = I915_READ(DEIER);
|
|
@@ -579,7 +579,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
|
|
struct drm_i915_master_private *master_priv;
|
|
u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
|
|
|
|
- atomic_inc(&dev_priv->irq_received);
|
|
+ atomic_inc_unchecked(&dev_priv->irq_received);
|
|
|
|
if (IS_GEN6(dev))
|
|
bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
|
|
@@ -1293,7 +1293,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
|
|
int ret = IRQ_NONE, pipe;
|
|
bool blc_event = false;
|
|
|
|
- atomic_inc(&dev_priv->irq_received);
|
|
+ atomic_inc_unchecked(&dev_priv->irq_received);
|
|
|
|
iir = I915_READ(IIR);
|
|
|
|
@@ -1804,7 +1804,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
|
|
{
|
|
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
|
|
|
|
- atomic_set(&dev_priv->irq_received, 0);
|
|
+ atomic_set_unchecked(&dev_priv->irq_received, 0);
|
|
|
|
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
|
|
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
|
|
@@ -1981,7 +1981,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
|
|
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
|
|
int pipe;
|
|
|
|
- atomic_set(&dev_priv->irq_received, 0);
|
|
+ atomic_set_unchecked(&dev_priv->irq_received, 0);
|
|
|
|
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
|
|
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
|
|
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
|
|
index c975c99..915b222 100644
|
|
--- a/drivers/gpu/drm/i915/intel_display.c
|
|
+++ b/drivers/gpu/drm/i915/intel_display.c
|
|
@@ -2273,7 +2273,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
|
|
|
|
wait_event(dev_priv->pending_flip_queue,
|
|
atomic_read(&dev_priv->mm.wedged) ||
|
|
- atomic_read(&obj->pending_flip) == 0);
|
|
+ atomic_read_unchecked(&obj->pending_flip) == 0);
|
|
|
|
/* Big Hammer, we also need to ensure that any pending
|
|
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
|
|
@@ -7645,7 +7645,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
|
|
/* Block clients from rendering to the new back buffer until
|
|
* the flip occurs and the object is no longer visible.
|
|
*/
|
|
- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
|
|
+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
|
|
|
|
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
|
|
if (ret)
|
|
diff --git a/drivers/gpu/drm/i915/intel_display.c.rej b/drivers/gpu/drm/i915/intel_display.c.rej
|
|
new file mode 100644
|
|
index 0000000..8b5bc41
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/i915/intel_display.c.rej
|
|
@@ -0,0 +1,32 @@
|
|
+--- drivers/gpu/drm/i915/intel_display.c 2012-06-11 19:12:28.596366966 +0200
|
|
++++ drivers/gpu/drm/i915/intel_display.c 2012-07-04 19:33:49.496065642 +0200
|
|
+@@ -2919,7 +2919,7 @@ static void intel_crtc_wait_for_pending_
|
|
+ obj = to_intel_framebuffer(crtc->fb)->obj;
|
|
+ dev_priv = crtc->dev->dev_private;
|
|
+ wait_event(dev_priv->pending_flip_queue,
|
|
+- atomic_read(&obj->pending_flip) == 0);
|
|
++ atomic_read_unchecked(&obj->pending_flip) == 0);
|
|
+ }
|
|
+
|
|
+ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
|
|
+@@ -7284,9 +7284,8 @@ static void do_intel_finish_page_flip(st
|
|
+
|
|
+ obj = work->old_fb_obj;
|
|
+
|
|
+- atomic_clear_mask(1 << intel_crtc->plane,
|
|
+- &obj->pending_flip.counter);
|
|
+- if (atomic_read(&obj->pending_flip) == 0)
|
|
++ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
|
|
++ if (atomic_read_unchecked(&obj->pending_flip) == 0)
|
|
+ wake_up(&dev_priv->pending_flip_queue);
|
|
+
|
|
+ schedule_work(&work->work);
|
|
+@@ -7596,7 +7595,7 @@ static int intel_crtc_page_flip(struct d
|
|
+ return 0;
|
|
+
|
|
+ cleanup_pending:
|
|
+- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
|
|
++ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
|
|
+ drm_gem_object_unreference(&work->old_fb_obj->base);
|
|
+ drm_gem_object_unreference(&obj->base);
|
|
+ mutex_unlock(&dev->struct_mutex);
|
|
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
|
|
index 54558a0..2d97005 100644
|
|
--- a/drivers/gpu/drm/mga/mga_drv.h
|
|
+++ b/drivers/gpu/drm/mga/mga_drv.h
|
|
@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
|
|
u32 clear_cmd;
|
|
u32 maccess;
|
|
|
|
- atomic_t vbl_received; /**< Number of vblanks received. */
|
|
+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
|
|
wait_queue_head_t fence_queue;
|
|
- atomic_t last_fence_retired;
|
|
+ atomic_unchecked_t last_fence_retired;
|
|
u32 next_fence_to_post;
|
|
|
|
unsigned int fb_cpp;
|
|
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
|
|
index 2581202..f230a8d9 100644
|
|
--- a/drivers/gpu/drm/mga/mga_irq.c
|
|
+++ b/drivers/gpu/drm/mga/mga_irq.c
|
|
@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
|
|
if (crtc != 0)
|
|
return 0;
|
|
|
|
- return atomic_read(&dev_priv->vbl_received);
|
|
+ return atomic_read_unchecked(&dev_priv->vbl_received);
|
|
}
|
|
|
|
|
|
@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
|
|
/* VBLANK interrupt */
|
|
if (status & MGA_VLINEPEN) {
|
|
MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
|
|
- atomic_inc(&dev_priv->vbl_received);
|
|
+ atomic_inc_unchecked(&dev_priv->vbl_received);
|
|
drm_handle_vblank(dev, 0);
|
|
handled = 1;
|
|
}
|
|
@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
|
|
if ((prim_start & ~0x03) != (prim_end & ~0x03))
|
|
MGA_WRITE(MGA_PRIMEND, prim_end);
|
|
|
|
- atomic_inc(&dev_priv->last_fence_retired);
|
|
+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
|
|
DRM_WAKEUP(&dev_priv->fence_queue);
|
|
handled = 1;
|
|
}
|
|
@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
|
|
* using fences.
|
|
*/
|
|
DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
|
|
- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
|
|
+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
|
|
- *sequence) <= (1 << 23)));
|
|
|
|
*sequence = cur_fence;
|
|
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
|
|
index 0be4a81..7464804 100644
|
|
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
|
|
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
|
|
@@ -5329,7 +5329,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
|
|
struct bit_table {
|
|
const char id;
|
|
int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
|
|
-};
|
|
+} __no_const;
|
|
|
|
#define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
|
|
|
|
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
|
|
index 3aef353..0ad1322 100644
|
|
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
|
|
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
|
|
@@ -240,7 +240,7 @@ struct nouveau_channel {
|
|
struct list_head pending;
|
|
uint32_t sequence;
|
|
uint32_t sequence_ack;
|
|
- atomic_t last_sequence_irq;
|
|
+ atomic_unchecked_t last_sequence_irq;
|
|
struct nouveau_vma vma;
|
|
} fence;
|
|
|
|
@@ -321,7 +321,7 @@ struct nouveau_exec_engine {
|
|
u32 handle, u16 class);
|
|
void (*set_tile_region)(struct drm_device *dev, int i);
|
|
void (*tlb_flush)(struct drm_device *, int engine);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct nouveau_instmem_engine {
|
|
void *priv;
|
|
@@ -343,13 +343,13 @@ struct nouveau_instmem_engine {
|
|
struct nouveau_mc_engine {
|
|
int (*init)(struct drm_device *dev);
|
|
void (*takedown)(struct drm_device *dev);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct nouveau_timer_engine {
|
|
int (*init)(struct drm_device *dev);
|
|
void (*takedown)(struct drm_device *dev);
|
|
uint64_t (*read)(struct drm_device *dev);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct nouveau_fb_engine {
|
|
int num_tiles;
|
|
@@ -590,7 +590,7 @@ struct nouveau_vram_engine {
|
|
void (*put)(struct drm_device *, struct nouveau_mem **);
|
|
|
|
bool (*flags_valid)(struct drm_device *, u32 tile_flags);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct nouveau_engine {
|
|
struct nouveau_instmem_engine instmem;
|
|
@@ -739,7 +739,7 @@ struct drm_nouveau_private {
|
|
struct drm_global_reference mem_global_ref;
|
|
struct ttm_bo_global_ref bo_global_ref;
|
|
struct ttm_bo_device bdev;
|
|
- atomic_t validate_sequence;
|
|
+ atomic_unchecked_t validate_sequence;
|
|
} ttm;
|
|
|
|
struct {
|
|
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
|
|
index c1dc20f..4df673c7 100644
|
|
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
|
|
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
|
|
@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
|
|
if (USE_REFCNT(dev))
|
|
sequence = nvchan_rd32(chan, 0x48);
|
|
else
|
|
- sequence = atomic_read(&chan->fence.last_sequence_irq);
|
|
+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
|
|
|
|
if (chan->fence.sequence_ack == sequence)
|
|
goto out;
|
|
@@ -538,7 +538,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
|
|
return ret;
|
|
}
|
|
|
|
- atomic_set(&chan->fence.last_sequence_irq, 0);
|
|
+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
|
|
index 2f46bbf..11832c6 100644
|
|
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
|
|
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
|
|
@@ -315,7 +315,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
|
|
int trycnt = 0;
|
|
int ret, i;
|
|
|
|
- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
|
|
+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
|
|
retry:
|
|
if (++trycnt > 100000) {
|
|
NV_ERROR(dev, "%s failed and gave up.\n", __func__);
|
|
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
|
|
index b096cf2..2bf3273 100644
|
|
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
|
|
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
|
|
@@ -590,7 +590,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
|
|
bool can_switch;
|
|
|
|
spin_lock(&dev->count_lock);
|
|
- can_switch = (dev->open_count == 0);
|
|
+ can_switch = (local_read(&dev->open_count) == 0);
|
|
spin_unlock(&dev->count_lock);
|
|
return can_switch;
|
|
}
|
|
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
|
|
index dbdea8e..cd6eeeb 100644
|
|
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
|
|
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
|
|
@@ -554,7 +554,7 @@ static int
|
|
nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
|
|
u32 class, u32 mthd, u32 data)
|
|
{
|
|
- atomic_set(&chan->fence.last_sequence_irq, data);
|
|
+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
|
|
index 2746402..c8dc4a4 100644
|
|
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
|
|
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
|
|
@@ -304,7 +304,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
|
|
}
|
|
|
|
if (nv_encoder->dcb->type == OUTPUT_DP) {
|
|
- struct dp_train_func func = {
|
|
+ static struct dp_train_func func = {
|
|
.link_set = nv50_sor_dp_link_set,
|
|
.train_set = nv50_sor_dp_train_set,
|
|
.train_adj = nv50_sor_dp_train_adj
|
|
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
|
|
index 8a555fb..2743fe6 100644
|
|
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
|
|
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
|
|
@@ -1366,7 +1366,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
|
|
nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
|
|
|
|
if (nv_encoder->dcb->type == OUTPUT_DP) {
|
|
- struct dp_train_func func = {
|
|
+ static struct dp_train_func func = {
|
|
.link_set = nvd0_sor_dp_link_set,
|
|
.train_set = nvd0_sor_dp_train_set,
|
|
.train_adj = nvd0_sor_dp_train_adj
|
|
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
|
|
index bcac90b..53bfc76 100644
|
|
--- a/drivers/gpu/drm/r128/r128_cce.c
|
|
+++ b/drivers/gpu/drm/r128/r128_cce.c
|
|
@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
|
|
|
|
/* GH: Simple idle check.
|
|
*/
|
|
- atomic_set(&dev_priv->idle_count, 0);
|
|
+ atomic_set_unchecked(&dev_priv->idle_count, 0);
|
|
|
|
/* We don't support anything other than bus-mastering ring mode,
|
|
* but the ring can be in either AGP or PCI space for the ring
|
|
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
|
|
index 930c71b..499aded 100644
|
|
--- a/drivers/gpu/drm/r128/r128_drv.h
|
|
+++ b/drivers/gpu/drm/r128/r128_drv.h
|
|
@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
|
|
int is_pci;
|
|
unsigned long cce_buffers_offset;
|
|
|
|
- atomic_t idle_count;
|
|
+ atomic_unchecked_t idle_count;
|
|
|
|
int page_flipping;
|
|
int current_page;
|
|
u32 crtc_offset;
|
|
u32 crtc_offset_cntl;
|
|
|
|
- atomic_t vbl_received;
|
|
+ atomic_unchecked_t vbl_received;
|
|
|
|
u32 color_fmt;
|
|
unsigned int front_offset;
|
|
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
|
|
index 429d5a0..7e899ed 100644
|
|
--- a/drivers/gpu/drm/r128/r128_irq.c
|
|
+++ b/drivers/gpu/drm/r128/r128_irq.c
|
|
@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
|
|
if (crtc != 0)
|
|
return 0;
|
|
|
|
- return atomic_read(&dev_priv->vbl_received);
|
|
+ return atomic_read_unchecked(&dev_priv->vbl_received);
|
|
}
|
|
|
|
irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
|
|
@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
|
|
/* VBLANK interrupt */
|
|
if (status & R128_CRTC_VBLANK_INT) {
|
|
R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
|
|
- atomic_inc(&dev_priv->vbl_received);
|
|
+ atomic_inc_unchecked(&dev_priv->vbl_received);
|
|
drm_handle_vblank(dev, 0);
|
|
return IRQ_HANDLED;
|
|
}
|
|
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
|
|
index a9e33ce..09edd4b 100644
|
|
--- a/drivers/gpu/drm/r128/r128_state.c
|
|
+++ b/drivers/gpu/drm/r128/r128_state.c
|
|
@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
|
|
|
|
static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
|
|
{
|
|
- if (atomic_read(&dev_priv->idle_count) == 0)
|
|
+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
|
|
r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
|
|
else
|
|
- atomic_set(&dev_priv->idle_count, 0);
|
|
+ atomic_set_unchecked(&dev_priv->idle_count, 0);
|
|
}
|
|
|
|
#endif
|
|
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
|
|
index 5a82b6b..9e69c73 100644
|
|
--- a/drivers/gpu/drm/radeon/mkregtable.c
|
|
+++ b/drivers/gpu/drm/radeon/mkregtable.c
|
|
@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
|
|
regex_t mask_rex;
|
|
regmatch_t match[4];
|
|
char buf[1024];
|
|
- size_t end;
|
|
+ long end;
|
|
int len;
|
|
int done = 0;
|
|
int r;
|
|
unsigned o;
|
|
struct offset *offset;
|
|
char last_reg_s[10];
|
|
- int last_reg;
|
|
+ unsigned long last_reg;
|
|
|
|
if (regcomp
|
|
(&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
|
|
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
|
|
index 66150f0..b2b91fa 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon.h
|
|
+++ b/drivers/gpu/drm/radeon/radeon.h
|
|
@@ -238,7 +238,7 @@ struct radeon_fence_driver {
|
|
uint32_t scratch_reg;
|
|
uint64_t gpu_addr;
|
|
volatile uint32_t *cpu_addr;
|
|
- atomic_t seq;
|
|
+ atomic_unchecked_t seq;
|
|
uint32_t last_seq;
|
|
unsigned long last_jiffies;
|
|
unsigned long last_timeout;
|
|
@@ -738,7 +738,7 @@ struct r600_blit_cp_primitives {
|
|
int x2, int y2);
|
|
void (*draw_auto)(struct radeon_device *rdev);
|
|
void (*set_default_state)(struct radeon_device *rdev);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct r600_blit {
|
|
struct mutex mutex;
|
|
@@ -1231,7 +1231,7 @@ struct radeon_asic {
|
|
u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
|
|
void (*post_page_flip)(struct radeon_device *rdev, int crtc);
|
|
} pflip;
|
|
-};
|
|
+} __no_const;
|
|
|
|
/*
|
|
* Asic structures
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
|
|
index 2f555d7..5b977bd 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_device.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_device.c
|
|
@@ -691,7 +691,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
|
|
bool can_switch;
|
|
|
|
spin_lock(&dev->count_lock);
|
|
- can_switch = (dev->open_count == 0);
|
|
+ can_switch = (local_read(&dev->open_count) == 0);
|
|
spin_unlock(&dev->count_lock);
|
|
return can_switch;
|
|
}
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
|
|
index a1b59ca..86f2d44 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_drv.h
|
|
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
|
|
@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
|
|
|
|
/* SW interrupt */
|
|
wait_queue_head_t swi_queue;
|
|
- atomic_t swi_emitted;
|
|
+ atomic_unchecked_t swi_emitted;
|
|
int vblank_crtc;
|
|
uint32_t irq_enable_reg;
|
|
uint32_t r500_disp_irq_reg;
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
|
|
index 4bd36a3..e66fe9c 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_fence.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
|
|
@@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
|
|
write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
|
|
return 0;
|
|
}
|
|
- fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
|
|
+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
|
|
if (!rdev->ring[fence->ring].ready)
|
|
/* FIXME: cp is not running assume everythings is done right
|
|
* away
|
|
@@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
|
|
}
|
|
rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
|
|
rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
|
|
- radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
|
|
+ radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
|
|
rdev->fence_drv[ring].initialized = true;
|
|
DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
|
|
ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
|
|
@@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
|
|
rdev->fence_drv[ring].scratch_reg = -1;
|
|
rdev->fence_drv[ring].cpu_addr = NULL;
|
|
rdev->fence_drv[ring].gpu_addr = 0;
|
|
- atomic_set(&rdev->fence_drv[ring].seq, 0);
|
|
+ atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
|
|
INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
|
|
INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
|
|
INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
|
|
index 48b7cea..342236f 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
|
|
@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
|
|
request = compat_alloc_user_space(sizeof(*request));
|
|
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|
|
|| __put_user(req32.param, &request->param)
|
|
- || __put_user((void __user *)(unsigned long)req32.value,
|
|
+ || __put_user((unsigned long)req32.value,
|
|
&request->value))
|
|
return -EFAULT;
|
|
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
|
|
index 00da384..32f972d 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_irq.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
|
|
@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
|
|
unsigned int ret;
|
|
RING_LOCALS;
|
|
|
|
- atomic_inc(&dev_priv->swi_emitted);
|
|
- ret = atomic_read(&dev_priv->swi_emitted);
|
|
+ atomic_inc_unchecked(&dev_priv->swi_emitted);
|
|
+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
|
|
|
|
BEGIN_RING(4);
|
|
OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
|
|
@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
|
|
drm_radeon_private_t *dev_priv =
|
|
(drm_radeon_private_t *) dev->dev_private;
|
|
|
|
- atomic_set(&dev_priv->swi_emitted, 0);
|
|
+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
|
|
DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
|
|
|
|
dev->max_vblank_count = 0x001fffff;
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
|
|
index e8422ae..d22d4a8 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_state.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_state.c
|
|
@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
|
|
if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
|
|
sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
|
|
|
|
- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
|
|
+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
|
|
sarea_priv->nbox * sizeof(depth_boxes[0])))
|
|
return -EFAULT;
|
|
|
|
@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
|
|
{
|
|
drm_radeon_private_t *dev_priv = dev->dev_private;
|
|
drm_radeon_getparam_t *param = data;
|
|
- int value;
|
|
+ int value = 0;
|
|
|
|
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
|
|
|
|
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
|
|
index 15042d0..69ba2d8 100644
|
|
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
|
|
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
|
|
@@ -843,8 +843,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
|
|
}
|
|
if (unlikely(ttm_vm_ops == NULL)) {
|
|
ttm_vm_ops = vma->vm_ops;
|
|
- radeon_ttm_vm_ops = *ttm_vm_ops;
|
|
- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
|
|
+ pax_open_kernel();
|
|
+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
|
|
+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
|
|
+ pax_close_kernel();
|
|
}
|
|
vma->vm_ops = &radeon_ttm_vm_ops;
|
|
return 0;
|
|
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
|
|
index c46900c..15f10de 100644
|
|
--- a/drivers/gpu/drm/radeon/rs690.c
|
|
+++ b/drivers/gpu/drm/radeon/rs690.c
|
|
@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
|
|
if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
|
|
rdev->pm.sideport_bandwidth.full)
|
|
rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
|
|
- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
|
|
+ read_delay_latency.full = dfixed_const(800 * 1000);
|
|
read_delay_latency.full = dfixed_div(read_delay_latency,
|
|
rdev->pm.igp_sideport_mclk);
|
|
+ a.full = dfixed_const(370);
|
|
+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
|
|
} else {
|
|
if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
|
|
rdev->pm.k8_bandwidth.full)
|
|
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
|
|
index 578207ec..1073f25 100644
|
|
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
|
|
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
|
|
@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
|
|
static int ttm_pool_mm_shrink(struct shrinker *shrink,
|
|
struct shrink_control *sc)
|
|
{
|
|
- static atomic_t start_pool = ATOMIC_INIT(0);
|
|
+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
|
|
unsigned i;
|
|
- unsigned pool_offset = atomic_add_return(1, &start_pool);
|
|
+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
|
|
struct ttm_page_pool *pool;
|
|
int shrink_pages = sc->nr_to_scan;
|
|
|
|
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
|
|
index 88edacc..1e5412b 100644
|
|
--- a/drivers/gpu/drm/via/via_drv.h
|
|
+++ b/drivers/gpu/drm/via/via_drv.h
|
|
@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
|
|
typedef uint32_t maskarray_t[5];
|
|
|
|
typedef struct drm_via_irq {
|
|
- atomic_t irq_received;
|
|
+ atomic_unchecked_t irq_received;
|
|
uint32_t pending_mask;
|
|
uint32_t enable_mask;
|
|
wait_queue_head_t irq_queue;
|
|
@@ -75,7 +75,7 @@ typedef struct drm_via_private {
|
|
struct timeval last_vblank;
|
|
int last_vblank_valid;
|
|
unsigned usec_per_vblank;
|
|
- atomic_t vbl_received;
|
|
+ atomic_unchecked_t vbl_received;
|
|
drm_via_state_t hc_state;
|
|
char pci_buf[VIA_PCI_BUF_SIZE];
|
|
const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
|
|
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
|
|
index d391f48..10c8ca3 100644
|
|
--- a/drivers/gpu/drm/via/via_irq.c
|
|
+++ b/drivers/gpu/drm/via/via_irq.c
|
|
@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
|
|
if (crtc != 0)
|
|
return 0;
|
|
|
|
- return atomic_read(&dev_priv->vbl_received);
|
|
+ return atomic_read_unchecked(&dev_priv->vbl_received);
|
|
}
|
|
|
|
irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
|
|
@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
|
|
|
|
status = VIA_READ(VIA_REG_INTERRUPT);
|
|
if (status & VIA_IRQ_VBLANK_PENDING) {
|
|
- atomic_inc(&dev_priv->vbl_received);
|
|
- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
|
|
+ atomic_inc_unchecked(&dev_priv->vbl_received);
|
|
+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
|
|
do_gettimeofday(&cur_vblank);
|
|
if (dev_priv->last_vblank_valid) {
|
|
dev_priv->usec_per_vblank =
|
|
@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
|
|
dev_priv->last_vblank = cur_vblank;
|
|
dev_priv->last_vblank_valid = 1;
|
|
}
|
|
- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
|
|
+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
|
|
DRM_DEBUG("US per vblank is: %u\n",
|
|
dev_priv->usec_per_vblank);
|
|
}
|
|
@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
|
|
|
|
for (i = 0; i < dev_priv->num_irqs; ++i) {
|
|
if (status & cur_irq->pending_mask) {
|
|
- atomic_inc(&cur_irq->irq_received);
|
|
+ atomic_inc_unchecked(&cur_irq->irq_received);
|
|
DRM_WAKEUP(&cur_irq->irq_queue);
|
|
handled = 1;
|
|
if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
|
|
@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
|
|
DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
|
|
((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
|
|
masks[irq][4]));
|
|
- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
|
|
+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
|
|
} else {
|
|
DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
|
|
(((cur_irq_sequence =
|
|
- atomic_read(&cur_irq->irq_received)) -
|
|
+ atomic_read_unchecked(&cur_irq->irq_received)) -
|
|
*sequence) <= (1 << 23)));
|
|
}
|
|
*sequence = cur_irq_sequence;
|
|
@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
|
|
}
|
|
|
|
for (i = 0; i < dev_priv->num_irqs; ++i) {
|
|
- atomic_set(&cur_irq->irq_received, 0);
|
|
+ atomic_set_unchecked(&cur_irq->irq_received, 0);
|
|
cur_irq->enable_mask = dev_priv->irq_masks[i][0];
|
|
cur_irq->pending_mask = dev_priv->irq_masks[i][1];
|
|
DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
|
|
@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
|
|
switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
|
|
case VIA_IRQ_RELATIVE:
|
|
irqwait->request.sequence +=
|
|
- atomic_read(&cur_irq->irq_received);
|
|
+ atomic_read_unchecked(&cur_irq->irq_received);
|
|
irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
|
|
case VIA_IRQ_ABSOLUTE:
|
|
break;
|
|
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
|
|
index 29c984f..4084f1a 100644
|
|
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
|
|
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
|
|
@@ -263,7 +263,7 @@ struct vmw_private {
|
|
* Fencing and IRQs.
|
|
*/
|
|
|
|
- atomic_t marker_seq;
|
|
+ atomic_unchecked_t marker_seq;
|
|
wait_queue_head_t fence_queue;
|
|
wait_queue_head_t fifo_queue;
|
|
int fence_queue_waiters; /* Protected by hw_mutex */
|
|
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
|
|
index decca82..7968bc5 100644
|
|
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
|
|
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
|
|
@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
|
|
(unsigned int) min,
|
|
(unsigned int) fifo->capabilities);
|
|
|
|
- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
|
|
+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
|
|
iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
|
|
vmw_marker_queue_init(&fifo->marker_queue);
|
|
return vmw_fifo_send_fence(dev_priv, &dummy);
|
|
@@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
|
|
if (reserveable)
|
|
iowrite32(bytes, fifo_mem +
|
|
SVGA_FIFO_RESERVED);
|
|
- return fifo_mem + (next_cmd >> 2);
|
|
+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
|
|
} else {
|
|
need_bounce = true;
|
|
}
|
|
@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
|
|
|
|
fm = vmw_fifo_reserve(dev_priv, bytes);
|
|
if (unlikely(fm == NULL)) {
|
|
- *seqno = atomic_read(&dev_priv->marker_seq);
|
|
+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
|
|
ret = -ENOMEM;
|
|
(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
|
|
false, 3*HZ);
|
|
@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
|
|
}
|
|
|
|
do {
|
|
- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
|
|
+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
|
|
} while (*seqno == 0);
|
|
|
|
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
|
|
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
|
|
index cabc95f..14b3d77 100644
|
|
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
|
|
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
|
|
@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
|
|
* emitted. Then the fence is stale and signaled.
|
|
*/
|
|
|
|
- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
|
|
+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
|
|
> VMW_FENCE_WRAP);
|
|
|
|
return ret;
|
|
@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
|
|
|
|
if (fifo_idle)
|
|
down_read(&fifo_state->rwsem);
|
|
- signal_seq = atomic_read(&dev_priv->marker_seq);
|
|
+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
|
|
ret = 0;
|
|
|
|
for (;;) {
|
|
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
|
|
index 8a8725c2..afed796 100644
|
|
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
|
|
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
|
|
@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
|
|
while (!vmw_lag_lt(queue, us)) {
|
|
spin_lock(&queue->lock);
|
|
if (list_empty(&queue->head))
|
|
- seqno = atomic_read(&dev_priv->marker_seq);
|
|
+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
|
|
else {
|
|
marker = list_first_entry(&queue->head,
|
|
struct vmw_marker, head);
|
|
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
|
|
index 1903aa4..37d2628 100644
|
|
--- a/drivers/hid/hid-core.c
|
|
+++ b/drivers/hid/hid-core.c
|
|
@@ -2164,7 +2164,7 @@ static bool hid_ignore(struct hid_device *hdev)
|
|
|
|
int hid_add_device(struct hid_device *hdev)
|
|
{
|
|
- static atomic_t id = ATOMIC_INIT(0);
|
|
+ static atomic_unchecked_t id = ATOMIC_INIT(0);
|
|
int ret;
|
|
|
|
if (WARN_ON(hdev->status & HID_STAT_ADDED))
|
|
@@ -2179,7 +2179,7 @@ int hid_add_device(struct hid_device *hdev)
|
|
/* XXX hack, any other cleaner solution after the driver core
|
|
* is converted to allow more than 20 bytes as the device name? */
|
|
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
|
|
- hdev->vendor, hdev->product, atomic_inc_return(&id));
|
|
+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
|
|
|
|
hid_debug_register(hdev, dev_name(&hdev->dev));
|
|
ret = device_add(&hdev->dev);
|
|
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
|
|
index b1ec0e2..c295a61 100644
|
|
--- a/drivers/hid/usbhid/hiddev.c
|
|
+++ b/drivers/hid/usbhid/hiddev.c
|
|
@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|
break;
|
|
|
|
case HIDIOCAPPLICATION:
|
|
- if (arg < 0 || arg >= hid->maxapplication)
|
|
+ if (arg >= hid->maxapplication)
|
|
break;
|
|
|
|
for (i = 0; i < hid->maxcollection; i++)
|
|
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
|
|
index 980ef7e..b65114f 100644
|
|
--- a/drivers/hv/channel.c
|
|
+++ b/drivers/hv/channel.c
|
|
@@ -406,8 +406,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
|
|
unsigned long flags;
|
|
int ret = 0;
|
|
|
|
- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
|
|
- atomic_inc(&vmbus_connection.next_gpadl_handle);
|
|
+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
|
|
+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
|
|
|
|
ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
|
|
if (ret)
|
|
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
|
|
index 15956bd..ea34398 100644
|
|
--- a/drivers/hv/hv.c
|
|
+++ b/drivers/hv/hv.c
|
|
@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
|
|
u64 output_address = (output) ? virt_to_phys(output) : 0;
|
|
u32 output_address_hi = output_address >> 32;
|
|
u32 output_address_lo = output_address & 0xFFFFFFFF;
|
|
- void *hypercall_page = hv_context.hypercall_page;
|
|
+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
|
|
|
|
__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
|
|
"=a"(hv_status_lo) : "d" (control_hi),
|
|
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
|
|
index 699f0d8..f4f19250 100644
|
|
--- a/drivers/hv/hyperv_vmbus.h
|
|
+++ b/drivers/hv/hyperv_vmbus.h
|
|
@@ -555,7 +555,7 @@ enum vmbus_connect_state {
|
|
struct vmbus_connection {
|
|
enum vmbus_connect_state conn_state;
|
|
|
|
- atomic_t next_gpadl_handle;
|
|
+ atomic_unchecked_t next_gpadl_handle;
|
|
|
|
/*
|
|
* Represents channel interrupts. Each bit position represents a
|
|
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
|
|
index 1ec309d..3b4665b 100644
|
|
--- a/drivers/hv/vmbus_drv.c
|
|
+++ b/drivers/hv/vmbus_drv.c
|
|
@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
|
|
{
|
|
int ret = 0;
|
|
|
|
- static atomic_t device_num = ATOMIC_INIT(0);
|
|
+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
|
|
|
|
dev_set_name(&child_device_obj->device, "vmbus_0_%d",
|
|
- atomic_inc_return(&device_num));
|
|
+ atomic_inc_return_unchecked(&device_num));
|
|
|
|
child_device_obj->device.bus = &hv_bus;
|
|
child_device_obj->device.parent = &hv_acpi_dev->dev;
|
|
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
|
|
index 9140236..ceaef4e 100644
|
|
--- a/drivers/hwmon/acpi_power_meter.c
|
|
+++ b/drivers/hwmon/acpi_power_meter.c
|
|
@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
|
|
return res;
|
|
|
|
temp /= 1000;
|
|
- if (temp < 0)
|
|
- return -EINVAL;
|
|
|
|
mutex_lock(&resource->lock);
|
|
resource->trip[attr->index - 7] = temp;
|
|
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
|
|
index c689630..32f0bc3 100644
|
|
--- a/drivers/hwmon/sht15.c
|
|
+++ b/drivers/hwmon/sht15.c
|
|
@@ -166,7 +166,7 @@ struct sht15_data {
|
|
int supply_uV;
|
|
bool supply_uV_valid;
|
|
struct work_struct update_supply_work;
|
|
- atomic_t interrupt_handled;
|
|
+ atomic_unchecked_t interrupt_handled;
|
|
};
|
|
|
|
/**
|
|
@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
|
|
return ret;
|
|
|
|
gpio_direction_input(data->pdata->gpio_data);
|
|
- atomic_set(&data->interrupt_handled, 0);
|
|
+ atomic_set_unchecked(&data->interrupt_handled, 0);
|
|
|
|
enable_irq(gpio_to_irq(data->pdata->gpio_data));
|
|
if (gpio_get_value(data->pdata->gpio_data) == 0) {
|
|
disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
|
|
/* Only relevant if the interrupt hasn't occurred. */
|
|
- if (!atomic_read(&data->interrupt_handled))
|
|
+ if (!atomic_read_unchecked(&data->interrupt_handled))
|
|
schedule_work(&data->read_work);
|
|
}
|
|
ret = wait_event_timeout(data->wait_queue,
|
|
@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
|
|
|
|
/* First disable the interrupt */
|
|
disable_irq_nosync(irq);
|
|
- atomic_inc(&data->interrupt_handled);
|
|
+ atomic_inc_unchecked(&data->interrupt_handled);
|
|
/* Then schedule a reading work struct */
|
|
if (data->state != SHT15_READING_NOTHING)
|
|
schedule_work(&data->read_work);
|
|
@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
|
|
* If not, then start the interrupt again - care here as could
|
|
* have gone low in meantime so verify it hasn't!
|
|
*/
|
|
- atomic_set(&data->interrupt_handled, 0);
|
|
+ atomic_set_unchecked(&data->interrupt_handled, 0);
|
|
enable_irq(gpio_to_irq(data->pdata->gpio_data));
|
|
/* If still not occurred or another handler was scheduled */
|
|
if (gpio_get_value(data->pdata->gpio_data)
|
|
- || atomic_read(&data->interrupt_handled))
|
|
+ || atomic_read_unchecked(&data->interrupt_handled))
|
|
return;
|
|
}
|
|
|
|
diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
|
|
index 378fcb5..5e91fa8 100644
|
|
--- a/drivers/i2c/busses/i2c-amd756-s4882.c
|
|
+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
|
|
@@ -43,7 +43,7 @@
|
|
extern struct i2c_adapter amd756_smbus;
|
|
|
|
static struct i2c_adapter *s4882_adapter;
|
|
-static struct i2c_algorithm *s4882_algo;
|
|
+static i2c_algorithm_no_const *s4882_algo;
|
|
|
|
/* Wrapper access functions for multiplexed SMBus */
|
|
static DEFINE_MUTEX(amd756_lock);
|
|
diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
|
|
index 29015eb..af2d8e9 100644
|
|
--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
|
|
+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
|
|
@@ -41,7 +41,7 @@
|
|
extern struct i2c_adapter *nforce2_smbus;
|
|
|
|
static struct i2c_adapter *s4985_adapter;
|
|
-static struct i2c_algorithm *s4985_algo;
|
|
+static i2c_algorithm_no_const *s4985_algo;
|
|
|
|
/* Wrapper access functions for multiplexed SMBus */
|
|
static DEFINE_MUTEX(nforce2_lock);
|
|
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
|
|
index d7a4833..7fae376 100644
|
|
--- a/drivers/i2c/i2c-mux.c
|
|
+++ b/drivers/i2c/i2c-mux.c
|
|
@@ -28,7 +28,7 @@
|
|
/* multiplexer per channel data */
|
|
struct i2c_mux_priv {
|
|
struct i2c_adapter adap;
|
|
- struct i2c_algorithm algo;
|
|
+ i2c_algorithm_no_const algo;
|
|
|
|
struct i2c_adapter *parent;
|
|
void *mux_dev; /* the mux chip/device */
|
|
diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
|
|
index 57d00ca..0145194 100644
|
|
--- a/drivers/ide/aec62xx.c
|
|
+++ b/drivers/ide/aec62xx.c
|
|
@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
|
|
.cable_detect = atp86x_cable_detect,
|
|
};
|
|
|
|
-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
|
|
+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
|
|
{ /* 0: AEC6210 */
|
|
.name = DRV_NAME,
|
|
.init_chipset = init_chipset_aec62xx,
|
|
diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
|
|
index 2c8016a..911a27c 100644
|
|
--- a/drivers/ide/alim15x3.c
|
|
+++ b/drivers/ide/alim15x3.c
|
|
@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
|
|
.dma_sff_read_status = ide_dma_sff_read_status,
|
|
};
|
|
|
|
-static const struct ide_port_info ali15x3_chipset __devinitdata = {
|
|
+static const struct ide_port_info ali15x3_chipset __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.init_chipset = init_chipset_ali15x3,
|
|
.init_hwif = init_hwif_ali15x3,
|
|
diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
|
|
index 3747b25..56fc995 100644
|
|
--- a/drivers/ide/amd74xx.c
|
|
+++ b/drivers/ide/amd74xx.c
|
|
@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
|
|
.udma_mask = udma, \
|
|
}
|
|
|
|
-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
|
|
+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
|
|
/* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
|
|
/* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
|
|
/* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
|
|
diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
|
|
index 15f0ead..cb43480 100644
|
|
--- a/drivers/ide/atiixp.c
|
|
+++ b/drivers/ide/atiixp.c
|
|
@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
|
|
.cable_detect = atiixp_cable_detect,
|
|
};
|
|
|
|
-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
|
|
+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
|
|
{ /* 0: IXP200/300/400/700 */
|
|
.name = DRV_NAME,
|
|
.enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
|
|
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
|
|
index 5f80312..d1fc438 100644
|
|
--- a/drivers/ide/cmd64x.c
|
|
+++ b/drivers/ide/cmd64x.c
|
|
@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
|
|
.dma_sff_read_status = ide_dma_sff_read_status,
|
|
};
|
|
|
|
-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
|
|
+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
|
|
{ /* 0: CMD643 */
|
|
.name = DRV_NAME,
|
|
.init_chipset = init_chipset_cmd64x,
|
|
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
|
|
index 2c1e5f7..1444762 100644
|
|
--- a/drivers/ide/cs5520.c
|
|
+++ b/drivers/ide/cs5520.c
|
|
@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
|
|
.set_dma_mode = cs5520_set_dma_mode,
|
|
};
|
|
|
|
-static const struct ide_port_info cyrix_chipset __devinitdata = {
|
|
+static const struct ide_port_info cyrix_chipset __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
|
|
.port_ops = &cs5520_port_ops,
|
|
diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
|
|
index 4dc4eb9..49b40ad 100644
|
|
--- a/drivers/ide/cs5530.c
|
|
+++ b/drivers/ide/cs5530.c
|
|
@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
|
|
.udma_filter = cs5530_udma_filter,
|
|
};
|
|
|
|
-static const struct ide_port_info cs5530_chipset __devinitdata = {
|
|
+static const struct ide_port_info cs5530_chipset __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.init_chipset = init_chipset_cs5530,
|
|
.init_hwif = init_hwif_cs5530,
|
|
diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
|
|
index 5059faf..18d4c85 100644
|
|
--- a/drivers/ide/cs5535.c
|
|
+++ b/drivers/ide/cs5535.c
|
|
@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
|
|
.cable_detect = cs5535_cable_detect,
|
|
};
|
|
|
|
-static const struct ide_port_info cs5535_chipset __devinitdata = {
|
|
+static const struct ide_port_info cs5535_chipset __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.port_ops = &cs5535_port_ops,
|
|
.host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
|
|
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
|
|
index 847553f..3ffb49d 100644
|
|
--- a/drivers/ide/cy82c693.c
|
|
+++ b/drivers/ide/cy82c693.c
|
|
@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
|
|
.set_dma_mode = cy82c693_set_dma_mode,
|
|
};
|
|
|
|
-static const struct ide_port_info cy82c693_chipset __devinitdata = {
|
|
+static const struct ide_port_info cy82c693_chipset __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.init_iops = init_iops_cy82c693,
|
|
.port_ops = &cy82c693_port_ops,
|
|
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
|
|
index 58c51cd..4aec3b8 100644
|
|
--- a/drivers/ide/hpt366.c
|
|
+++ b/drivers/ide/hpt366.c
|
|
@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
|
|
}
|
|
};
|
|
|
|
-static const struct hpt_info hpt36x __devinitdata = {
|
|
+static const struct hpt_info hpt36x __devinitconst = {
|
|
.chip_name = "HPT36x",
|
|
.chip_type = HPT36x,
|
|
.udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
|
|
@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
|
|
.timings = &hpt36x_timings
|
|
};
|
|
|
|
-static const struct hpt_info hpt370 __devinitdata = {
|
|
+static const struct hpt_info hpt370 __devinitconst = {
|
|
.chip_name = "HPT370",
|
|
.chip_type = HPT370,
|
|
.udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
|
|
@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
|
|
.timings = &hpt37x_timings
|
|
};
|
|
|
|
-static const struct hpt_info hpt370a __devinitdata = {
|
|
+static const struct hpt_info hpt370a __devinitconst = {
|
|
.chip_name = "HPT370A",
|
|
.chip_type = HPT370A,
|
|
.udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
|
|
@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
|
|
.timings = &hpt37x_timings
|
|
};
|
|
|
|
-static const struct hpt_info hpt374 __devinitdata = {
|
|
+static const struct hpt_info hpt374 __devinitconst = {
|
|
.chip_name = "HPT374",
|
|
.chip_type = HPT374,
|
|
.udma_mask = ATA_UDMA5,
|
|
@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
|
|
.timings = &hpt37x_timings
|
|
};
|
|
|
|
-static const struct hpt_info hpt372 __devinitdata = {
|
|
+static const struct hpt_info hpt372 __devinitconst = {
|
|
.chip_name = "HPT372",
|
|
.chip_type = HPT372,
|
|
.udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
|
|
@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
|
|
.timings = &hpt37x_timings
|
|
};
|
|
|
|
-static const struct hpt_info hpt372a __devinitdata = {
|
|
+static const struct hpt_info hpt372a __devinitconst = {
|
|
.chip_name = "HPT372A",
|
|
.chip_type = HPT372A,
|
|
.udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
|
|
@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
|
|
.timings = &hpt37x_timings
|
|
};
|
|
|
|
-static const struct hpt_info hpt302 __devinitdata = {
|
|
+static const struct hpt_info hpt302 __devinitconst = {
|
|
.chip_name = "HPT302",
|
|
.chip_type = HPT302,
|
|
.udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
|
|
@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
|
|
.timings = &hpt37x_timings
|
|
};
|
|
|
|
-static const struct hpt_info hpt371 __devinitdata = {
|
|
+static const struct hpt_info hpt371 __devinitconst = {
|
|
.chip_name = "HPT371",
|
|
.chip_type = HPT371,
|
|
.udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
|
|
@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
|
|
.timings = &hpt37x_timings
|
|
};
|
|
|
|
-static const struct hpt_info hpt372n __devinitdata = {
|
|
+static const struct hpt_info hpt372n __devinitconst = {
|
|
.chip_name = "HPT372N",
|
|
.chip_type = HPT372N,
|
|
.udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
|
|
@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
|
|
.timings = &hpt37x_timings
|
|
};
|
|
|
|
-static const struct hpt_info hpt302n __devinitdata = {
|
|
+static const struct hpt_info hpt302n __devinitconst = {
|
|
.chip_name = "HPT302N",
|
|
.chip_type = HPT302N,
|
|
.udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
|
|
@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
|
|
.timings = &hpt37x_timings
|
|
};
|
|
|
|
-static const struct hpt_info hpt371n __devinitdata = {
|
|
+static const struct hpt_info hpt371n __devinitconst = {
|
|
.chip_name = "HPT371N",
|
|
.chip_type = HPT371N,
|
|
.udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
|
|
@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
|
|
.dma_sff_read_status = ide_dma_sff_read_status,
|
|
};
|
|
|
|
-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
|
|
+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
|
|
{ /* 0: HPT36x */
|
|
.name = DRV_NAME,
|
|
.init_chipset = init_chipset_hpt366,
|
|
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
|
|
index 8126824..55a2798 100644
|
|
--- a/drivers/ide/ide-cd.c
|
|
+++ b/drivers/ide/ide-cd.c
|
|
@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
|
|
alignment = queue_dma_alignment(q) | q->dma_pad_mask;
|
|
if ((unsigned long)buf & alignment
|
|
|| blk_rq_bytes(rq) & q->dma_pad_mask
|
|
- || object_is_on_stack(buf))
|
|
+ || object_starts_on_stack(buf))
|
|
drive->dma = 0;
|
|
}
|
|
}
|
|
diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
|
|
index 7f56b73..dab5b67 100644
|
|
--- a/drivers/ide/ide-pci-generic.c
|
|
+++ b/drivers/ide/ide-pci-generic.c
|
|
@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
|
|
.udma_mask = ATA_UDMA6, \
|
|
}
|
|
|
|
-static const struct ide_port_info generic_chipsets[] __devinitdata = {
|
|
+static const struct ide_port_info generic_chipsets[] __devinitconst = {
|
|
/* 0: Unknown */
|
|
DECLARE_GENERIC_PCI_DEV(0),
|
|
|
|
diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
|
|
index 560e66d..d5dd180 100644
|
|
--- a/drivers/ide/it8172.c
|
|
+++ b/drivers/ide/it8172.c
|
|
@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
|
|
.set_dma_mode = it8172_set_dma_mode,
|
|
};
|
|
|
|
-static const struct ide_port_info it8172_port_info __devinitdata = {
|
|
+static const struct ide_port_info it8172_port_info __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.port_ops = &it8172_port_ops,
|
|
.enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
|
|
diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
|
|
index 46816ba..1847aeb 100644
|
|
--- a/drivers/ide/it8213.c
|
|
+++ b/drivers/ide/it8213.c
|
|
@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
|
|
.cable_detect = it8213_cable_detect,
|
|
};
|
|
|
|
-static const struct ide_port_info it8213_chipset __devinitdata = {
|
|
+static const struct ide_port_info it8213_chipset __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.enablebits = { {0x41, 0x80, 0x80} },
|
|
.port_ops = &it8213_port_ops,
|
|
diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
|
|
index 2e3169f..c5611db 100644
|
|
--- a/drivers/ide/it821x.c
|
|
+++ b/drivers/ide/it821x.c
|
|
@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
|
|
.cable_detect = it821x_cable_detect,
|
|
};
|
|
|
|
-static const struct ide_port_info it821x_chipset __devinitdata = {
|
|
+static const struct ide_port_info it821x_chipset __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.init_chipset = init_chipset_it821x,
|
|
.init_hwif = init_hwif_it821x,
|
|
diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
|
|
index 74c2c4a..efddd7d 100644
|
|
--- a/drivers/ide/jmicron.c
|
|
+++ b/drivers/ide/jmicron.c
|
|
@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
|
|
.cable_detect = jmicron_cable_detect,
|
|
};
|
|
|
|
-static const struct ide_port_info jmicron_chipset __devinitdata = {
|
|
+static const struct ide_port_info jmicron_chipset __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
|
|
.port_ops = &jmicron_port_ops,
|
|
diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
|
|
index 95327a2..73f78d8 100644
|
|
--- a/drivers/ide/ns87415.c
|
|
+++ b/drivers/ide/ns87415.c
|
|
@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
|
|
.dma_sff_read_status = superio_dma_sff_read_status,
|
|
};
|
|
|
|
-static const struct ide_port_info ns87415_chipset __devinitdata = {
|
|
+static const struct ide_port_info ns87415_chipset __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.init_hwif = init_hwif_ns87415,
|
|
.tp_ops = &ns87415_tp_ops,
|
|
diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
|
|
index 1a53a4c..39edc66 100644
|
|
--- a/drivers/ide/opti621.c
|
|
+++ b/drivers/ide/opti621.c
|
|
@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
|
|
.set_pio_mode = opti621_set_pio_mode,
|
|
};
|
|
|
|
-static const struct ide_port_info opti621_chipset __devinitdata = {
|
|
+static const struct ide_port_info opti621_chipset __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
|
|
.port_ops = &opti621_port_ops,
|
|
diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
|
|
index 9546fe2..2e5ceb6 100644
|
|
--- a/drivers/ide/pdc202xx_new.c
|
|
+++ b/drivers/ide/pdc202xx_new.c
|
|
@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
|
|
.udma_mask = udma, \
|
|
}
|
|
|
|
-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
|
|
+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
|
|
/* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
|
|
/* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
|
|
};
|
|
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
|
|
index 3a35ec6..5634510 100644
|
|
--- a/drivers/ide/pdc202xx_old.c
|
|
+++ b/drivers/ide/pdc202xx_old.c
|
|
@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
|
|
.max_sectors = sectors, \
|
|
}
|
|
|
|
-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
|
|
+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
|
|
{ /* 0: PDC20246 */
|
|
.name = DRV_NAME,
|
|
.init_chipset = init_chipset_pdc202xx,
|
|
diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
|
|
index 1892e81..fe0fd60 100644
|
|
--- a/drivers/ide/piix.c
|
|
+++ b/drivers/ide/piix.c
|
|
@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
|
|
.udma_mask = udma, \
|
|
}
|
|
|
|
-static const struct ide_port_info piix_pci_info[] __devinitdata = {
|
|
+static const struct ide_port_info piix_pci_info[] __devinitconst = {
|
|
/* 0: MPIIX */
|
|
{ /*
|
|
* MPIIX actually has only a single IDE channel mapped to
|
|
diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
|
|
index a6414a8..c04173e 100644
|
|
--- a/drivers/ide/rz1000.c
|
|
+++ b/drivers/ide/rz1000.c
|
|
@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
|
|
}
|
|
}
|
|
|
|
-static const struct ide_port_info rz1000_chipset __devinitdata = {
|
|
+static const struct ide_port_info rz1000_chipset __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.host_flags = IDE_HFLAG_NO_DMA,
|
|
};
|
|
diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
|
|
index 356b9b5..d4758eb 100644
|
|
--- a/drivers/ide/sc1200.c
|
|
+++ b/drivers/ide/sc1200.c
|
|
@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
|
|
.dma_sff_read_status = ide_dma_sff_read_status,
|
|
};
|
|
|
|
-static const struct ide_port_info sc1200_chipset __devinitdata = {
|
|
+static const struct ide_port_info sc1200_chipset __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.port_ops = &sc1200_port_ops,
|
|
.dma_ops = &sc1200_dma_ops,
|
|
diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
|
|
index b7f5b0c..9701038 100644
|
|
--- a/drivers/ide/scc_pata.c
|
|
+++ b/drivers/ide/scc_pata.c
|
|
@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
|
|
.dma_sff_read_status = scc_dma_sff_read_status,
|
|
};
|
|
|
|
-static const struct ide_port_info scc_chipset __devinitdata = {
|
|
+static const struct ide_port_info scc_chipset __devinitconst = {
|
|
.name = "sccIDE",
|
|
.init_iops = init_iops_scc,
|
|
.init_dma = scc_init_dma,
|
|
diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
|
|
index 35fb8da..24d72ef 100644
|
|
--- a/drivers/ide/serverworks.c
|
|
+++ b/drivers/ide/serverworks.c
|
|
@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
|
|
.cable_detect = svwks_cable_detect,
|
|
};
|
|
|
|
-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
|
|
+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
|
|
{ /* 0: OSB4 */
|
|
.name = DRV_NAME,
|
|
.init_chipset = init_chipset_svwks,
|
|
diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
|
|
index ddeda44..46f7e30 100644
|
|
--- a/drivers/ide/siimage.c
|
|
+++ b/drivers/ide/siimage.c
|
|
@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
|
|
.udma_mask = ATA_UDMA6, \
|
|
}
|
|
|
|
-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
|
|
+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
|
|
/* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
|
|
/* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
|
|
};
|
|
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
|
|
index 4a00225..09e61b4 100644
|
|
--- a/drivers/ide/sis5513.c
|
|
+++ b/drivers/ide/sis5513.c
|
|
@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
|
|
.cable_detect = sis_cable_detect,
|
|
};
|
|
|
|
-static const struct ide_port_info sis5513_chipset __devinitdata = {
|
|
+static const struct ide_port_info sis5513_chipset __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.init_chipset = init_chipset_sis5513,
|
|
.enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
|
|
diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
|
|
index f21dc2a..d051cd2 100644
|
|
--- a/drivers/ide/sl82c105.c
|
|
+++ b/drivers/ide/sl82c105.c
|
|
@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
|
|
.dma_sff_read_status = ide_dma_sff_read_status,
|
|
};
|
|
|
|
-static const struct ide_port_info sl82c105_chipset __devinitdata = {
|
|
+static const struct ide_port_info sl82c105_chipset __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.init_chipset = init_chipset_sl82c105,
|
|
.enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
|
|
diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
|
|
index 864ffe0..863a5e92 100644
|
|
--- a/drivers/ide/slc90e66.c
|
|
+++ b/drivers/ide/slc90e66.c
|
|
@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
|
|
.cable_detect = slc90e66_cable_detect,
|
|
};
|
|
|
|
-static const struct ide_port_info slc90e66_chipset __devinitdata = {
|
|
+static const struct ide_port_info slc90e66_chipset __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
|
|
.port_ops = &slc90e66_port_ops,
|
|
diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
|
|
index 4799d5c..1794678 100644
|
|
--- a/drivers/ide/tc86c001.c
|
|
+++ b/drivers/ide/tc86c001.c
|
|
@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
|
|
.dma_sff_read_status = ide_dma_sff_read_status,
|
|
};
|
|
|
|
-static const struct ide_port_info tc86c001_chipset __devinitdata = {
|
|
+static const struct ide_port_info tc86c001_chipset __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.init_hwif = init_hwif_tc86c001,
|
|
.port_ops = &tc86c001_port_ops,
|
|
diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
|
|
index 281c914..55ce1b8 100644
|
|
--- a/drivers/ide/triflex.c
|
|
+++ b/drivers/ide/triflex.c
|
|
@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
|
|
.set_dma_mode = triflex_set_mode,
|
|
};
|
|
|
|
-static const struct ide_port_info triflex_device __devinitdata = {
|
|
+static const struct ide_port_info triflex_device __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
|
|
.port_ops = &triflex_port_ops,
|
|
diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
|
|
index 4b42ca0..e494a98 100644
|
|
--- a/drivers/ide/trm290.c
|
|
+++ b/drivers/ide/trm290.c
|
|
@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
|
|
.dma_check = trm290_dma_check,
|
|
};
|
|
|
|
-static const struct ide_port_info trm290_chipset __devinitdata = {
|
|
+static const struct ide_port_info trm290_chipset __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.init_hwif = init_hwif_trm290,
|
|
.tp_ops = &trm290_tp_ops,
|
|
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
|
|
index f46f49c..eb77678 100644
|
|
--- a/drivers/ide/via82cxxx.c
|
|
+++ b/drivers/ide/via82cxxx.c
|
|
@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
|
|
.cable_detect = via82cxxx_cable_detect,
|
|
};
|
|
|
|
-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
|
|
+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
|
|
.name = DRV_NAME,
|
|
.init_chipset = init_chipset_via82cxxx,
|
|
.enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
|
|
diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
|
|
index 73d4531..c90cd2d 100644
|
|
--- a/drivers/ieee802154/fakehard.c
|
|
+++ b/drivers/ieee802154/fakehard.c
|
|
@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
|
|
phy->transmit_power = 0xbf;
|
|
|
|
dev->netdev_ops = &fake_ops;
|
|
- dev->ml_priv = &fake_mlme;
|
|
+ dev->ml_priv = (void *)&fake_mlme;
|
|
|
|
priv = netdev_priv(dev);
|
|
priv->phy = phy;
|
|
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
|
|
index c889aae..6cf5aa7 100644
|
|
--- a/drivers/infiniband/core/cm.c
|
|
+++ b/drivers/infiniband/core/cm.c
|
|
@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
|
|
|
|
struct cm_counter_group {
|
|
struct kobject obj;
|
|
- atomic_long_t counter[CM_ATTR_COUNT];
|
|
+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
|
|
};
|
|
|
|
struct cm_counter_attribute {
|
|
@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
|
|
struct ib_mad_send_buf *msg = NULL;
|
|
int ret;
|
|
|
|
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
counter[CM_REQ_COUNTER]);
|
|
|
|
/* Quick state check to discard duplicate REQs. */
|
|
@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
|
|
if (!cm_id_priv)
|
|
return;
|
|
|
|
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
counter[CM_REP_COUNTER]);
|
|
ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
|
|
if (ret)
|
|
@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
|
|
if (cm_id_priv->id.state != IB_CM_REP_SENT &&
|
|
cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
|
|
spin_unlock_irq(&cm_id_priv->lock);
|
|
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
counter[CM_RTU_COUNTER]);
|
|
goto out;
|
|
}
|
|
@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
|
|
cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
|
|
dreq_msg->local_comm_id);
|
|
if (!cm_id_priv) {
|
|
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
counter[CM_DREQ_COUNTER]);
|
|
cm_issue_drep(work->port, work->mad_recv_wc);
|
|
return -EINVAL;
|
|
@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
|
|
case IB_CM_MRA_REP_RCVD:
|
|
break;
|
|
case IB_CM_TIMEWAIT:
|
|
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
counter[CM_DREQ_COUNTER]);
|
|
if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
|
|
goto unlock;
|
|
@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
|
|
cm_free_msg(msg);
|
|
goto deref;
|
|
case IB_CM_DREQ_RCVD:
|
|
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
counter[CM_DREQ_COUNTER]);
|
|
goto unlock;
|
|
default:
|
|
@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
|
|
ib_modify_mad(cm_id_priv->av.port->mad_agent,
|
|
cm_id_priv->msg, timeout)) {
|
|
if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
|
|
- atomic_long_inc(&work->port->
|
|
+ atomic_long_inc_unchecked(&work->port->
|
|
counter_group[CM_RECV_DUPLICATES].
|
|
counter[CM_MRA_COUNTER]);
|
|
goto out;
|
|
@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
|
|
break;
|
|
case IB_CM_MRA_REQ_RCVD:
|
|
case IB_CM_MRA_REP_RCVD:
|
|
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
counter[CM_MRA_COUNTER]);
|
|
/* fall through */
|
|
default:
|
|
@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
|
|
case IB_CM_LAP_IDLE:
|
|
break;
|
|
case IB_CM_MRA_LAP_SENT:
|
|
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
counter[CM_LAP_COUNTER]);
|
|
if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
|
|
goto unlock;
|
|
@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
|
|
cm_free_msg(msg);
|
|
goto deref;
|
|
case IB_CM_LAP_RCVD:
|
|
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
counter[CM_LAP_COUNTER]);
|
|
goto unlock;
|
|
default:
|
|
@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
|
|
cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
|
|
if (cur_cm_id_priv) {
|
|
spin_unlock_irq(&cm.lock);
|
|
- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
|
|
counter[CM_SIDR_REQ_COUNTER]);
|
|
goto out; /* Duplicate message. */
|
|
}
|
|
@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
|
|
if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
|
|
msg->retries = 1;
|
|
|
|
- atomic_long_add(1 + msg->retries,
|
|
+ atomic_long_add_unchecked(1 + msg->retries,
|
|
&port->counter_group[CM_XMIT].counter[attr_index]);
|
|
if (msg->retries)
|
|
- atomic_long_add(msg->retries,
|
|
+ atomic_long_add_unchecked(msg->retries,
|
|
&port->counter_group[CM_XMIT_RETRIES].
|
|
counter[attr_index]);
|
|
|
|
@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
|
|
}
|
|
|
|
attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
|
|
- atomic_long_inc(&port->counter_group[CM_RECV].
|
|
+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
|
|
counter[attr_id - CM_ATTR_ID_OFFSET]);
|
|
|
|
work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
|
|
@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
|
|
cm_attr = container_of(attr, struct cm_counter_attribute, attr);
|
|
|
|
return sprintf(buf, "%ld\n",
|
|
- atomic_long_read(&group->counter[cm_attr->index]));
|
|
+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
|
|
}
|
|
|
|
static const struct sysfs_ops cm_counter_ops = {
|
|
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
|
|
index 176c8f9..2627b62 100644
|
|
--- a/drivers/infiniband/core/fmr_pool.c
|
|
+++ b/drivers/infiniband/core/fmr_pool.c
|
|
@@ -98,8 +98,8 @@ struct ib_fmr_pool {
|
|
|
|
struct task_struct *thread;
|
|
|
|
- atomic_t req_ser;
|
|
- atomic_t flush_ser;
|
|
+ atomic_unchecked_t req_ser;
|
|
+ atomic_unchecked_t flush_ser;
|
|
|
|
wait_queue_head_t force_wait;
|
|
};
|
|
@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
|
|
struct ib_fmr_pool *pool = pool_ptr;
|
|
|
|
do {
|
|
- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
|
|
+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
|
|
ib_fmr_batch_release(pool);
|
|
|
|
- atomic_inc(&pool->flush_ser);
|
|
+ atomic_inc_unchecked(&pool->flush_ser);
|
|
wake_up_interruptible(&pool->force_wait);
|
|
|
|
if (pool->flush_function)
|
|
@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
|
|
}
|
|
|
|
set_current_state(TASK_INTERRUPTIBLE);
|
|
- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
|
|
+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
|
|
!kthread_should_stop())
|
|
schedule();
|
|
__set_current_state(TASK_RUNNING);
|
|
@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
|
|
pool->dirty_watermark = params->dirty_watermark;
|
|
pool->dirty_len = 0;
|
|
spin_lock_init(&pool->pool_lock);
|
|
- atomic_set(&pool->req_ser, 0);
|
|
- atomic_set(&pool->flush_ser, 0);
|
|
+ atomic_set_unchecked(&pool->req_ser, 0);
|
|
+ atomic_set_unchecked(&pool->flush_ser, 0);
|
|
init_waitqueue_head(&pool->force_wait);
|
|
|
|
pool->thread = kthread_run(ib_fmr_cleanup_thread,
|
|
@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
|
|
}
|
|
spin_unlock_irq(&pool->pool_lock);
|
|
|
|
- serial = atomic_inc_return(&pool->req_ser);
|
|
+ serial = atomic_inc_return_unchecked(&pool->req_ser);
|
|
wake_up_process(pool->thread);
|
|
|
|
if (wait_event_interruptible(pool->force_wait,
|
|
- atomic_read(&pool->flush_ser) - serial >= 0))
|
|
+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
|
|
return -EINTR;
|
|
|
|
return 0;
|
|
@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
|
|
} else {
|
|
list_add_tail(&fmr->list, &pool->dirty_list);
|
|
if (++pool->dirty_len >= pool->dirty_watermark) {
|
|
- atomic_inc(&pool->req_ser);
|
|
+ atomic_inc_unchecked(&pool->req_ser);
|
|
wake_up_process(pool->thread);
|
|
}
|
|
}
|
|
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
|
|
index 40c8353..946b0e4 100644
|
|
--- a/drivers/infiniband/hw/cxgb4/mem.c
|
|
+++ b/drivers/infiniband/hw/cxgb4/mem.c
|
|
@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
|
|
int err;
|
|
struct fw_ri_tpte tpt;
|
|
u32 stag_idx;
|
|
- static atomic_t key;
|
|
+ static atomic_unchecked_t key;
|
|
|
|
if (c4iw_fatal_error(rdev))
|
|
return -EIO;
|
|
@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
|
|
&rdev->resource.tpt_fifo_lock);
|
|
if (!stag_idx)
|
|
return -ENOMEM;
|
|
- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
|
|
+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
|
|
}
|
|
PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
|
|
__func__, stag_state, type, pdid, stag_idx);
|
|
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
|
|
index 79b3dbc..96e5fcc 100644
|
|
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
|
|
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
|
|
@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
|
|
struct ib_atomic_eth *ateth;
|
|
struct ipath_ack_entry *e;
|
|
u64 vaddr;
|
|
- atomic64_t *maddr;
|
|
+ atomic64_unchecked_t *maddr;
|
|
u64 sdata;
|
|
u32 rkey;
|
|
u8 next;
|
|
@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
|
|
IB_ACCESS_REMOTE_ATOMIC)))
|
|
goto nack_acc_unlck;
|
|
/* Perform atomic OP and save result. */
|
|
- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
|
|
+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
|
|
sdata = be64_to_cpu(ateth->swap_data);
|
|
e = &qp->s_ack_queue[qp->r_head_ack_queue];
|
|
e->atomic_data = (opcode == OP(FETCH_ADD)) ?
|
|
- (u64) atomic64_add_return(sdata, maddr) - sdata :
|
|
+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
|
|
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
|
|
be64_to_cpu(ateth->compare_data),
|
|
sdata);
|
|
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
|
|
index 1f95bba..9530f87 100644
|
|
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
|
|
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
|
|
@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
|
|
unsigned long flags;
|
|
struct ib_wc wc;
|
|
u64 sdata;
|
|
- atomic64_t *maddr;
|
|
+ atomic64_unchecked_t *maddr;
|
|
enum ib_wc_status send_status;
|
|
|
|
/*
|
|
@@ -382,11 +382,11 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
|
|
IB_ACCESS_REMOTE_ATOMIC)))
|
|
goto acc_err;
|
|
/* Perform atomic OP and save result. */
|
|
- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
|
|
+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
|
|
sdata = wqe->wr.wr.atomic.compare_add;
|
|
*(u64 *) sqp->s_sge.sge.vaddr =
|
|
(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
|
|
- (u64) atomic64_add_return(sdata, maddr) - sdata :
|
|
+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
|
|
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
|
|
sdata, wqe->wr.wr.atomic.swap);
|
|
goto send_comp;
|
|
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
|
|
index 7140199..da60063 100644
|
|
--- a/drivers/infiniband/hw/nes/nes.c
|
|
+++ b/drivers/infiniband/hw/nes/nes.c
|
|
@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
|
|
LIST_HEAD(nes_adapter_list);
|
|
static LIST_HEAD(nes_dev_list);
|
|
|
|
-atomic_t qps_destroyed;
|
|
+atomic_unchecked_t qps_destroyed;
|
|
|
|
static unsigned int ee_flsh_adapter;
|
|
static unsigned int sysfs_nonidx_addr;
|
|
@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
|
|
struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
|
|
struct nes_adapter *nesadapter = nesdev->nesadapter;
|
|
|
|
- atomic_inc(&qps_destroyed);
|
|
+ atomic_inc_unchecked(&qps_destroyed);
|
|
|
|
/* Free the control structures */
|
|
|
|
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
|
|
index 3f41d9f..69cfea7 100644
|
|
--- a/drivers/infiniband/hw/nes/nes.h
|
|
+++ b/drivers/infiniband/hw/nes/nes.h
|
|
@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
|
|
extern unsigned int wqm_quanta;
|
|
extern struct list_head nes_adapter_list;
|
|
|
|
-extern atomic_t cm_connects;
|
|
-extern atomic_t cm_accepts;
|
|
-extern atomic_t cm_disconnects;
|
|
-extern atomic_t cm_closes;
|
|
-extern atomic_t cm_connecteds;
|
|
-extern atomic_t cm_connect_reqs;
|
|
-extern atomic_t cm_rejects;
|
|
-extern atomic_t mod_qp_timouts;
|
|
-extern atomic_t qps_created;
|
|
-extern atomic_t qps_destroyed;
|
|
-extern atomic_t sw_qps_destroyed;
|
|
+extern atomic_unchecked_t cm_connects;
|
|
+extern atomic_unchecked_t cm_accepts;
|
|
+extern atomic_unchecked_t cm_disconnects;
|
|
+extern atomic_unchecked_t cm_closes;
|
|
+extern atomic_unchecked_t cm_connecteds;
|
|
+extern atomic_unchecked_t cm_connect_reqs;
|
|
+extern atomic_unchecked_t cm_rejects;
|
|
+extern atomic_unchecked_t mod_qp_timouts;
|
|
+extern atomic_unchecked_t qps_created;
|
|
+extern atomic_unchecked_t qps_destroyed;
|
|
+extern atomic_unchecked_t sw_qps_destroyed;
|
|
extern u32 mh_detected;
|
|
extern u32 mh_pauses_sent;
|
|
extern u32 cm_packets_sent;
|
|
@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
|
|
extern u32 cm_packets_received;
|
|
extern u32 cm_packets_dropped;
|
|
extern u32 cm_packets_retrans;
|
|
-extern atomic_t cm_listens_created;
|
|
-extern atomic_t cm_listens_destroyed;
|
|
+extern atomic_unchecked_t cm_listens_created;
|
|
+extern atomic_unchecked_t cm_listens_destroyed;
|
|
extern u32 cm_backlog_drops;
|
|
-extern atomic_t cm_loopbacks;
|
|
-extern atomic_t cm_nodes_created;
|
|
-extern atomic_t cm_nodes_destroyed;
|
|
-extern atomic_t cm_accel_dropped_pkts;
|
|
-extern atomic_t cm_resets_recvd;
|
|
-extern atomic_t pau_qps_created;
|
|
-extern atomic_t pau_qps_destroyed;
|
|
+extern atomic_unchecked_t cm_loopbacks;
|
|
+extern atomic_unchecked_t cm_nodes_created;
|
|
+extern atomic_unchecked_t cm_nodes_destroyed;
|
|
+extern atomic_unchecked_t cm_accel_dropped_pkts;
|
|
+extern atomic_unchecked_t cm_resets_recvd;
|
|
+extern atomic_unchecked_t pau_qps_created;
|
|
+extern atomic_unchecked_t pau_qps_destroyed;
|
|
|
|
extern u32 int_mod_timer_init;
|
|
extern u32 int_mod_cq_depth_256;
|
|
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
|
|
index 71edfbb..15b62ae 100644
|
|
--- a/drivers/infiniband/hw/nes/nes_cm.c
|
|
+++ b/drivers/infiniband/hw/nes/nes_cm.c
|
|
@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
|
|
u32 cm_packets_retrans;
|
|
u32 cm_packets_created;
|
|
u32 cm_packets_received;
|
|
-atomic_t cm_listens_created;
|
|
-atomic_t cm_listens_destroyed;
|
|
+atomic_unchecked_t cm_listens_created;
|
|
+atomic_unchecked_t cm_listens_destroyed;
|
|
u32 cm_backlog_drops;
|
|
-atomic_t cm_loopbacks;
|
|
-atomic_t cm_nodes_created;
|
|
-atomic_t cm_nodes_destroyed;
|
|
-atomic_t cm_accel_dropped_pkts;
|
|
-atomic_t cm_resets_recvd;
|
|
+atomic_unchecked_t cm_loopbacks;
|
|
+atomic_unchecked_t cm_nodes_created;
|
|
+atomic_unchecked_t cm_nodes_destroyed;
|
|
+atomic_unchecked_t cm_accel_dropped_pkts;
|
|
+atomic_unchecked_t cm_resets_recvd;
|
|
|
|
static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
|
|
static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
|
|
@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
|
|
|
|
static struct nes_cm_core *g_cm_core;
|
|
|
|
-atomic_t cm_connects;
|
|
-atomic_t cm_accepts;
|
|
-atomic_t cm_disconnects;
|
|
-atomic_t cm_closes;
|
|
-atomic_t cm_connecteds;
|
|
-atomic_t cm_connect_reqs;
|
|
-atomic_t cm_rejects;
|
|
+atomic_unchecked_t cm_connects;
|
|
+atomic_unchecked_t cm_accepts;
|
|
+atomic_unchecked_t cm_disconnects;
|
|
+atomic_unchecked_t cm_closes;
|
|
+atomic_unchecked_t cm_connecteds;
|
|
+atomic_unchecked_t cm_connect_reqs;
|
|
+atomic_unchecked_t cm_rejects;
|
|
|
|
int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
|
|
{
|
|
@@ -1279,7 +1279,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
|
|
kfree(listener);
|
|
listener = NULL;
|
|
ret = 0;
|
|
- atomic_inc(&cm_listens_destroyed);
|
|
+ atomic_inc_unchecked(&cm_listens_destroyed);
|
|
} else {
|
|
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
|
|
}
|
|
@@ -1482,7 +1482,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
|
|
cm_node->rem_mac);
|
|
|
|
add_hte_node(cm_core, cm_node);
|
|
- atomic_inc(&cm_nodes_created);
|
|
+ atomic_inc_unchecked(&cm_nodes_created);
|
|
|
|
return cm_node;
|
|
}
|
|
@@ -1540,7 +1540,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
|
|
}
|
|
|
|
atomic_dec(&cm_core->node_cnt);
|
|
- atomic_inc(&cm_nodes_destroyed);
|
|
+ atomic_inc_unchecked(&cm_nodes_destroyed);
|
|
nesqp = cm_node->nesqp;
|
|
if (nesqp) {
|
|
nesqp->cm_node = NULL;
|
|
@@ -1604,7 +1604,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
|
|
|
|
static void drop_packet(struct sk_buff *skb)
|
|
{
|
|
- atomic_inc(&cm_accel_dropped_pkts);
|
|
+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
|
|
dev_kfree_skb_any(skb);
|
|
}
|
|
|
|
@@ -1667,7 +1667,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
|
|
{
|
|
|
|
int reset = 0; /* whether to send reset in case of err.. */
|
|
- atomic_inc(&cm_resets_recvd);
|
|
+ atomic_inc_unchecked(&cm_resets_recvd);
|
|
nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
|
|
" refcnt=%d\n", cm_node, cm_node->state,
|
|
atomic_read(&cm_node->ref_count));
|
|
@@ -2308,7 +2308,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
|
|
rem_ref_cm_node(cm_node->cm_core, cm_node);
|
|
return NULL;
|
|
}
|
|
- atomic_inc(&cm_loopbacks);
|
|
+ atomic_inc_unchecked(&cm_loopbacks);
|
|
loopbackremotenode->loopbackpartner = cm_node;
|
|
loopbackremotenode->tcp_cntxt.rcv_wscale =
|
|
NES_CM_DEFAULT_RCV_WND_SCALE;
|
|
@@ -2583,7 +2583,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
|
|
nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
|
|
else {
|
|
rem_ref_cm_node(cm_core, cm_node);
|
|
- atomic_inc(&cm_accel_dropped_pkts);
|
|
+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
|
|
dev_kfree_skb_any(skb);
|
|
}
|
|
break;
|
|
@@ -2890,7 +2890,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
|
|
|
|
if ((cm_id) && (cm_id->event_handler)) {
|
|
if (issue_disconn) {
|
|
- atomic_inc(&cm_disconnects);
|
|
+ atomic_inc_unchecked(&cm_disconnects);
|
|
cm_event.event = IW_CM_EVENT_DISCONNECT;
|
|
cm_event.status = disconn_status;
|
|
cm_event.local_addr = cm_id->local_addr;
|
|
@@ -2912,7 +2912,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
|
|
}
|
|
|
|
if (issue_close) {
|
|
- atomic_inc(&cm_closes);
|
|
+ atomic_inc_unchecked(&cm_closes);
|
|
nes_disconnect(nesqp, 1);
|
|
|
|
cm_id->provider_data = nesqp;
|
|
@@ -3048,7 +3048,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
|
|
|
|
nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
|
|
nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
|
|
- atomic_inc(&cm_accepts);
|
|
+ atomic_inc_unchecked(&cm_accepts);
|
|
|
|
nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
|
|
netdev_refcnt_read(nesvnic->netdev));
|
|
@@ -3250,7 +3250,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
|
|
struct nes_cm_core *cm_core;
|
|
u8 *start_buff;
|
|
|
|
- atomic_inc(&cm_rejects);
|
|
+ atomic_inc_unchecked(&cm_rejects);
|
|
cm_node = (struct nes_cm_node *)cm_id->provider_data;
|
|
loopback = cm_node->loopbackpartner;
|
|
cm_core = cm_node->cm_core;
|
|
@@ -3310,7 +3310,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
|
|
ntohl(cm_id->local_addr.sin_addr.s_addr),
|
|
ntohs(cm_id->local_addr.sin_port));
|
|
|
|
- atomic_inc(&cm_connects);
|
|
+ atomic_inc_unchecked(&cm_connects);
|
|
nesqp->active_conn = 1;
|
|
|
|
/* cache the cm_id in the qp */
|
|
@@ -3416,7 +3416,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
|
|
g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
|
|
return err;
|
|
}
|
|
- atomic_inc(&cm_listens_created);
|
|
+ atomic_inc_unchecked(&cm_listens_created);
|
|
}
|
|
|
|
cm_id->add_ref(cm_id);
|
|
@@ -3517,7 +3517,7 @@ static void cm_event_connected(struct nes_cm_event *event)
|
|
|
|
if (nesqp->destroyed)
|
|
return;
|
|
- atomic_inc(&cm_connecteds);
|
|
+ atomic_inc_unchecked(&cm_connecteds);
|
|
nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
|
|
" local port 0x%04X. jiffies = %lu.\n",
|
|
nesqp->hwqp.qp_id,
|
|
@@ -3704,7 +3704,7 @@ static void cm_event_reset(struct nes_cm_event *event)
|
|
|
|
cm_id->add_ref(cm_id);
|
|
ret = cm_id->event_handler(cm_id, &cm_event);
|
|
- atomic_inc(&cm_closes);
|
|
+ atomic_inc_unchecked(&cm_closes);
|
|
cm_event.event = IW_CM_EVENT_CLOSE;
|
|
cm_event.status = 0;
|
|
cm_event.provider_data = cm_id->provider_data;
|
|
@@ -3740,7 +3740,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
|
|
return;
|
|
cm_id = cm_node->cm_id;
|
|
|
|
- atomic_inc(&cm_connect_reqs);
|
|
+ atomic_inc_unchecked(&cm_connect_reqs);
|
|
nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
|
|
cm_node, cm_id, jiffies);
|
|
|
|
@@ -3780,7 +3780,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
|
|
return;
|
|
cm_id = cm_node->cm_id;
|
|
|
|
- atomic_inc(&cm_connect_reqs);
|
|
+ atomic_inc_unchecked(&cm_connect_reqs);
|
|
nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
|
|
cm_node, cm_id, jiffies);
|
|
|
|
diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
|
|
index 3ba7be3..c81f6ff 100644
|
|
--- a/drivers/infiniband/hw/nes/nes_mgt.c
|
|
+++ b/drivers/infiniband/hw/nes/nes_mgt.c
|
|
@@ -40,8 +40,8 @@
|
|
#include "nes.h"
|
|
#include "nes_mgt.h"
|
|
|
|
-atomic_t pau_qps_created;
|
|
-atomic_t pau_qps_destroyed;
|
|
+atomic_unchecked_t pau_qps_created;
|
|
+atomic_unchecked_t pau_qps_destroyed;
|
|
|
|
static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
|
|
{
|
|
@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
|
|
{
|
|
struct sk_buff *skb;
|
|
unsigned long flags;
|
|
- atomic_inc(&pau_qps_destroyed);
|
|
+ atomic_inc_unchecked(&pau_qps_destroyed);
|
|
|
|
/* Free packets that have not yet been forwarded */
|
|
/* Lock is acquired by skb_dequeue when removing the skb */
|
|
@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
|
|
cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
|
|
skb_queue_head_init(&nesqp->pau_list);
|
|
spin_lock_init(&nesqp->pau_lock);
|
|
- atomic_inc(&pau_qps_created);
|
|
+ atomic_inc_unchecked(&pau_qps_created);
|
|
nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
|
|
}
|
|
|
|
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
|
|
index f3a3ecf..57d311d 100644
|
|
--- a/drivers/infiniband/hw/nes/nes_nic.c
|
|
+++ b/drivers/infiniband/hw/nes/nes_nic.c
|
|
@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
|
|
target_stat_values[++index] = mh_detected;
|
|
target_stat_values[++index] = mh_pauses_sent;
|
|
target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
|
|
- target_stat_values[++index] = atomic_read(&cm_connects);
|
|
- target_stat_values[++index] = atomic_read(&cm_accepts);
|
|
- target_stat_values[++index] = atomic_read(&cm_disconnects);
|
|
- target_stat_values[++index] = atomic_read(&cm_connecteds);
|
|
- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
|
|
- target_stat_values[++index] = atomic_read(&cm_rejects);
|
|
- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
|
|
- target_stat_values[++index] = atomic_read(&qps_created);
|
|
- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
|
|
- target_stat_values[++index] = atomic_read(&qps_destroyed);
|
|
- target_stat_values[++index] = atomic_read(&cm_closes);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
|
|
target_stat_values[++index] = cm_packets_sent;
|
|
target_stat_values[++index] = cm_packets_bounced;
|
|
target_stat_values[++index] = cm_packets_created;
|
|
target_stat_values[++index] = cm_packets_received;
|
|
target_stat_values[++index] = cm_packets_dropped;
|
|
target_stat_values[++index] = cm_packets_retrans;
|
|
- target_stat_values[++index] = atomic_read(&cm_listens_created);
|
|
- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
|
|
target_stat_values[++index] = cm_backlog_drops;
|
|
- target_stat_values[++index] = atomic_read(&cm_loopbacks);
|
|
- target_stat_values[++index] = atomic_read(&cm_nodes_created);
|
|
- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
|
|
- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
|
|
- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
|
|
target_stat_values[++index] = nesadapter->free_4kpbl;
|
|
target_stat_values[++index] = nesadapter->free_256pbl;
|
|
target_stat_values[++index] = int_mod_timer_init;
|
|
target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
|
|
target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
|
|
target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
|
|
- target_stat_values[++index] = atomic_read(&pau_qps_created);
|
|
- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
|
|
+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
|
|
index daf70d3..1c949a4 100644
|
|
--- a/drivers/infiniband/hw/nes/nes_verbs.c
|
|
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
|
|
@@ -46,9 +46,9 @@
|
|
|
|
#include <rdma/ib_umem.h>
|
|
|
|
-atomic_t mod_qp_timouts;
|
|
-atomic_t qps_created;
|
|
-atomic_t sw_qps_destroyed;
|
|
+atomic_unchecked_t mod_qp_timouts;
|
|
+atomic_unchecked_t qps_created;
|
|
+atomic_unchecked_t sw_qps_destroyed;
|
|
|
|
static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
|
|
|
|
@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
|
|
if (init_attr->create_flags)
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
- atomic_inc(&qps_created);
|
|
+ atomic_inc_unchecked(&qps_created);
|
|
switch (init_attr->qp_type) {
|
|
case IB_QPT_RC:
|
|
if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
|
|
@@ -1462,7 +1462,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
|
|
struct iw_cm_event cm_event;
|
|
int ret = 0;
|
|
|
|
- atomic_inc(&sw_qps_destroyed);
|
|
+ atomic_inc_unchecked(&sw_qps_destroyed);
|
|
nesqp->destroyed = 1;
|
|
|
|
/* Blow away the connection if it exists. */
|
|
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
|
|
index dcff64f..6c7a7ae 100644
|
|
--- a/drivers/infiniband/hw/qib/qib.h
|
|
+++ b/drivers/infiniband/hw/qib/qib.h
|
|
@@ -51,6 +51,7 @@
|
|
#include <linux/completion.h>
|
|
#include <linux/kref.h>
|
|
#include <linux/sched.h>
|
|
+#include <linux/slab.h>
|
|
|
|
#include "qib_common.h"
|
|
#include "qib_verbs.h"
|
|
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
|
|
index da739d9d..da1c7f4 100644
|
|
--- a/drivers/input/gameport/gameport.c
|
|
+++ b/drivers/input/gameport/gameport.c
|
|
@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
|
|
*/
|
|
static void gameport_init_port(struct gameport *gameport)
|
|
{
|
|
- static atomic_t gameport_no = ATOMIC_INIT(0);
|
|
+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
|
|
|
|
__module_get(THIS_MODULE);
|
|
|
|
mutex_init(&gameport->drv_mutex);
|
|
device_initialize(&gameport->dev);
|
|
dev_set_name(&gameport->dev, "gameport%lu",
|
|
- (unsigned long)atomic_inc_return(&gameport_no) - 1);
|
|
+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
|
|
gameport->dev.bus = &gameport_bus;
|
|
gameport->dev.release = gameport_release_port;
|
|
if (gameport->parent)
|
|
diff --git a/drivers/input/input.c b/drivers/input/input.c
|
|
index d48d68b..c5d59d7 100644
|
|
--- a/drivers/input/input.c
|
|
+++ b/drivers/input/input.c
|
|
@@ -1822,7 +1822,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
|
|
*/
|
|
int input_register_device(struct input_dev *dev)
|
|
{
|
|
- static atomic_t input_no = ATOMIC_INIT(0);
|
|
+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
|
|
struct input_handler *handler;
|
|
const char *path;
|
|
int error;
|
|
@@ -1859,7 +1859,7 @@ int input_register_device(struct input_dev *dev)
|
|
dev->setkeycode = input_default_setkeycode;
|
|
|
|
dev_set_name(&dev->dev, "input%ld",
|
|
- (unsigned long) atomic_inc_return(&input_no) - 1);
|
|
+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
|
|
|
|
error = device_add(&dev->dev);
|
|
if (error)
|
|
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
|
|
index b8d8611..7a4a04b 100644
|
|
--- a/drivers/input/joystick/sidewinder.c
|
|
+++ b/drivers/input/joystick/sidewinder.c
|
|
@@ -30,6 +30,7 @@
|
|
#include <linux/kernel.h>
|
|
#include <linux/module.h>
|
|
#include <linux/slab.h>
|
|
+#include <linux/sched.h>
|
|
#include <linux/init.h>
|
|
#include <linux/input.h>
|
|
#include <linux/gameport.h>
|
|
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
|
|
index b4e8db8..f321095 100644
|
|
--- a/drivers/input/joystick/xpad.c
|
|
+++ b/drivers/input/joystick/xpad.c
|
|
@@ -714,7 +714,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
|
|
|
|
static int xpad_led_probe(struct usb_xpad *xpad)
|
|
{
|
|
- static atomic_t led_seq = ATOMIC_INIT(0);
|
|
+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
|
|
long led_no;
|
|
struct xpad_led *led;
|
|
struct led_classdev *led_cdev;
|
|
@@ -727,7 +727,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
|
|
if (!led)
|
|
return -ENOMEM;
|
|
|
|
- led_no = (long)atomic_inc_return(&led_seq) - 1;
|
|
+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
|
|
|
|
snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
|
|
led->xpad = xpad;
|
|
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
|
|
index 0110b5a..d3ad144 100644
|
|
--- a/drivers/input/mousedev.c
|
|
+++ b/drivers/input/mousedev.c
|
|
@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
|
|
|
|
spin_unlock_irq(&client->packet_lock);
|
|
|
|
- if (copy_to_user(buffer, data, count))
|
|
+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
|
|
return -EFAULT;
|
|
|
|
return count;
|
|
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
|
|
index d0f7533..fb8215b 100644
|
|
--- a/drivers/input/serio/serio.c
|
|
+++ b/drivers/input/serio/serio.c
|
|
@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
|
|
*/
|
|
static void serio_init_port(struct serio *serio)
|
|
{
|
|
- static atomic_t serio_no = ATOMIC_INIT(0);
|
|
+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
|
|
|
|
__module_get(THIS_MODULE);
|
|
|
|
@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
|
|
mutex_init(&serio->drv_mutex);
|
|
device_initialize(&serio->dev);
|
|
dev_set_name(&serio->dev, "serio%ld",
|
|
- (long)atomic_inc_return(&serio_no) - 1);
|
|
+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
|
|
serio->dev.bus = &serio_bus;
|
|
serio->dev.release = serio_release_port;
|
|
serio->dev.groups = serio_device_attr_groups;
|
|
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
|
|
index b902794..fc7b85b 100644
|
|
--- a/drivers/isdn/capi/capi.c
|
|
+++ b/drivers/isdn/capi/capi.c
|
|
@@ -83,8 +83,8 @@ struct capiminor {
|
|
|
|
struct capi20_appl *ap;
|
|
u32 ncci;
|
|
- atomic_t datahandle;
|
|
- atomic_t msgid;
|
|
+ atomic_unchecked_t datahandle;
|
|
+ atomic_unchecked_t msgid;
|
|
|
|
struct tty_port port;
|
|
int ttyinstop;
|
|
@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
|
|
capimsg_setu16(s, 2, mp->ap->applid);
|
|
capimsg_setu8 (s, 4, CAPI_DATA_B3);
|
|
capimsg_setu8 (s, 5, CAPI_RESP);
|
|
- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
|
|
+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
|
|
capimsg_setu32(s, 8, mp->ncci);
|
|
capimsg_setu16(s, 12, datahandle);
|
|
}
|
|
@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
|
|
mp->outbytes -= len;
|
|
spin_unlock_bh(&mp->outlock);
|
|
|
|
- datahandle = atomic_inc_return(&mp->datahandle);
|
|
+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
|
|
skb_push(skb, CAPI_DATA_B3_REQ_LEN);
|
|
memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
|
|
capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
|
|
capimsg_setu16(skb->data, 2, mp->ap->applid);
|
|
capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
|
|
capimsg_setu8 (skb->data, 5, CAPI_REQ);
|
|
- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
|
|
+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
|
|
capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
|
|
capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
|
|
capimsg_setu16(skb->data, 16, len); /* Data length */
|
|
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
|
|
index 821f7ac..28d4030 100644
|
|
--- a/drivers/isdn/hardware/avm/b1.c
|
|
+++ b/drivers/isdn/hardware/avm/b1.c
|
|
@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
|
|
}
|
|
if (left) {
|
|
if (t4file->user) {
|
|
- if (copy_from_user(buf, dp, left))
|
|
+ if (left > sizeof buf || copy_from_user(buf, dp, left))
|
|
return -EFAULT;
|
|
} else {
|
|
memcpy(buf, dp, left);
|
|
@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
|
|
}
|
|
if (left) {
|
|
if (config->user) {
|
|
- if (copy_from_user(buf, dp, left))
|
|
+ if (left > sizeof buf || copy_from_user(buf, dp, left))
|
|
return -EFAULT;
|
|
} else {
|
|
memcpy(buf, dp, left);
|
|
diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
|
|
index dd6b53a..19d9ee6 100644
|
|
--- a/drivers/isdn/hardware/eicon/divasync.h
|
|
+++ b/drivers/isdn/hardware/eicon/divasync.h
|
|
@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
|
|
} diva_didd_add_adapter_t;
|
|
typedef struct _diva_didd_remove_adapter {
|
|
IDI_CALL p_request;
|
|
-} diva_didd_remove_adapter_t;
|
|
+} __no_const diva_didd_remove_adapter_t;
|
|
typedef struct _diva_didd_read_adapter_array {
|
|
void *buffer;
|
|
dword length;
|
|
diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
|
|
index d303e65..28bcb7b 100644
|
|
--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
|
|
+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
|
|
@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
|
|
typedef struct _diva_os_idi_adapter_interface {
|
|
diva_init_card_proc_t cleanup_adapter_proc;
|
|
diva_cmd_card_proc_t cmd_proc;
|
|
-} diva_os_idi_adapter_interface_t;
|
|
+} __no_const diva_os_idi_adapter_interface_t;
|
|
|
|
typedef struct _diva_os_xdi_adapter {
|
|
struct list_head link;
|
|
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
|
|
index e74df7c..03a03ba 100644
|
|
--- a/drivers/isdn/icn/icn.c
|
|
+++ b/drivers/isdn/icn/icn.c
|
|
@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
|
|
if (count > len)
|
|
count = len;
|
|
if (user) {
|
|
- if (copy_from_user(msg, buf, count))
|
|
+ if (count > sizeof msg || copy_from_user(msg, buf, count))
|
|
return -EFAULT;
|
|
} else
|
|
memcpy(msg, buf, count);
|
|
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
|
|
index 34842e5..738e3d6 100644
|
|
--- a/drivers/lguest/core.c
|
|
+++ b/drivers/lguest/core.c
|
|
@@ -92,9 +92,17 @@ static __init int map_switcher(void)
|
|
* it's worked so far. The end address needs +1 because __get_vm_area
|
|
* allocates an extra guard page, so we need space for that.
|
|
*/
|
|
+
|
|
+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
|
|
+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
|
|
+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
|
|
+#else
|
|
switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
|
|
VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
|
|
+ (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
|
|
+#endif
|
|
+
|
|
if (!switcher_vma) {
|
|
err = -ENOMEM;
|
|
printk("lguest: could not map switcher pages high\n");
|
|
@@ -119,7 +127,7 @@ static __init int map_switcher(void)
|
|
* Now the Switcher is mapped at the right address, we can't fail!
|
|
* Copy in the compiled-in Switcher code (from x86/switcher_32.S).
|
|
*/
|
|
- memcpy(switcher_vma->addr, start_switcher_text,
|
|
+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
|
|
end_switcher_text - start_switcher_text);
|
|
|
|
printk(KERN_INFO "lguest: mapped switcher at %p\n",
|
|
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
|
|
index 39809035..ce25c5e 100644
|
|
--- a/drivers/lguest/x86/core.c
|
|
+++ b/drivers/lguest/x86/core.c
|
|
@@ -59,7 +59,7 @@ static struct {
|
|
/* Offset from where switcher.S was compiled to where we've copied it */
|
|
static unsigned long switcher_offset(void)
|
|
{
|
|
- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
|
|
+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
|
|
}
|
|
|
|
/* This cpu's struct lguest_pages. */
|
|
@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
|
|
* These copies are pretty cheap, so we do them unconditionally: */
|
|
/* Save the current Host top-level page directory.
|
|
*/
|
|
+
|
|
+#ifdef CONFIG_PAX_PER_CPU_PGD
|
|
+ pages->state.host_cr3 = read_cr3();
|
|
+#else
|
|
pages->state.host_cr3 = __pa(current->mm->pgd);
|
|
+#endif
|
|
+
|
|
/*
|
|
* Set up the Guest's page tables to see this CPU's pages (and no
|
|
* other CPU's pages).
|
|
@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
|
|
* compiled-in switcher code and the high-mapped copy we just made.
|
|
*/
|
|
for (i = 0; i < IDT_ENTRIES; i++)
|
|
- default_idt_entries[i] += switcher_offset();
|
|
+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
|
|
|
|
/*
|
|
* Set up the Switcher's per-cpu areas.
|
|
@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
|
|
* it will be undisturbed when we switch. To change %cs and jump we
|
|
* need this structure to feed to Intel's "lcall" instruction.
|
|
*/
|
|
- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
|
|
+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
|
|
lguest_entry.segment = LGUEST_CS;
|
|
|
|
/*
|
|
diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
|
|
index 40634b0..4f5855e 100644
|
|
--- a/drivers/lguest/x86/switcher_32.S
|
|
+++ b/drivers/lguest/x86/switcher_32.S
|
|
@@ -87,6 +87,7 @@
|
|
#include <asm/page.h>
|
|
#include <asm/segment.h>
|
|
#include <asm/lguest.h>
|
|
+#include <asm/processor-flags.h>
|
|
|
|
// We mark the start of the code to copy
|
|
// It's placed in .text tho it's never run here
|
|
@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
|
|
// Changes type when we load it: damn Intel!
|
|
// For after we switch over our page tables
|
|
// That entry will be read-only: we'd crash.
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ mov %cr0, %edx
|
|
+ xor $X86_CR0_WP, %edx
|
|
+ mov %edx, %cr0
|
|
+#endif
|
|
+
|
|
movl $(GDT_ENTRY_TSS*8), %edx
|
|
ltr %dx
|
|
|
|
@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
|
|
// Let's clear it again for our return.
|
|
// The GDT descriptor of the Host
|
|
// Points to the table after two "size" bytes
|
|
- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
|
|
+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
|
|
// Clear "used" from type field (byte 5, bit 2)
|
|
- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
|
|
+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ mov %cr0, %eax
|
|
+ xor $X86_CR0_WP, %eax
|
|
+ mov %eax, %cr0
|
|
+#endif
|
|
|
|
// Once our page table's switched, the Guest is live!
|
|
// The Host fades as we run this final step.
|
|
@@ -295,13 +309,12 @@ deliver_to_host:
|
|
// I consulted gcc, and it gave
|
|
// These instructions, which I gladly credit:
|
|
leal (%edx,%ebx,8), %eax
|
|
- movzwl (%eax),%edx
|
|
- movl 4(%eax), %eax
|
|
- xorw %ax, %ax
|
|
- orl %eax, %edx
|
|
+ movl 4(%eax), %edx
|
|
+ movw (%eax), %dx
|
|
// Now the address of the handler's in %edx
|
|
// We call it now: its "iret" drops us home.
|
|
- jmp *%edx
|
|
+ ljmp $__KERNEL_CS, $1f
|
|
+1: jmp *%edx
|
|
|
|
// Every interrupt can come to us here
|
|
// But we must truly tell each apart.
|
|
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
|
|
index 20e5c2c..9e849a9 100644
|
|
--- a/drivers/macintosh/macio_asic.c
|
|
+++ b/drivers/macintosh/macio_asic.c
|
|
@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
|
|
* MacIO is matched against any Apple ID, it's probe() function
|
|
* will then decide wether it applies or not
|
|
*/
|
|
-static const struct pci_device_id __devinitdata pci_ids [] = { {
|
|
+static const struct pci_device_id __devinitconst pci_ids [] = { {
|
|
.vendor = PCI_VENDOR_ID_APPLE,
|
|
.device = PCI_ANY_ID,
|
|
.subvendor = PCI_ANY_ID,
|
|
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
|
|
index 17e2b47..bcbeec4 100644
|
|
--- a/drivers/md/bitmap.c
|
|
+++ b/drivers/md/bitmap.c
|
|
@@ -1823,7 +1823,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
|
|
chunk_kb ? "KB" : "B");
|
|
if (bitmap->file) {
|
|
seq_printf(seq, ", file: ");
|
|
- seq_path(seq, &bitmap->file->f_path, " \t\n");
|
|
+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
|
|
}
|
|
|
|
seq_printf(seq, "\n");
|
|
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
|
|
index f011d4b..3888bd4 100644
|
|
--- a/drivers/md/dm-ioctl.c
|
|
+++ b/drivers/md/dm-ioctl.c
|
|
@@ -1598,7 +1598,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
|
|
cmd == DM_LIST_VERSIONS_CMD)
|
|
return 0;
|
|
|
|
- if ((cmd == DM_DEV_CREATE_CMD)) {
|
|
+ if (cmd == DM_DEV_CREATE_CMD) {
|
|
if (!*param->name) {
|
|
DMWARN("name not supplied when creating device");
|
|
return -EINVAL;
|
|
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
|
|
index f8e0195..b09231b 100644
|
|
--- a/drivers/md/dm-raid1.c
|
|
+++ b/drivers/md/dm-raid1.c
|
|
@@ -40,7 +40,7 @@ enum dm_raid1_error {
|
|
|
|
struct mirror {
|
|
struct mirror_set *ms;
|
|
- atomic_t error_count;
|
|
+ atomic_unchecked_t error_count;
|
|
unsigned long error_type;
|
|
struct dm_dev *dev;
|
|
sector_t offset;
|
|
@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
|
|
struct mirror *m;
|
|
|
|
for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
|
|
- if (!atomic_read(&m->error_count))
|
|
+ if (!atomic_read_unchecked(&m->error_count))
|
|
return m;
|
|
|
|
return NULL;
|
|
@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
|
|
* simple way to tell if a device has encountered
|
|
* errors.
|
|
*/
|
|
- atomic_inc(&m->error_count);
|
|
+ atomic_inc_unchecked(&m->error_count);
|
|
|
|
if (test_and_set_bit(error_type, &m->error_type))
|
|
return;
|
|
@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
|
|
struct mirror *m = get_default_mirror(ms);
|
|
|
|
do {
|
|
- if (likely(!atomic_read(&m->error_count)))
|
|
+ if (likely(!atomic_read_unchecked(&m->error_count)))
|
|
return m;
|
|
|
|
if (m-- == ms->mirror)
|
|
@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
|
|
{
|
|
struct mirror *default_mirror = get_default_mirror(m->ms);
|
|
|
|
- return !atomic_read(&default_mirror->error_count);
|
|
+ return !atomic_read_unchecked(&default_mirror->error_count);
|
|
}
|
|
|
|
static int mirror_available(struct mirror_set *ms, struct bio *bio)
|
|
@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
|
|
*/
|
|
if (likely(region_in_sync(ms, region, 1)))
|
|
m = choose_mirror(ms, bio->bi_sector);
|
|
- else if (m && atomic_read(&m->error_count))
|
|
+ else if (m && atomic_read_unchecked(&m->error_count))
|
|
m = NULL;
|
|
|
|
if (likely(m))
|
|
@@ -947,7 +947,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
|
|
}
|
|
|
|
ms->mirror[mirror].ms = ms;
|
|
- atomic_set(&(ms->mirror[mirror].error_count), 0);
|
|
+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
|
|
ms->mirror[mirror].error_type = 0;
|
|
ms->mirror[mirror].offset = offset;
|
|
|
|
@@ -1361,7 +1361,7 @@ static void mirror_resume(struct dm_target *ti)
|
|
*/
|
|
static char device_status_char(struct mirror *m)
|
|
{
|
|
- if (!atomic_read(&(m->error_count)))
|
|
+ if (!atomic_read_unchecked(&(m->error_count)))
|
|
return 'A';
|
|
|
|
return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
|
|
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
|
|
index 35c94ff..20d4c17 100644
|
|
--- a/drivers/md/dm-stripe.c
|
|
+++ b/drivers/md/dm-stripe.c
|
|
@@ -20,7 +20,7 @@ struct stripe {
|
|
struct dm_dev *dev;
|
|
sector_t physical_start;
|
|
|
|
- atomic_t error_count;
|
|
+ atomic_unchecked_t error_count;
|
|
};
|
|
|
|
struct stripe_c {
|
|
@@ -193,7 +193,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
|
kfree(sc);
|
|
return r;
|
|
}
|
|
- atomic_set(&(sc->stripe[i].error_count), 0);
|
|
+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
|
|
}
|
|
|
|
ti->private = sc;
|
|
@@ -315,7 +315,7 @@ static int stripe_status(struct dm_target *ti,
|
|
DMEMIT("%d ", sc->stripes);
|
|
for (i = 0; i < sc->stripes; i++) {
|
|
DMEMIT("%s ", sc->stripe[i].dev->name);
|
|
- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
|
|
+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
|
|
'D' : 'A';
|
|
}
|
|
buffer[i] = '\0';
|
|
@@ -362,8 +362,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
|
|
*/
|
|
for (i = 0; i < sc->stripes; i++)
|
|
if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
|
|
- atomic_inc(&(sc->stripe[i].error_count));
|
|
- if (atomic_read(&(sc->stripe[i].error_count)) <
|
|
+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
|
|
+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
|
|
DM_IO_ERROR_THRESHOLD)
|
|
schedule_work(&sc->trigger_event);
|
|
}
|
|
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
|
|
index 43e19b7..e42dea2 100644
|
|
--- a/drivers/md/dm-table.c
|
|
+++ b/drivers/md/dm-table.c
|
|
@@ -395,7 +395,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
|
|
if (!dev_size)
|
|
return 0;
|
|
|
|
- if ((start >= dev_size) || (start + len > dev_size)) {
|
|
+ if ((start >= dev_size) || (len > dev_size - start)) {
|
|
DMWARN("%s: %s too small for target: "
|
|
"start=%llu, len=%llu, dev_size=%llu",
|
|
dm_device_name(ti->table->md), bdevname(bdev, b),
|
|
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
|
|
index 737d388..811ad5a 100644
|
|
--- a/drivers/md/dm-thin-metadata.c
|
|
+++ b/drivers/md/dm-thin-metadata.c
|
|
@@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
|
|
|
|
pmd->info.tm = tm;
|
|
pmd->info.levels = 2;
|
|
- pmd->info.value_type.context = pmd->data_sm;
|
|
+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
|
|
pmd->info.value_type.size = sizeof(__le64);
|
|
pmd->info.value_type.inc = data_block_inc;
|
|
pmd->info.value_type.dec = data_block_dec;
|
|
@@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
|
|
|
|
pmd->bl_info.tm = tm;
|
|
pmd->bl_info.levels = 1;
|
|
- pmd->bl_info.value_type.context = pmd->data_sm;
|
|
+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
|
|
pmd->bl_info.value_type.size = sizeof(__le64);
|
|
pmd->bl_info.value_type.inc = data_block_inc;
|
|
pmd->bl_info.value_type.dec = data_block_dec;
|
|
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
|
|
index 75886b0..535bc89 100644
|
|
--- a/drivers/md/dm.c
|
|
+++ b/drivers/md/dm.c
|
|
@@ -176,9 +176,9 @@ struct mapped_device {
|
|
/*
|
|
* Event handling.
|
|
*/
|
|
- atomic_t event_nr;
|
|
+ atomic_unchecked_t event_nr;
|
|
wait_queue_head_t eventq;
|
|
- atomic_t uevent_seq;
|
|
+ atomic_unchecked_t uevent_seq;
|
|
struct list_head uevent_list;
|
|
spinlock_t uevent_lock; /* Protect access to uevent_list */
|
|
|
|
@@ -1871,8 +1871,8 @@ static struct mapped_device *alloc_dev(int minor)
|
|
rwlock_init(&md->map_lock);
|
|
atomic_set(&md->holders, 1);
|
|
atomic_set(&md->open_count, 0);
|
|
- atomic_set(&md->event_nr, 0);
|
|
- atomic_set(&md->uevent_seq, 0);
|
|
+ atomic_set_unchecked(&md->event_nr, 0);
|
|
+ atomic_set_unchecked(&md->uevent_seq, 0);
|
|
INIT_LIST_HEAD(&md->uevent_list);
|
|
spin_lock_init(&md->uevent_lock);
|
|
|
|
@@ -2007,7 +2007,7 @@ static void event_callback(void *context)
|
|
|
|
dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
|
|
|
|
- atomic_inc(&md->event_nr);
|
|
+ atomic_inc_unchecked(&md->event_nr);
|
|
wake_up(&md->eventq);
|
|
}
|
|
|
|
@@ -2648,18 +2648,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
|
|
|
|
uint32_t dm_next_uevent_seq(struct mapped_device *md)
|
|
{
|
|
- return atomic_add_return(1, &md->uevent_seq);
|
|
+ return atomic_add_return_unchecked(1, &md->uevent_seq);
|
|
}
|
|
|
|
uint32_t dm_get_event_nr(struct mapped_device *md)
|
|
{
|
|
- return atomic_read(&md->event_nr);
|
|
+ return atomic_read_unchecked(&md->event_nr);
|
|
}
|
|
|
|
int dm_wait_event(struct mapped_device *md, int event_nr)
|
|
{
|
|
return wait_event_interruptible(md->eventq,
|
|
- (event_nr != atomic_read(&md->event_nr)));
|
|
+ (event_nr != atomic_read_unchecked(&md->event_nr)));
|
|
}
|
|
|
|
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
|
|
diff --git a/drivers/md/md.c b/drivers/md/md.c
|
|
index 83dba06..b55c659 100644
|
|
--- a/drivers/md/md.c
|
|
+++ b/drivers/md/md.c
|
|
@@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
|
|
* start build, activate spare
|
|
*/
|
|
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
|
|
-static atomic_t md_event_count;
|
|
+static atomic_unchecked_t md_event_count;
|
|
void md_new_event(struct mddev *mddev)
|
|
{
|
|
- atomic_inc(&md_event_count);
|
|
+ atomic_inc_unchecked(&md_event_count);
|
|
wake_up(&md_event_waiters);
|
|
}
|
|
EXPORT_SYMBOL_GPL(md_new_event);
|
|
@@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
|
|
*/
|
|
static void md_new_event_inintr(struct mddev *mddev)
|
|
{
|
|
- atomic_inc(&md_event_count);
|
|
+ atomic_inc_unchecked(&md_event_count);
|
|
wake_up(&md_event_waiters);
|
|
}
|
|
|
|
@@ -1533,7 +1533,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
|
|
|
|
rdev->preferred_minor = 0xffff;
|
|
rdev->data_offset = le64_to_cpu(sb->data_offset);
|
|
- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
|
|
+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
|
|
|
|
rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
|
|
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
|
|
@@ -1752,7 +1752,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
|
|
else
|
|
sb->resync_offset = cpu_to_le64(0);
|
|
|
|
- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
|
|
+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
|
|
|
|
sb->raid_disks = cpu_to_le32(mddev->raid_disks);
|
|
sb->size = cpu_to_le64(mddev->dev_sectors);
|
|
@@ -2698,7 +2698,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
|
|
static ssize_t
|
|
errors_show(struct md_rdev *rdev, char *page)
|
|
{
|
|
- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
|
|
+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
|
|
}
|
|
|
|
static ssize_t
|
|
@@ -2707,7 +2707,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
|
|
char *e;
|
|
unsigned long n = simple_strtoul(buf, &e, 10);
|
|
if (*buf && (*e == 0 || *e == '\n')) {
|
|
- atomic_set(&rdev->corrected_errors, n);
|
|
+ atomic_set_unchecked(&rdev->corrected_errors, n);
|
|
return len;
|
|
}
|
|
return -EINVAL;
|
|
@@ -3096,8 +3096,8 @@ int md_rdev_init(struct md_rdev *rdev)
|
|
rdev->sb_loaded = 0;
|
|
rdev->bb_page = NULL;
|
|
atomic_set(&rdev->nr_pending, 0);
|
|
- atomic_set(&rdev->read_errors, 0);
|
|
- atomic_set(&rdev->corrected_errors, 0);
|
|
+ atomic_set_unchecked(&rdev->read_errors, 0);
|
|
+ atomic_set_unchecked(&rdev->corrected_errors, 0);
|
|
|
|
INIT_LIST_HEAD(&rdev->same_set);
|
|
init_waitqueue_head(&rdev->blocked_wait);
|
|
@@ -6756,7 +6756,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
|
|
|
|
spin_unlock(&pers_lock);
|
|
seq_printf(seq, "\n");
|
|
- seq->poll_event = atomic_read(&md_event_count);
|
|
+ seq->poll_event = atomic_read_unchecked(&md_event_count);
|
|
return 0;
|
|
}
|
|
if (v == (void*)2) {
|
|
@@ -6859,7 +6859,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
|
|
return error;
|
|
|
|
seq = file->private_data;
|
|
- seq->poll_event = atomic_read(&md_event_count);
|
|
+ seq->poll_event = atomic_read_unchecked(&md_event_count);
|
|
return error;
|
|
}
|
|
|
|
@@ -6873,7 +6873,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
|
|
/* always allow read */
|
|
mask = POLLIN | POLLRDNORM;
|
|
|
|
- if (seq->poll_event != atomic_read(&md_event_count))
|
|
+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
|
|
mask |= POLLERR | POLLPRI;
|
|
return mask;
|
|
}
|
|
@@ -6917,7 +6917,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
|
|
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
|
|
curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
|
|
(int)part_stat_read(&disk->part0, sectors[1]) -
|
|
- atomic_read(&disk->sync_io);
|
|
+ atomic_read_unchecked(&disk->sync_io);
|
|
/* sync IO will cause sync_io to increase before the disk_stats
|
|
* as sync_io is counted when a request starts, and
|
|
* disk_stats is counted when it completes.
|
|
diff --git a/drivers/md/md.h b/drivers/md/md.h
|
|
index 1c2063c..9639970 100644
|
|
--- a/drivers/md/md.h
|
|
+++ b/drivers/md/md.h
|
|
@@ -93,13 +93,13 @@ struct md_rdev {
|
|
* only maintained for arrays that
|
|
* support hot removal
|
|
*/
|
|
- atomic_t read_errors; /* number of consecutive read errors that
|
|
+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
|
|
* we have tried to ignore.
|
|
*/
|
|
struct timespec last_read_error; /* monotonic time since our
|
|
* last read error
|
|
*/
|
|
- atomic_t corrected_errors; /* number of corrected read errors,
|
|
+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
|
|
* for reporting to userspace and storing
|
|
* in superblock.
|
|
*/
|
|
@@ -429,7 +429,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
|
|
|
|
static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
|
|
{
|
|
- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
|
|
+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
|
|
}
|
|
|
|
struct md_personality
|
|
diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
|
|
index fc90c11..c8cd9a9 100644
|
|
--- a/drivers/md/persistent-data/dm-space-map-checker.c
|
|
+++ b/drivers/md/persistent-data/dm-space-map-checker.c
|
|
@@ -167,7 +167,7 @@ static int ca_commit(struct count_array *old, struct count_array *new)
|
|
/*----------------------------------------------------------------*/
|
|
|
|
struct sm_checker {
|
|
- struct dm_space_map sm;
|
|
+ dm_space_map_no_const sm;
|
|
|
|
struct count_array old_counts;
|
|
struct count_array counts;
|
|
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
|
|
index 3d0ed53..35dc592 100644
|
|
--- a/drivers/md/persistent-data/dm-space-map-disk.c
|
|
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
|
|
@@ -23,7 +23,7 @@
|
|
* Space map interface.
|
|
*/
|
|
struct sm_disk {
|
|
- struct dm_space_map sm;
|
|
+ dm_space_map_no_const sm;
|
|
|
|
struct ll_disk ll;
|
|
struct ll_disk old_ll;
|
|
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
|
|
index e89ae5e..062e4c2 100644
|
|
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
|
|
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
|
|
@@ -43,7 +43,7 @@ struct block_op {
|
|
};
|
|
|
|
struct sm_metadata {
|
|
- struct dm_space_map sm;
|
|
+ dm_space_map_no_const sm;
|
|
|
|
struct ll_disk ll;
|
|
struct ll_disk old_ll;
|
|
diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
|
|
index 1cbfc6b..56e1dbb 100644
|
|
--- a/drivers/md/persistent-data/dm-space-map.h
|
|
+++ b/drivers/md/persistent-data/dm-space-map.h
|
|
@@ -60,6 +60,7 @@ struct dm_space_map {
|
|
int (*root_size)(struct dm_space_map *sm, size_t *result);
|
|
int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
|
|
};
|
|
+typedef struct dm_space_map __no_const dm_space_map_no_const;
|
|
|
|
/*----------------------------------------------------------------*/
|
|
|
|
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
|
|
index 75e66c6..1799e72 100644
|
|
--- a/drivers/md/raid1.c
|
|
+++ b/drivers/md/raid1.c
|
|
@@ -1699,7 +1699,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
|
|
if (r1_sync_page_io(rdev, sect, s,
|
|
bio->bi_io_vec[idx].bv_page,
|
|
READ) != 0)
|
|
- atomic_add(s, &rdev->corrected_errors);
|
|
+ atomic_add_unchecked(s, &rdev->corrected_errors);
|
|
}
|
|
sectors -= s;
|
|
sect += s;
|
|
@@ -1919,7 +1919,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
|
|
test_bit(In_sync, &rdev->flags)) {
|
|
if (r1_sync_page_io(rdev, sect, s,
|
|
conf->tmppage, READ)) {
|
|
- atomic_add(s, &rdev->corrected_errors);
|
|
+ atomic_add_unchecked(s, &rdev->corrected_errors);
|
|
printk(KERN_INFO
|
|
"md/raid1:%s: read error corrected "
|
|
"(%d sectors at %llu on %s)\n",
|
|
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
|
|
index 149426c..f4f2d65 100644
|
|
--- a/drivers/md/raid10.c
|
|
+++ b/drivers/md/raid10.c
|
|
@@ -1707,7 +1707,7 @@ static void end_sync_read(struct bio *bio, int error)
|
|
/* The write handler will notice the lack of
|
|
* R10BIO_Uptodate and record any errors etc
|
|
*/
|
|
- atomic_add(r10_bio->sectors,
|
|
+ atomic_add_unchecked(r10_bio->sectors,
|
|
&conf->mirrors[d].rdev->corrected_errors);
|
|
|
|
/* for reconstruct, we always reschedule after a read.
|
|
@@ -2062,7 +2062,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
|
|
{
|
|
struct timespec cur_time_mon;
|
|
unsigned long hours_since_last;
|
|
- unsigned int read_errors = atomic_read(&rdev->read_errors);
|
|
+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
|
|
|
|
ktime_get_ts(&cur_time_mon);
|
|
|
|
@@ -2084,9 +2084,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
|
|
* overflowing the shift of read_errors by hours_since_last.
|
|
*/
|
|
if (hours_since_last >= 8 * sizeof(read_errors))
|
|
- atomic_set(&rdev->read_errors, 0);
|
|
+ atomic_set_unchecked(&rdev->read_errors, 0);
|
|
else
|
|
- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
|
|
+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
|
|
}
|
|
|
|
static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
|
|
@@ -2140,8 +2140,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
|
|
return;
|
|
|
|
check_decay_read_errors(mddev, rdev);
|
|
- atomic_inc(&rdev->read_errors);
|
|
- if (atomic_read(&rdev->read_errors) > max_read_errors) {
|
|
+ atomic_inc_unchecked(&rdev->read_errors);
|
|
+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
|
|
char b[BDEVNAME_SIZE];
|
|
bdevname(rdev->bdev, b);
|
|
|
|
@@ -2149,7 +2149,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
|
|
"md/raid10:%s: %s: Raid device exceeded "
|
|
"read_error threshold [cur %d:max %d]\n",
|
|
mdname(mddev), b,
|
|
- atomic_read(&rdev->read_errors), max_read_errors);
|
|
+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
|
|
printk(KERN_NOTICE
|
|
"md/raid10:%s: %s: Failing raid device\n",
|
|
mdname(mddev), b);
|
|
@@ -2300,7 +2300,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
|
|
(unsigned long long)(
|
|
sect + rdev->data_offset),
|
|
bdevname(rdev->bdev, b));
|
|
- atomic_add(s, &rdev->corrected_errors);
|
|
+ atomic_add_unchecked(s, &rdev->corrected_errors);
|
|
}
|
|
|
|
rdev_dec_pending(rdev, mddev);
|
|
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
|
|
index 7a218e8..a7eeb41 100644
|
|
--- a/drivers/md/raid5.c
|
|
+++ b/drivers/md/raid5.c
|
|
@@ -1699,18 +1699,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
|
|
(unsigned long long)(sh->sector
|
|
+ rdev->data_offset),
|
|
bdevname(rdev->bdev, b));
|
|
- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
|
|
+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
|
|
clear_bit(R5_ReadError, &sh->dev[i].flags);
|
|
clear_bit(R5_ReWrite, &sh->dev[i].flags);
|
|
}
|
|
- if (atomic_read(&rdev->read_errors))
|
|
- atomic_set(&rdev->read_errors, 0);
|
|
+ if (atomic_read_unchecked(&rdev->read_errors))
|
|
+ atomic_set_unchecked(&rdev->read_errors, 0);
|
|
} else {
|
|
const char *bdn = bdevname(rdev->bdev, b);
|
|
int retry = 0;
|
|
|
|
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
|
|
- atomic_inc(&rdev->read_errors);
|
|
+ atomic_inc_unchecked(&rdev->read_errors);
|
|
if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
|
|
printk_ratelimited(
|
|
KERN_WARNING
|
|
@@ -1739,7 +1739,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
|
|
(unsigned long long)(sh->sector
|
|
+ rdev->data_offset),
|
|
bdn);
|
|
- else if (atomic_read(&rdev->read_errors)
|
|
+ else if (atomic_read_unchecked(&rdev->read_errors)
|
|
> conf->max_nr_stripes)
|
|
printk(KERN_WARNING
|
|
"md/raid:%s: Too many read errors, failing device %s.\n",
|
|
diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
|
|
index d88c4aa..17c80b1 100644
|
|
--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
|
|
+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
|
|
@@ -1679,7 +1679,7 @@ static struct ddb_info ddb_v6 = {
|
|
.subvendor = _subvend, .subdevice = _subdev, \
|
|
.driver_data = (unsigned long)&_driverdata }
|
|
|
|
-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
|
|
+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
|
|
DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
|
|
DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
|
|
DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
|
|
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
|
|
index 835e7b8..23e3399 100644
|
|
--- a/drivers/media/dvb/dvb-core/dvb_demux.h
|
|
+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
|
|
@@ -160,7 +160,7 @@ struct dvb_demux_feed {
|
|
union {
|
|
dmx_ts_cb ts;
|
|
dmx_section_cb sec;
|
|
- } cb;
|
|
+ } __no_const cb;
|
|
|
|
union {
|
|
dmx_ts_data_ready_cb ts;
|
|
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
|
|
index 39eab73..60033e7 100644
|
|
--- a/drivers/media/dvb/dvb-core/dvbdev.c
|
|
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
|
|
@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
|
|
const struct dvb_device *template, void *priv, int type)
|
|
{
|
|
struct dvb_device *dvbdev;
|
|
- struct file_operations *dvbdevfops;
|
|
+ file_operations_no_const *dvbdevfops;
|
|
struct device *clsdev;
|
|
int minor;
|
|
int id;
|
|
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
|
|
index 3940bb0..fb3952a 100644
|
|
--- a/drivers/media/dvb/dvb-usb/cxusb.c
|
|
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
|
|
@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
|
|
|
|
struct dib0700_adapter_state {
|
|
int (*set_param_save) (struct dvb_frontend *);
|
|
-};
|
|
+} __no_const;
|
|
|
|
static int dib7070_set_param_override(struct dvb_frontend *fe)
|
|
{
|
|
diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
|
|
index 451c5a7..649f711 100644
|
|
--- a/drivers/media/dvb/dvb-usb/dw2102.c
|
|
+++ b/drivers/media/dvb/dvb-usb/dw2102.c
|
|
@@ -95,7 +95,7 @@ struct su3000_state {
|
|
|
|
struct s6x0_state {
|
|
int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/* debug */
|
|
static int dvb_usb_dw2102_debug;
|
|
diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
|
|
index 404f63a..4796533 100644
|
|
--- a/drivers/media/dvb/frontends/dib3000.h
|
|
+++ b/drivers/media/dvb/frontends/dib3000.h
|
|
@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
|
|
int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
|
|
int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
|
|
int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
|
|
-};
|
|
+} __no_const;
|
|
|
|
#if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
|
|
extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
|
|
diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
|
|
index 7539a5d..06531a6 100644
|
|
--- a/drivers/media/dvb/ngene/ngene-cards.c
|
|
+++ b/drivers/media/dvb/ngene/ngene-cards.c
|
|
@@ -478,7 +478,7 @@ static struct ngene_info ngene_info_m780 = {
|
|
|
|
/****************************************************************************/
|
|
|
|
-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
|
|
+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
|
|
NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
|
|
NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
|
|
NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
|
|
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
|
|
index 16a089f..ab1667d 100644
|
|
--- a/drivers/media/radio/radio-cadet.c
|
|
+++ b/drivers/media/radio/radio-cadet.c
|
|
@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
|
|
unsigned char readbuf[RDS_BUFFER];
|
|
int i = 0;
|
|
|
|
+ if (count > RDS_BUFFER)
|
|
+ return -EFAULT;
|
|
mutex_lock(&dev->lock);
|
|
if (dev->rdsstat == 0) {
|
|
dev->rdsstat = 1;
|
|
diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
|
|
index 9cde353..8c6a1c3 100644
|
|
--- a/drivers/media/video/au0828/au0828.h
|
|
+++ b/drivers/media/video/au0828/au0828.h
|
|
@@ -191,7 +191,7 @@ struct au0828_dev {
|
|
|
|
/* I2C */
|
|
struct i2c_adapter i2c_adap;
|
|
- struct i2c_algorithm i2c_algo;
|
|
+ i2c_algorithm_no_const i2c_algo;
|
|
struct i2c_client i2c_client;
|
|
u32 i2c_rc;
|
|
|
|
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
|
|
index 04bf662..e0ac026 100644
|
|
--- a/drivers/media/video/cx88/cx88-alsa.c
|
|
+++ b/drivers/media/video/cx88/cx88-alsa.c
|
|
@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
|
|
* Only boards with eeprom and byte 1 at eeprom=1 have it
|
|
*/
|
|
|
|
-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
|
|
+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
|
|
{0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
|
|
{0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
|
|
{0, }
|
|
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
|
|
index 89f354e..0d0b7c1 100644
|
|
--- a/drivers/media/video/omap/omap_vout.c
|
|
+++ b/drivers/media/video/omap/omap_vout.c
|
|
@@ -64,7 +64,6 @@ enum omap_vout_channels {
|
|
OMAP_VIDEO2,
|
|
};
|
|
|
|
-static struct videobuf_queue_ops video_vbq_ops;
|
|
/* Variables configurable through module params*/
|
|
static u32 video1_numbuffers = 3;
|
|
static u32 video2_numbuffers = 3;
|
|
@@ -1002,6 +1001,12 @@ static int omap_vout_open(struct file *file)
|
|
{
|
|
struct videobuf_queue *q;
|
|
struct omap_vout_device *vout = NULL;
|
|
+ static struct videobuf_queue_ops video_vbq_ops = {
|
|
+ .buf_setup = omap_vout_buffer_setup,
|
|
+ .buf_prepare = omap_vout_buffer_prepare,
|
|
+ .buf_release = omap_vout_buffer_release,
|
|
+ .buf_queue = omap_vout_buffer_queue,
|
|
+ };
|
|
|
|
vout = video_drvdata(file);
|
|
v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
|
|
@@ -1019,10 +1024,6 @@ static int omap_vout_open(struct file *file)
|
|
vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
|
|
|
|
q = &vout->vbq;
|
|
- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
|
|
- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
|
|
- video_vbq_ops.buf_release = omap_vout_buffer_release;
|
|
- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
|
|
spin_lock_init(&vout->vbq_lock);
|
|
|
|
videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
|
|
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
|
|
index 305e6aa..0143317 100644
|
|
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
|
|
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
|
|
@@ -196,7 +196,7 @@ struct pvr2_hdw {
|
|
|
|
/* I2C stuff */
|
|
struct i2c_adapter i2c_adap;
|
|
- struct i2c_algorithm i2c_algo;
|
|
+ i2c_algorithm_no_const i2c_algo;
|
|
pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
|
|
int i2c_cx25840_hack_state;
|
|
int i2c_linked;
|
|
diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
|
|
index 02194c0..091733b 100644
|
|
--- a/drivers/media/video/timblogiw.c
|
|
+++ b/drivers/media/video/timblogiw.c
|
|
@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
|
|
|
|
/* Platform device functions */
|
|
|
|
-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
|
|
+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
|
|
.vidioc_querycap = timblogiw_querycap,
|
|
.vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
|
|
.vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
|
|
@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
|
|
.vidioc_enum_framesizes = timblogiw_enum_framesizes,
|
|
};
|
|
|
|
-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
|
|
+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
|
|
.owner = THIS_MODULE,
|
|
.open = timblogiw_open,
|
|
.release = timblogiw_close,
|
|
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
|
|
index 551262e..7551198 100644
|
|
--- a/drivers/message/fusion/mptsas.c
|
|
+++ b/drivers/message/fusion/mptsas.c
|
|
@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
|
|
return 0;
|
|
}
|
|
|
|
+static inline void
|
|
+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
|
|
+{
|
|
+ if (phy_info->port_details) {
|
|
+ phy_info->port_details->rphy = rphy;
|
|
+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
|
|
+ ioc->name, rphy));
|
|
+ }
|
|
+
|
|
+ if (rphy) {
|
|
+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
|
|
+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
|
|
+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
|
|
+ ioc->name, rphy, rphy->dev.release));
|
|
+ }
|
|
+}
|
|
+
|
|
/* no mutex */
|
|
static void
|
|
mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
|
|
@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
|
|
return NULL;
|
|
}
|
|
|
|
-static inline void
|
|
-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
|
|
-{
|
|
- if (phy_info->port_details) {
|
|
- phy_info->port_details->rphy = rphy;
|
|
- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
|
|
- ioc->name, rphy));
|
|
- }
|
|
-
|
|
- if (rphy) {
|
|
- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
|
|
- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
|
|
- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
|
|
- ioc->name, rphy, rphy->dev.release));
|
|
- }
|
|
-}
|
|
-
|
|
static inline struct sas_port *
|
|
mptsas_get_port(struct mptsas_phyinfo *phy_info)
|
|
{
|
|
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
|
|
index 0c3ced7..1fe34ec 100644
|
|
--- a/drivers/message/fusion/mptscsih.c
|
|
+++ b/drivers/message/fusion/mptscsih.c
|
|
@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
|
|
|
|
h = shost_priv(SChost);
|
|
|
|
- if (h) {
|
|
- if (h->info_kbuf == NULL)
|
|
- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
|
|
- return h->info_kbuf;
|
|
- h->info_kbuf[0] = '\0';
|
|
+ if (!h)
|
|
+ return NULL;
|
|
|
|
- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
|
|
- h->info_kbuf[size-1] = '\0';
|
|
- }
|
|
+ if (h->info_kbuf == NULL)
|
|
+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
|
|
+ return h->info_kbuf;
|
|
+ h->info_kbuf[0] = '\0';
|
|
+
|
|
+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
|
|
+ h->info_kbuf[size-1] = '\0';
|
|
|
|
return h->info_kbuf;
|
|
}
|
|
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
|
|
index 6d115c7..58ff7fd 100644
|
|
--- a/drivers/message/i2o/i2o_proc.c
|
|
+++ b/drivers/message/i2o/i2o_proc.c
|
|
@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
|
|
"Array Controller Device"
|
|
};
|
|
|
|
-static char *chtostr(u8 * chars, int n)
|
|
-{
|
|
- char tmp[256];
|
|
- tmp[0] = 0;
|
|
- return strncat(tmp, (char *)chars, n);
|
|
-}
|
|
-
|
|
static int i2o_report_query_status(struct seq_file *seq, int block_status,
|
|
char *group)
|
|
{
|
|
@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
|
|
|
|
seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
|
|
seq_printf(seq, "%-#8x", ddm_table.module_id);
|
|
- seq_printf(seq, "%-29s",
|
|
- chtostr(ddm_table.module_name_version, 28));
|
|
+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
|
|
seq_printf(seq, "%9d ", ddm_table.data_size);
|
|
seq_printf(seq, "%8d", ddm_table.code_size);
|
|
|
|
@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
|
|
|
|
seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
|
|
seq_printf(seq, "%-#8x", dst->module_id);
|
|
- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
|
|
- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
|
|
+ seq_printf(seq, "%-.28s", dst->module_name_version);
|
|
+ seq_printf(seq, "%-.8s", dst->date);
|
|
seq_printf(seq, "%8d ", dst->module_size);
|
|
seq_printf(seq, "%8d ", dst->mpb_size);
|
|
seq_printf(seq, "0x%04x", dst->module_flags);
|
|
@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
|
|
seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
|
|
seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
|
|
seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
|
|
- seq_printf(seq, "Vendor info : %s\n",
|
|
- chtostr((u8 *) (work32 + 2), 16));
|
|
- seq_printf(seq, "Product info : %s\n",
|
|
- chtostr((u8 *) (work32 + 6), 16));
|
|
- seq_printf(seq, "Description : %s\n",
|
|
- chtostr((u8 *) (work32 + 10), 16));
|
|
- seq_printf(seq, "Product rev. : %s\n",
|
|
- chtostr((u8 *) (work32 + 14), 8));
|
|
+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
|
|
+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
|
|
+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
|
|
+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
|
|
|
|
seq_printf(seq, "Serial number : ");
|
|
print_serial_number(seq, (u8 *) (work32 + 16),
|
|
@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
|
|
}
|
|
|
|
seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
|
|
- seq_printf(seq, "Module name : %s\n",
|
|
- chtostr(result.module_name, 24));
|
|
- seq_printf(seq, "Module revision : %s\n",
|
|
- chtostr(result.module_rev, 8));
|
|
+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
|
|
+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
|
|
|
|
seq_printf(seq, "Serial number : ");
|
|
print_serial_number(seq, result.serial_number, sizeof(result) - 36);
|
|
@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
|
|
return 0;
|
|
}
|
|
|
|
- seq_printf(seq, "Device name : %s\n",
|
|
- chtostr(result.device_name, 64));
|
|
- seq_printf(seq, "Service name : %s\n",
|
|
- chtostr(result.service_name, 64));
|
|
- seq_printf(seq, "Physical name : %s\n",
|
|
- chtostr(result.physical_location, 64));
|
|
- seq_printf(seq, "Instance number : %s\n",
|
|
- chtostr(result.instance_number, 4));
|
|
+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
|
|
+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
|
|
+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
|
|
+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
|
|
index a8c08f3..155fe3d 100644
|
|
--- a/drivers/message/i2o/iop.c
|
|
+++ b/drivers/message/i2o/iop.c
|
|
@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
|
|
|
|
spin_lock_irqsave(&c->context_list_lock, flags);
|
|
|
|
- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
|
|
- atomic_inc(&c->context_list_counter);
|
|
+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
|
|
+ atomic_inc_unchecked(&c->context_list_counter);
|
|
|
|
- entry->context = atomic_read(&c->context_list_counter);
|
|
+ entry->context = atomic_read_unchecked(&c->context_list_counter);
|
|
|
|
list_add(&entry->list, &c->context_list);
|
|
|
|
@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
|
|
|
|
#if BITS_PER_LONG == 64
|
|
spin_lock_init(&c->context_list_lock);
|
|
- atomic_set(&c->context_list_counter, 0);
|
|
+ atomic_set_unchecked(&c->context_list_counter, 0);
|
|
INIT_LIST_HEAD(&c->context_list);
|
|
#endif
|
|
|
|
diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
|
|
index 7ce65f4..e66e9bc 100644
|
|
--- a/drivers/mfd/abx500-core.c
|
|
+++ b/drivers/mfd/abx500-core.c
|
|
@@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
|
|
|
|
struct abx500_device_entry {
|
|
struct list_head list;
|
|
- struct abx500_ops ops;
|
|
+ abx500_ops_no_const ops;
|
|
struct device *dev;
|
|
};
|
|
|
|
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
|
|
index a9223ed..4127b13 100644
|
|
--- a/drivers/mfd/janz-cmodio.c
|
|
+++ b/drivers/mfd/janz-cmodio.c
|
|
@@ -13,6 +13,7 @@
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/module.h>
|
|
+#include <linux/slab.h>
|
|
#include <linux/init.h>
|
|
#include <linux/pci.h>
|
|
#include <linux/interrupt.h>
|
|
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
|
|
index a981e2a..5ca0c8b 100644
|
|
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
|
|
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
|
|
@@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
|
|
* the lid is closed. This leads to interrupts as soon as a little move
|
|
* is done.
|
|
*/
|
|
- atomic_inc(&lis3->count);
|
|
+ atomic_inc_unchecked(&lis3->count);
|
|
|
|
wake_up_interruptible(&lis3->misc_wait);
|
|
kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
|
|
@@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
|
|
if (lis3->pm_dev)
|
|
pm_runtime_get_sync(lis3->pm_dev);
|
|
|
|
- atomic_set(&lis3->count, 0);
|
|
+ atomic_set_unchecked(&lis3->count, 0);
|
|
return 0;
|
|
}
|
|
|
|
@@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
|
|
add_wait_queue(&lis3->misc_wait, &wait);
|
|
while (true) {
|
|
set_current_state(TASK_INTERRUPTIBLE);
|
|
- data = atomic_xchg(&lis3->count, 0);
|
|
+ data = atomic_xchg_unchecked(&lis3->count, 0);
|
|
if (data)
|
|
break;
|
|
|
|
@@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
|
|
struct lis3lv02d, miscdev);
|
|
|
|
poll_wait(file, &lis3->misc_wait, wait);
|
|
- if (atomic_read(&lis3->count))
|
|
+ if (atomic_read_unchecked(&lis3->count))
|
|
return POLLIN | POLLRDNORM;
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
|
|
index 2b1482a..5d33616 100644
|
|
--- a/drivers/misc/lis3lv02d/lis3lv02d.h
|
|
+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
|
|
@@ -266,7 +266,7 @@ struct lis3lv02d {
|
|
struct input_polled_dev *idev; /* input device */
|
|
struct platform_device *pdev; /* platform device */
|
|
struct regulator_bulk_data regulators[2];
|
|
- atomic_t count; /* interrupt count after last read */
|
|
+ atomic_unchecked_t count; /* interrupt count after last read */
|
|
union axis_conversion ac; /* hw -> logical axis */
|
|
int mapped_btns[3];
|
|
|
|
diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
|
|
index 2f30bad..c4c13d0 100644
|
|
--- a/drivers/misc/sgi-gru/gruhandles.c
|
|
+++ b/drivers/misc/sgi-gru/gruhandles.c
|
|
@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
|
|
unsigned long nsec;
|
|
|
|
nsec = CLKS2NSEC(clks);
|
|
- atomic_long_inc(&mcs_op_statistics[op].count);
|
|
- atomic_long_add(nsec, &mcs_op_statistics[op].total);
|
|
+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
|
|
+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
|
|
if (mcs_op_statistics[op].max < nsec)
|
|
mcs_op_statistics[op].max = nsec;
|
|
}
|
|
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
|
|
index 950dbe9..eeef0f8 100644
|
|
--- a/drivers/misc/sgi-gru/gruprocfs.c
|
|
+++ b/drivers/misc/sgi-gru/gruprocfs.c
|
|
@@ -32,9 +32,9 @@
|
|
|
|
#define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
|
|
|
|
-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
|
|
+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
|
|
{
|
|
- unsigned long val = atomic_long_read(v);
|
|
+ unsigned long val = atomic_long_read_unchecked(v);
|
|
|
|
seq_printf(s, "%16lu %s\n", val, id);
|
|
}
|
|
@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
|
|
|
|
seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
|
|
for (op = 0; op < mcsop_last; op++) {
|
|
- count = atomic_long_read(&mcs_op_statistics[op].count);
|
|
- total = atomic_long_read(&mcs_op_statistics[op].total);
|
|
+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
|
|
+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
|
|
max = mcs_op_statistics[op].max;
|
|
seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
|
|
count ? total / count : 0, max);
|
|
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
|
|
index 5c3ce24..4915ccb 100644
|
|
--- a/drivers/misc/sgi-gru/grutables.h
|
|
+++ b/drivers/misc/sgi-gru/grutables.h
|
|
@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
|
|
* GRU statistics.
|
|
*/
|
|
struct gru_stats_s {
|
|
- atomic_long_t vdata_alloc;
|
|
- atomic_long_t vdata_free;
|
|
- atomic_long_t gts_alloc;
|
|
- atomic_long_t gts_free;
|
|
- atomic_long_t gms_alloc;
|
|
- atomic_long_t gms_free;
|
|
- atomic_long_t gts_double_allocate;
|
|
- atomic_long_t assign_context;
|
|
- atomic_long_t assign_context_failed;
|
|
- atomic_long_t free_context;
|
|
- atomic_long_t load_user_context;
|
|
- atomic_long_t load_kernel_context;
|
|
- atomic_long_t lock_kernel_context;
|
|
- atomic_long_t unlock_kernel_context;
|
|
- atomic_long_t steal_user_context;
|
|
- atomic_long_t steal_kernel_context;
|
|
- atomic_long_t steal_context_failed;
|
|
- atomic_long_t nopfn;
|
|
- atomic_long_t asid_new;
|
|
- atomic_long_t asid_next;
|
|
- atomic_long_t asid_wrap;
|
|
- atomic_long_t asid_reuse;
|
|
- atomic_long_t intr;
|
|
- atomic_long_t intr_cbr;
|
|
- atomic_long_t intr_tfh;
|
|
- atomic_long_t intr_spurious;
|
|
- atomic_long_t intr_mm_lock_failed;
|
|
- atomic_long_t call_os;
|
|
- atomic_long_t call_os_wait_queue;
|
|
- atomic_long_t user_flush_tlb;
|
|
- atomic_long_t user_unload_context;
|
|
- atomic_long_t user_exception;
|
|
- atomic_long_t set_context_option;
|
|
- atomic_long_t check_context_retarget_intr;
|
|
- atomic_long_t check_context_unload;
|
|
- atomic_long_t tlb_dropin;
|
|
- atomic_long_t tlb_preload_page;
|
|
- atomic_long_t tlb_dropin_fail_no_asid;
|
|
- atomic_long_t tlb_dropin_fail_upm;
|
|
- atomic_long_t tlb_dropin_fail_invalid;
|
|
- atomic_long_t tlb_dropin_fail_range_active;
|
|
- atomic_long_t tlb_dropin_fail_idle;
|
|
- atomic_long_t tlb_dropin_fail_fmm;
|
|
- atomic_long_t tlb_dropin_fail_no_exception;
|
|
- atomic_long_t tfh_stale_on_fault;
|
|
- atomic_long_t mmu_invalidate_range;
|
|
- atomic_long_t mmu_invalidate_page;
|
|
- atomic_long_t flush_tlb;
|
|
- atomic_long_t flush_tlb_gru;
|
|
- atomic_long_t flush_tlb_gru_tgh;
|
|
- atomic_long_t flush_tlb_gru_zero_asid;
|
|
-
|
|
- atomic_long_t copy_gpa;
|
|
- atomic_long_t read_gpa;
|
|
-
|
|
- atomic_long_t mesq_receive;
|
|
- atomic_long_t mesq_receive_none;
|
|
- atomic_long_t mesq_send;
|
|
- atomic_long_t mesq_send_failed;
|
|
- atomic_long_t mesq_noop;
|
|
- atomic_long_t mesq_send_unexpected_error;
|
|
- atomic_long_t mesq_send_lb_overflow;
|
|
- atomic_long_t mesq_send_qlimit_reached;
|
|
- atomic_long_t mesq_send_amo_nacked;
|
|
- atomic_long_t mesq_send_put_nacked;
|
|
- atomic_long_t mesq_page_overflow;
|
|
- atomic_long_t mesq_qf_locked;
|
|
- atomic_long_t mesq_qf_noop_not_full;
|
|
- atomic_long_t mesq_qf_switch_head_failed;
|
|
- atomic_long_t mesq_qf_unexpected_error;
|
|
- atomic_long_t mesq_noop_unexpected_error;
|
|
- atomic_long_t mesq_noop_lb_overflow;
|
|
- atomic_long_t mesq_noop_qlimit_reached;
|
|
- atomic_long_t mesq_noop_amo_nacked;
|
|
- atomic_long_t mesq_noop_put_nacked;
|
|
- atomic_long_t mesq_noop_page_overflow;
|
|
+ atomic_long_unchecked_t vdata_alloc;
|
|
+ atomic_long_unchecked_t vdata_free;
|
|
+ atomic_long_unchecked_t gts_alloc;
|
|
+ atomic_long_unchecked_t gts_free;
|
|
+ atomic_long_unchecked_t gms_alloc;
|
|
+ atomic_long_unchecked_t gms_free;
|
|
+ atomic_long_unchecked_t gts_double_allocate;
|
|
+ atomic_long_unchecked_t assign_context;
|
|
+ atomic_long_unchecked_t assign_context_failed;
|
|
+ atomic_long_unchecked_t free_context;
|
|
+ atomic_long_unchecked_t load_user_context;
|
|
+ atomic_long_unchecked_t load_kernel_context;
|
|
+ atomic_long_unchecked_t lock_kernel_context;
|
|
+ atomic_long_unchecked_t unlock_kernel_context;
|
|
+ atomic_long_unchecked_t steal_user_context;
|
|
+ atomic_long_unchecked_t steal_kernel_context;
|
|
+ atomic_long_unchecked_t steal_context_failed;
|
|
+ atomic_long_unchecked_t nopfn;
|
|
+ atomic_long_unchecked_t asid_new;
|
|
+ atomic_long_unchecked_t asid_next;
|
|
+ atomic_long_unchecked_t asid_wrap;
|
|
+ atomic_long_unchecked_t asid_reuse;
|
|
+ atomic_long_unchecked_t intr;
|
|
+ atomic_long_unchecked_t intr_cbr;
|
|
+ atomic_long_unchecked_t intr_tfh;
|
|
+ atomic_long_unchecked_t intr_spurious;
|
|
+ atomic_long_unchecked_t intr_mm_lock_failed;
|
|
+ atomic_long_unchecked_t call_os;
|
|
+ atomic_long_unchecked_t call_os_wait_queue;
|
|
+ atomic_long_unchecked_t user_flush_tlb;
|
|
+ atomic_long_unchecked_t user_unload_context;
|
|
+ atomic_long_unchecked_t user_exception;
|
|
+ atomic_long_unchecked_t set_context_option;
|
|
+ atomic_long_unchecked_t check_context_retarget_intr;
|
|
+ atomic_long_unchecked_t check_context_unload;
|
|
+ atomic_long_unchecked_t tlb_dropin;
|
|
+ atomic_long_unchecked_t tlb_preload_page;
|
|
+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
|
|
+ atomic_long_unchecked_t tlb_dropin_fail_upm;
|
|
+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
|
|
+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
|
|
+ atomic_long_unchecked_t tlb_dropin_fail_idle;
|
|
+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
|
|
+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
|
|
+ atomic_long_unchecked_t tfh_stale_on_fault;
|
|
+ atomic_long_unchecked_t mmu_invalidate_range;
|
|
+ atomic_long_unchecked_t mmu_invalidate_page;
|
|
+ atomic_long_unchecked_t flush_tlb;
|
|
+ atomic_long_unchecked_t flush_tlb_gru;
|
|
+ atomic_long_unchecked_t flush_tlb_gru_tgh;
|
|
+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
|
|
+
|
|
+ atomic_long_unchecked_t copy_gpa;
|
|
+ atomic_long_unchecked_t read_gpa;
|
|
+
|
|
+ atomic_long_unchecked_t mesq_receive;
|
|
+ atomic_long_unchecked_t mesq_receive_none;
|
|
+ atomic_long_unchecked_t mesq_send;
|
|
+ atomic_long_unchecked_t mesq_send_failed;
|
|
+ atomic_long_unchecked_t mesq_noop;
|
|
+ atomic_long_unchecked_t mesq_send_unexpected_error;
|
|
+ atomic_long_unchecked_t mesq_send_lb_overflow;
|
|
+ atomic_long_unchecked_t mesq_send_qlimit_reached;
|
|
+ atomic_long_unchecked_t mesq_send_amo_nacked;
|
|
+ atomic_long_unchecked_t mesq_send_put_nacked;
|
|
+ atomic_long_unchecked_t mesq_page_overflow;
|
|
+ atomic_long_unchecked_t mesq_qf_locked;
|
|
+ atomic_long_unchecked_t mesq_qf_noop_not_full;
|
|
+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
|
|
+ atomic_long_unchecked_t mesq_qf_unexpected_error;
|
|
+ atomic_long_unchecked_t mesq_noop_unexpected_error;
|
|
+ atomic_long_unchecked_t mesq_noop_lb_overflow;
|
|
+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
|
|
+ atomic_long_unchecked_t mesq_noop_amo_nacked;
|
|
+ atomic_long_unchecked_t mesq_noop_put_nacked;
|
|
+ atomic_long_unchecked_t mesq_noop_page_overflow;
|
|
|
|
};
|
|
|
|
@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
|
|
tghop_invalidate, mcsop_last};
|
|
|
|
struct mcs_op_statistic {
|
|
- atomic_long_t count;
|
|
- atomic_long_t total;
|
|
+ atomic_long_unchecked_t count;
|
|
+ atomic_long_unchecked_t total;
|
|
unsigned long max;
|
|
};
|
|
|
|
@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
|
|
|
|
#define STAT(id) do { \
|
|
if (gru_options & OPT_STATS) \
|
|
- atomic_long_inc(&gru_stats.id); \
|
|
+ atomic_long_inc_unchecked(&gru_stats.id); \
|
|
} while (0)
|
|
|
|
#ifdef CONFIG_SGI_GRU_DEBUG
|
|
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
|
|
index c862cd4..0d176fe 100644
|
|
--- a/drivers/misc/sgi-xp/xp.h
|
|
+++ b/drivers/misc/sgi-xp/xp.h
|
|
@@ -288,7 +288,7 @@ struct xpc_interface {
|
|
xpc_notify_func, void *);
|
|
void (*received) (short, int, void *);
|
|
enum xp_retval (*partid_to_nasids) (short, void *);
|
|
-};
|
|
+} __no_const;
|
|
|
|
extern struct xpc_interface xpc_interface;
|
|
|
|
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
|
|
index b94d5f7..7f494c5 100644
|
|
--- a/drivers/misc/sgi-xp/xpc.h
|
|
+++ b/drivers/misc/sgi-xp/xpc.h
|
|
@@ -835,6 +835,7 @@ struct xpc_arch_operations {
|
|
void (*received_payload) (struct xpc_channel *, void *);
|
|
void (*notify_senders_of_disconnect) (struct xpc_channel *);
|
|
};
|
|
+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
|
|
|
|
/* struct xpc_partition act_state values (for XPC HB) */
|
|
|
|
@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
|
|
/* found in xpc_main.c */
|
|
extern struct device *xpc_part;
|
|
extern struct device *xpc_chan;
|
|
-extern struct xpc_arch_operations xpc_arch_ops;
|
|
+extern xpc_arch_operations_no_const xpc_arch_ops;
|
|
extern int xpc_disengage_timelimit;
|
|
extern int xpc_disengage_timedout;
|
|
extern int xpc_activate_IRQ_rcvd;
|
|
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
|
|
index d971817..3805cce 100644
|
|
--- a/drivers/misc/sgi-xp/xpc_main.c
|
|
+++ b/drivers/misc/sgi-xp/xpc_main.c
|
|
@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
|
|
.notifier_call = xpc_system_die,
|
|
};
|
|
|
|
-struct xpc_arch_operations xpc_arch_ops;
|
|
+xpc_arch_operations_no_const xpc_arch_ops;
|
|
|
|
/*
|
|
* Timer function to enforce the timelimit on the partition disengage.
|
|
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
|
|
index 504da71..9722d43 100644
|
|
--- a/drivers/mmc/host/sdhci-pci.c
|
|
+++ b/drivers/mmc/host/sdhci-pci.c
|
|
@@ -653,7 +653,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
|
|
.probe = via_probe,
|
|
};
|
|
|
|
-static const struct pci_device_id pci_ids[] __devinitdata = {
|
|
+static const struct pci_device_id pci_ids[] __devinitconst = {
|
|
{
|
|
.vendor = PCI_VENDOR_ID_RICOH,
|
|
.device = PCI_DEVICE_ID_RICOH_R5C822,
|
|
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
|
|
index a4eb8b5..8c0628f 100644
|
|
--- a/drivers/mtd/devices/doc2000.c
|
|
+++ b/drivers/mtd/devices/doc2000.c
|
|
@@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
|
|
|
|
/* The ECC will not be calculated correctly if less than 512 is written */
|
|
/* DBB-
|
|
- if (len != 0x200 && eccbuf)
|
|
+ if (len != 0x200)
|
|
printk(KERN_WARNING
|
|
"ECC needs a full sector write (adr: %lx size %lx)\n",
|
|
(long) to, (long) len);
|
|
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
|
|
index a9e57d6..c6d8731 100644
|
|
--- a/drivers/mtd/nand/denali.c
|
|
+++ b/drivers/mtd/nand/denali.c
|
|
@@ -26,6 +26,7 @@
|
|
#include <linux/pci.h>
|
|
#include <linux/mtd/mtd.h>
|
|
#include <linux/module.h>
|
|
+#include <linux/slab.h>
|
|
|
|
#include "denali.h"
|
|
|
|
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
|
|
index 51b9d6a..52af9a7 100644
|
|
--- a/drivers/mtd/nftlmount.c
|
|
+++ b/drivers/mtd/nftlmount.c
|
|
@@ -24,6 +24,7 @@
|
|
#include <asm/errno.h>
|
|
#include <linux/delay.h>
|
|
#include <linux/slab.h>
|
|
+#include <linux/sched.h>
|
|
#include <linux/mtd/mtd.h>
|
|
#include <linux/mtd/nand.h>
|
|
#include <linux/mtd/nftl.h>
|
|
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
|
|
index 6762dc4..9956862 100644
|
|
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
|
|
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
|
|
@@ -2859,7 +2859,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
|
|
*/
|
|
|
|
#define ATL2_PARAM(X, desc) \
|
|
- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
|
|
+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
|
|
MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
|
|
MODULE_PARM_DESC(X, desc);
|
|
#else
|
|
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
|
|
index 61a7670..7da6e34 100644
|
|
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
|
|
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
|
|
@@ -483,7 +483,7 @@ struct bnx2x_rx_mode_obj {
|
|
|
|
int (*wait_comp)(struct bnx2x *bp,
|
|
struct bnx2x_rx_mode_ramrod_params *p);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/********************** Set multicast group ***********************************/
|
|
|
|
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
|
|
index 6639a63..03d71b0 100644
|
|
--- a/drivers/net/ethernet/broadcom/tg3.h
|
|
+++ b/drivers/net/ethernet/broadcom/tg3.h
|
|
@@ -140,6 +140,7 @@
|
|
#define CHIPREV_ID_5750_A0 0x4000
|
|
#define CHIPREV_ID_5750_A1 0x4001
|
|
#define CHIPREV_ID_5750_A3 0x4003
|
|
+#define CHIPREV_ID_5750_C1 0x4201
|
|
#define CHIPREV_ID_5750_C2 0x4202
|
|
#define CHIPREV_ID_5752_A0_HW 0x5000
|
|
#define CHIPREV_ID_5752_A0 0x6000
|
|
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
|
|
index c4e8643..0979484 100644
|
|
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
|
|
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
|
|
@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
|
|
*/
|
|
struct l2t_skb_cb {
|
|
arp_failure_handler_func arp_failure_handler;
|
|
-};
|
|
+} __no_const;
|
|
|
|
#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
|
|
|
|
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
|
|
index 18b106c..2b38d36 100644
|
|
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
|
|
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
|
|
@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
|
|
for (i=0; i<ETH_ALEN; i++) {
|
|
tmp.addr[i] = dev->dev_addr[i];
|
|
}
|
|
- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
|
|
+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
|
|
break;
|
|
|
|
case DE4X5_SET_HWADDR: /* Set the hardware address */
|
|
@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
|
|
spin_lock_irqsave(&lp->lock, flags);
|
|
memcpy(&statbuf, &lp->pktStats, ioc->len);
|
|
spin_unlock_irqrestore(&lp->lock, flags);
|
|
- if (copy_to_user(ioc->data, &statbuf, ioc->len))
|
|
+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
|
|
return -EFAULT;
|
|
break;
|
|
}
|
|
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
|
|
index ed7d1dc..d426748 100644
|
|
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
|
|
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
|
|
@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
|
|
{NULL}};
|
|
|
|
|
|
-static const char *block_name[] __devinitdata = {
|
|
+static const char *block_name[] __devinitconst = {
|
|
"21140 non-MII",
|
|
"21140 MII PHY",
|
|
"21142 Serial PHY",
|
|
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
|
|
index 2ac6fff..2d127d0 100644
|
|
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
|
|
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
|
|
@@ -236,7 +236,7 @@ struct pci_id_info {
|
|
int drv_flags; /* Driver use, intended as capability flags. */
|
|
};
|
|
|
|
-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
|
|
+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
|
|
{ /* Sometime a Level-One switch card. */
|
|
"Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
|
|
{ "Winbond W89c840", CanHaveMII | HasBrokenTx},
|
|
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
|
|
index d783f4f..97fa1b0 100644
|
|
--- a/drivers/net/ethernet/dlink/sundance.c
|
|
+++ b/drivers/net/ethernet/dlink/sundance.c
|
|
@@ -218,7 +218,7 @@ enum {
|
|
struct pci_id_info {
|
|
const char *name;
|
|
};
|
|
-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
|
|
+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
|
|
{"D-Link DFE-550TX FAST Ethernet Adapter"},
|
|
{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
|
|
{"D-Link DFE-580TX 4 port Server Adapter"},
|
|
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
|
|
index b2740f1..284f0f5 100644
|
|
--- a/drivers/net/ethernet/emulex/benet/be_main.c
|
|
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
|
|
@@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
|
|
|
|
if (wrapped)
|
|
newacc += 65536;
|
|
- ACCESS_ONCE(*acc) = newacc;
|
|
+ ACCESS_ONCE_RW(*acc) = newacc;
|
|
}
|
|
|
|
void be_parse_stats(struct be_adapter *adapter)
|
|
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
|
|
index 16b0704..d2c07d7 100644
|
|
--- a/drivers/net/ethernet/faraday/ftgmac100.c
|
|
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
|
|
@@ -31,6 +31,8 @@
|
|
#include <linux/netdevice.h>
|
|
#include <linux/phy.h>
|
|
#include <linux/platform_device.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/irqreturn.h>
|
|
#include <net/ip.h>
|
|
|
|
#include "ftgmac100.h"
|
|
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
|
|
index 829b109..4ae5f6a 100644
|
|
--- a/drivers/net/ethernet/faraday/ftmac100.c
|
|
+++ b/drivers/net/ethernet/faraday/ftmac100.c
|
|
@@ -31,6 +31,8 @@
|
|
#include <linux/module.h>
|
|
#include <linux/netdevice.h>
|
|
#include <linux/platform_device.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/irqreturn.h>
|
|
|
|
#include "ftmac100.h"
|
|
|
|
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
|
|
index 1637b98..c42f87b 100644
|
|
--- a/drivers/net/ethernet/fealnx.c
|
|
+++ b/drivers/net/ethernet/fealnx.c
|
|
@@ -150,7 +150,7 @@ struct chip_info {
|
|
int flags;
|
|
};
|
|
|
|
-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
|
|
+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
|
|
{ "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
|
|
{ "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
|
|
{ "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
|
|
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
|
|
index f82ecf5..7d59ecb 100644
|
|
--- a/drivers/net/ethernet/intel/e1000e/hw.h
|
|
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
|
|
@@ -784,6 +784,7 @@ struct e1000_mac_operations {
|
|
void (*config_collision_dist)(struct e1000_hw *);
|
|
s32 (*read_mac_addr)(struct e1000_hw *);
|
|
};
|
|
+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
|
|
|
|
/*
|
|
* When to use various PHY register access functions:
|
|
@@ -824,6 +825,7 @@ struct e1000_phy_operations {
|
|
void (*power_up)(struct e1000_hw *);
|
|
void (*power_down)(struct e1000_hw *);
|
|
};
|
|
+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
|
|
|
|
/* Function pointers for the NVM. */
|
|
struct e1000_nvm_operations {
|
|
@@ -836,9 +838,10 @@ struct e1000_nvm_operations {
|
|
s32 (*validate)(struct e1000_hw *);
|
|
s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
|
|
};
|
|
+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
|
|
|
|
struct e1000_mac_info {
|
|
- struct e1000_mac_operations ops;
|
|
+ e1000_mac_operations_no_const ops;
|
|
u8 addr[ETH_ALEN];
|
|
u8 perm_addr[ETH_ALEN];
|
|
|
|
@@ -879,7 +882,7 @@ struct e1000_mac_info {
|
|
};
|
|
|
|
struct e1000_phy_info {
|
|
- struct e1000_phy_operations ops;
|
|
+ e1000_phy_operations_no_const ops;
|
|
|
|
enum e1000_phy_type type;
|
|
|
|
@@ -913,7 +916,7 @@ struct e1000_phy_info {
|
|
};
|
|
|
|
struct e1000_nvm_info {
|
|
- struct e1000_nvm_operations ops;
|
|
+ e1000_nvm_operations_no_const ops;
|
|
|
|
enum e1000_nvm_type type;
|
|
enum e1000_nvm_override override;
|
|
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
|
|
index f67cbd3..cef9e3d 100644
|
|
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
|
|
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
|
|
@@ -314,6 +314,7 @@ struct e1000_mac_operations {
|
|
s32 (*read_mac_addr)(struct e1000_hw *);
|
|
s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
|
|
};
|
|
+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
|
|
|
|
struct e1000_phy_operations {
|
|
s32 (*acquire)(struct e1000_hw *);
|
|
@@ -330,6 +331,7 @@ struct e1000_phy_operations {
|
|
s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
|
|
s32 (*write_reg)(struct e1000_hw *, u32, u16);
|
|
};
|
|
+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
|
|
|
|
struct e1000_nvm_operations {
|
|
s32 (*acquire)(struct e1000_hw *);
|
|
@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
|
|
s32 (*update)(struct e1000_hw *);
|
|
s32 (*validate)(struct e1000_hw *);
|
|
};
|
|
+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
|
|
|
|
struct e1000_info {
|
|
s32 (*get_invariants)(struct e1000_hw *);
|
|
@@ -350,7 +353,7 @@ struct e1000_info {
|
|
extern const struct e1000_info e1000_82575_info;
|
|
|
|
struct e1000_mac_info {
|
|
- struct e1000_mac_operations ops;
|
|
+ e1000_mac_operations_no_const ops;
|
|
|
|
u8 addr[6];
|
|
u8 perm_addr[6];
|
|
@@ -388,7 +391,7 @@ struct e1000_mac_info {
|
|
};
|
|
|
|
struct e1000_phy_info {
|
|
- struct e1000_phy_operations ops;
|
|
+ e1000_phy_operations_no_const ops;
|
|
|
|
enum e1000_phy_type type;
|
|
|
|
@@ -423,7 +426,7 @@ struct e1000_phy_info {
|
|
};
|
|
|
|
struct e1000_nvm_info {
|
|
- struct e1000_nvm_operations ops;
|
|
+ e1000_nvm_operations_no_const ops;
|
|
enum e1000_nvm_type type;
|
|
enum e1000_nvm_override override;
|
|
|
|
@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
|
|
s32 (*check_for_ack)(struct e1000_hw *, u16);
|
|
s32 (*check_for_rst)(struct e1000_hw *, u16);
|
|
};
|
|
+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
|
|
|
|
struct e1000_mbx_stats {
|
|
u32 msgs_tx;
|
|
@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
|
|
};
|
|
|
|
struct e1000_mbx_info {
|
|
- struct e1000_mbx_operations ops;
|
|
+ e1000_mbx_operations_no_const ops;
|
|
struct e1000_mbx_stats stats;
|
|
u32 timeout;
|
|
u32 usec_delay;
|
|
diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
|
|
index 57db3c6..aa825fc 100644
|
|
--- a/drivers/net/ethernet/intel/igbvf/vf.h
|
|
+++ b/drivers/net/ethernet/intel/igbvf/vf.h
|
|
@@ -189,9 +189,10 @@ struct e1000_mac_operations {
|
|
s32 (*read_mac_addr)(struct e1000_hw *);
|
|
s32 (*set_vfta)(struct e1000_hw *, u16, bool);
|
|
};
|
|
+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
|
|
|
|
struct e1000_mac_info {
|
|
- struct e1000_mac_operations ops;
|
|
+ e1000_mac_operations_no_const ops;
|
|
u8 addr[6];
|
|
u8 perm_addr[6];
|
|
|
|
@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
|
|
s32 (*check_for_ack)(struct e1000_hw *);
|
|
s32 (*check_for_rst)(struct e1000_hw *);
|
|
};
|
|
+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
|
|
|
|
struct e1000_mbx_stats {
|
|
u32 msgs_tx;
|
|
@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
|
|
};
|
|
|
|
struct e1000_mbx_info {
|
|
- struct e1000_mbx_operations ops;
|
|
+ e1000_mbx_operations_no_const ops;
|
|
struct e1000_mbx_stats stats;
|
|
u32 timeout;
|
|
u32 usec_delay;
|
|
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
|
|
index 37eb39c..515bdd0 100644
|
|
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
|
|
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
|
|
@@ -2711,6 +2711,7 @@ struct ixgbe_eeprom_operations {
|
|
s32 (*update_checksum)(struct ixgbe_hw *);
|
|
u16 (*calc_checksum)(struct ixgbe_hw *);
|
|
};
|
|
+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
|
|
|
|
struct ixgbe_mac_operations {
|
|
s32 (*init_hw)(struct ixgbe_hw *);
|
|
@@ -2774,6 +2775,7 @@ struct ixgbe_mac_operations {
|
|
/* Manageability interface */
|
|
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
|
|
};
|
|
+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
|
|
|
|
struct ixgbe_phy_operations {
|
|
s32 (*identify)(struct ixgbe_hw *);
|
|
@@ -2793,9 +2795,10 @@ struct ixgbe_phy_operations {
|
|
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
|
|
s32 (*check_overtemp)(struct ixgbe_hw *);
|
|
};
|
|
+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
|
|
|
|
struct ixgbe_eeprom_info {
|
|
- struct ixgbe_eeprom_operations ops;
|
|
+ ixgbe_eeprom_operations_no_const ops;
|
|
enum ixgbe_eeprom_type type;
|
|
u32 semaphore_delay;
|
|
u16 word_size;
|
|
@@ -2805,7 +2808,7 @@ struct ixgbe_eeprom_info {
|
|
|
|
#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
|
|
struct ixgbe_mac_info {
|
|
- struct ixgbe_mac_operations ops;
|
|
+ ixgbe_mac_operations_no_const ops;
|
|
enum ixgbe_mac_type type;
|
|
u8 addr[ETH_ALEN];
|
|
u8 perm_addr[ETH_ALEN];
|
|
@@ -2833,7 +2836,7 @@ struct ixgbe_mac_info {
|
|
};
|
|
|
|
struct ixgbe_phy_info {
|
|
- struct ixgbe_phy_operations ops;
|
|
+ ixgbe_phy_operations_no_const ops;
|
|
struct mdio_if_info mdio;
|
|
enum ixgbe_phy_type type;
|
|
u32 id;
|
|
@@ -2861,6 +2864,7 @@ struct ixgbe_mbx_operations {
|
|
s32 (*check_for_ack)(struct ixgbe_hw *, u16);
|
|
s32 (*check_for_rst)(struct ixgbe_hw *, u16);
|
|
};
|
|
+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
|
|
|
|
struct ixgbe_mbx_stats {
|
|
u32 msgs_tx;
|
|
@@ -2872,7 +2876,7 @@ struct ixgbe_mbx_stats {
|
|
};
|
|
|
|
struct ixgbe_mbx_info {
|
|
- struct ixgbe_mbx_operations ops;
|
|
+ ixgbe_mbx_operations_no_const ops;
|
|
struct ixgbe_mbx_stats stats;
|
|
u32 timeout;
|
|
u32 usec_delay;
|
|
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
|
|
index 25c951d..cc7cf33 100644
|
|
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
|
|
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
|
|
@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
|
|
s32 (*clear_vfta)(struct ixgbe_hw *);
|
|
s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
|
|
};
|
|
+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
|
|
|
|
enum ixgbe_mac_type {
|
|
ixgbe_mac_unknown = 0,
|
|
@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
|
|
};
|
|
|
|
struct ixgbe_mac_info {
|
|
- struct ixgbe_mac_operations ops;
|
|
+ ixgbe_mac_operations_no_const ops;
|
|
u8 addr[6];
|
|
u8 perm_addr[6];
|
|
|
|
@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
|
|
s32 (*check_for_ack)(struct ixgbe_hw *);
|
|
s32 (*check_for_rst)(struct ixgbe_hw *);
|
|
};
|
|
+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
|
|
|
|
struct ixgbe_mbx_stats {
|
|
u32 msgs_tx;
|
|
@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
|
|
};
|
|
|
|
struct ixgbe_mbx_info {
|
|
- struct ixgbe_mbx_operations ops;
|
|
+ ixgbe_mbx_operations_no_const ops;
|
|
struct ixgbe_mbx_stats stats;
|
|
u32 timeout;
|
|
u32 udelay;
|
|
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
|
|
index 6e49bb4..2b2c393 100644
|
|
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
|
|
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
|
|
@@ -41,6 +41,7 @@
|
|
#include <linux/slab.h>
|
|
#include <linux/io-mapping.h>
|
|
#include <linux/delay.h>
|
|
+#include <linux/sched.h>
|
|
|
|
#include <linux/mlx4/device.h>
|
|
#include <linux/mlx4/doorbell.h>
|
|
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
|
|
index 5046a64..71ca936 100644
|
|
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
|
|
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
|
|
@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
|
|
void (*link_down)(struct __vxge_hw_device *devh);
|
|
void (*crit_err)(struct __vxge_hw_device *devh,
|
|
enum vxge_hw_event type, u64 ext_data);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/*
|
|
* struct __vxge_hw_blockpool_entry - Block private data structure
|
|
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
|
|
index 4a518a3..936b334 100644
|
|
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
|
|
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
|
|
@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
|
|
struct vxge_hw_mempool_dma *dma_object,
|
|
u32 index,
|
|
u32 is_last);
|
|
-};
|
|
+} __no_const;
|
|
|
|
#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
|
|
((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
|
|
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
|
|
index 2a59e7a7..660fda2 100644
|
|
--- a/drivers/net/ethernet/realtek/r8169.c
|
|
+++ b/drivers/net/ethernet/realtek/r8169.c
|
|
@@ -710,17 +710,17 @@ struct rtl8169_private {
|
|
struct mdio_ops {
|
|
void (*write)(void __iomem *, int, int);
|
|
int (*read)(void __iomem *, int);
|
|
- } mdio_ops;
|
|
+ } __no_const mdio_ops;
|
|
|
|
struct pll_power_ops {
|
|
void (*down)(struct rtl8169_private *);
|
|
void (*up)(struct rtl8169_private *);
|
|
- } pll_power_ops;
|
|
+ } __no_const pll_power_ops;
|
|
|
|
struct jumbo_ops {
|
|
void (*enable)(struct rtl8169_private *);
|
|
void (*disable)(struct rtl8169_private *);
|
|
- } jumbo_ops;
|
|
+ } __no_const jumbo_ops;
|
|
|
|
int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
|
|
int (*get_settings)(struct net_device *, struct ethtool_cmd *);
|
|
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
|
|
index a9deda8..5507c31 100644
|
|
--- a/drivers/net/ethernet/sis/sis190.c
|
|
+++ b/drivers/net/ethernet/sis/sis190.c
|
|
@@ -1620,7 +1620,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
|
|
static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
|
|
struct net_device *dev)
|
|
{
|
|
- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
|
|
+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
|
|
struct sis190_private *tp = netdev_priv(dev);
|
|
struct pci_dev *isa_bridge;
|
|
u8 reg, tmp8;
|
|
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
|
|
index c07cfe9..81cbf7e 100644
|
|
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
|
|
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
|
|
@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
|
|
|
|
writel(value, ioaddr + MMC_CNTRL);
|
|
|
|
- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
|
|
- MMC_CNTRL, value);
|
|
+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
|
|
+// MMC_CNTRL, value);
|
|
}
|
|
|
|
/* To mask all all interrupts.*/
|
|
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
|
|
index c358245..8c1de63 100644
|
|
--- a/drivers/net/hyperv/hyperv_net.h
|
|
+++ b/drivers/net/hyperv/hyperv_net.h
|
|
@@ -98,7 +98,7 @@ struct rndis_device {
|
|
|
|
enum rndis_device_state state;
|
|
bool link_state;
|
|
- atomic_t new_req_id;
|
|
+ atomic_unchecked_t new_req_id;
|
|
|
|
spinlock_t request_lock;
|
|
struct list_head req_list;
|
|
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
|
|
index d6be64b..5d97e3b 100644
|
|
--- a/drivers/net/hyperv/rndis_filter.c
|
|
+++ b/drivers/net/hyperv/rndis_filter.c
|
|
@@ -97,7 +97,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
|
|
* template
|
|
*/
|
|
set = &rndis_msg->msg.set_req;
|
|
- set->req_id = atomic_inc_return(&dev->new_req_id);
|
|
+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
|
|
|
|
/* Add to the request list */
|
|
spin_lock_irqsave(&dev->request_lock, flags);
|
|
@@ -648,7 +648,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
|
|
|
|
/* Setup the rndis set */
|
|
halt = &request->request_msg.msg.halt_req;
|
|
- halt->req_id = atomic_inc_return(&dev->new_req_id);
|
|
+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
|
|
|
|
/* Ignore return since this msg is optional. */
|
|
rndis_filter_send_request(dev, request);
|
|
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
|
|
index 1207bb1..a51bac4 100644
|
|
--- a/drivers/net/ppp/ppp_generic.c
|
|
+++ b/drivers/net/ppp/ppp_generic.c
|
|
@@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
|
|
void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
|
|
struct ppp_stats stats;
|
|
struct ppp_comp_stats cstats;
|
|
- char *vers;
|
|
|
|
switch (cmd) {
|
|
case SIOCGPPPSTATS:
|
|
@@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
|
|
break;
|
|
|
|
case SIOCGPPPVER:
|
|
- vers = PPP_VERSION;
|
|
- if (copy_to_user(addr, vers, strlen(vers) + 1))
|
|
+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
|
|
break;
|
|
err = 0;
|
|
break;
|
|
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
|
|
index b715e6b..6d2490f 100644
|
|
--- a/drivers/net/tokenring/abyss.c
|
|
+++ b/drivers/net/tokenring/abyss.c
|
|
@@ -450,10 +450,12 @@ static struct pci_driver abyss_driver = {
|
|
|
|
static int __init abyss_init (void)
|
|
{
|
|
- abyss_netdev_ops = tms380tr_netdev_ops;
|
|
+ pax_open_kernel();
|
|
+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
|
|
|
|
- abyss_netdev_ops.ndo_open = abyss_open;
|
|
- abyss_netdev_ops.ndo_stop = abyss_close;
|
|
+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
|
|
+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
|
|
+ pax_close_kernel();
|
|
|
|
return pci_register_driver(&abyss_driver);
|
|
}
|
|
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
|
|
index 28adcdf..ae82f35 100644
|
|
--- a/drivers/net/tokenring/madgemc.c
|
|
+++ b/drivers/net/tokenring/madgemc.c
|
|
@@ -742,9 +742,11 @@ static struct mca_driver madgemc_driver = {
|
|
|
|
static int __init madgemc_init (void)
|
|
{
|
|
- madgemc_netdev_ops = tms380tr_netdev_ops;
|
|
- madgemc_netdev_ops.ndo_open = madgemc_open;
|
|
- madgemc_netdev_ops.ndo_stop = madgemc_close;
|
|
+ pax_open_kernel();
|
|
+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
|
|
+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
|
|
+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
|
|
+ pax_close_kernel();
|
|
|
|
return mca_register_driver (&madgemc_driver);
|
|
}
|
|
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
|
|
index 62d90e4..9d84237 100644
|
|
--- a/drivers/net/tokenring/proteon.c
|
|
+++ b/drivers/net/tokenring/proteon.c
|
|
@@ -352,9 +352,11 @@ static int __init proteon_init(void)
|
|
struct platform_device *pdev;
|
|
int i, num = 0, err = 0;
|
|
|
|
- proteon_netdev_ops = tms380tr_netdev_ops;
|
|
- proteon_netdev_ops.ndo_open = proteon_open;
|
|
- proteon_netdev_ops.ndo_stop = tms380tr_close;
|
|
+ pax_open_kernel();
|
|
+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
|
|
+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
|
|
+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
|
|
+ pax_close_kernel();
|
|
|
|
err = platform_driver_register(&proteon_driver);
|
|
if (err)
|
|
diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
|
|
index ee11e93..c8f19c7 100644
|
|
--- a/drivers/net/tokenring/skisa.c
|
|
+++ b/drivers/net/tokenring/skisa.c
|
|
@@ -362,9 +362,11 @@ static int __init sk_isa_init(void)
|
|
struct platform_device *pdev;
|
|
int i, num = 0, err = 0;
|
|
|
|
- sk_isa_netdev_ops = tms380tr_netdev_ops;
|
|
- sk_isa_netdev_ops.ndo_open = sk_isa_open;
|
|
- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
|
|
+ pax_open_kernel();
|
|
+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
|
|
+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
|
|
+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
|
|
+ pax_close_kernel();
|
|
|
|
err = platform_driver_register(&sk_isa_driver);
|
|
if (err)
|
|
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
|
|
index 2d2a688..35f2372 100644
|
|
--- a/drivers/net/usb/hso.c
|
|
+++ b/drivers/net/usb/hso.c
|
|
@@ -71,7 +71,7 @@
|
|
#include <asm/byteorder.h>
|
|
#include <linux/serial_core.h>
|
|
#include <linux/serial.h>
|
|
-
|
|
+#include <asm/local.h>
|
|
|
|
#define MOD_AUTHOR "Option Wireless"
|
|
#define MOD_DESCRIPTION "USB High Speed Option driver"
|
|
@@ -257,7 +257,7 @@ struct hso_serial {
|
|
|
|
/* from usb_serial_port */
|
|
struct tty_struct *tty;
|
|
- int open_count;
|
|
+ local_t open_count;
|
|
spinlock_t serial_lock;
|
|
|
|
int (*write_data) (struct hso_serial *serial);
|
|
@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
|
|
struct urb *urb;
|
|
|
|
urb = serial->rx_urb[0];
|
|
- if (serial->open_count > 0) {
|
|
+ if (local_read(&serial->open_count) > 0) {
|
|
count = put_rxbuf_data(urb, serial);
|
|
if (count == -1)
|
|
return;
|
|
@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
|
|
DUMP1(urb->transfer_buffer, urb->actual_length);
|
|
|
|
/* Anyone listening? */
|
|
- if (serial->open_count == 0)
|
|
+ if (local_read(&serial->open_count) == 0)
|
|
return;
|
|
|
|
if (status == 0) {
|
|
@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
|
|
spin_unlock_irq(&serial->serial_lock);
|
|
|
|
/* check for port already opened, if not set the termios */
|
|
- serial->open_count++;
|
|
- if (serial->open_count == 1) {
|
|
+ if (local_inc_return(&serial->open_count) == 1) {
|
|
serial->rx_state = RX_IDLE;
|
|
/* Force default termio settings */
|
|
_hso_serial_set_termios(tty, NULL);
|
|
@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
|
|
result = hso_start_serial_device(serial->parent, GFP_KERNEL);
|
|
if (result) {
|
|
hso_stop_serial_device(serial->parent);
|
|
- serial->open_count--;
|
|
+ local_dec(&serial->open_count);
|
|
kref_put(&serial->parent->ref, hso_serial_ref_free);
|
|
}
|
|
} else {
|
|
@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
|
|
|
|
/* reset the rts and dtr */
|
|
/* do the actual close */
|
|
- serial->open_count--;
|
|
+ local_dec(&serial->open_count);
|
|
|
|
- if (serial->open_count <= 0) {
|
|
- serial->open_count = 0;
|
|
+ if (local_read(&serial->open_count) <= 0) {
|
|
+ local_set(&serial->open_count, 0);
|
|
spin_lock_irq(&serial->serial_lock);
|
|
if (serial->tty == tty) {
|
|
serial->tty->driver_data = NULL;
|
|
@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
|
|
|
|
/* the actual setup */
|
|
spin_lock_irqsave(&serial->serial_lock, flags);
|
|
- if (serial->open_count)
|
|
+ if (local_read(&serial->open_count))
|
|
_hso_serial_set_termios(tty, old);
|
|
else
|
|
tty->termios = old;
|
|
@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
|
|
D1("Pending read interrupt on port %d\n", i);
|
|
spin_lock(&serial->serial_lock);
|
|
if (serial->rx_state == RX_IDLE &&
|
|
- serial->open_count > 0) {
|
|
+ local_read(&serial->open_count) > 0) {
|
|
/* Setup and send a ctrl req read on
|
|
* port i */
|
|
if (!serial->rx_urb_filled[0]) {
|
|
@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
|
|
/* Start all serial ports */
|
|
for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
|
|
if (serial_table[i] && (serial_table[i]->interface == iface)) {
|
|
- if (dev2ser(serial_table[i])->open_count) {
|
|
+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
|
|
result =
|
|
hso_start_serial_device(serial_table[i], GFP_NOIO);
|
|
hso_kick_transmit(dev2ser(serial_table[i]));
|
|
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
|
|
index 420d69b..74f90a2 100644
|
|
--- a/drivers/net/wireless/ath/ath.h
|
|
+++ b/drivers/net/wireless/ath/ath.h
|
|
@@ -119,6 +119,7 @@ struct ath_ops {
|
|
void (*write_flush) (void *);
|
|
u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
|
|
};
|
|
+typedef struct ath_ops __no_const ath_ops_no_const;
|
|
|
|
struct ath_common;
|
|
struct ath_bus_ops;
|
|
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
|
|
index fa7581a..5ecef3d 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
|
|
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
|
|
@@ -217,8 +217,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
|
|
ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
|
|
ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
|
|
|
|
- ACCESS_ONCE(ads->ds_link) = i->link;
|
|
- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
|
|
+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
|
|
+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
|
|
|
|
ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
|
|
ctl6 = SM(i->keytype, AR_EncrType);
|
|
@@ -232,26 +232,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
|
|
|
|
if ((i->is_first || i->is_last) &&
|
|
i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
|
|
- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
|
|
+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
|
|
| set11nTries(i->rates, 1)
|
|
| set11nTries(i->rates, 2)
|
|
| set11nTries(i->rates, 3)
|
|
| (i->dur_update ? AR_DurUpdateEna : 0)
|
|
| SM(0, AR_BurstDur);
|
|
|
|
- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
|
|
+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
|
|
| set11nRate(i->rates, 1)
|
|
| set11nRate(i->rates, 2)
|
|
| set11nRate(i->rates, 3);
|
|
} else {
|
|
- ACCESS_ONCE(ads->ds_ctl2) = 0;
|
|
- ACCESS_ONCE(ads->ds_ctl3) = 0;
|
|
+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
|
|
+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
|
|
}
|
|
|
|
if (!i->is_first) {
|
|
- ACCESS_ONCE(ads->ds_ctl0) = 0;
|
|
- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
|
|
- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
|
|
+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
|
|
+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
|
|
+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
|
|
return;
|
|
}
|
|
|
|
@@ -276,7 +276,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
|
|
break;
|
|
}
|
|
|
|
- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
|
|
+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
|
|
| (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
|
|
| SM(i->txpower, AR_XmitPower)
|
|
| (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
|
|
@@ -286,19 +286,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
|
|
| (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
|
|
(i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
|
|
|
|
- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
|
|
- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
|
|
+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
|
|
+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
|
|
|
|
if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
|
|
return;
|
|
|
|
- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
|
|
+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
|
|
| set11nPktDurRTSCTS(i->rates, 1);
|
|
|
|
- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
|
|
+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
|
|
| set11nPktDurRTSCTS(i->rates, 3);
|
|
|
|
- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
|
|
+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
|
|
| set11nRateFlags(i->rates, 1)
|
|
| set11nRateFlags(i->rates, 2)
|
|
| set11nRateFlags(i->rates, 3)
|
|
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
|
|
index a66a13b..0ef399e 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
|
|
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
|
|
@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
|
|
(i->qcu << AR_TxQcuNum_S) | desc_len;
|
|
|
|
checksum += val;
|
|
- ACCESS_ONCE(ads->info) = val;
|
|
+ ACCESS_ONCE_RW(ads->info) = val;
|
|
|
|
checksum += i->link;
|
|
- ACCESS_ONCE(ads->link) = i->link;
|
|
+ ACCESS_ONCE_RW(ads->link) = i->link;
|
|
|
|
checksum += i->buf_addr[0];
|
|
- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
|
|
+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
|
|
checksum += i->buf_addr[1];
|
|
- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
|
|
+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
|
|
checksum += i->buf_addr[2];
|
|
- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
|
|
+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
|
|
checksum += i->buf_addr[3];
|
|
- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
|
|
+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
|
|
|
|
checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
|
|
- ACCESS_ONCE(ads->ctl3) = val;
|
|
+ ACCESS_ONCE_RW(ads->ctl3) = val;
|
|
checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
|
|
- ACCESS_ONCE(ads->ctl5) = val;
|
|
+ ACCESS_ONCE_RW(ads->ctl5) = val;
|
|
checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
|
|
- ACCESS_ONCE(ads->ctl7) = val;
|
|
+ ACCESS_ONCE_RW(ads->ctl7) = val;
|
|
checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
|
|
- ACCESS_ONCE(ads->ctl9) = val;
|
|
+ ACCESS_ONCE_RW(ads->ctl9) = val;
|
|
|
|
checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
|
|
- ACCESS_ONCE(ads->ctl10) = checksum;
|
|
+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
|
|
|
|
if (i->is_first || i->is_last) {
|
|
- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
|
|
+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
|
|
| set11nTries(i->rates, 1)
|
|
| set11nTries(i->rates, 2)
|
|
| set11nTries(i->rates, 3)
|
|
| (i->dur_update ? AR_DurUpdateEna : 0)
|
|
| SM(0, AR_BurstDur);
|
|
|
|
- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
|
|
+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
|
|
| set11nRate(i->rates, 1)
|
|
| set11nRate(i->rates, 2)
|
|
| set11nRate(i->rates, 3);
|
|
} else {
|
|
- ACCESS_ONCE(ads->ctl13) = 0;
|
|
- ACCESS_ONCE(ads->ctl14) = 0;
|
|
+ ACCESS_ONCE_RW(ads->ctl13) = 0;
|
|
+ ACCESS_ONCE_RW(ads->ctl14) = 0;
|
|
}
|
|
|
|
ads->ctl20 = 0;
|
|
@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
|
|
|
|
ctl17 = SM(i->keytype, AR_EncrType);
|
|
if (!i->is_first) {
|
|
- ACCESS_ONCE(ads->ctl11) = 0;
|
|
- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
|
|
- ACCESS_ONCE(ads->ctl15) = 0;
|
|
- ACCESS_ONCE(ads->ctl16) = 0;
|
|
- ACCESS_ONCE(ads->ctl17) = ctl17;
|
|
- ACCESS_ONCE(ads->ctl18) = 0;
|
|
- ACCESS_ONCE(ads->ctl19) = 0;
|
|
+ ACCESS_ONCE_RW(ads->ctl11) = 0;
|
|
+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
|
|
+ ACCESS_ONCE_RW(ads->ctl15) = 0;
|
|
+ ACCESS_ONCE_RW(ads->ctl16) = 0;
|
|
+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
|
|
+ ACCESS_ONCE_RW(ads->ctl18) = 0;
|
|
+ ACCESS_ONCE_RW(ads->ctl19) = 0;
|
|
return;
|
|
}
|
|
|
|
- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
|
|
+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
|
|
| (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
|
|
| SM(i->txpower, AR_XmitPower)
|
|
| (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
|
|
@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
|
|
val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
|
|
ctl12 |= SM(val, AR_PAPRDChainMask);
|
|
|
|
- ACCESS_ONCE(ads->ctl12) = ctl12;
|
|
- ACCESS_ONCE(ads->ctl17) = ctl17;
|
|
+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
|
|
+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
|
|
|
|
- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
|
|
+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
|
|
| set11nPktDurRTSCTS(i->rates, 1);
|
|
|
|
- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
|
|
+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
|
|
| set11nPktDurRTSCTS(i->rates, 3);
|
|
|
|
- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
|
|
+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
|
|
| set11nRateFlags(i->rates, 1)
|
|
| set11nRateFlags(i->rates, 2)
|
|
| set11nRateFlags(i->rates, 3)
|
|
| SM(i->rtscts_rate, AR_RTSCTSRate);
|
|
|
|
- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
|
|
+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
|
|
}
|
|
|
|
static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
|
|
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
|
|
index d4f09b4..fb18952 100644
|
|
--- a/drivers/net/wireless/ath/ath9k/hw.h
|
|
+++ b/drivers/net/wireless/ath/ath9k/hw.h
|
|
@@ -616,7 +616,7 @@ struct ath_hw_private_ops {
|
|
|
|
/* ANI */
|
|
void (*ani_cache_ini_regs)(struct ath_hw *ah);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/**
|
|
* struct ath_hw_ops - callbacks used by hardware code and driver code
|
|
@@ -646,7 +646,7 @@ struct ath_hw_ops {
|
|
void (*antdiv_comb_conf_set)(struct ath_hw *ah,
|
|
struct ath_hw_antcomb_conf *antconf);
|
|
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct ath_nf_limits {
|
|
s16 max;
|
|
@@ -666,7 +666,7 @@ enum ath_cal_list {
|
|
#define AH_FASTCC 0x4
|
|
|
|
struct ath_hw {
|
|
- struct ath_ops reg_ops;
|
|
+ ath_ops_no_const reg_ops;
|
|
|
|
struct ieee80211_hw *hw;
|
|
struct ath_common common;
|
|
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
|
|
index af00e2c..ab04d34 100644
|
|
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
|
|
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
|
|
@@ -545,7 +545,7 @@ struct phy_func_ptr {
|
|
void (*carrsuppr)(struct brcms_phy *);
|
|
s32 (*rxsigpwr)(struct brcms_phy *, s32);
|
|
void (*detach)(struct brcms_phy *);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct brcms_phy {
|
|
struct brcms_phy_pub pubpi_ro;
|
|
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
|
|
index faec404..a5277f1 100644
|
|
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
|
|
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
|
|
@@ -3611,7 +3611,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|
*/
|
|
if (il3945_mod_params.disable_hw_scan) {
|
|
D_INFO("Disabling hw_scan\n");
|
|
- il3945_mac_ops.hw_scan = NULL;
|
|
+ pax_open_kernel();
|
|
+ *(void **)&il3945_mac_ops.hw_scan = NULL;
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
D_INFO("*** LOAD DRIVER ***\n");
|
|
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
|
|
index b7ce6a6..5649756 100644
|
|
--- a/drivers/net/wireless/mac80211_hwsim.c
|
|
+++ b/drivers/net/wireless/mac80211_hwsim.c
|
|
@@ -1721,9 +1721,11 @@ static int __init init_mac80211_hwsim(void)
|
|
return -EINVAL;
|
|
|
|
if (fake_hw_scan) {
|
|
- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
|
|
- mac80211_hwsim_ops.sw_scan_start = NULL;
|
|
- mac80211_hwsim_ops.sw_scan_complete = NULL;
|
|
+ pax_open_kernel();
|
|
+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
|
|
+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
|
|
+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
spin_lock_init(&hwsim_radio_lock);
|
|
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
|
|
index 35225e9..95e6bf9 100644
|
|
--- a/drivers/net/wireless/mwifiex/main.h
|
|
+++ b/drivers/net/wireless/mwifiex/main.h
|
|
@@ -537,7 +537,7 @@ struct mwifiex_if_ops {
|
|
void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
|
|
int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
|
|
int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct mwifiex_adapter {
|
|
u8 iface_type;
|
|
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
|
|
index 414ac49..43b1f1a 100644
|
|
--- a/drivers/net/wireless/rndis_wlan.c
|
|
+++ b/drivers/net/wireless/rndis_wlan.c
|
|
@@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
|
|
|
|
netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
|
|
|
|
- if (rts_threshold < 0 || rts_threshold > 2347)
|
|
+ if (rts_threshold > 2347)
|
|
rts_threshold = 2347;
|
|
|
|
tmp = cpu_to_le32(rts_threshold);
|
|
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
|
|
index 4be11e36..ad18f66 100644
|
|
--- a/drivers/net/wireless/rt2x00/rt2x00.h
|
|
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
|
|
@@ -396,7 +396,7 @@ struct rt2x00_intf {
|
|
* for hardware which doesn't support hardware
|
|
* sequence counting.
|
|
*/
|
|
- atomic_t seqno;
|
|
+ atomic_unchecked_t seqno;
|
|
};
|
|
|
|
static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
|
|
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
|
|
index c5bdbe9..8a05aa7 100644
|
|
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
|
|
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
|
|
@@ -203,9 +203,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
|
|
* sequence counter given by mac80211.
|
|
*/
|
|
if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
|
|
- seqno = atomic_add_return(0x10, &intf->seqno);
|
|
+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
|
|
else
|
|
- seqno = atomic_read(&intf->seqno);
|
|
+ seqno = atomic_read_unchecked(&intf->seqno);
|
|
|
|
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
|
|
hdr->seq_ctrl |= cpu_to_le16(seqno);
|
|
diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
|
|
index 9d8f581..0f6589e 100644
|
|
--- a/drivers/net/wireless/wl1251/wl1251.h
|
|
+++ b/drivers/net/wireless/wl1251/wl1251.h
|
|
@@ -266,7 +266,7 @@ struct wl1251_if_operations {
|
|
void (*reset)(struct wl1251 *wl);
|
|
void (*enable_irq)(struct wl1251 *wl);
|
|
void (*disable_irq)(struct wl1251 *wl);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct wl1251 {
|
|
struct ieee80211_hw *hw;
|
|
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
|
|
index f34b5b2..b5abb9f 100644
|
|
--- a/drivers/oprofile/buffer_sync.c
|
|
+++ b/drivers/oprofile/buffer_sync.c
|
|
@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
|
|
if (cookie == NO_COOKIE)
|
|
offset = pc;
|
|
if (cookie == INVALID_COOKIE) {
|
|
- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
|
|
+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
|
|
offset = pc;
|
|
}
|
|
if (cookie != last_cookie) {
|
|
@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
|
|
/* add userspace sample */
|
|
|
|
if (!mm) {
|
|
- atomic_inc(&oprofile_stats.sample_lost_no_mm);
|
|
+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
|
|
return 0;
|
|
}
|
|
|
|
cookie = lookup_dcookie(mm, s->eip, &offset);
|
|
|
|
if (cookie == INVALID_COOKIE) {
|
|
- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
|
|
+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
|
|
return 0;
|
|
}
|
|
|
|
@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
|
|
/* ignore backtraces if failed to add a sample */
|
|
if (state == sb_bt_start) {
|
|
state = sb_bt_ignore;
|
|
- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
|
|
+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
|
|
}
|
|
}
|
|
release_mm(mm);
|
|
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
|
|
index c0cc4e7..44d4e54 100644
|
|
--- a/drivers/oprofile/event_buffer.c
|
|
+++ b/drivers/oprofile/event_buffer.c
|
|
@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
|
|
}
|
|
|
|
if (buffer_pos == buffer_size) {
|
|
- atomic_inc(&oprofile_stats.event_lost_overflow);
|
|
+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
|
|
return;
|
|
}
|
|
|
|
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
|
|
index ed2c3ec..deda85a 100644
|
|
--- a/drivers/oprofile/oprof.c
|
|
+++ b/drivers/oprofile/oprof.c
|
|
@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
|
|
if (oprofile_ops.switch_events())
|
|
return;
|
|
|
|
- atomic_inc(&oprofile_stats.multiplex_counter);
|
|
+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
|
|
start_switch_worker();
|
|
}
|
|
|
|
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
|
|
index 917d28e..d62d981 100644
|
|
--- a/drivers/oprofile/oprofile_stats.c
|
|
+++ b/drivers/oprofile/oprofile_stats.c
|
|
@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
|
|
cpu_buf->sample_invalid_eip = 0;
|
|
}
|
|
|
|
- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
|
|
- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
|
|
- atomic_set(&oprofile_stats.event_lost_overflow, 0);
|
|
- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
|
|
- atomic_set(&oprofile_stats.multiplex_counter, 0);
|
|
+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
|
|
+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
|
|
+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
|
|
+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
|
|
+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
|
|
}
|
|
|
|
|
|
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
|
|
index 38b6fc0..b5cbfce 100644
|
|
--- a/drivers/oprofile/oprofile_stats.h
|
|
+++ b/drivers/oprofile/oprofile_stats.h
|
|
@@ -13,11 +13,11 @@
|
|
#include <linux/atomic.h>
|
|
|
|
struct oprofile_stat_struct {
|
|
- atomic_t sample_lost_no_mm;
|
|
- atomic_t sample_lost_no_mapping;
|
|
- atomic_t bt_lost_no_mapping;
|
|
- atomic_t event_lost_overflow;
|
|
- atomic_t multiplex_counter;
|
|
+ atomic_unchecked_t sample_lost_no_mm;
|
|
+ atomic_unchecked_t sample_lost_no_mapping;
|
|
+ atomic_unchecked_t bt_lost_no_mapping;
|
|
+ atomic_unchecked_t event_lost_overflow;
|
|
+ atomic_unchecked_t multiplex_counter;
|
|
};
|
|
|
|
extern struct oprofile_stat_struct oprofile_stats;
|
|
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
|
|
index 849357c..b83c1e0 100644
|
|
--- a/drivers/oprofile/oprofilefs.c
|
|
+++ b/drivers/oprofile/oprofilefs.c
|
|
@@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
|
|
|
|
|
|
int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
|
|
- char const *name, atomic_t *val)
|
|
+ char const *name, atomic_unchecked_t *val)
|
|
{
|
|
return __oprofilefs_create_file(sb, root, name,
|
|
&atomic_ro_fops, 0444, val);
|
|
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
|
|
index 3f56bc0..707d642 100644
|
|
--- a/drivers/parport/procfs.c
|
|
+++ b/drivers/parport/procfs.c
|
|
@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
|
|
|
|
*ppos += len;
|
|
|
|
- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
|
|
+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
|
|
}
|
|
|
|
#ifdef CONFIG_PARPORT_1284
|
|
@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
|
|
|
|
*ppos += len;
|
|
|
|
- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
|
|
+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
|
|
}
|
|
#endif /* IEEE1284.3 support. */
|
|
|
|
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
|
|
index 9fff878..ad0ad53 100644
|
|
--- a/drivers/pci/hotplug/cpci_hotplug.h
|
|
+++ b/drivers/pci/hotplug/cpci_hotplug.h
|
|
@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
|
|
int (*hardware_test) (struct slot* slot, u32 value);
|
|
u8 (*get_power) (struct slot* slot);
|
|
int (*set_power) (struct slot* slot, int value);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct cpci_hp_controller {
|
|
unsigned int irq;
|
|
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
|
|
index 76ba8a1..20ca857 100644
|
|
--- a/drivers/pci/hotplug/cpqphp_nvram.c
|
|
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
|
|
@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
|
|
|
|
void compaq_nvram_init (void __iomem *rom_start)
|
|
{
|
|
+
|
|
+#ifndef CONFIG_PAX_KERNEXEC
|
|
if (rom_start) {
|
|
compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
|
|
}
|
|
+#endif
|
|
+
|
|
dbg("int15 entry = %p\n", compaq_int15_entry_point);
|
|
|
|
/* initialize our int15 lock */
|
|
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
|
|
index c9ce611..65705f6 100644
|
|
--- a/drivers/pci/pcie/aspm.c
|
|
+++ b/drivers/pci/pcie/aspm.c
|
|
@@ -27,9 +27,9 @@
|
|
#define MODULE_PARAM_PREFIX "pcie_aspm."
|
|
|
|
/* Note: those are not register definitions */
|
|
-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
|
|
-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
|
|
-#define ASPM_STATE_L1 (4) /* L1 state */
|
|
+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
|
|
+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
|
|
+#define ASPM_STATE_L1 (4U) /* L1 state */
|
|
#define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
|
|
#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
|
|
|
|
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
|
|
index 868440f..e57afb2 100644
|
|
--- a/drivers/pci/probe.c
|
|
+++ b/drivers/pci/probe.c
|
|
@@ -213,7 +213,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
|
|
u16 orig_cmd;
|
|
struct pci_bus_region region;
|
|
|
|
- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
|
|
+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
|
|
|
|
if (!dev->mmio_always_on) {
|
|
pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
|
|
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
|
|
index aa232de..bda0cda 100644
|
|
--- a/drivers/platform/x86/thinkpad_acpi.c
|
|
+++ b/drivers/platform/x86/thinkpad_acpi.c
|
|
@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
|
|
return 0;
|
|
}
|
|
|
|
-void static hotkey_mask_warn_incomplete_mask(void)
|
|
+static void hotkey_mask_warn_incomplete_mask(void)
|
|
{
|
|
/* log only what the user can fix... */
|
|
const u32 wantedmask = hotkey_driver_mask &
|
|
@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
|
|
}
|
|
}
|
|
|
|
-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
|
|
- struct tp_nvram_state *newn,
|
|
- const u32 event_mask)
|
|
-{
|
|
-
|
|
#define TPACPI_COMPARE_KEY(__scancode, __member) \
|
|
do { \
|
|
if ((event_mask & (1 << __scancode)) && \
|
|
@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
|
|
tpacpi_hotkey_send_key(__scancode); \
|
|
} while (0)
|
|
|
|
- void issue_volchange(const unsigned int oldvol,
|
|
- const unsigned int newvol)
|
|
- {
|
|
- unsigned int i = oldvol;
|
|
+static void issue_volchange(const unsigned int oldvol,
|
|
+ const unsigned int newvol,
|
|
+ const u32 event_mask)
|
|
+{
|
|
+ unsigned int i = oldvol;
|
|
|
|
- while (i > newvol) {
|
|
- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
|
|
- i--;
|
|
- }
|
|
- while (i < newvol) {
|
|
- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
|
|
- i++;
|
|
- }
|
|
+ while (i > newvol) {
|
|
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
|
|
+ i--;
|
|
}
|
|
+ while (i < newvol) {
|
|
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
|
|
+ i++;
|
|
+ }
|
|
+}
|
|
|
|
- void issue_brightnesschange(const unsigned int oldbrt,
|
|
- const unsigned int newbrt)
|
|
- {
|
|
- unsigned int i = oldbrt;
|
|
+static void issue_brightnesschange(const unsigned int oldbrt,
|
|
+ const unsigned int newbrt,
|
|
+ const u32 event_mask)
|
|
+{
|
|
+ unsigned int i = oldbrt;
|
|
|
|
- while (i > newbrt) {
|
|
- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
|
|
- i--;
|
|
- }
|
|
- while (i < newbrt) {
|
|
- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
|
|
- i++;
|
|
- }
|
|
+ while (i > newbrt) {
|
|
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
|
|
+ i--;
|
|
+ }
|
|
+ while (i < newbrt) {
|
|
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
|
|
+ i++;
|
|
}
|
|
+}
|
|
|
|
+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
|
|
+ struct tp_nvram_state *newn,
|
|
+ const u32 event_mask)
|
|
+{
|
|
TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
|
|
TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
|
|
TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
|
|
@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
|
|
oldn->volume_level != newn->volume_level) {
|
|
/* recently muted, or repeated mute keypress, or
|
|
* multiple presses ending in mute */
|
|
- issue_volchange(oldn->volume_level, newn->volume_level);
|
|
+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
|
|
TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
|
|
}
|
|
} else {
|
|
@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
|
|
TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
|
|
}
|
|
if (oldn->volume_level != newn->volume_level) {
|
|
- issue_volchange(oldn->volume_level, newn->volume_level);
|
|
+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
|
|
} else if (oldn->volume_toggle != newn->volume_toggle) {
|
|
/* repeated vol up/down keypress at end of scale ? */
|
|
if (newn->volume_level == 0)
|
|
@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
|
|
/* handle brightness */
|
|
if (oldn->brightness_level != newn->brightness_level) {
|
|
issue_brightnesschange(oldn->brightness_level,
|
|
- newn->brightness_level);
|
|
+ newn->brightness_level,
|
|
+ event_mask);
|
|
} else if (oldn->brightness_toggle != newn->brightness_toggle) {
|
|
/* repeated key presses that didn't change state */
|
|
if (newn->brightness_level == 0)
|
|
@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
|
|
&& !tp_features.bright_unkfw)
|
|
TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
|
|
}
|
|
+}
|
|
|
|
#undef TPACPI_COMPARE_KEY
|
|
#undef TPACPI_MAY_SEND_KEY
|
|
-}
|
|
|
|
/*
|
|
* Polling driver
|
|
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
|
|
index 769d265..a3a05ca 100644
|
|
--- a/drivers/pnp/pnpbios/bioscalls.c
|
|
+++ b/drivers/pnp/pnpbios/bioscalls.c
|
|
@@ -58,7 +58,7 @@ do { \
|
|
set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
|
|
} while(0)
|
|
|
|
-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
|
|
+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
|
|
(unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
|
|
|
|
/*
|
|
@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
|
|
|
|
cpu = get_cpu();
|
|
save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
|
|
+
|
|
+ pax_open_kernel();
|
|
get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
|
|
+ pax_close_kernel();
|
|
|
|
/* On some boxes IRQ's during PnP BIOS calls are deadly. */
|
|
spin_lock_irqsave(&pnp_bios_lock, flags);
|
|
@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
|
|
:"memory");
|
|
spin_unlock_irqrestore(&pnp_bios_lock, flags);
|
|
|
|
+ pax_open_kernel();
|
|
get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
|
|
+ pax_close_kernel();
|
|
+
|
|
put_cpu();
|
|
|
|
/* If we get here and this is set then the PnP BIOS faulted on us. */
|
|
@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
|
|
return status;
|
|
}
|
|
|
|
-void pnpbios_calls_init(union pnp_bios_install_struct *header)
|
|
+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
|
|
{
|
|
int i;
|
|
|
|
@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
|
|
pnp_bios_callpoint.offset = header->fields.pm16offset;
|
|
pnp_bios_callpoint.segment = PNP_CS16;
|
|
|
|
+ pax_open_kernel();
|
|
+
|
|
for_each_possible_cpu(i) {
|
|
struct desc_struct *gdt = get_cpu_gdt_table(i);
|
|
if (!gdt)
|
|
@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
|
|
set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
|
|
(unsigned long)__va(header->fields.pm16dseg));
|
|
}
|
|
+
|
|
+ pax_close_kernel();
|
|
}
|
|
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
|
|
index b0ecacb..7c9da2e 100644
|
|
--- a/drivers/pnp/resource.c
|
|
+++ b/drivers/pnp/resource.c
|
|
@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
|
|
return 1;
|
|
|
|
/* check if the resource is valid */
|
|
- if (*irq < 0 || *irq > 15)
|
|
+ if (*irq > 15)
|
|
return 0;
|
|
|
|
/* check if the resource is reserved */
|
|
@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
|
|
return 1;
|
|
|
|
/* check if the resource is valid */
|
|
- if (*dma < 0 || *dma == 4 || *dma > 7)
|
|
+ if (*dma == 4 || *dma > 7)
|
|
return 0;
|
|
|
|
/* check if the resource is reserved */
|
|
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
|
|
index 222ccd8..6275fa5 100644
|
|
--- a/drivers/power/bq27x00_battery.c
|
|
+++ b/drivers/power/bq27x00_battery.c
|
|
@@ -72,7 +72,7 @@
|
|
struct bq27x00_device_info;
|
|
struct bq27x00_access_methods {
|
|
int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
|
|
-};
|
|
+} __no_const;
|
|
|
|
enum bq27x00_chip { BQ27000, BQ27500 };
|
|
|
|
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
|
|
index 4c5b053..104263e 100644
|
|
--- a/drivers/regulator/max8660.c
|
|
+++ b/drivers/regulator/max8660.c
|
|
@@ -385,8 +385,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
|
|
max8660->shadow_regs[MAX8660_OVER1] = 5;
|
|
} else {
|
|
/* Otherwise devices can be toggled via software */
|
|
- max8660_dcdc_ops.enable = max8660_dcdc_enable;
|
|
- max8660_dcdc_ops.disable = max8660_dcdc_disable;
|
|
+ pax_open_kernel();
|
|
+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
|
|
+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
|
|
index 845aa22..99ec402 100644
|
|
--- a/drivers/regulator/mc13892-regulator.c
|
|
+++ b/drivers/regulator/mc13892-regulator.c
|
|
@@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
|
|
}
|
|
mc13xxx_unlock(mc13892);
|
|
|
|
- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
|
|
+ pax_open_kernel();
|
|
+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
|
|
= mc13892_vcam_set_mode;
|
|
- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
|
|
+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
|
|
= mc13892_vcam_get_mode;
|
|
+ pax_close_kernel();
|
|
|
|
mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
|
|
ARRAY_SIZE(mc13892_regulators));
|
|
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
|
|
index 3fcf627..f334910 100644
|
|
--- a/drivers/scsi/aacraid/aacraid.h
|
|
+++ b/drivers/scsi/aacraid/aacraid.h
|
|
@@ -492,7 +492,7 @@ struct adapter_ops
|
|
int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
|
|
/* Administrative operations */
|
|
int (*adapter_comm)(struct aac_dev * dev, int comm);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/*
|
|
* Define which interrupt handler needs to be installed
|
|
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
|
|
index e9313f8..ec88882 100644
|
|
--- a/drivers/scsi/aacraid/linit.c
|
|
+++ b/drivers/scsi/aacraid/linit.c
|
|
@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
|
|
#elif defined(__devinitconst)
|
|
static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
|
|
#else
|
|
-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
|
|
+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
|
|
#endif
|
|
{ 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
|
|
{ 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
|
|
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
|
|
index ff80552..1c4120c 100644
|
|
--- a/drivers/scsi/aic94xx/aic94xx_init.c
|
|
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
|
|
@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
|
|
.lldd_ata_set_dmamode = asd_set_dmamode,
|
|
};
|
|
|
|
-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
|
|
+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
|
|
{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
|
|
{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
|
|
{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
|
|
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
|
|
index 4ad7e36..d004679 100644
|
|
--- a/drivers/scsi/bfa/bfa.h
|
|
+++ b/drivers/scsi/bfa/bfa.h
|
|
@@ -196,7 +196,7 @@ struct bfa_hwif_s {
|
|
u32 *end);
|
|
int cpe_vec_q0;
|
|
int rme_vec_q0;
|
|
-};
|
|
+} __no_const;
|
|
typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
|
|
|
|
struct bfa_faa_cbfn_s {
|
|
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
|
|
index f0f80e2..8ec946b 100644
|
|
--- a/drivers/scsi/bfa/bfa_fcpim.c
|
|
+++ b/drivers/scsi/bfa/bfa_fcpim.c
|
|
@@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
|
|
|
bfa_iotag_attach(fcp);
|
|
|
|
- fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
|
|
+ fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
|
|
bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
|
|
(fcp->num_itns * sizeof(struct bfa_itn_s));
|
|
memset(fcp->itn_arr, 0,
|
|
@@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
|
|
void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
|
|
{
|
|
struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
|
|
- struct bfa_itn_s *itn;
|
|
+ bfa_itn_s_no_const *itn;
|
|
|
|
itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
|
|
itn->isr = isr;
|
|
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
|
|
index 36f26da..38a34a8 100644
|
|
--- a/drivers/scsi/bfa/bfa_fcpim.h
|
|
+++ b/drivers/scsi/bfa/bfa_fcpim.h
|
|
@@ -37,6 +37,7 @@ struct bfa_iotag_s {
|
|
struct bfa_itn_s {
|
|
bfa_isr_func_t isr;
|
|
};
|
|
+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
|
|
|
|
void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
|
|
void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
|
|
@@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
|
|
struct list_head iotag_tio_free_q; /* free IO resources */
|
|
struct list_head iotag_unused_q; /* unused IO resources*/
|
|
struct bfa_iotag_s *iotag_arr;
|
|
- struct bfa_itn_s *itn_arr;
|
|
+ bfa_itn_s_no_const *itn_arr;
|
|
int num_ioim_reqs;
|
|
int num_fwtio_reqs;
|
|
int num_itns;
|
|
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
|
|
index 1a99d4b..e85d64b 100644
|
|
--- a/drivers/scsi/bfa/bfa_ioc.h
|
|
+++ b/drivers/scsi/bfa/bfa_ioc.h
|
|
@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
|
|
bfa_ioc_disable_cbfn_t disable_cbfn;
|
|
bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
|
|
bfa_ioc_reset_cbfn_t reset_cbfn;
|
|
-};
|
|
+} __no_const;
|
|
|
|
/*
|
|
* IOC event notification mechanism.
|
|
@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
|
|
void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
|
|
bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
|
|
bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/*
|
|
* Queue element to wait for room in request queue. FIFO order is
|
|
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
|
|
index b48c24f..dac0fbc 100644
|
|
--- a/drivers/scsi/hosts.c
|
|
+++ b/drivers/scsi/hosts.c
|
|
@@ -42,7 +42,7 @@
|
|
#include "scsi_logging.h"
|
|
|
|
|
|
-static atomic_t scsi_host_next_hn; /* host_no for next new host */
|
|
+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
|
|
|
|
|
|
static void scsi_host_cls_release(struct device *dev)
|
|
@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
|
|
* subtract one because we increment first then return, but we need to
|
|
* know what the next host number was before increment
|
|
*/
|
|
- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
|
|
+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
|
|
shost->dma_channel = 0xff;
|
|
|
|
/* These three are default values which can be overridden */
|
|
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
|
|
index f5ccdcdf..1fa60c6 100644
|
|
--- a/drivers/scsi/hpsa.c
|
|
+++ b/drivers/scsi/hpsa.c
|
|
@@ -539,7 +539,7 @@ static inline u32 next_command(struct ctlr_info *h)
|
|
u32 a;
|
|
|
|
if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
|
|
- return h->access.command_completed(h);
|
|
+ return h->access->command_completed(h);
|
|
|
|
if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
|
|
a = *(h->reply_pool_head); /* Next cmd in ring buffer */
|
|
@@ -3051,7 +3051,7 @@ static void start_io(struct ctlr_info *h)
|
|
while (!list_empty(&h->reqQ)) {
|
|
c = list_entry(h->reqQ.next, struct CommandList, list);
|
|
/* can't do anything if fifo is full */
|
|
- if ((h->access.fifo_full(h))) {
|
|
+ if ((h->access->fifo_full(h))) {
|
|
dev_warn(&h->pdev->dev, "fifo full\n");
|
|
break;
|
|
}
|
|
@@ -3061,7 +3061,7 @@ static void start_io(struct ctlr_info *h)
|
|
h->Qdepth--;
|
|
|
|
/* Tell the controller execute command */
|
|
- h->access.submit_command(h, c);
|
|
+ h->access->submit_command(h, c);
|
|
|
|
/* Put job onto the completed Q */
|
|
addQ(&h->cmpQ, c);
|
|
@@ -3070,17 +3070,17 @@ static void start_io(struct ctlr_info *h)
|
|
|
|
static inline unsigned long get_next_completion(struct ctlr_info *h)
|
|
{
|
|
- return h->access.command_completed(h);
|
|
+ return h->access->command_completed(h);
|
|
}
|
|
|
|
static inline bool interrupt_pending(struct ctlr_info *h)
|
|
{
|
|
- return h->access.intr_pending(h);
|
|
+ return h->access->intr_pending(h);
|
|
}
|
|
|
|
static inline long interrupt_not_for_us(struct ctlr_info *h)
|
|
{
|
|
- return (h->access.intr_pending(h) == 0) ||
|
|
+ return (h->access->intr_pending(h) == 0) ||
|
|
(h->interrupts_enabled == 0);
|
|
}
|
|
|
|
@@ -3971,7 +3971,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
|
|
if (prod_index < 0)
|
|
return -ENODEV;
|
|
h->product_name = products[prod_index].product_name;
|
|
- h->access = *(products[prod_index].access);
|
|
+ h->access = products[prod_index].access;
|
|
|
|
if (hpsa_board_disabled(h->pdev)) {
|
|
dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
|
|
@@ -4249,7 +4249,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
|
|
|
|
assert_spin_locked(&lockup_detector_lock);
|
|
remove_ctlr_from_lockup_detector_list(h);
|
|
- h->access.set_intr_mask(h, HPSA_INTR_OFF);
|
|
+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
|
|
spin_lock_irqsave(&h->lock, flags);
|
|
h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
|
|
spin_unlock_irqrestore(&h->lock, flags);
|
|
@@ -4427,7 +4427,7 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
|
|
}
|
|
|
|
/* make sure the board interrupts are off */
|
|
- h->access.set_intr_mask(h, HPSA_INTR_OFF);
|
|
+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
|
|
|
|
if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
|
|
goto clean2;
|
|
@@ -4461,7 +4461,7 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
|
|
* fake ones to scoop up any residual completions.
|
|
*/
|
|
spin_lock_irqsave(&h->lock, flags);
|
|
- h->access.set_intr_mask(h, HPSA_INTR_OFF);
|
|
+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
|
|
spin_unlock_irqrestore(&h->lock, flags);
|
|
free_irq(h->intr[h->intr_mode], h);
|
|
rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
|
|
@@ -4480,9 +4480,9 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
|
|
dev_info(&h->pdev->dev, "Board READY.\n");
|
|
dev_info(&h->pdev->dev,
|
|
"Waiting for stale completions to drain.\n");
|
|
- h->access.set_intr_mask(h, HPSA_INTR_ON);
|
|
+ h->access->set_intr_mask(h, HPSA_INTR_ON);
|
|
msleep(10000);
|
|
- h->access.set_intr_mask(h, HPSA_INTR_OFF);
|
|
+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
|
|
|
|
rc = controller_reset_failed(h->cfgtable);
|
|
if (rc)
|
|
@@ -4503,7 +4503,7 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
|
|
}
|
|
|
|
/* Turn the interrupts on so we can service requests */
|
|
- h->access.set_intr_mask(h, HPSA_INTR_ON);
|
|
+ h->access->set_intr_mask(h, HPSA_INTR_ON);
|
|
|
|
hpsa_hba_inquiry(h);
|
|
hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
|
|
@@ -4555,7 +4555,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
|
|
* To write all data in the battery backed cache to disks
|
|
*/
|
|
hpsa_flush_cache(h);
|
|
- h->access.set_intr_mask(h, HPSA_INTR_OFF);
|
|
+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
|
|
free_irq(h->intr[h->intr_mode], h);
|
|
#ifdef CONFIG_PCI_MSI
|
|
if (h->msix_vector)
|
|
@@ -4729,7 +4729,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
|
|
return;
|
|
}
|
|
/* Change the access methods to the performant access methods */
|
|
- h->access = SA5_performant_access;
|
|
+ h->access = &SA5_performant_access;
|
|
h->transMethod = CFGTBL_Trans_Performant;
|
|
}
|
|
|
|
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
|
|
index 6f30a6f..ccc7e72 100644
|
|
--- a/drivers/scsi/hpsa.h
|
|
+++ b/drivers/scsi/hpsa.h
|
|
@@ -72,7 +72,7 @@ struct ctlr_info {
|
|
unsigned int msix_vector;
|
|
unsigned int msi_vector;
|
|
int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
|
|
- struct access_method access;
|
|
+ struct access_method *access;
|
|
|
|
/* queue and queue Info */
|
|
struct list_head reqQ;
|
|
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
|
|
index f2df059..a3a9930 100644
|
|
--- a/drivers/scsi/ips.h
|
|
+++ b/drivers/scsi/ips.h
|
|
@@ -1027,7 +1027,7 @@ typedef struct {
|
|
int (*intr)(struct ips_ha *);
|
|
void (*enableint)(struct ips_ha *);
|
|
uint32_t (*statupd)(struct ips_ha *);
|
|
-} ips_hw_func_t;
|
|
+} __no_const ips_hw_func_t;
|
|
|
|
typedef struct ips_ha {
|
|
uint8_t ha_id[IPS_MAX_CHANNELS+1];
|
|
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
|
|
index aceffad..c35c08d 100644
|
|
--- a/drivers/scsi/libfc/fc_exch.c
|
|
+++ b/drivers/scsi/libfc/fc_exch.c
|
|
@@ -105,12 +105,12 @@ struct fc_exch_mgr {
|
|
* all together if not used XXX
|
|
*/
|
|
struct {
|
|
- atomic_t no_free_exch;
|
|
- atomic_t no_free_exch_xid;
|
|
- atomic_t xid_not_found;
|
|
- atomic_t xid_busy;
|
|
- atomic_t seq_not_found;
|
|
- atomic_t non_bls_resp;
|
|
+ atomic_unchecked_t no_free_exch;
|
|
+ atomic_unchecked_t no_free_exch_xid;
|
|
+ atomic_unchecked_t xid_not_found;
|
|
+ atomic_unchecked_t xid_busy;
|
|
+ atomic_unchecked_t seq_not_found;
|
|
+ atomic_unchecked_t non_bls_resp;
|
|
} stats;
|
|
};
|
|
|
|
@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
|
|
/* allocate memory for exchange */
|
|
ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
|
|
if (!ep) {
|
|
- atomic_inc(&mp->stats.no_free_exch);
|
|
+ atomic_inc_unchecked(&mp->stats.no_free_exch);
|
|
goto out;
|
|
}
|
|
memset(ep, 0, sizeof(*ep));
|
|
@@ -780,7 +780,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
|
|
return ep;
|
|
err:
|
|
spin_unlock_bh(&pool->lock);
|
|
- atomic_inc(&mp->stats.no_free_exch_xid);
|
|
+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
|
|
mempool_free(ep, mp->ep_pool);
|
|
return NULL;
|
|
}
|
|
@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
|
|
xid = ntohs(fh->fh_ox_id); /* we originated exch */
|
|
ep = fc_exch_find(mp, xid);
|
|
if (!ep) {
|
|
- atomic_inc(&mp->stats.xid_not_found);
|
|
+ atomic_inc_unchecked(&mp->stats.xid_not_found);
|
|
reject = FC_RJT_OX_ID;
|
|
goto out;
|
|
}
|
|
@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
|
|
ep = fc_exch_find(mp, xid);
|
|
if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
|
|
if (ep) {
|
|
- atomic_inc(&mp->stats.xid_busy);
|
|
+ atomic_inc_unchecked(&mp->stats.xid_busy);
|
|
reject = FC_RJT_RX_ID;
|
|
goto rel;
|
|
}
|
|
@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
|
|
}
|
|
xid = ep->xid; /* get our XID */
|
|
} else if (!ep) {
|
|
- atomic_inc(&mp->stats.xid_not_found);
|
|
+ atomic_inc_unchecked(&mp->stats.xid_not_found);
|
|
reject = FC_RJT_RX_ID; /* XID not found */
|
|
goto out;
|
|
}
|
|
@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
|
|
} else {
|
|
sp = &ep->seq;
|
|
if (sp->id != fh->fh_seq_id) {
|
|
- atomic_inc(&mp->stats.seq_not_found);
|
|
+ atomic_inc_unchecked(&mp->stats.seq_not_found);
|
|
if (f_ctl & FC_FC_END_SEQ) {
|
|
/*
|
|
* Update sequence_id based on incoming last
|
|
@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
|
|
|
|
ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
|
|
if (!ep) {
|
|
- atomic_inc(&mp->stats.xid_not_found);
|
|
+ atomic_inc_unchecked(&mp->stats.xid_not_found);
|
|
goto out;
|
|
}
|
|
if (ep->esb_stat & ESB_ST_COMPLETE) {
|
|
- atomic_inc(&mp->stats.xid_not_found);
|
|
+ atomic_inc_unchecked(&mp->stats.xid_not_found);
|
|
goto rel;
|
|
}
|
|
if (ep->rxid == FC_XID_UNKNOWN)
|
|
ep->rxid = ntohs(fh->fh_rx_id);
|
|
if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
|
|
- atomic_inc(&mp->stats.xid_not_found);
|
|
+ atomic_inc_unchecked(&mp->stats.xid_not_found);
|
|
goto rel;
|
|
}
|
|
if (ep->did != ntoh24(fh->fh_s_id) &&
|
|
ep->did != FC_FID_FLOGI) {
|
|
- atomic_inc(&mp->stats.xid_not_found);
|
|
+ atomic_inc_unchecked(&mp->stats.xid_not_found);
|
|
goto rel;
|
|
}
|
|
sof = fr_sof(fp);
|
|
@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
|
|
sp->ssb_stat |= SSB_ST_RESP;
|
|
sp->id = fh->fh_seq_id;
|
|
} else if (sp->id != fh->fh_seq_id) {
|
|
- atomic_inc(&mp->stats.seq_not_found);
|
|
+ atomic_inc_unchecked(&mp->stats.seq_not_found);
|
|
goto rel;
|
|
}
|
|
|
|
@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
|
|
sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
|
|
|
|
if (!sp)
|
|
- atomic_inc(&mp->stats.xid_not_found);
|
|
+ atomic_inc_unchecked(&mp->stats.xid_not_found);
|
|
else
|
|
- atomic_inc(&mp->stats.non_bls_resp);
|
|
+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
|
|
|
|
fc_frame_free(fp);
|
|
}
|
|
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
|
|
index ec96c5f..7be5dcf2 100644
|
|
--- a/drivers/scsi/libsas/sas_ata.c
|
|
+++ b/drivers/scsi/libsas/sas_ata.c
|
|
@@ -529,7 +529,7 @@ static struct ata_port_operations sas_sata_ops = {
|
|
.postreset = ata_std_postreset,
|
|
.error_handler = ata_std_error_handler,
|
|
.post_internal_cmd = sas_ata_post_internal,
|
|
- .qc_defer = ata_std_qc_defer,
|
|
+ .qc_defer = ata_std_qc_defer,
|
|
.qc_prep = ata_noop_qc_prep,
|
|
.qc_issue = sas_ata_qc_issue,
|
|
.qc_fill_rtf = sas_ata_qc_fill_rtf,
|
|
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
|
|
index 3a1ffdd..8eb7c71 100644
|
|
--- a/drivers/scsi/lpfc/lpfc.h
|
|
+++ b/drivers/scsi/lpfc/lpfc.h
|
|
@@ -413,7 +413,7 @@ struct lpfc_vport {
|
|
struct dentry *debug_nodelist;
|
|
struct dentry *vport_debugfs_root;
|
|
struct lpfc_debugfs_trc *disc_trc;
|
|
- atomic_t disc_trc_cnt;
|
|
+ atomic_unchecked_t disc_trc_cnt;
|
|
#endif
|
|
uint8_t stat_data_enabled;
|
|
uint8_t stat_data_blocked;
|
|
@@ -826,8 +826,8 @@ struct lpfc_hba {
|
|
struct timer_list fabric_block_timer;
|
|
unsigned long bit_flags;
|
|
#define FABRIC_COMANDS_BLOCKED 0
|
|
- atomic_t num_rsrc_err;
|
|
- atomic_t num_cmd_success;
|
|
+ atomic_unchecked_t num_rsrc_err;
|
|
+ atomic_unchecked_t num_cmd_success;
|
|
unsigned long last_rsrc_error_time;
|
|
unsigned long last_ramp_down_time;
|
|
unsigned long last_ramp_up_time;
|
|
@@ -863,7 +863,7 @@ struct lpfc_hba {
|
|
|
|
struct dentry *debug_slow_ring_trc;
|
|
struct lpfc_debugfs_trc *slow_ring_trc;
|
|
- atomic_t slow_ring_trc_cnt;
|
|
+ atomic_unchecked_t slow_ring_trc_cnt;
|
|
/* iDiag debugfs sub-directory */
|
|
struct dentry *idiag_root;
|
|
struct dentry *idiag_pci_cfg;
|
|
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
|
|
index af04b0d..8f1a97e 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
|
|
@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
|
|
|
|
#include <linux/debugfs.h>
|
|
|
|
-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
|
|
+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
|
|
static unsigned long lpfc_debugfs_start_time = 0L;
|
|
|
|
/* iDiag */
|
|
@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
|
|
lpfc_debugfs_enable = 0;
|
|
|
|
len = 0;
|
|
- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
|
|
+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
|
|
(lpfc_debugfs_max_disc_trc - 1);
|
|
for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
|
|
dtp = vport->disc_trc + i;
|
|
@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
|
|
lpfc_debugfs_enable = 0;
|
|
|
|
len = 0;
|
|
- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
|
|
+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
|
|
(lpfc_debugfs_max_slow_ring_trc - 1);
|
|
for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
|
|
dtp = phba->slow_ring_trc + i;
|
|
@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
|
|
!vport || !vport->disc_trc)
|
|
return;
|
|
|
|
- index = atomic_inc_return(&vport->disc_trc_cnt) &
|
|
+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
|
|
(lpfc_debugfs_max_disc_trc - 1);
|
|
dtp = vport->disc_trc + index;
|
|
dtp->fmt = fmt;
|
|
dtp->data1 = data1;
|
|
dtp->data2 = data2;
|
|
dtp->data3 = data3;
|
|
- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
|
|
+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
|
|
dtp->jif = jiffies;
|
|
#endif
|
|
return;
|
|
@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
|
|
!phba || !phba->slow_ring_trc)
|
|
return;
|
|
|
|
- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
|
|
+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
|
|
(lpfc_debugfs_max_slow_ring_trc - 1);
|
|
dtp = phba->slow_ring_trc + index;
|
|
dtp->fmt = fmt;
|
|
dtp->data1 = data1;
|
|
dtp->data2 = data2;
|
|
dtp->data3 = data3;
|
|
- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
|
|
+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
|
|
dtp->jif = jiffies;
|
|
#endif
|
|
return;
|
|
@@ -4090,7 +4090,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
|
|
"slow_ring buffer\n");
|
|
goto debug_failed;
|
|
}
|
|
- atomic_set(&phba->slow_ring_trc_cnt, 0);
|
|
+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
|
|
memset(phba->slow_ring_trc, 0,
|
|
(sizeof(struct lpfc_debugfs_trc) *
|
|
lpfc_debugfs_max_slow_ring_trc));
|
|
@@ -4136,7 +4136,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
|
|
"buffer\n");
|
|
goto debug_failed;
|
|
}
|
|
- atomic_set(&vport->disc_trc_cnt, 0);
|
|
+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
|
|
|
|
snprintf(name, sizeof(name), "discovery_trace");
|
|
vport->debug_disc_trc =
|
|
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
|
|
index 9598fdc..7e9f3d9 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_init.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_init.c
|
|
@@ -10266,8 +10266,10 @@ lpfc_init(void)
|
|
"misc_register returned with status %d", error);
|
|
|
|
if (lpfc_enable_npiv) {
|
|
- lpfc_transport_functions.vport_create = lpfc_vport_create;
|
|
- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
|
|
+ pax_open_kernel();
|
|
+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
|
|
+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
|
|
+ pax_close_kernel();
|
|
}
|
|
lpfc_transport_template =
|
|
fc_attach_transport(&lpfc_transport_functions);
|
|
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
|
|
index 88f3a83..686d3fa 100644
|
|
--- a/drivers/scsi/lpfc/lpfc_scsi.c
|
|
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
|
|
@@ -311,7 +311,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
|
|
uint32_t evt_posted;
|
|
|
|
spin_lock_irqsave(&phba->hbalock, flags);
|
|
- atomic_inc(&phba->num_rsrc_err);
|
|
+ atomic_inc_unchecked(&phba->num_rsrc_err);
|
|
phba->last_rsrc_error_time = jiffies;
|
|
|
|
if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
|
|
@@ -352,7 +352,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
|
|
unsigned long flags;
|
|
struct lpfc_hba *phba = vport->phba;
|
|
uint32_t evt_posted;
|
|
- atomic_inc(&phba->num_cmd_success);
|
|
+ atomic_inc_unchecked(&phba->num_cmd_success);
|
|
|
|
if (vport->cfg_lun_queue_depth <= queue_depth)
|
|
return;
|
|
@@ -396,8 +396,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
|
|
unsigned long num_rsrc_err, num_cmd_success;
|
|
int i;
|
|
|
|
- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
|
|
- num_cmd_success = atomic_read(&phba->num_cmd_success);
|
|
+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
|
|
+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
|
|
|
|
vports = lpfc_create_vport_work_array(phba);
|
|
if (vports != NULL)
|
|
@@ -417,8 +417,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
|
|
}
|
|
}
|
|
lpfc_destroy_vport_work_array(phba, vports);
|
|
- atomic_set(&phba->num_rsrc_err, 0);
|
|
- atomic_set(&phba->num_cmd_success, 0);
|
|
+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
|
|
+ atomic_set_unchecked(&phba->num_cmd_success, 0);
|
|
}
|
|
|
|
/**
|
|
@@ -452,8 +452,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
|
|
}
|
|
}
|
|
lpfc_destroy_vport_work_array(phba, vports);
|
|
- atomic_set(&phba->num_rsrc_err, 0);
|
|
- atomic_set(&phba->num_cmd_success, 0);
|
|
+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
|
|
+ atomic_set_unchecked(&phba->num_cmd_success, 0);
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
|
|
index ea8a0b4..812a124 100644
|
|
--- a/drivers/scsi/pmcraid.c
|
|
+++ b/drivers/scsi/pmcraid.c
|
|
@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
|
|
res->scsi_dev = scsi_dev;
|
|
scsi_dev->hostdata = res;
|
|
res->change_detected = 0;
|
|
- atomic_set(&res->read_failures, 0);
|
|
- atomic_set(&res->write_failures, 0);
|
|
+ atomic_set_unchecked(&res->read_failures, 0);
|
|
+ atomic_set_unchecked(&res->write_failures, 0);
|
|
rc = 0;
|
|
}
|
|
spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
|
|
@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
|
|
|
|
/* If this was a SCSI read/write command keep count of errors */
|
|
if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
|
|
- atomic_inc(&res->read_failures);
|
|
+ atomic_inc_unchecked(&res->read_failures);
|
|
else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
|
|
- atomic_inc(&res->write_failures);
|
|
+ atomic_inc_unchecked(&res->write_failures);
|
|
|
|
if (!RES_IS_GSCSI(res->cfg_entry) &&
|
|
masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
|
|
@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
|
|
* block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
|
|
* hrrq_id assigned here in queuecommand
|
|
*/
|
|
- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
|
|
+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
|
|
pinstance->num_hrrq;
|
|
cmd->cmd_done = pmcraid_io_done;
|
|
|
|
@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
|
|
* block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
|
|
* hrrq_id assigned here in queuecommand
|
|
*/
|
|
- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
|
|
+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
|
|
pinstance->num_hrrq;
|
|
|
|
if (request_size) {
|
|
@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
|
|
|
|
pinstance = container_of(workp, struct pmcraid_instance, worker_q);
|
|
/* add resources only after host is added into system */
|
|
- if (!atomic_read(&pinstance->expose_resources))
|
|
+ if (!atomic_read_unchecked(&pinstance->expose_resources))
|
|
return;
|
|
|
|
fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
|
|
@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
|
|
init_waitqueue_head(&pinstance->reset_wait_q);
|
|
|
|
atomic_set(&pinstance->outstanding_cmds, 0);
|
|
- atomic_set(&pinstance->last_message_id, 0);
|
|
- atomic_set(&pinstance->expose_resources, 0);
|
|
+ atomic_set_unchecked(&pinstance->last_message_id, 0);
|
|
+ atomic_set_unchecked(&pinstance->expose_resources, 0);
|
|
|
|
INIT_LIST_HEAD(&pinstance->free_res_q);
|
|
INIT_LIST_HEAD(&pinstance->used_res_q);
|
|
@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
|
|
/* Schedule worker thread to handle CCN and take care of adding and
|
|
* removing devices to OS
|
|
*/
|
|
- atomic_set(&pinstance->expose_resources, 1);
|
|
+ atomic_set_unchecked(&pinstance->expose_resources, 1);
|
|
schedule_work(&pinstance->worker_q);
|
|
return rc;
|
|
|
|
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
|
|
index e1d150f..6c6df44 100644
|
|
--- a/drivers/scsi/pmcraid.h
|
|
+++ b/drivers/scsi/pmcraid.h
|
|
@@ -748,7 +748,7 @@ struct pmcraid_instance {
|
|
struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
|
|
|
|
/* Message id as filled in last fired IOARCB, used to identify HRRQ */
|
|
- atomic_t last_message_id;
|
|
+ atomic_unchecked_t last_message_id;
|
|
|
|
/* configuration table */
|
|
struct pmcraid_config_table *cfg_table;
|
|
@@ -777,7 +777,7 @@ struct pmcraid_instance {
|
|
atomic_t outstanding_cmds;
|
|
|
|
/* should add/delete resources to mid-layer now ?*/
|
|
- atomic_t expose_resources;
|
|
+ atomic_unchecked_t expose_resources;
|
|
|
|
|
|
|
|
@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
|
|
struct pmcraid_config_table_entry_ext cfg_entry_ext;
|
|
};
|
|
struct scsi_device *scsi_dev; /* Link scsi_device structure */
|
|
- atomic_t read_failures; /* count of failed READ commands */
|
|
- atomic_t write_failures; /* count of failed WRITE commands */
|
|
+ atomic_unchecked_t read_failures; /* count of failed READ commands */
|
|
+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
|
|
|
|
/* To indicate add/delete/modify during CCN */
|
|
u8 change_detected;
|
|
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
|
|
index 09bedb7..6fc9f7e 100644
|
|
--- a/drivers/scsi/qla2xxx/qla_def.h
|
|
+++ b/drivers/scsi/qla2xxx/qla_def.h
|
|
@@ -2264,7 +2264,7 @@ struct isp_operations {
|
|
int (*start_scsi) (srb_t *);
|
|
int (*abort_isp) (struct scsi_qla_host *);
|
|
int (*iospace_config)(struct qla_hw_data*);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/* MSI-X Support *************************************************************/
|
|
|
|
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
|
|
index 7f2492e..51138772 100644
|
|
--- a/drivers/scsi/qla4xxx/ql4_def.h
|
|
+++ b/drivers/scsi/qla4xxx/ql4_def.h
|
|
@@ -268,7 +268,7 @@ struct ddb_entry {
|
|
* (4000 only) */
|
|
atomic_t relogin_timer; /* Max Time to wait for
|
|
* relogin to complete */
|
|
- atomic_t relogin_retry_count; /* Num of times relogin has been
|
|
+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
|
|
* retried */
|
|
uint32_t default_time2wait; /* Default Min time between
|
|
* relogins (+aens) */
|
|
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
|
|
index ee47820..a83b1f4 100644
|
|
--- a/drivers/scsi/qla4xxx/ql4_os.c
|
|
+++ b/drivers/scsi/qla4xxx/ql4_os.c
|
|
@@ -2551,12 +2551,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
|
|
*/
|
|
if (!iscsi_is_session_online(cls_sess)) {
|
|
/* Reset retry relogin timer */
|
|
- atomic_inc(&ddb_entry->relogin_retry_count);
|
|
+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
|
|
DEBUG2(ql4_printk(KERN_INFO, ha,
|
|
"%s: index[%d] relogin timed out-retrying"
|
|
" relogin (%d), retry (%d)\n", __func__,
|
|
ddb_entry->fw_ddb_index,
|
|
- atomic_read(&ddb_entry->relogin_retry_count),
|
|
+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
|
|
ddb_entry->default_time2wait + 4));
|
|
set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
|
|
atomic_set(&ddb_entry->retry_relogin_timer,
|
|
@@ -4453,7 +4453,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
|
|
|
|
atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
|
|
atomic_set(&ddb_entry->relogin_timer, 0);
|
|
- atomic_set(&ddb_entry->relogin_retry_count, 0);
|
|
+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
|
|
def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
|
|
ddb_entry->default_relogin_timeout =
|
|
(def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
|
|
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
|
|
index b1f76da..387ead3 100644
|
|
--- a/drivers/scsi/scsi.c
|
|
+++ b/drivers/scsi/scsi.c
|
|
@@ -656,7 +656,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
|
|
unsigned long timeout;
|
|
int rtn = 0;
|
|
|
|
- atomic_inc(&cmd->device->iorequest_cnt);
|
|
+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
|
|
|
|
/* check if the device is still usable */
|
|
if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
|
|
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
|
|
index 29a8823..c6c95fb 100644
|
|
--- a/drivers/scsi/scsi_lib.c
|
|
+++ b/drivers/scsi/scsi_lib.c
|
|
@@ -1434,7 +1434,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
|
|
shost = sdev->host;
|
|
scsi_init_cmd_errh(cmd);
|
|
cmd->result = DID_NO_CONNECT << 16;
|
|
- atomic_inc(&cmd->device->iorequest_cnt);
|
|
+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
|
|
|
|
/*
|
|
* SCSI request completion path will do scsi_device_unbusy(),
|
|
@@ -1460,9 +1460,9 @@ static void scsi_softirq_done(struct request *rq)
|
|
|
|
INIT_LIST_HEAD(&cmd->eh_entry);
|
|
|
|
- atomic_inc(&cmd->device->iodone_cnt);
|
|
+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
|
|
if (cmd->result)
|
|
- atomic_inc(&cmd->device->ioerr_cnt);
|
|
+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
|
|
|
|
disposition = scsi_decide_disposition(cmd);
|
|
if (disposition != SUCCESS &&
|
|
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
|
|
index b6d12e7..f2f74f0 100644
|
|
--- a/drivers/scsi/scsi_sysfs.c
|
|
+++ b/drivers/scsi/scsi_sysfs.c
|
|
@@ -657,7 +657,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
|
|
char *buf) \
|
|
{ \
|
|
struct scsi_device *sdev = to_scsi_device(dev); \
|
|
- unsigned long long count = atomic_read(&sdev->field); \
|
|
+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
|
|
return snprintf(buf, 20, "0x%llx\n", count); \
|
|
} \
|
|
static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
|
|
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
|
|
index 84a1fdf..693b0d6 100644
|
|
--- a/drivers/scsi/scsi_tgt_lib.c
|
|
+++ b/drivers/scsi/scsi_tgt_lib.c
|
|
@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
|
|
int err;
|
|
|
|
dprintk("%lx %u\n", uaddr, len);
|
|
- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
|
|
+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
|
|
if (err) {
|
|
/*
|
|
* TODO: need to fixup sg_tablesize, max_segment_size,
|
|
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
|
|
index 80fbe2a..efa223b 100644
|
|
--- a/drivers/scsi/scsi_transport_fc.c
|
|
+++ b/drivers/scsi/scsi_transport_fc.c
|
|
@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
|
|
* Netlink Infrastructure
|
|
*/
|
|
|
|
-static atomic_t fc_event_seq;
|
|
+static atomic_unchecked_t fc_event_seq;
|
|
|
|
/**
|
|
* fc_get_event_number - Obtain the next sequential FC event number
|
|
@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
|
|
u32
|
|
fc_get_event_number(void)
|
|
{
|
|
- return atomic_add_return(1, &fc_event_seq);
|
|
+ return atomic_add_return_unchecked(1, &fc_event_seq);
|
|
}
|
|
EXPORT_SYMBOL(fc_get_event_number);
|
|
|
|
@@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
|
|
{
|
|
int error;
|
|
|
|
- atomic_set(&fc_event_seq, 0);
|
|
+ atomic_set_unchecked(&fc_event_seq, 0);
|
|
|
|
error = transport_class_register(&fc_host_class);
|
|
if (error)
|
|
@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
|
|
char *cp;
|
|
|
|
*val = simple_strtoul(buf, &cp, 0);
|
|
- if ((*cp && (*cp != '\n')) || (*val < 0))
|
|
+ if (*cp && (*cp != '\n'))
|
|
return -EINVAL;
|
|
/*
|
|
* Check for overflow; dev_loss_tmo is u32
|
|
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
|
|
index 1cf640e..78e9014 100644
|
|
--- a/drivers/scsi/scsi_transport_iscsi.c
|
|
+++ b/drivers/scsi/scsi_transport_iscsi.c
|
|
@@ -79,7 +79,7 @@ struct iscsi_internal {
|
|
struct transport_container session_cont;
|
|
};
|
|
|
|
-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
|
|
+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
|
|
static struct workqueue_struct *iscsi_eh_timer_workq;
|
|
|
|
static DEFINE_IDA(iscsi_sess_ida);
|
|
@@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
|
|
int err;
|
|
|
|
ihost = shost->shost_data;
|
|
- session->sid = atomic_add_return(1, &iscsi_session_nr);
|
|
+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
|
|
|
|
if (target_id == ISCSI_MAX_TARGET) {
|
|
id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
|
|
@@ -2940,7 +2940,7 @@ static __init int iscsi_transport_init(void)
|
|
printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
|
|
ISCSI_TRANSPORT_VERSION);
|
|
|
|
- atomic_set(&iscsi_session_nr, 0);
|
|
+ atomic_set_unchecked(&iscsi_session_nr, 0);
|
|
|
|
err = class_register(&iscsi_transport_class);
|
|
if (err)
|
|
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
|
|
index 21a045e..ec89e03 100644
|
|
--- a/drivers/scsi/scsi_transport_srp.c
|
|
+++ b/drivers/scsi/scsi_transport_srp.c
|
|
@@ -33,7 +33,7 @@
|
|
#include "scsi_transport_srp_internal.h"
|
|
|
|
struct srp_host_attrs {
|
|
- atomic_t next_port_id;
|
|
+ atomic_unchecked_t next_port_id;
|
|
};
|
|
#define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
|
|
|
|
@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
|
|
struct Scsi_Host *shost = dev_to_shost(dev);
|
|
struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
|
|
|
|
- atomic_set(&srp_host->next_port_id, 0);
|
|
+ atomic_set_unchecked(&srp_host->next_port_id, 0);
|
|
return 0;
|
|
}
|
|
|
|
@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
|
|
memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
|
|
rport->roles = ids->roles;
|
|
|
|
- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
|
|
+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
|
|
dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
|
|
|
|
transport_setup_device(&rport->dev);
|
|
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
|
|
index fb119ce..86d7e59 100644
|
|
--- a/drivers/scsi/sg.c
|
|
+++ b/drivers/scsi/sg.c
|
|
@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
|
|
sdp->disk->disk_name,
|
|
MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
|
|
NULL,
|
|
- (char *)arg);
|
|
+ (char __user *)arg);
|
|
case BLKTRACESTART:
|
|
return blk_trace_startstop(sdp->device->request_queue, 1);
|
|
case BLKTRACESTOP:
|
|
@@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
|
|
const struct file_operations * fops;
|
|
};
|
|
|
|
-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
|
|
+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
|
|
{"allow_dio", &adio_fops},
|
|
{"debug", &debug_fops},
|
|
{"def_reserved_size", &dressz_fops},
|
|
@@ -2332,7 +2332,7 @@ sg_proc_init(void)
|
|
if (!sg_proc_sgp)
|
|
return 1;
|
|
for (k = 0; k < num_leaves; ++k) {
|
|
- struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
|
|
+ const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
|
|
umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
|
|
proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
|
|
}
|
|
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
|
|
index 3d8f662..070f1a5 100644
|
|
--- a/drivers/spi/spi.c
|
|
+++ b/drivers/spi/spi.c
|
|
@@ -1361,7 +1361,7 @@ int spi_bus_unlock(struct spi_master *master)
|
|
EXPORT_SYMBOL_GPL(spi_bus_unlock);
|
|
|
|
/* portable code must never pass more than 32 bytes */
|
|
-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
|
|
+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
|
|
|
|
static u8 *buf;
|
|
|
|
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
|
|
index d91751f..a3a9e36 100644
|
|
--- a/drivers/staging/octeon/ethernet-rx.c
|
|
+++ b/drivers/staging/octeon/ethernet-rx.c
|
|
@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
|
|
/* Increment RX stats for virtual ports */
|
|
if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
|
|
#ifdef CONFIG_64BIT
|
|
- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
|
|
- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
|
|
+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
|
|
+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
|
|
#else
|
|
- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
|
|
- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
|
|
+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
|
|
+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
|
|
#endif
|
|
}
|
|
netif_receive_skb(skb);
|
|
@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
|
|
dev->name);
|
|
*/
|
|
#ifdef CONFIG_64BIT
|
|
- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
|
|
+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
|
|
#else
|
|
- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
|
|
+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
|
|
#endif
|
|
dev_kfree_skb_irq(skb);
|
|
}
|
|
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
|
|
index 60cba81..71eb239 100644
|
|
--- a/drivers/staging/octeon/ethernet.c
|
|
+++ b/drivers/staging/octeon/ethernet.c
|
|
@@ -259,11 +259,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
|
|
* since the RX tasklet also increments it.
|
|
*/
|
|
#ifdef CONFIG_64BIT
|
|
- atomic64_add(rx_status.dropped_packets,
|
|
- (atomic64_t *)&priv->stats.rx_dropped);
|
|
+ atomic64_add_unchecked(rx_status.dropped_packets,
|
|
+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
|
|
#else
|
|
- atomic_add(rx_status.dropped_packets,
|
|
- (atomic_t *)&priv->stats.rx_dropped);
|
|
+ atomic_add_unchecked(rx_status.dropped_packets,
|
|
+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
|
|
#endif
|
|
}
|
|
|
|
diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
|
|
index d3d8727..f9327bb8 100644
|
|
--- a/drivers/staging/rtl8712/rtl871x_io.h
|
|
+++ b/drivers/staging/rtl8712/rtl871x_io.h
|
|
@@ -108,7 +108,7 @@ struct _io_ops {
|
|
u8 *pmem);
|
|
u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
|
|
u8 *pmem);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct io_req {
|
|
struct list_head list;
|
|
diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
|
|
index c7b5e8b..783d6cbe 100644
|
|
--- a/drivers/staging/sbe-2t3e3/netdev.c
|
|
+++ b/drivers/staging/sbe-2t3e3/netdev.c
|
|
@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
|
|
t3e3_if_config(sc, cmd_2t3e3, (char *)¶m, &resp, &rlen);
|
|
|
|
if (rlen)
|
|
- if (copy_to_user(data, &resp, rlen))
|
|
+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
|
|
return -EFAULT;
|
|
|
|
return 0;
|
|
diff --git a/drivers/staging/speakup/speakup_soft.c.rej b/drivers/staging/speakup/speakup_soft.c.rej
|
|
new file mode 100644
|
|
index 0000000..296a85f
|
|
--- /dev/null
|
|
+++ b/drivers/staging/speakup/speakup_soft.c.rej
|
|
@@ -0,0 +1,15 @@
|
|
+--- drivers/staging/speakup/speakup_soft.c 2011-10-24 12:48:37.219091188 +0200
|
|
++++ drivers/staging/speakup/speakup_soft.c 2012-05-21 12:10:10.868048961 +0200
|
|
+@@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct fil
|
|
+ break;
|
|
+ } else if (!initialized) {
|
|
+ if (*init) {
|
|
+- ch = *init;
|
|
+ init++;
|
|
+ } else {
|
|
+ initialized = 1;
|
|
+ }
|
|
++ ch = *init;
|
|
+ } else {
|
|
+ ch = synth_buffer_getc();
|
|
+ }
|
|
diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
|
|
index c7b888c..c94be93 100644
|
|
--- a/drivers/staging/usbip/usbip_common.h
|
|
+++ b/drivers/staging/usbip/usbip_common.h
|
|
@@ -289,7 +289,7 @@ struct usbip_device {
|
|
void (*shutdown)(struct usbip_device *);
|
|
void (*reset)(struct usbip_device *);
|
|
void (*unusable)(struct usbip_device *);
|
|
- } eh_ops;
|
|
+ } __no_const eh_ops;
|
|
};
|
|
|
|
/* usbip_common.c */
|
|
diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
|
|
index 88b3298..3783eee 100644
|
|
--- a/drivers/staging/usbip/vhci.h
|
|
+++ b/drivers/staging/usbip/vhci.h
|
|
@@ -88,7 +88,7 @@ struct vhci_hcd {
|
|
unsigned resuming:1;
|
|
unsigned long re_timeout;
|
|
|
|
- atomic_t seqnum;
|
|
+ atomic_unchecked_t seqnum;
|
|
|
|
/*
|
|
* NOTE:
|
|
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
|
|
index dca9bf1..80735c9 100644
|
|
--- a/drivers/staging/usbip/vhci_hcd.c
|
|
+++ b/drivers/staging/usbip/vhci_hcd.c
|
|
@@ -488,7 +488,7 @@ static void vhci_tx_urb(struct urb *urb)
|
|
return;
|
|
}
|
|
|
|
- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
|
|
+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
|
|
if (priv->seqnum == 0xffff)
|
|
dev_info(&urb->dev->dev, "seqnum max\n");
|
|
|
|
@@ -740,7 +740,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
|
|
return -ENOMEM;
|
|
}
|
|
|
|
- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
|
|
+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
|
|
if (unlink->seqnum == 0xffff)
|
|
pr_info("seqnum max\n");
|
|
|
|
@@ -928,7 +928,7 @@ static int vhci_start(struct usb_hcd *hcd)
|
|
vdev->rhport = rhport;
|
|
}
|
|
|
|
- atomic_set(&vhci->seqnum, 0);
|
|
+ atomic_set_unchecked(&vhci->seqnum, 0);
|
|
spin_lock_init(&vhci->lock);
|
|
|
|
hcd->power_budget = 0; /* no limit */
|
|
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
|
|
index f5fba732..210a16c 100644
|
|
--- a/drivers/staging/usbip/vhci_rx.c
|
|
+++ b/drivers/staging/usbip/vhci_rx.c
|
|
@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
|
|
if (!urb) {
|
|
pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
|
|
pr_info("max seqnum %d\n",
|
|
- atomic_read(&the_controller->seqnum));
|
|
+ atomic_read_unchecked(&the_controller->seqnum));
|
|
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
|
|
return;
|
|
}
|
|
diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
|
|
index 7735027..30eed13 100644
|
|
--- a/drivers/staging/vt6655/hostap.c
|
|
+++ b/drivers/staging/vt6655/hostap.c
|
|
@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
|
|
*
|
|
*/
|
|
|
|
+static net_device_ops_no_const apdev_netdev_ops;
|
|
+
|
|
static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
|
|
{
|
|
PSDevice apdev_priv;
|
|
struct net_device *dev = pDevice->dev;
|
|
int ret;
|
|
- const struct net_device_ops apdev_netdev_ops = {
|
|
- .ndo_start_xmit = pDevice->tx_80211,
|
|
- };
|
|
|
|
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
|
|
|
|
@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
|
|
*apdev_priv = *pDevice;
|
|
memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
|
|
|
|
+ /* only half broken now */
|
|
+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
|
|
pDevice->apdev->netdev_ops = &apdev_netdev_ops;
|
|
|
|
pDevice->apdev->type = ARPHRD_IEEE80211;
|
|
diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
|
|
index df8ea25..47dd9c6 100644
|
|
--- a/drivers/staging/vt6656/hostap.c
|
|
+++ b/drivers/staging/vt6656/hostap.c
|
|
@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
|
|
*
|
|
*/
|
|
|
|
+static net_device_ops_no_const apdev_netdev_ops;
|
|
+
|
|
static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
|
|
{
|
|
PSDevice apdev_priv;
|
|
struct net_device *dev = pDevice->dev;
|
|
int ret;
|
|
- const struct net_device_ops apdev_netdev_ops = {
|
|
- .ndo_start_xmit = pDevice->tx_80211,
|
|
- };
|
|
|
|
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
|
|
|
|
@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
|
|
*apdev_priv = *pDevice;
|
|
memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
|
|
|
|
+ /* only half broken now */
|
|
+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
|
|
pDevice->apdev->netdev_ops = &apdev_netdev_ops;
|
|
|
|
pDevice->apdev->type = ARPHRD_IEEE80211;
|
|
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
|
|
index 7843dfd..3db105f 100644
|
|
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
|
|
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
|
|
@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
|
|
|
|
struct usbctlx_completor {
|
|
int (*complete) (struct usbctlx_completor *);
|
|
-};
|
|
+} __no_const;
|
|
|
|
static int
|
|
hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
|
|
diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
|
|
index 1ca66ea..76f1343 100644
|
|
--- a/drivers/staging/zcache/tmem.c
|
|
+++ b/drivers/staging/zcache/tmem.c
|
|
@@ -39,7 +39,7 @@
|
|
* A tmem host implementation must use this function to register callbacks
|
|
* for memory allocation.
|
|
*/
|
|
-static struct tmem_hostops tmem_hostops;
|
|
+static tmem_hostops_no_const tmem_hostops;
|
|
|
|
static void tmem_objnode_tree_init(void);
|
|
|
|
@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
|
|
* A tmem host implementation must use this function to register
|
|
* callbacks for a page-accessible memory (PAM) implementation
|
|
*/
|
|
-static struct tmem_pamops tmem_pamops;
|
|
+static tmem_pamops_no_const tmem_pamops;
|
|
|
|
void tmem_register_pamops(struct tmem_pamops *m)
|
|
{
|
|
diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
|
|
index 0d4aa82..f7832d4 100644
|
|
--- a/drivers/staging/zcache/tmem.h
|
|
+++ b/drivers/staging/zcache/tmem.h
|
|
@@ -180,6 +180,7 @@ struct tmem_pamops {
|
|
void (*new_obj)(struct tmem_obj *);
|
|
int (*replace_in_obj)(void *, struct tmem_obj *);
|
|
};
|
|
+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
|
|
extern void tmem_register_pamops(struct tmem_pamops *m);
|
|
|
|
/* memory allocation methods provided by the host implementation */
|
|
@@ -189,6 +190,7 @@ struct tmem_hostops {
|
|
struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
|
|
void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
|
|
};
|
|
+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
|
|
extern void tmem_register_hostops(struct tmem_hostops *m);
|
|
|
|
/* core tmem accessor functions */
|
|
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
|
|
index 4a5c6d7..5ad76fe 100644
|
|
--- a/drivers/target/target_core_tmr.c
|
|
+++ b/drivers/target/target_core_tmr.c
|
|
@@ -327,7 +327,7 @@ static void core_tmr_drain_task_list(
|
|
cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
|
|
cmd->t_task_list_num,
|
|
atomic_read(&cmd->t_task_cdbs_left),
|
|
- atomic_read(&cmd->t_task_cdbs_sent),
|
|
+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
|
|
(cmd->transport_state & CMD_T_ACTIVE) != 0,
|
|
(cmd->transport_state & CMD_T_STOP) != 0,
|
|
(cmd->transport_state & CMD_T_SENT) != 0);
|
|
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
|
|
index eff40fd..58f2793 100644
|
|
--- a/drivers/target/target_core_transport.c
|
|
+++ b/drivers/target/target_core_transport.c
|
|
@@ -1356,7 +1356,7 @@ struct se_device *transport_add_device_to_core_hba(
|
|
spin_lock_init(&dev->se_port_lock);
|
|
spin_lock_init(&dev->se_tmr_lock);
|
|
spin_lock_init(&dev->qf_cmd_lock);
|
|
- atomic_set(&dev->dev_ordered_id, 0);
|
|
+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
|
|
|
|
se_dev_set_default_attribs(dev, dev_limits);
|
|
|
|
@@ -1544,7 +1544,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
|
|
* Used to determine when ORDERED commands should go from
|
|
* Dormant to Active status.
|
|
*/
|
|
- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
|
|
+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
|
|
smp_mb__after_atomic_inc();
|
|
pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
|
|
cmd->se_ordered_id, cmd->sam_task_attr,
|
|
@@ -1961,7 +1961,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
|
|
" CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
|
|
cmd->t_task_list_num,
|
|
atomic_read(&cmd->t_task_cdbs_left),
|
|
- atomic_read(&cmd->t_task_cdbs_sent),
|
|
+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
|
|
atomic_read(&cmd->t_task_cdbs_ex_left),
|
|
(cmd->transport_state & CMD_T_ACTIVE) != 0,
|
|
(cmd->transport_state & CMD_T_STOP) != 0,
|
|
@@ -2222,9 +2222,9 @@ static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_c
|
|
cmd = task->task_se_cmd;
|
|
spin_lock_irqsave(&cmd->t_state_lock, flags);
|
|
task->task_flags |= (TF_ACTIVE | TF_SENT);
|
|
- atomic_inc(&cmd->t_task_cdbs_sent);
|
|
+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
|
|
|
|
- if (atomic_read(&cmd->t_task_cdbs_sent) ==
|
|
+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
|
|
cmd->t_task_list_num)
|
|
cmd->transport_state |= CMD_T_SENT;
|
|
|
|
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
|
|
index 3436436..772237b 100644
|
|
--- a/drivers/tty/hvc/hvcs.c
|
|
+++ b/drivers/tty/hvc/hvcs.c
|
|
@@ -83,6 +83,7 @@
|
|
#include <asm/hvcserver.h>
|
|
#include <asm/uaccess.h>
|
|
#include <asm/vio.h>
|
|
+#include <asm/local.h>
|
|
|
|
/*
|
|
* 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
|
|
@@ -270,7 +271,7 @@ struct hvcs_struct {
|
|
unsigned int index;
|
|
|
|
struct tty_struct *tty;
|
|
- int open_count;
|
|
+ local_t open_count;
|
|
|
|
/*
|
|
* Used to tell the driver kernel_thread what operations need to take
|
|
@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
|
|
|
|
spin_lock_irqsave(&hvcsd->lock, flags);
|
|
|
|
- if (hvcsd->open_count > 0) {
|
|
+ if (local_read(&hvcsd->open_count) > 0) {
|
|
spin_unlock_irqrestore(&hvcsd->lock, flags);
|
|
printk(KERN_INFO "HVCS: vterm state unchanged. "
|
|
"The hvcs device node is still in use.\n");
|
|
@@ -1138,7 +1139,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
|
|
if ((retval = hvcs_partner_connect(hvcsd)))
|
|
goto error_release;
|
|
|
|
- hvcsd->open_count = 1;
|
|
+ local_set(&hvcsd->open_count, 1);
|
|
hvcsd->tty = tty;
|
|
tty->driver_data = hvcsd;
|
|
|
|
@@ -1172,7 +1173,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
|
|
|
|
spin_lock_irqsave(&hvcsd->lock, flags);
|
|
kref_get(&hvcsd->kref);
|
|
- hvcsd->open_count++;
|
|
+ local_inc(&hvcsd->open_count);
|
|
hvcsd->todo_mask |= HVCS_SCHED_READ;
|
|
spin_unlock_irqrestore(&hvcsd->lock, flags);
|
|
|
|
@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
|
|
hvcsd = tty->driver_data;
|
|
|
|
spin_lock_irqsave(&hvcsd->lock, flags);
|
|
- if (--hvcsd->open_count == 0) {
|
|
+ if (local_dec_and_test(&hvcsd->open_count)) {
|
|
|
|
vio_disable_interrupts(hvcsd->vdev);
|
|
|
|
@@ -1242,10 +1243,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
|
|
free_irq(irq, hvcsd);
|
|
kref_put(&hvcsd->kref, destroy_hvcs_struct);
|
|
return;
|
|
- } else if (hvcsd->open_count < 0) {
|
|
+ } else if (local_read(&hvcsd->open_count) < 0) {
|
|
printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
|
|
" is missmanaged.\n",
|
|
- hvcsd->vdev->unit_address, hvcsd->open_count);
|
|
+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
|
|
}
|
|
|
|
spin_unlock_irqrestore(&hvcsd->lock, flags);
|
|
@@ -1261,7 +1262,7 @@ static void hvcs_hangup(struct tty_struct * tty)
|
|
|
|
spin_lock_irqsave(&hvcsd->lock, flags);
|
|
/* Preserve this so that we know how many kref refs to put */
|
|
- temp_open_count = hvcsd->open_count;
|
|
+ temp_open_count = local_read(&hvcsd->open_count);
|
|
|
|
/*
|
|
* Don't kref put inside the spinlock because the destruction
|
|
@@ -1276,7 +1277,7 @@ static void hvcs_hangup(struct tty_struct * tty)
|
|
hvcsd->tty->driver_data = NULL;
|
|
hvcsd->tty = NULL;
|
|
|
|
- hvcsd->open_count = 0;
|
|
+ local_set(&hvcsd->open_count, 0);
|
|
|
|
/* This will drop any buffered data on the floor which is OK in a hangup
|
|
* scenario. */
|
|
@@ -1347,7 +1348,7 @@ static int hvcs_write(struct tty_struct *tty,
|
|
* the middle of a write operation? This is a crummy place to do this
|
|
* but we want to keep it all in the spinlock.
|
|
*/
|
|
- if (hvcsd->open_count <= 0) {
|
|
+ if (local_read(&hvcsd->open_count) <= 0) {
|
|
spin_unlock_irqrestore(&hvcsd->lock, flags);
|
|
return -ENODEV;
|
|
}
|
|
@@ -1421,7 +1422,7 @@ static int hvcs_write_room(struct tty_struct *tty)
|
|
{
|
|
struct hvcs_struct *hvcsd = tty->driver_data;
|
|
|
|
- if (!hvcsd || hvcsd->open_count <= 0)
|
|
+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
|
|
return 0;
|
|
|
|
return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
|
|
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
|
|
index 4daf962..b4a2281 100644
|
|
--- a/drivers/tty/ipwireless/tty.c
|
|
+++ b/drivers/tty/ipwireless/tty.c
|
|
@@ -29,6 +29,7 @@
|
|
#include <linux/tty_driver.h>
|
|
#include <linux/tty_flip.h>
|
|
#include <linux/uaccess.h>
|
|
+#include <asm/local.h>
|
|
|
|
#include "tty.h"
|
|
#include "network.h"
|
|
@@ -51,7 +52,7 @@ struct ipw_tty {
|
|
int tty_type;
|
|
struct ipw_network *network;
|
|
struct tty_struct *linux_tty;
|
|
- int open_count;
|
|
+ local_t open_count;
|
|
unsigned int control_lines;
|
|
struct mutex ipw_tty_mutex;
|
|
int tx_bytes_queued;
|
|
@@ -117,10 +118,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
|
|
mutex_unlock(&tty->ipw_tty_mutex);
|
|
return -ENODEV;
|
|
}
|
|
- if (tty->open_count == 0)
|
|
+ if (local_read(&tty->open_count) == 0)
|
|
tty->tx_bytes_queued = 0;
|
|
|
|
- tty->open_count++;
|
|
+ local_inc(&tty->open_count);
|
|
|
|
tty->linux_tty = linux_tty;
|
|
linux_tty->driver_data = tty;
|
|
@@ -136,9 +137,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
|
|
|
|
static void do_ipw_close(struct ipw_tty *tty)
|
|
{
|
|
- tty->open_count--;
|
|
-
|
|
- if (tty->open_count == 0) {
|
|
+ if (local_dec_return(&tty->open_count) == 0) {
|
|
struct tty_struct *linux_tty = tty->linux_tty;
|
|
|
|
if (linux_tty != NULL) {
|
|
@@ -159,7 +158,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
|
|
return;
|
|
|
|
mutex_lock(&tty->ipw_tty_mutex);
|
|
- if (tty->open_count == 0) {
|
|
+ if (local_read(&tty->open_count) == 0) {
|
|
mutex_unlock(&tty->ipw_tty_mutex);
|
|
return;
|
|
}
|
|
@@ -188,7 +187,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
|
|
return;
|
|
}
|
|
|
|
- if (!tty->open_count) {
|
|
+ if (!local_read(&tty->open_count)) {
|
|
mutex_unlock(&tty->ipw_tty_mutex);
|
|
return;
|
|
}
|
|
@@ -230,7 +229,7 @@ static int ipw_write(struct tty_struct *linux_tty,
|
|
return -ENODEV;
|
|
|
|
mutex_lock(&tty->ipw_tty_mutex);
|
|
- if (!tty->open_count) {
|
|
+ if (!local_read(&tty->open_count)) {
|
|
mutex_unlock(&tty->ipw_tty_mutex);
|
|
return -EINVAL;
|
|
}
|
|
@@ -270,7 +269,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
|
|
if (!tty)
|
|
return -ENODEV;
|
|
|
|
- if (!tty->open_count)
|
|
+ if (!local_read(&tty->open_count))
|
|
return -EINVAL;
|
|
|
|
room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
|
|
@@ -312,7 +311,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
|
|
if (!tty)
|
|
return 0;
|
|
|
|
- if (!tty->open_count)
|
|
+ if (!local_read(&tty->open_count))
|
|
return 0;
|
|
|
|
return tty->tx_bytes_queued;
|
|
@@ -393,7 +392,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
|
|
if (!tty)
|
|
return -ENODEV;
|
|
|
|
- if (!tty->open_count)
|
|
+ if (!local_read(&tty->open_count))
|
|
return -EINVAL;
|
|
|
|
return get_control_lines(tty);
|
|
@@ -409,7 +408,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
|
|
if (!tty)
|
|
return -ENODEV;
|
|
|
|
- if (!tty->open_count)
|
|
+ if (!local_read(&tty->open_count))
|
|
return -EINVAL;
|
|
|
|
return set_control_lines(tty, set, clear);
|
|
@@ -423,7 +422,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
|
|
if (!tty)
|
|
return -ENODEV;
|
|
|
|
- if (!tty->open_count)
|
|
+ if (!local_read(&tty->open_count))
|
|
return -EINVAL;
|
|
|
|
/* FIXME: Exactly how is the tty object locked here .. */
|
|
@@ -572,7 +571,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
|
|
against a parallel ioctl etc */
|
|
mutex_lock(&ttyj->ipw_tty_mutex);
|
|
}
|
|
- while (ttyj->open_count)
|
|
+ while (local_read(&ttyj->open_count))
|
|
do_ipw_close(ttyj);
|
|
ipwireless_disassociate_network_ttys(network,
|
|
ttyj->channel_idx);
|
|
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
|
|
index 53ff37b..410e5f4 100644
|
|
--- a/drivers/tty/n_gsm.c
|
|
+++ b/drivers/tty/n_gsm.c
|
|
@@ -1649,7 +1649,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
|
|
kref_init(&dlci->ref);
|
|
mutex_init(&dlci->mutex);
|
|
dlci->fifo = &dlci->_fifo;
|
|
- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
|
|
+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
|
|
kfree(dlci);
|
|
return NULL;
|
|
}
|
|
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
|
|
index 0a2d86e..1d5aec4a 100644
|
|
--- a/drivers/tty/n_tty.c
|
|
+++ b/drivers/tty/n_tty.c
|
|
@@ -2146,6 +2146,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
|
|
{
|
|
*ops = tty_ldisc_N_TTY;
|
|
ops->owner = NULL;
|
|
- ops->refcount = ops->flags = 0;
|
|
+ atomic_set(&ops->refcount, 0);
|
|
+ ops->flags = 0;
|
|
}
|
|
EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
|
|
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
|
|
index bc955d7..255f40b 100644
|
|
--- a/drivers/tty/pty.c
|
|
+++ b/drivers/tty/pty.c
|
|
@@ -712,8 +712,10 @@ static void __init unix98_pty_init(void)
|
|
panic("Couldn't register Unix98 pts driver");
|
|
|
|
/* Now create the /dev/ptmx special device */
|
|
+ pax_open_kernel();
|
|
tty_default_fops(&ptmx_fops);
|
|
- ptmx_fops.open = ptmx_open;
|
|
+ *(void **)&ptmx_fops.open = ptmx_open;
|
|
+ pax_close_kernel();
|
|
|
|
cdev_init(&ptmx_cdev, &ptmx_fops);
|
|
if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
|
|
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
|
|
index 2b42a01..32a2ed3 100644
|
|
--- a/drivers/tty/serial/kgdboc.c
|
|
+++ b/drivers/tty/serial/kgdboc.c
|
|
@@ -24,8 +24,9 @@
|
|
#define MAX_CONFIG_LEN 40
|
|
|
|
static struct kgdb_io kgdboc_io_ops;
|
|
+static struct kgdb_io kgdboc_io_ops_console;
|
|
|
|
-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
|
|
+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
|
|
static int configured = -1;
|
|
|
|
static char config[MAX_CONFIG_LEN];
|
|
@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
|
|
kgdboc_unregister_kbd();
|
|
if (configured == 1)
|
|
kgdb_unregister_io_module(&kgdboc_io_ops);
|
|
+ else if (configured == 2)
|
|
+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
|
|
}
|
|
|
|
static int configure_kgdboc(void)
|
|
@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
|
|
int err;
|
|
char *cptr = config;
|
|
struct console *cons;
|
|
+ int is_console = 0;
|
|
|
|
err = kgdboc_option_setup(config);
|
|
if (err || !strlen(config) || isspace(config[0]))
|
|
goto noconfig;
|
|
|
|
err = -ENODEV;
|
|
- kgdboc_io_ops.is_console = 0;
|
|
kgdb_tty_driver = NULL;
|
|
|
|
kgdboc_use_kms = 0;
|
|
@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
|
|
int idx;
|
|
if (cons->device && cons->device(cons, &idx) == p &&
|
|
idx == tty_line) {
|
|
- kgdboc_io_ops.is_console = 1;
|
|
+ is_console = 1;
|
|
break;
|
|
}
|
|
cons = cons->next;
|
|
@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
|
|
kgdb_tty_line = tty_line;
|
|
|
|
do_register:
|
|
- err = kgdb_register_io_module(&kgdboc_io_ops);
|
|
+ if (is_console) {
|
|
+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
|
|
+ configured = 2;
|
|
+ } else {
|
|
+ err = kgdb_register_io_module(&kgdboc_io_ops);
|
|
+ configured = 1;
|
|
+ }
|
|
if (err)
|
|
goto noconfig;
|
|
|
|
- configured = 1;
|
|
-
|
|
return 0;
|
|
|
|
noconfig:
|
|
@@ -213,7 +220,7 @@ static int configure_kgdboc(void)
|
|
static int __init init_kgdboc(void)
|
|
{
|
|
/* Already configured? */
|
|
- if (configured == 1)
|
|
+ if (configured >= 1)
|
|
return 0;
|
|
|
|
return configure_kgdboc();
|
|
@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
|
|
if (config[len - 1] == '\n')
|
|
config[len - 1] = '\0';
|
|
|
|
- if (configured == 1)
|
|
+ if (configured >= 1)
|
|
cleanup_kgdboc();
|
|
|
|
/* Go and configure with the new params. */
|
|
@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
|
|
.post_exception = kgdboc_post_exp_handler,
|
|
};
|
|
|
|
+static struct kgdb_io kgdboc_io_ops_console = {
|
|
+ .name = "kgdboc",
|
|
+ .read_char = kgdboc_get_char,
|
|
+ .write_char = kgdboc_put_char,
|
|
+ .pre_exception = kgdboc_pre_exp_handler,
|
|
+ .post_exception = kgdboc_post_exp_handler,
|
|
+ .is_console = 1
|
|
+};
|
|
+
|
|
#ifdef CONFIG_KGDB_SERIAL_CONSOLE
|
|
/* This is only available if kgdboc is a built in for early debugging */
|
|
static int __init kgdboc_early_init(char *opt)
|
|
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
|
|
index 837a7a8..5b4fb69 100644
|
|
--- a/drivers/tty/tty_io.c
|
|
+++ b/drivers/tty/tty_io.c
|
|
@@ -3290,7 +3290,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
|
|
|
|
void tty_default_fops(struct file_operations *fops)
|
|
{
|
|
- *fops = tty_fops;
|
|
+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
|
|
index 24b95db..9c078d0 100644
|
|
--- a/drivers/tty/tty_ldisc.c
|
|
+++ b/drivers/tty/tty_ldisc.c
|
|
@@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
|
|
if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
|
|
struct tty_ldisc_ops *ldo = ld->ops;
|
|
|
|
- ldo->refcount--;
|
|
+ atomic_dec(&ldo->refcount);
|
|
module_put(ldo->owner);
|
|
spin_unlock_irqrestore(&tty_ldisc_lock, flags);
|
|
|
|
@@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
|
|
spin_lock_irqsave(&tty_ldisc_lock, flags);
|
|
tty_ldiscs[disc] = new_ldisc;
|
|
new_ldisc->num = disc;
|
|
- new_ldisc->refcount = 0;
|
|
+ atomic_set(&new_ldisc->refcount, 0);
|
|
spin_unlock_irqrestore(&tty_ldisc_lock, flags);
|
|
|
|
return ret;
|
|
@@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
|
|
return -EINVAL;
|
|
|
|
spin_lock_irqsave(&tty_ldisc_lock, flags);
|
|
- if (tty_ldiscs[disc]->refcount)
|
|
+ if (atomic_read(&tty_ldiscs[disc]->refcount))
|
|
ret = -EBUSY;
|
|
else
|
|
tty_ldiscs[disc] = NULL;
|
|
@@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
|
|
if (ldops) {
|
|
ret = ERR_PTR(-EAGAIN);
|
|
if (try_module_get(ldops->owner)) {
|
|
- ldops->refcount++;
|
|
+ atomic_inc(&ldops->refcount);
|
|
ret = ldops;
|
|
}
|
|
}
|
|
@@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
|
|
unsigned long flags;
|
|
|
|
spin_lock_irqsave(&tty_ldisc_lock, flags);
|
|
- ldops->refcount--;
|
|
+ atomic_dec(&ldops->refcount);
|
|
module_put(ldops->owner);
|
|
spin_unlock_irqrestore(&tty_ldisc_lock, flags);
|
|
}
|
|
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
|
|
index 0470194..02aed82 100644
|
|
--- a/drivers/uio/uio.c
|
|
+++ b/drivers/uio/uio.c
|
|
@@ -25,6 +25,7 @@
|
|
#include <linux/kobject.h>
|
|
#include <linux/cdev.h>
|
|
#include <linux/uio_driver.h>
|
|
+#include <asm/local.h>
|
|
|
|
#define UIO_MAX_DEVICES (1U << MINORBITS)
|
|
|
|
@@ -32,10 +33,10 @@ struct uio_device {
|
|
struct module *owner;
|
|
struct device *dev;
|
|
int minor;
|
|
- atomic_t event;
|
|
+ atomic_unchecked_t event;
|
|
struct fasync_struct *async_queue;
|
|
wait_queue_head_t wait;
|
|
- int vma_count;
|
|
+ local_t vma_count;
|
|
struct uio_info *info;
|
|
struct kobject *map_dir;
|
|
struct kobject *portio_dir;
|
|
@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
|
|
struct device_attribute *attr, char *buf)
|
|
{
|
|
struct uio_device *idev = dev_get_drvdata(dev);
|
|
- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
|
|
+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
|
|
}
|
|
|
|
static struct device_attribute uio_class_attributes[] = {
|
|
@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
|
|
{
|
|
struct uio_device *idev = info->uio_dev;
|
|
|
|
- atomic_inc(&idev->event);
|
|
+ atomic_inc_unchecked(&idev->event);
|
|
wake_up_interruptible(&idev->wait);
|
|
kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
|
|
}
|
|
@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
|
|
}
|
|
|
|
listener->dev = idev;
|
|
- listener->event_count = atomic_read(&idev->event);
|
|
+ listener->event_count = atomic_read_unchecked(&idev->event);
|
|
filep->private_data = listener;
|
|
|
|
if (idev->info->open) {
|
|
@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
|
|
return -EIO;
|
|
|
|
poll_wait(filep, &idev->wait, wait);
|
|
- if (listener->event_count != atomic_read(&idev->event))
|
|
+ if (listener->event_count != atomic_read_unchecked(&idev->event))
|
|
return POLLIN | POLLRDNORM;
|
|
return 0;
|
|
}
|
|
@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
|
|
do {
|
|
set_current_state(TASK_INTERRUPTIBLE);
|
|
|
|
- event_count = atomic_read(&idev->event);
|
|
+ event_count = atomic_read_unchecked(&idev->event);
|
|
if (event_count != listener->event_count) {
|
|
if (copy_to_user(buf, &event_count, count))
|
|
retval = -EFAULT;
|
|
@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
|
|
static void uio_vma_open(struct vm_area_struct *vma)
|
|
{
|
|
struct uio_device *idev = vma->vm_private_data;
|
|
- idev->vma_count++;
|
|
+ local_inc(&idev->vma_count);
|
|
}
|
|
|
|
static void uio_vma_close(struct vm_area_struct *vma)
|
|
{
|
|
struct uio_device *idev = vma->vm_private_data;
|
|
- idev->vma_count--;
|
|
+ local_dec(&idev->vma_count);
|
|
}
|
|
|
|
static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
|
@@ -841,7 +842,7 @@ int __uio_register_device(struct module *owner,
|
|
idev->owner = owner;
|
|
idev->info = info;
|
|
init_waitqueue_head(&idev->wait);
|
|
- atomic_set(&idev->event, 0);
|
|
+ atomic_set_unchecked(&idev->event, 0);
|
|
|
|
ret = uio_get_minor(idev);
|
|
if (ret)
|
|
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
|
|
index c8dbb97..8177d58 100644
|
|
--- a/drivers/usb/atm/cxacru.c
|
|
+++ b/drivers/usb/atm/cxacru.c
|
|
@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
|
|
ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
|
|
if (ret < 2)
|
|
return -EINVAL;
|
|
- if (index < 0 || index > 0x7f)
|
|
+ if (index > 0x7f)
|
|
return -EINVAL;
|
|
pos += tmp;
|
|
|
|
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
|
|
index d3448ca..d2864ca 100644
|
|
--- a/drivers/usb/atm/usbatm.c
|
|
+++ b/drivers/usb/atm/usbatm.c
|
|
@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
|
|
if (printk_ratelimit())
|
|
atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
|
|
__func__, vpi, vci);
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
return;
|
|
}
|
|
|
|
@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
|
|
if (length > ATM_MAX_AAL5_PDU) {
|
|
atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
|
|
__func__, length, vcc);
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
goto out;
|
|
}
|
|
|
|
@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
|
|
if (sarb->len < pdu_length) {
|
|
atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
|
|
__func__, pdu_length, sarb->len, vcc);
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
goto out;
|
|
}
|
|
|
|
if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
|
|
atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
|
|
__func__, vcc);
|
|
- atomic_inc(&vcc->stats->rx_err);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_err);
|
|
goto out;
|
|
}
|
|
|
|
@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
|
|
if (printk_ratelimit())
|
|
atm_err(instance, "%s: no memory for skb (length: %u)!\n",
|
|
__func__, length);
|
|
- atomic_inc(&vcc->stats->rx_drop);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
|
|
goto out;
|
|
}
|
|
|
|
@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
|
|
|
|
vcc->push(vcc, skb);
|
|
|
|
- atomic_inc(&vcc->stats->rx);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx);
|
|
out:
|
|
skb_trim(sarb, 0);
|
|
}
|
|
@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
|
|
struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
|
|
|
|
usbatm_pop(vcc, skb);
|
|
- atomic_inc(&vcc->stats->tx);
|
|
+ atomic_inc_unchecked(&vcc->stats->tx);
|
|
|
|
skb = skb_dequeue(&instance->sndqueue);
|
|
}
|
|
@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
|
|
if (!left--)
|
|
return sprintf(page,
|
|
"AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
|
|
- atomic_read(&atm_dev->stats.aal5.tx),
|
|
- atomic_read(&atm_dev->stats.aal5.tx_err),
|
|
- atomic_read(&atm_dev->stats.aal5.rx),
|
|
- atomic_read(&atm_dev->stats.aal5.rx_err),
|
|
- atomic_read(&atm_dev->stats.aal5.rx_drop));
|
|
+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
|
|
+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
|
|
+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
|
|
+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
|
|
+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
|
|
|
|
if (!left--) {
|
|
if (instance->disconnected)
|
|
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
|
|
index 3440812..2a4ef1f 100644
|
|
--- a/drivers/usb/core/devices.c
|
|
+++ b/drivers/usb/core/devices.c
|
|
@@ -126,7 +126,7 @@ static const char format_endpt[] =
|
|
* time it gets called.
|
|
*/
|
|
static struct device_connect_event {
|
|
- atomic_t count;
|
|
+ atomic_unchecked_t count;
|
|
wait_queue_head_t wait;
|
|
} device_event = {
|
|
.count = ATOMIC_INIT(1),
|
|
@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
|
|
|
|
void usbfs_conn_disc_event(void)
|
|
{
|
|
- atomic_add(2, &device_event.count);
|
|
+ atomic_add_unchecked(2, &device_event.count);
|
|
wake_up(&device_event.wait);
|
|
}
|
|
|
|
@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
|
|
|
|
poll_wait(file, &device_event.wait, wait);
|
|
|
|
- event_count = atomic_read(&device_event.count);
|
|
+ event_count = atomic_read_unchecked(&device_event.count);
|
|
if (file->f_version != event_count) {
|
|
file->f_version = event_count;
|
|
return POLLIN | POLLRDNORM;
|
|
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
|
|
index 347bb05..63e1b73 100644
|
|
--- a/drivers/usb/early/ehci-dbgp.c
|
|
+++ b/drivers/usb/early/ehci-dbgp.c
|
|
@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
|
|
|
|
#ifdef CONFIG_KGDB
|
|
static struct kgdb_io kgdbdbgp_io_ops;
|
|
-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
|
|
+static struct kgdb_io kgdbdbgp_io_ops_console;
|
|
+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
|
|
#else
|
|
#define dbgp_kgdb_mode (0)
|
|
#endif
|
|
@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
|
|
.write_char = kgdbdbgp_write_char,
|
|
};
|
|
|
|
+static struct kgdb_io kgdbdbgp_io_ops_console = {
|
|
+ .name = "kgdbdbgp",
|
|
+ .read_char = kgdbdbgp_read_char,
|
|
+ .write_char = kgdbdbgp_write_char,
|
|
+ .is_console = 1
|
|
+};
|
|
+
|
|
static int kgdbdbgp_wait_time;
|
|
|
|
static int __init kgdbdbgp_parse_config(char *str)
|
|
@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
|
|
ptr++;
|
|
kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
|
|
}
|
|
- kgdb_register_io_module(&kgdbdbgp_io_ops);
|
|
- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
|
|
+ if (early_dbgp_console.index != -1)
|
|
+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
|
|
+ else
|
|
+ kgdb_register_io_module(&kgdbdbgp_io_ops);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
|
|
index d6bea3e..60b250e 100644
|
|
--- a/drivers/usb/wusbcore/wa-hc.h
|
|
+++ b/drivers/usb/wusbcore/wa-hc.h
|
|
@@ -192,7 +192,7 @@ struct wahc {
|
|
struct list_head xfer_delayed_list;
|
|
spinlock_t xfer_list_lock;
|
|
struct work_struct xfer_work;
|
|
- atomic_t xfer_id_count;
|
|
+ atomic_unchecked_t xfer_id_count;
|
|
};
|
|
|
|
|
|
@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
|
|
INIT_LIST_HEAD(&wa->xfer_delayed_list);
|
|
spin_lock_init(&wa->xfer_list_lock);
|
|
INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
|
|
- atomic_set(&wa->xfer_id_count, 1);
|
|
+ atomic_set_unchecked(&wa->xfer_id_count, 1);
|
|
}
|
|
|
|
/**
|
|
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
|
|
index 5f6df6e..0a16602 100644
|
|
--- a/drivers/usb/wusbcore/wa-xfer.c
|
|
+++ b/drivers/usb/wusbcore/wa-xfer.c
|
|
@@ -297,7 +297,7 @@ static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
|
|
*/
|
|
static void wa_xfer_id_init(struct wa_xfer *xfer)
|
|
{
|
|
- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
|
|
+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
|
|
}
|
|
|
|
/*
|
|
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
|
|
index a50cb9c..8aaeaad 100644
|
|
--- a/drivers/vhost/vhost.c
|
|
+++ b/drivers/vhost/vhost.c
|
|
@@ -632,7 +632,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
|
|
return 0;
|
|
}
|
|
|
|
-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
|
|
+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
|
|
{
|
|
struct file *eventfp, *filep = NULL,
|
|
*pollstart = NULL, *pollstop = NULL;
|
|
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
|
|
index b0b2ac3..89a4399 100644
|
|
--- a/drivers/video/aty/aty128fb.c
|
|
+++ b/drivers/video/aty/aty128fb.c
|
|
@@ -148,7 +148,7 @@ enum {
|
|
};
|
|
|
|
/* Must match above enum */
|
|
-static const char *r128_family[] __devinitdata = {
|
|
+static const char *r128_family[] __devinitconst = {
|
|
"AGP",
|
|
"PCI",
|
|
"PRO AGP",
|
|
diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
|
|
index f26570d..911f719 100644
|
|
--- a/drivers/video/fbcmap.c
|
|
+++ b/drivers/video/fbcmap.c
|
|
@@ -299,8 +299,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
|
|
rc = -ENODEV;
|
|
goto out;
|
|
}
|
|
- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
|
|
- !info->fbops->fb_setcmap)) {
|
|
+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
|
|
rc = -EINVAL;
|
|
goto out1;
|
|
}
|
|
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
|
|
index 969d8c0..cb2029d 100644
|
|
--- a/drivers/video/fbmem.c
|
|
+++ b/drivers/video/fbmem.c
|
|
@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
|
|
image->dx += image->width + 8;
|
|
}
|
|
} else if (rotate == FB_ROTATE_UD) {
|
|
- for (x = 0; x < num && image->dx >= 0; x++) {
|
|
+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
|
|
info->fbops->fb_imageblit(info, image);
|
|
image->dx -= image->width + 8;
|
|
}
|
|
@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
|
|
image->dy += image->height + 8;
|
|
}
|
|
} else if (rotate == FB_ROTATE_CCW) {
|
|
- for (x = 0; x < num && image->dy >= 0; x++) {
|
|
+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
|
|
info->fbops->fb_imageblit(info, image);
|
|
image->dy -= image->height + 8;
|
|
}
|
|
@@ -1161,7 +1161,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
|
|
return -EFAULT;
|
|
if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
|
|
return -EINVAL;
|
|
- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
|
|
+ if (con2fb.framebuffer >= FB_MAX)
|
|
return -EINVAL;
|
|
if (!registered_fb[con2fb.framebuffer])
|
|
request_module("fb%d", con2fb.framebuffer);
|
|
diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
|
|
index 5a5d092..265c5ed 100644
|
|
--- a/drivers/video/geode/gx1fb_core.c
|
|
+++ b/drivers/video/geode/gx1fb_core.c
|
|
@@ -29,7 +29,7 @@ static int crt_option = 1;
|
|
static char panel_option[32] = "";
|
|
|
|
/* Modes relevant to the GX1 (taken from modedb.c) */
|
|
-static const struct fb_videomode __devinitdata gx1_modedb[] = {
|
|
+static const struct fb_videomode __devinitconst gx1_modedb[] = {
|
|
/* 640x480-60 VESA */
|
|
{ NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
|
|
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
|
|
diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
|
|
index 0fad23f..0e9afa4 100644
|
|
--- a/drivers/video/gxt4500.c
|
|
+++ b/drivers/video/gxt4500.c
|
|
@@ -156,7 +156,7 @@ struct gxt4500_par {
|
|
static char *mode_option;
|
|
|
|
/* default mode: 1280x1024 @ 60 Hz, 8 bpp */
|
|
-static const struct fb_videomode defaultmode __devinitdata = {
|
|
+static const struct fb_videomode defaultmode __devinitconst = {
|
|
.refresh = 60,
|
|
.xres = 1280,
|
|
.yres = 1024,
|
|
@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
|
|
return 0;
|
|
}
|
|
|
|
-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
|
|
+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
|
|
.id = "IBM GXT4500P",
|
|
.type = FB_TYPE_PACKED_PIXELS,
|
|
.visual = FB_VISUAL_PSEUDOCOLOR,
|
|
diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
|
|
index 7672d2e..b56437f 100644
|
|
--- a/drivers/video/i810/i810_accel.c
|
|
+++ b/drivers/video/i810/i810_accel.c
|
|
@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
|
|
}
|
|
}
|
|
printk("ringbuffer lockup!!!\n");
|
|
+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
|
|
i810_report_error(mmio);
|
|
par->dev_flags |= LOCKUP;
|
|
info->pixmap.scan_align = 1;
|
|
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
|
|
index b83f361..2b05a91 100644
|
|
--- a/drivers/video/i810/i810_main.c
|
|
+++ b/drivers/video/i810/i810_main.c
|
|
@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
|
|
static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
|
|
|
|
/* PCI */
|
|
-static const char *i810_pci_list[] __devinitdata = {
|
|
+static const char *i810_pci_list[] __devinitconst = {
|
|
"Intel(R) 810 Framebuffer Device" ,
|
|
"Intel(R) 810-DC100 Framebuffer Device" ,
|
|
"Intel(R) 810E Framebuffer Device" ,
|
|
diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
|
|
index de36693..3c63fc2 100644
|
|
--- a/drivers/video/jz4740_fb.c
|
|
+++ b/drivers/video/jz4740_fb.c
|
|
@@ -136,7 +136,7 @@ struct jzfb {
|
|
uint32_t pseudo_palette[16];
|
|
};
|
|
|
|
-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
|
|
+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
|
|
.id = "JZ4740 FB",
|
|
.type = FB_TYPE_PACKED_PIXELS,
|
|
.visual = FB_VISUAL_TRUECOLOR,
|
|
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
|
|
index 85d8110..e839969 100644
|
|
--- a/drivers/video/udlfb.c
|
|
+++ b/drivers/video/udlfb.c
|
|
@@ -620,11 +620,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
|
|
dlfb_urb_completion(urb);
|
|
|
|
error:
|
|
- atomic_add(bytes_sent, &dev->bytes_sent);
|
|
- atomic_add(bytes_identical, &dev->bytes_identical);
|
|
- atomic_add(width*height*2, &dev->bytes_rendered);
|
|
+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
|
|
+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
|
|
+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
|
|
end_cycles = get_cycles();
|
|
- atomic_add(((unsigned int) ((end_cycles - start_cycles)
|
|
+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
|
|
>> 10)), /* Kcycles */
|
|
&dev->cpu_kcycles_used);
|
|
|
|
@@ -745,11 +745,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
|
|
dlfb_urb_completion(urb);
|
|
|
|
error:
|
|
- atomic_add(bytes_sent, &dev->bytes_sent);
|
|
- atomic_add(bytes_identical, &dev->bytes_identical);
|
|
- atomic_add(bytes_rendered, &dev->bytes_rendered);
|
|
+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
|
|
+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
|
|
+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
|
|
end_cycles = get_cycles();
|
|
- atomic_add(((unsigned int) ((end_cycles - start_cycles)
|
|
+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
|
|
>> 10)), /* Kcycles */
|
|
&dev->cpu_kcycles_used);
|
|
}
|
|
@@ -1373,7 +1373,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
|
|
struct fb_info *fb_info = dev_get_drvdata(fbdev);
|
|
struct dlfb_data *dev = fb_info->par;
|
|
return snprintf(buf, PAGE_SIZE, "%u\n",
|
|
- atomic_read(&dev->bytes_rendered));
|
|
+ atomic_read_unchecked(&dev->bytes_rendered));
|
|
}
|
|
|
|
static ssize_t metrics_bytes_identical_show(struct device *fbdev,
|
|
@@ -1381,7 +1381,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
|
|
struct fb_info *fb_info = dev_get_drvdata(fbdev);
|
|
struct dlfb_data *dev = fb_info->par;
|
|
return snprintf(buf, PAGE_SIZE, "%u\n",
|
|
- atomic_read(&dev->bytes_identical));
|
|
+ atomic_read_unchecked(&dev->bytes_identical));
|
|
}
|
|
|
|
static ssize_t metrics_bytes_sent_show(struct device *fbdev,
|
|
@@ -1389,7 +1389,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
|
|
struct fb_info *fb_info = dev_get_drvdata(fbdev);
|
|
struct dlfb_data *dev = fb_info->par;
|
|
return snprintf(buf, PAGE_SIZE, "%u\n",
|
|
- atomic_read(&dev->bytes_sent));
|
|
+ atomic_read_unchecked(&dev->bytes_sent));
|
|
}
|
|
|
|
static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
|
|
@@ -1397,7 +1397,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
|
|
struct fb_info *fb_info = dev_get_drvdata(fbdev);
|
|
struct dlfb_data *dev = fb_info->par;
|
|
return snprintf(buf, PAGE_SIZE, "%u\n",
|
|
- atomic_read(&dev->cpu_kcycles_used));
|
|
+ atomic_read_unchecked(&dev->cpu_kcycles_used));
|
|
}
|
|
|
|
static ssize_t edid_show(
|
|
@@ -1457,10 +1457,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
|
|
struct fb_info *fb_info = dev_get_drvdata(fbdev);
|
|
struct dlfb_data *dev = fb_info->par;
|
|
|
|
- atomic_set(&dev->bytes_rendered, 0);
|
|
- atomic_set(&dev->bytes_identical, 0);
|
|
- atomic_set(&dev->bytes_sent, 0);
|
|
- atomic_set(&dev->cpu_kcycles_used, 0);
|
|
+ atomic_set_unchecked(&dev->bytes_rendered, 0);
|
|
+ atomic_set_unchecked(&dev->bytes_identical, 0);
|
|
+ atomic_set_unchecked(&dev->bytes_sent, 0);
|
|
+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
|
|
|
|
return count;
|
|
}
|
|
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
|
|
index b0e2a42..e2df3ad 100644
|
|
--- a/drivers/video/uvesafb.c
|
|
+++ b/drivers/video/uvesafb.c
|
|
@@ -19,6 +19,7 @@
|
|
#include <linux/io.h>
|
|
#include <linux/mutex.h>
|
|
#include <linux/slab.h>
|
|
+#include <linux/moduleloader.h>
|
|
#include <video/edid.h>
|
|
#include <video/uvesafb.h>
|
|
#ifdef CONFIG_X86
|
|
@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
|
|
if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
|
|
par->pmi_setpal = par->ypan = 0;
|
|
} else {
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+#ifdef CONFIG_MODULES
|
|
+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
|
|
+#endif
|
|
+ if (!par->pmi_code) {
|
|
+ par->pmi_setpal = par->ypan = 0;
|
|
+ return 0;
|
|
+ }
|
|
+#endif
|
|
+
|
|
par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
|
|
+ task->t.regs.edi);
|
|
+
|
|
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ pax_open_kernel();
|
|
+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
|
|
+ pax_close_kernel();
|
|
+
|
|
+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
|
|
+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
|
|
+#else
|
|
par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
|
|
par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
|
|
+#endif
|
|
+
|
|
printk(KERN_INFO "uvesafb: protected mode interface info at "
|
|
"%04x:%04x\n",
|
|
(u16)task->t.regs.es, (u16)task->t.regs.edi);
|
|
@@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
|
|
par->ypan = ypan;
|
|
|
|
if (par->pmi_setpal || par->ypan) {
|
|
+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
|
|
if (__supported_pte_mask & _PAGE_NX) {
|
|
par->pmi_setpal = par->ypan = 0;
|
|
printk(KERN_WARNING "uvesafb: NX protection is actively."
|
|
"We have better not to use the PMI.\n");
|
|
- } else {
|
|
+ } else
|
|
+#endif
|
|
uvesafb_vbe_getpmi(task, par);
|
|
- }
|
|
}
|
|
#else
|
|
/* The protected mode interface is not available on non-x86. */
|
|
@@ -1836,6 +1860,11 @@ static int __devinit uvesafb_probe(struct platform_device *dev)
|
|
if (par->vbe_modes)
|
|
kfree(par->vbe_modes);
|
|
|
|
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ if (par->pmi_code)
|
|
+ module_free_exec(NULL, par->pmi_code);
|
|
+#endif
|
|
+
|
|
framebuffer_release(info);
|
|
return err;
|
|
}
|
|
@@ -1862,6 +1891,12 @@ static int uvesafb_remove(struct platform_device *dev)
|
|
kfree(par->vbe_state_orig);
|
|
if (par->vbe_state_saved)
|
|
kfree(par->vbe_state_saved);
|
|
+
|
|
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ if (par->pmi_code)
|
|
+ module_free_exec(NULL, par->pmi_code);
|
|
+#endif
|
|
+
|
|
}
|
|
|
|
framebuffer_release(info);
|
|
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
|
|
index 501b340..86bd4cf 100644
|
|
--- a/drivers/video/vesafb.c
|
|
+++ b/drivers/video/vesafb.c
|
|
@@ -9,6 +9,7 @@
|
|
*/
|
|
|
|
#include <linux/module.h>
|
|
+#include <linux/moduleloader.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/errno.h>
|
|
#include <linux/string.h>
|
|
@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
|
|
static int vram_total __initdata; /* Set total amount of memory */
|
|
static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
|
|
static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
|
|
-static void (*pmi_start)(void) __read_mostly;
|
|
-static void (*pmi_pal) (void) __read_mostly;
|
|
+static void (*pmi_start)(void) __read_only;
|
|
+static void (*pmi_pal) (void) __read_only;
|
|
static int depth __read_mostly;
|
|
static int vga_compat __read_mostly;
|
|
/* --------------------------------------------------------------------- */
|
|
@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
|
|
unsigned int size_vmode;
|
|
unsigned int size_remap;
|
|
unsigned int size_total;
|
|
+ void *pmi_code = NULL;
|
|
|
|
if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
|
|
return -ENODEV;
|
|
@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
|
|
size_remap = size_total;
|
|
vesafb_fix.smem_len = size_remap;
|
|
|
|
-#ifndef __i386__
|
|
- screen_info.vesapm_seg = 0;
|
|
-#endif
|
|
-
|
|
if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
|
|
printk(KERN_WARNING
|
|
"vesafb: cannot reserve video memory at 0x%lx\n",
|
|
@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
|
|
printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
|
|
vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
|
|
|
|
+#ifdef __i386__
|
|
+
|
|
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
|
|
+ if (!pmi_code)
|
|
+#elif !defined(CONFIG_PAX_KERNEXEC)
|
|
+ if (0)
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+ screen_info.vesapm_seg = 0;
|
|
+
|
|
if (screen_info.vesapm_seg) {
|
|
- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
|
|
- screen_info.vesapm_seg,screen_info.vesapm_off);
|
|
+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
|
|
+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
|
|
}
|
|
|
|
if (screen_info.vesapm_seg < 0xc000)
|
|
@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
|
|
|
|
if (ypan || pmi_setpal) {
|
|
unsigned short *pmi_base;
|
|
+
|
|
pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
|
|
- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
|
|
- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
|
|
+
|
|
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ pax_open_kernel();
|
|
+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
|
|
+#else
|
|
+ pmi_code = pmi_base;
|
|
+#endif
|
|
+
|
|
+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
|
|
+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
|
|
+
|
|
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ pmi_start = ktva_ktla(pmi_start);
|
|
+ pmi_pal = ktva_ktla(pmi_pal);
|
|
+ pax_close_kernel();
|
|
+#endif
|
|
+
|
|
printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
|
|
if (pmi_base[3]) {
|
|
printk(KERN_INFO "vesafb: pmi: ports = ");
|
|
@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
|
|
info->node, info->fix.id);
|
|
return 0;
|
|
err:
|
|
+
|
|
+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ module_free_exec(NULL, pmi_code);
|
|
+#endif
|
|
+
|
|
if (info->screen_base)
|
|
iounmap(info->screen_base);
|
|
framebuffer_release(info);
|
|
diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
|
|
index 88714ae..16c2e11 100644
|
|
--- a/drivers/video/via/via_clock.h
|
|
+++ b/drivers/video/via/via_clock.h
|
|
@@ -56,7 +56,7 @@ struct via_clock {
|
|
|
|
void (*set_engine_pll_state)(u8 state);
|
|
void (*set_engine_pll)(struct via_pll_config config);
|
|
-};
|
|
+} __no_const;
|
|
|
|
|
|
static inline u32 get_pll_internal_frequency(u32 ref_freq,
|
|
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
|
|
index 62461a8..6cdff66 100644
|
|
--- a/drivers/xen/xen-pciback/conf_space.h
|
|
+++ b/drivers/xen/xen-pciback/conf_space.h
|
|
@@ -44,15 +44,15 @@ struct config_field {
|
|
struct {
|
|
conf_dword_write write;
|
|
conf_dword_read read;
|
|
- } dw;
|
|
+ } __no_const dw;
|
|
struct {
|
|
conf_word_write write;
|
|
conf_word_read read;
|
|
- } w;
|
|
+ } __no_const w;
|
|
struct {
|
|
conf_byte_write write;
|
|
conf_byte_read read;
|
|
- } b;
|
|
+ } __no_const b;
|
|
} u;
|
|
struct list_head list;
|
|
};
|
|
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
|
|
index 014c8dd..6f3dfe6 100644
|
|
--- a/fs/9p/vfs_inode.c
|
|
+++ b/fs/9p/vfs_inode.c
|
|
@@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
|
|
void
|
|
v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
|
|
{
|
|
- char *s = nd_get_link(nd);
|
|
+ const char *s = nd_get_link(nd);
|
|
|
|
p9_debug(P9_DEBUG_VFS, " %s %s\n",
|
|
dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
|
|
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
|
|
index e95d1b6..3454244 100644
|
|
--- a/fs/Kconfig.binfmt
|
|
+++ b/fs/Kconfig.binfmt
|
|
@@ -89,7 +89,7 @@ config HAVE_AOUT
|
|
|
|
config BINFMT_AOUT
|
|
tristate "Kernel support for a.out and ECOFF binaries"
|
|
- depends on HAVE_AOUT
|
|
+ depends on HAVE_AOUT && BROKEN
|
|
---help---
|
|
A.out (Assembler.OUTput) is a set of formats for libraries and
|
|
executables used in the earliest versions of UNIX. Linux used
|
|
diff --git a/fs/aio.c b/fs/aio.c
|
|
index cdc8dc4..75f2998 100644
|
|
--- a/fs/aio.c
|
|
+++ b/fs/aio.c
|
|
@@ -118,7 +118,7 @@ static int aio_setup_ring(struct kioctx *ctx)
|
|
size += sizeof(struct io_event) * nr_events;
|
|
nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
|
|
|
|
- if (nr_pages < 0)
|
|
+ if (nr_pages <= 0)
|
|
return -EINVAL;
|
|
|
|
nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
|
|
@@ -1440,18 +1440,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
|
|
static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
|
|
{
|
|
ssize_t ret;
|
|
+ struct iovec iovstack;
|
|
|
|
#ifdef CONFIG_COMPAT
|
|
if (compat)
|
|
ret = compat_rw_copy_check_uvector(type,
|
|
(struct compat_iovec __user *)kiocb->ki_buf,
|
|
- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
|
|
+ kiocb->ki_nbytes, 1, &iovstack,
|
|
&kiocb->ki_iovec, 1);
|
|
else
|
|
#endif
|
|
ret = rw_copy_check_uvector(type,
|
|
(struct iovec __user *)kiocb->ki_buf,
|
|
- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
|
|
+ kiocb->ki_nbytes, 1, &iovstack,
|
|
&kiocb->ki_iovec, 1);
|
|
if (ret < 0)
|
|
goto out;
|
|
@@ -1460,6 +1461,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
|
|
if (ret < 0)
|
|
goto out;
|
|
|
|
+ if (kiocb->ki_iovec == &iovstack) {
|
|
+ kiocb->ki_inline_vec = iovstack;
|
|
+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
|
|
+ }
|
|
kiocb->ki_nr_segs = kiocb->ki_nbytes;
|
|
kiocb->ki_cur_seg = 0;
|
|
/* ki_nbytes/left now reflect bytes instead of segs */
|
|
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
|
|
index da8876d..9f3e6d8 100644
|
|
--- a/fs/autofs4/waitq.c
|
|
+++ b/fs/autofs4/waitq.c
|
|
@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
|
|
{
|
|
unsigned long sigpipe, flags;
|
|
mm_segment_t fs;
|
|
- const char *data = (const char *)addr;
|
|
+ const char __user *data = (const char __force_user *)addr;
|
|
ssize_t wr = 0;
|
|
|
|
sigpipe = sigismember(¤t->pending.signal, SIGPIPE);
|
|
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
|
|
index e18da23..affc30e 100644
|
|
--- a/fs/befs/linuxvfs.c
|
|
+++ b/fs/befs/linuxvfs.c
|
|
@@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
|
|
{
|
|
befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
|
|
if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
|
|
- char *link = nd_get_link(nd);
|
|
+ const char *link = nd_get_link(nd);
|
|
if (!IS_ERR(link))
|
|
kfree(link);
|
|
}
|
|
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
|
|
index d146e18..a12e8cb 100644
|
|
--- a/fs/binfmt_aout.c
|
|
+++ b/fs/binfmt_aout.c
|
|
@@ -265,6 +265,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
|
|
|
|
install_exec_creds(bprm);
|
|
|
|
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
|
|
+ current->mm->pax_flags = 0UL;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
|
|
+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUTRAMP
|
|
+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
|
|
+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
|
|
+ current->mm->pax_flags |= MF_PAX_MPROTECT;
|
|
+#endif
|
|
+
|
|
+ }
|
|
+#endif
|
|
+
|
|
if (N_MAGIC(ex) == OMAGIC) {
|
|
unsigned long text_addr, map_size;
|
|
loff_t pos;
|
|
@@ -330,7 +351,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
|
|
}
|
|
|
|
error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
|
|
- PROT_READ | PROT_WRITE | PROT_EXEC,
|
|
+ PROT_READ | PROT_WRITE,
|
|
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
|
|
fd_offset + ex.a_text);
|
|
if (error != N_DATADDR(ex)) {
|
|
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
|
|
index a181b58..46af0db 100644
|
|
--- a/fs/binfmt_elf.c
|
|
+++ b/fs/binfmt_elf.c
|
|
@@ -32,6 +32,7 @@
|
|
#include <linux/elf.h>
|
|
#include <linux/utsname.h>
|
|
#include <linux/coredump.h>
|
|
+#include <linux/xattr.h>
|
|
#include <asm/uaccess.h>
|
|
#include <asm/param.h>
|
|
#include <asm/page.h>
|
|
@@ -52,6 +53,10 @@ static int elf_core_dump(struct coredump_params *cprm);
|
|
#define elf_core_dump NULL
|
|
#endif
|
|
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
|
|
+#endif
|
|
+
|
|
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
|
|
#define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
|
|
#else
|
|
@@ -71,6 +76,11 @@ static struct linux_binfmt elf_format = {
|
|
.load_binary = load_elf_binary,
|
|
.load_shlib = load_elf_library,
|
|
.core_dump = elf_core_dump,
|
|
+
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+ .handle_mprotect= elf_handle_mprotect,
|
|
+#endif
|
|
+
|
|
.min_coredump = ELF_EXEC_PAGESIZE,
|
|
};
|
|
|
|
@@ -78,6 +88,8 @@ static struct linux_binfmt elf_format = {
|
|
|
|
static int set_brk(unsigned long start, unsigned long end)
|
|
{
|
|
+ unsigned long e = end;
|
|
+
|
|
start = ELF_PAGEALIGN(start);
|
|
end = ELF_PAGEALIGN(end);
|
|
if (end > start) {
|
|
@@ -86,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
|
|
if (BAD_ADDR(addr))
|
|
return addr;
|
|
}
|
|
- current->mm->start_brk = current->mm->brk = end;
|
|
+ current->mm->start_brk = current->mm->brk = e;
|
|
return 0;
|
|
}
|
|
|
|
@@ -147,12 +159,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
|
|
elf_addr_t __user *u_rand_bytes;
|
|
const char *k_platform = ELF_PLATFORM;
|
|
const char *k_base_platform = ELF_BASE_PLATFORM;
|
|
- unsigned char k_rand_bytes[16];
|
|
+ u32 k_rand_bytes[4];
|
|
int items;
|
|
elf_addr_t *elf_info;
|
|
int ei_index = 0;
|
|
const struct cred *cred = current_cred();
|
|
struct vm_area_struct *vma;
|
|
+ unsigned long saved_auxv[AT_VECTOR_SIZE];
|
|
|
|
/*
|
|
* In some cases (e.g. Hyper-Threading), we want to avoid L1
|
|
@@ -194,8 +207,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
|
|
* Generate 16 random bytes for userspace PRNG seeding.
|
|
*/
|
|
get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
|
|
- u_rand_bytes = (elf_addr_t __user *)
|
|
- STACK_ALLOC(p, sizeof(k_rand_bytes));
|
|
+ srandom32(k_rand_bytes[0] ^ random32());
|
|
+ srandom32(k_rand_bytes[1] ^ random32());
|
|
+ srandom32(k_rand_bytes[2] ^ random32());
|
|
+ srandom32(k_rand_bytes[3] ^ random32());
|
|
+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
|
|
+ u_rand_bytes = (elf_addr_t __user *) p;
|
|
if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
|
|
return -EFAULT;
|
|
|
|
@@ -307,9 +324,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
|
|
return -EFAULT;
|
|
current->mm->env_end = p;
|
|
|
|
+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
|
|
+
|
|
/* Put the elf_info on the stack in the right place. */
|
|
sp = (elf_addr_t __user *)envp + 1;
|
|
- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
|
|
+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
|
|
return -EFAULT;
|
|
return 0;
|
|
}
|
|
@@ -380,10 +399,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
|
|
{
|
|
struct elf_phdr *elf_phdata;
|
|
struct elf_phdr *eppnt;
|
|
- unsigned long load_addr = 0;
|
|
+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
|
|
int load_addr_set = 0;
|
|
unsigned long last_bss = 0, elf_bss = 0;
|
|
- unsigned long error = ~0UL;
|
|
+ unsigned long error = -EINVAL;
|
|
unsigned long total_size;
|
|
int retval, i, size;
|
|
|
|
@@ -429,6 +448,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
|
|
goto out_close;
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
|
|
+ pax_task_size = SEGMEXEC_TASK_SIZE;
|
|
+#endif
|
|
+
|
|
eppnt = elf_phdata;
|
|
for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
|
|
if (eppnt->p_type == PT_LOAD) {
|
|
@@ -472,8 +496,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
|
|
k = load_addr + eppnt->p_vaddr;
|
|
if (BAD_ADDR(k) ||
|
|
eppnt->p_filesz > eppnt->p_memsz ||
|
|
- eppnt->p_memsz > TASK_SIZE ||
|
|
- TASK_SIZE - eppnt->p_memsz < k) {
|
|
+ eppnt->p_memsz > pax_task_size ||
|
|
+ pax_task_size - eppnt->p_memsz < k) {
|
|
error = -ENOMEM;
|
|
goto out_close;
|
|
}
|
|
@@ -525,6 +549,311 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
|
|
return error;
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_PT_PAX_FLAGS
|
|
+#ifdef CONFIG_PAX_SOFTMODE
|
|
+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
|
|
+{
|
|
+ unsigned long pax_flags = 0UL;
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ if (elf_phdata->p_flags & PF_PAGEEXEC)
|
|
+ pax_flags |= MF_PAX_PAGEEXEC;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (elf_phdata->p_flags & PF_SEGMEXEC)
|
|
+ pax_flags |= MF_PAX_SEGMEXEC;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUTRAMP
|
|
+ if (elf_phdata->p_flags & PF_EMUTRAMP)
|
|
+ pax_flags |= MF_PAX_EMUTRAMP;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+ if (elf_phdata->p_flags & PF_MPROTECT)
|
|
+ pax_flags |= MF_PAX_MPROTECT;
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
|
|
+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
|
|
+ pax_flags |= MF_PAX_RANDMMAP;
|
|
+#endif
|
|
+
|
|
+ return pax_flags;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
|
|
+{
|
|
+ unsigned long pax_flags = 0UL;
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
|
|
+ pax_flags |= MF_PAX_PAGEEXEC;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
|
|
+ pax_flags |= MF_PAX_SEGMEXEC;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUTRAMP
|
|
+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
|
|
+ pax_flags |= MF_PAX_EMUTRAMP;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
|
|
+ pax_flags |= MF_PAX_MPROTECT;
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
|
|
+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
|
|
+ pax_flags |= MF_PAX_RANDMMAP;
|
|
+#endif
|
|
+
|
|
+ return pax_flags;
|
|
+}
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
|
|
+#ifdef CONFIG_PAX_SOFTMODE
|
|
+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
|
|
+{
|
|
+ unsigned long pax_flags = 0UL;
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
|
|
+ pax_flags |= MF_PAX_PAGEEXEC;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
|
|
+ pax_flags |= MF_PAX_SEGMEXEC;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUTRAMP
|
|
+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
|
|
+ pax_flags |= MF_PAX_EMUTRAMP;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+ if (pax_flags_softmode & MF_PAX_MPROTECT)
|
|
+ pax_flags |= MF_PAX_MPROTECT;
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
|
|
+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
|
|
+ pax_flags |= MF_PAX_RANDMMAP;
|
|
+#endif
|
|
+
|
|
+ return pax_flags;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
|
|
+{
|
|
+ unsigned long pax_flags = 0UL;
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
|
|
+ pax_flags |= MF_PAX_PAGEEXEC;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
|
|
+ pax_flags |= MF_PAX_SEGMEXEC;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUTRAMP
|
|
+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
|
|
+ pax_flags |= MF_PAX_EMUTRAMP;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
|
|
+ pax_flags |= MF_PAX_MPROTECT;
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
|
|
+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
|
|
+ pax_flags |= MF_PAX_RANDMMAP;
|
|
+#endif
|
|
+
|
|
+ return pax_flags;
|
|
+}
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
|
|
+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
|
|
+{
|
|
+ unsigned long pax_flags = 0UL;
|
|
+
|
|
+#ifdef CONFIG_PAX_EI_PAX
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
|
|
+ pax_flags |= MF_PAX_PAGEEXEC;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
|
|
+ pax_flags |= MF_PAX_SEGMEXEC;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUTRAMP
|
|
+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
|
|
+ pax_flags |= MF_PAX_EMUTRAMP;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
|
|
+ pax_flags |= MF_PAX_MPROTECT;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_ASLR
|
|
+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
|
|
+ pax_flags |= MF_PAX_RANDMMAP;
|
|
+#endif
|
|
+
|
|
+#else
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ pax_flags |= MF_PAX_PAGEEXEC;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ pax_flags |= MF_PAX_SEGMEXEC;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+ pax_flags |= MF_PAX_MPROTECT;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (randomize_va_space)
|
|
+ pax_flags |= MF_PAX_RANDMMAP;
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+
|
|
+ return pax_flags;
|
|
+}
|
|
+
|
|
+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
|
|
+{
|
|
+
|
|
+#ifdef CONFIG_PAX_PT_PAX_FLAGS
|
|
+ unsigned long i;
|
|
+
|
|
+ for (i = 0UL; i < elf_ex->e_phnum; i++)
|
|
+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
|
|
+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
|
|
+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
|
|
+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
|
|
+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
|
|
+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
|
|
+ return ~0UL;
|
|
+
|
|
+#ifdef CONFIG_PAX_SOFTMODE
|
|
+ if (pax_softmode)
|
|
+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
|
|
+ else
|
|
+#endif
|
|
+
|
|
+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
|
|
+ break;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ return ~0UL;
|
|
+}
|
|
+
|
|
+static unsigned long pax_parse_xattr_pax(struct file * const file)
|
|
+{
|
|
+
|
|
+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
|
|
+ ssize_t xattr_size, i;
|
|
+ unsigned char xattr_value[5];
|
|
+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
|
|
+
|
|
+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
|
|
+ if (xattr_size <= 0)
|
|
+ return ~0UL;
|
|
+
|
|
+ for (i = 0; i < xattr_size; i++)
|
|
+ switch (xattr_value[i]) {
|
|
+ default:
|
|
+ return ~0UL;
|
|
+
|
|
+#define parse_flag(option1, option2, flag) \
|
|
+ case option1: \
|
|
+ pax_flags_hardmode |= MF_PAX_##flag; \
|
|
+ break; \
|
|
+ case option2: \
|
|
+ pax_flags_softmode |= MF_PAX_##flag; \
|
|
+ break;
|
|
+
|
|
+ parse_flag('p', 'P', PAGEEXEC);
|
|
+ parse_flag('e', 'E', EMUTRAMP);
|
|
+ parse_flag('m', 'M', MPROTECT);
|
|
+ parse_flag('r', 'R', RANDMMAP);
|
|
+ parse_flag('s', 'S', SEGMEXEC);
|
|
+
|
|
+#undef parse_flag
|
|
+ }
|
|
+
|
|
+ if (pax_flags_hardmode & pax_flags_softmode)
|
|
+ return ~0UL;
|
|
+
|
|
+#ifdef CONFIG_PAX_SOFTMODE
|
|
+ if (pax_softmode)
|
|
+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
|
|
+ else
|
|
+#endif
|
|
+
|
|
+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
|
|
+#else
|
|
+ return ~0UL;
|
|
+#endif
|
|
+
|
|
+}
|
|
+
|
|
+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
|
|
+{
|
|
+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
|
|
+
|
|
+ pax_flags = pax_parse_ei_pax(elf_ex);
|
|
+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
|
|
+ xattr_pax_flags = pax_parse_xattr_pax(file);
|
|
+
|
|
+ if (pt_pax_flags == ~0UL)
|
|
+ pt_pax_flags = xattr_pax_flags;
|
|
+ else if (xattr_pax_flags == ~0UL)
|
|
+ xattr_pax_flags = pt_pax_flags;
|
|
+ if (pt_pax_flags != xattr_pax_flags)
|
|
+ return -EINVAL;
|
|
+ if (pt_pax_flags != ~0UL)
|
|
+ pax_flags = pt_pax_flags;
|
|
+
|
|
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
|
|
+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
|
|
+ if ((__supported_pte_mask & _PAGE_NX))
|
|
+ pax_flags &= ~MF_PAX_SEGMEXEC;
|
|
+ else
|
|
+ pax_flags &= ~MF_PAX_PAGEEXEC;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ if (0 > pax_check_flags(&pax_flags))
|
|
+ return -EINVAL;
|
|
+
|
|
+ current->mm->pax_flags = pax_flags;
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
/*
|
|
* These are the functions used to load ELF style executables and shared
|
|
* libraries. There is no binary dependent code anywhere else.
|
|
@@ -541,6 +870,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
|
|
{
|
|
unsigned long random_variable = 0;
|
|
|
|
+#ifdef CONFIG_PAX_RANDUSTACK
|
|
+ if (randomize_va_space)
|
|
+ return stack_top - current->mm->delta_stack;
|
|
+#endif
|
|
+
|
|
if ((current->flags & PF_RANDOMIZE) &&
|
|
!(current->personality & ADDR_NO_RANDOMIZE)) {
|
|
random_variable = (unsigned long) get_random_int();
|
|
@@ -560,7 +894,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
|
|
unsigned long load_addr = 0, load_bias = 0;
|
|
int load_addr_set = 0;
|
|
char * elf_interpreter = NULL;
|
|
- unsigned long error;
|
|
+ unsigned long error = 0;
|
|
struct elf_phdr *elf_ppnt, *elf_phdata;
|
|
unsigned long elf_bss, elf_brk;
|
|
int retval, i;
|
|
@@ -570,11 +904,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
|
|
unsigned long start_code, end_code, start_data, end_data;
|
|
unsigned long reloc_func_desc __maybe_unused = 0;
|
|
int executable_stack = EXSTACK_DEFAULT;
|
|
- unsigned long def_flags = 0;
|
|
struct {
|
|
struct elfhdr elf_ex;
|
|
struct elfhdr interp_elf_ex;
|
|
} *loc;
|
|
+ unsigned long pax_task_size = TASK_SIZE;
|
|
|
|
loc = kmalloc(sizeof(*loc), GFP_KERNEL);
|
|
if (!loc) {
|
|
@@ -710,11 +1044,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
|
|
goto out_free_dentry;
|
|
|
|
/* OK, This is the point of no return */
|
|
- current->mm->def_flags = def_flags;
|
|
+
|
|
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
|
|
+ current->mm->pax_flags = 0UL;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_DLRESOLVE
|
|
+ current->mm->call_dl_resolve = 0UL;
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
|
|
+ current->mm->call_syscall = 0UL;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_ASLR
|
|
+ current->mm->delta_mmap = 0UL;
|
|
+ current->mm->delta_stack = 0UL;
|
|
+#endif
|
|
+
|
|
+ current->mm->def_flags = 0;
|
|
+
|
|
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
|
|
+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
|
|
+ send_sig(SIGKILL, current, 0);
|
|
+ goto out_free_dentry;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
|
|
+ pax_set_initial_flags(bprm);
|
|
+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
|
|
+ if (pax_set_initial_flags_func)
|
|
+ (pax_set_initial_flags_func)(bprm);
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
|
|
+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
|
|
+ current->mm->context.user_cs_limit = PAGE_SIZE;
|
|
+ current->mm->def_flags |= VM_PAGEEXEC;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
|
|
+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
|
|
+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
|
|
+ pax_task_size = SEGMEXEC_TASK_SIZE;
|
|
+ current->mm->def_flags |= VM_NOHUGEPAGE;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
|
|
+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
|
|
+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
|
|
+ put_cpu();
|
|
+ }
|
|
+#endif
|
|
|
|
/* Do this immediately, since STACK_TOP as used in setup_arg_pages
|
|
may depend on the personality. */
|
|
SET_PERSONALITY(loc->elf_ex);
|
|
+
|
|
+#ifdef CONFIG_PAX_ASLR
|
|
+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
|
|
+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
|
|
+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
|
|
+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
|
|
+ executable_stack = EXSTACK_DISABLE_X;
|
|
+ current->personality &= ~READ_IMPLIES_EXEC;
|
|
+ } else
|
|
+#endif
|
|
+
|
|
if (elf_read_implies_exec(loc->elf_ex, executable_stack))
|
|
current->personality |= READ_IMPLIES_EXEC;
|
|
|
|
@@ -844,9 +1248,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
|
|
* allowed task size. Note that p_filesz must always be
|
|
* <= p_memsz so it is only necessary to check p_memsz.
|
|
*/
|
|
- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
|
|
- elf_ppnt->p_memsz > TASK_SIZE ||
|
|
- TASK_SIZE - elf_ppnt->p_memsz < k) {
|
|
+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
|
|
+ elf_ppnt->p_memsz > pax_task_size ||
|
|
+ pax_task_size - elf_ppnt->p_memsz < k) {
|
|
/* set_brk can never work. Avoid overflows. */
|
|
send_sig(SIGKILL, current, 0);
|
|
retval = -EINVAL;
|
|
@@ -885,11 +1289,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
|
|
goto out_free_dentry;
|
|
}
|
|
if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
|
|
- send_sig(SIGSEGV, current, 0);
|
|
- retval = -EFAULT; /* Nobody gets to see this, but.. */
|
|
- goto out_free_dentry;
|
|
+ /*
|
|
+ * This bss-zeroing can fail if the ELF
|
|
+ * file specifies odd protections. So
|
|
+ * we don't check the return value
|
|
+ */
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
|
|
+ unsigned long start, size;
|
|
+
|
|
+ start = ELF_PAGEALIGN(elf_brk);
|
|
+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
|
|
+ down_write(¤t->mm->mmap_sem);
|
|
+ retval = -ENOMEM;
|
|
+ if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
|
|
+ unsigned long prot = PROT_NONE;
|
|
+
|
|
+ current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
|
|
+// if (current->personality & ADDR_NO_RANDOMIZE)
|
|
+// prot = PROT_READ;
|
|
+ start = do_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
|
|
+ retval = IS_ERR_VALUE(start) ? start : 0;
|
|
+ }
|
|
+ up_write(¤t->mm->mmap_sem);
|
|
+ if (retval == 0)
|
|
+ retval = set_brk(start + size, start + size + PAGE_SIZE);
|
|
+ if (retval < 0) {
|
|
+ send_sig(SIGKILL, current, 0);
|
|
+ goto out_free_dentry;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
if (elf_interpreter) {
|
|
unsigned long uninitialized_var(interp_map_addr);
|
|
|
|
@@ -1117,7 +1550,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
|
|
* Decide what to dump of a segment, part, all or none.
|
|
*/
|
|
static unsigned long vma_dump_size(struct vm_area_struct *vma,
|
|
- unsigned long mm_flags)
|
|
+ unsigned long mm_flags, long signr)
|
|
{
|
|
#define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
|
|
|
|
@@ -1154,7 +1587,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
|
|
if (vma->vm_file == NULL)
|
|
return 0;
|
|
|
|
- if (FILTER(MAPPED_PRIVATE))
|
|
+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
|
|
goto whole;
|
|
|
|
/*
|
|
@@ -1376,9 +1809,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
|
|
{
|
|
elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
|
|
int i = 0;
|
|
- do
|
|
+ do {
|
|
i += 2;
|
|
- while (auxv[i - 2] != AT_NULL);
|
|
+ } while (auxv[i - 2] != AT_NULL);
|
|
fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
|
|
}
|
|
|
|
@@ -1889,14 +2322,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
|
|
}
|
|
|
|
static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
|
|
- unsigned long mm_flags)
|
|
+ struct coredump_params *cprm)
|
|
{
|
|
struct vm_area_struct *vma;
|
|
size_t size = 0;
|
|
|
|
for (vma = first_vma(current, gate_vma); vma != NULL;
|
|
vma = next_vma(vma, gate_vma))
|
|
- size += vma_dump_size(vma, mm_flags);
|
|
+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
|
|
return size;
|
|
}
|
|
|
|
@@ -1990,7 +2423,7 @@ static int elf_core_dump(struct coredump_params *cprm)
|
|
|
|
dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
|
|
|
|
- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
|
|
+ offset += elf_core_vma_data_size(gate_vma, cprm);
|
|
offset += elf_core_extra_data_size();
|
|
e_shoff = offset;
|
|
|
|
@@ -2021,7 +2454,7 @@ static int elf_core_dump(struct coredump_params *cprm)
|
|
phdr.p_offset = offset;
|
|
phdr.p_vaddr = vma->vm_start;
|
|
phdr.p_paddr = 0;
|
|
- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
|
|
+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
|
|
phdr.p_memsz = vma->vm_end - vma->vm_start;
|
|
offset += phdr.p_filesz;
|
|
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
|
|
@@ -2056,7 +2489,7 @@ static int elf_core_dump(struct coredump_params *cprm)
|
|
unsigned long addr;
|
|
unsigned long end;
|
|
|
|
- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
|
|
+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
|
|
|
|
for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
|
|
struct page *page;
|
|
@@ -2102,6 +2535,96 @@ static int elf_core_dump(struct coredump_params *cprm)
|
|
|
|
#endif /* CONFIG_ELF_CORE */
|
|
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+/* PaX: non-PIC ELF libraries need relocations on their executable segments
|
|
+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
|
|
+ * we'll remove VM_MAYWRITE for good on RELRO segments.
|
|
+ *
|
|
+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
|
|
+ * basis because we want to allow the common case and not the special ones.
|
|
+ */
|
|
+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
|
|
+{
|
|
+ struct elfhdr elf_h;
|
|
+ struct elf_phdr elf_p;
|
|
+ unsigned long i;
|
|
+ unsigned long oldflags;
|
|
+ bool is_textrel_rw, is_textrel_rx, is_relro;
|
|
+
|
|
+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
|
|
+ return;
|
|
+
|
|
+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
|
|
+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
|
|
+
|
|
+#ifdef CONFIG_PAX_ELFRELOCS
|
|
+ /* possible TEXTREL */
|
|
+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
|
|
+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
|
|
+#else
|
|
+ is_textrel_rw = false;
|
|
+ is_textrel_rx = false;
|
|
+#endif
|
|
+
|
|
+ /* possible RELRO */
|
|
+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
|
|
+
|
|
+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
|
|
+ return;
|
|
+
|
|
+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
|
|
+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
|
|
+
|
|
+#ifdef CONFIG_PAX_ETEXECRELOCS
|
|
+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
|
|
+#else
|
|
+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
|
|
+#endif
|
|
+
|
|
+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
|
|
+ !elf_check_arch(&elf_h) ||
|
|
+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
|
|
+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
|
|
+ return;
|
|
+
|
|
+ for (i = 0UL; i < elf_h.e_phnum; i++) {
|
|
+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
|
|
+ return;
|
|
+ switch (elf_p.p_type) {
|
|
+ case PT_DYNAMIC:
|
|
+ if (!is_textrel_rw && !is_textrel_rx)
|
|
+ continue;
|
|
+ i = 0UL;
|
|
+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
|
|
+ elf_dyn dyn;
|
|
+
|
|
+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
|
|
+ return;
|
|
+ if (dyn.d_tag == DT_NULL)
|
|
+ return;
|
|
+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
|
|
+ if (is_textrel_rw)
|
|
+ vma->vm_flags |= VM_MAYWRITE;
|
|
+ else
|
|
+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
|
|
+ vma->vm_flags &= ~VM_MAYWRITE;
|
|
+ return;
|
|
+ }
|
|
+ i++;
|
|
+ }
|
|
+ return;
|
|
+
|
|
+ case PT_GNU_RELRO:
|
|
+ if (!is_relro)
|
|
+ continue;
|
|
+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
|
|
+ vma->vm_flags &= ~VM_MAYWRITE;
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+#endif
|
|
+
|
|
static int __init init_elf_binfmt(void)
|
|
{
|
|
register_binfmt(&elf_format);
|
|
diff --git a/fs/binfmt_elf.c.rej b/fs/binfmt_elf.c.rej
|
|
new file mode 100644
|
|
index 0000000..f60f6971
|
|
--- /dev/null
|
|
+++ b/fs/binfmt_elf.c.rej
|
|
@@ -0,0 +1,22 @@
|
|
+diff a/fs/binfmt_elf.c b/fs/binfmt_elf.c (rejected hunks)
|
|
+@@ -805,6 +1209,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
|
|
+ #else
|
|
+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
|
|
+ #endif
|
|
++
|
|
++#ifdef CONFIG_PAX_RANDMMAP
|
|
++ /* PaX: randomize base address at the default exe base if requested */
|
|
++ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
|
|
++#ifdef CONFIG_SPARC64
|
|
++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
|
|
++#else
|
|
++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
|
|
++#endif
|
|
++ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
|
|
++ elf_flags |= MAP_FIXED;
|
|
++ }
|
|
++#endif
|
|
++
|
|
+ }
|
|
+
|
|
+ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
|
|
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
|
|
index 6b2daf9..a70dccb 100644
|
|
--- a/fs/binfmt_flat.c
|
|
+++ b/fs/binfmt_flat.c
|
|
@@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
|
|
realdatastart = (unsigned long) -ENOMEM;
|
|
printk("Unable to allocate RAM for process data, errno %d\n",
|
|
(int)-realdatastart);
|
|
+ down_write(¤t->mm->mmap_sem);
|
|
do_munmap(current->mm, textpos, text_len);
|
|
+ up_write(¤t->mm->mmap_sem);
|
|
ret = realdatastart;
|
|
goto err;
|
|
}
|
|
@@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
|
|
}
|
|
if (IS_ERR_VALUE(result)) {
|
|
printk("Unable to read data+bss, errno %d\n", (int)-result);
|
|
+ down_write(¤t->mm->mmap_sem);
|
|
do_munmap(current->mm, textpos, text_len);
|
|
do_munmap(current->mm, realdatastart, len);
|
|
+ up_write(¤t->mm->mmap_sem);
|
|
ret = result;
|
|
goto err;
|
|
}
|
|
@@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
|
|
}
|
|
if (IS_ERR_VALUE(result)) {
|
|
printk("Unable to read code+data+bss, errno %d\n",(int)-result);
|
|
+ down_write(¤t->mm->mmap_sem);
|
|
do_munmap(current->mm, textpos, text_len + data_len + extra +
|
|
MAX_SHARED_LIBS * sizeof(unsigned long));
|
|
+ up_write(¤t->mm->mmap_sem);
|
|
ret = result;
|
|
goto err;
|
|
}
|
|
diff --git a/fs/bio.c b/fs/bio.c
|
|
index 1e9383b..f05b24e 100644
|
|
--- a/fs/bio.c
|
|
+++ b/fs/bio.c
|
|
@@ -849,7 +849,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
|
|
/*
|
|
* Overflow, abort
|
|
*/
|
|
- if (end < start)
|
|
+ if (end < start || end - start > INT_MAX - nr_pages)
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
nr_pages += end - start;
|
|
@@ -983,7 +983,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
|
|
/*
|
|
* Overflow, abort
|
|
*/
|
|
- if (end < start)
|
|
+ if (end < start || end - start > INT_MAX - nr_pages)
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
nr_pages += end - start;
|
|
@@ -1245,7 +1245,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
|
|
const int read = bio_data_dir(bio) == READ;
|
|
struct bio_map_data *bmd = bio->bi_private;
|
|
int i;
|
|
- char *p = bmd->sgvecs[0].iov_base;
|
|
+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
|
|
|
|
__bio_for_each_segment(bvec, bio, i, 0) {
|
|
char *addr = page_address(bvec->bv_page);
|
|
diff --git a/fs/block_dev.c b/fs/block_dev.c
|
|
index 319d9c7..8b583f31 100644
|
|
--- a/fs/block_dev.c
|
|
+++ b/fs/block_dev.c
|
|
@@ -712,7 +712,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
|
|
else if (bdev->bd_contains == bdev)
|
|
return true; /* is a whole device which isn't held */
|
|
|
|
- else if (whole->bd_holder == bd_may_claim)
|
|
+ else if (whole->bd_holder == (void *)bd_may_claim)
|
|
return true; /* is a partition of a device that is being partitioned */
|
|
else if (whole->bd_holder != NULL)
|
|
return false; /* is a partition of a held device */
|
|
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
|
|
index c053e90..e5f1afc 100644
|
|
--- a/fs/btrfs/check-integrity.c
|
|
+++ b/fs/btrfs/check-integrity.c
|
|
@@ -156,7 +156,7 @@ struct btrfsic_block {
|
|
union {
|
|
bio_end_io_t *bio;
|
|
bh_end_io_t *bh;
|
|
- } orig_bio_bh_end_io;
|
|
+ } __no_const orig_bio_bh_end_io;
|
|
int submit_bio_bh_rw;
|
|
u64 flush_gen; /* only valid if !never_written */
|
|
};
|
|
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
|
|
index 4106264..8157ede 100644
|
|
--- a/fs/btrfs/ctree.c
|
|
+++ b/fs/btrfs/ctree.c
|
|
@@ -513,9 +513,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
|
|
free_extent_buffer(buf);
|
|
add_root_to_dirty_list(root);
|
|
} else {
|
|
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
|
|
- parent_start = parent->start;
|
|
- else
|
|
+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
|
|
+ if (parent)
|
|
+ parent_start = parent->start;
|
|
+ else
|
|
+ parent_start = 0;
|
|
+ } else
|
|
parent_start = 0;
|
|
|
|
WARN_ON(trans->transid != btrfs_header_generation(parent));
|
|
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
|
|
index d46c481..17fa65d 100644
|
|
--- a/fs/btrfs/ioctl.c
|
|
+++ b/fs/btrfs/ioctl.c
|
|
@@ -2919,7 +2919,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
|
|
up_read(&info->groups_sem);
|
|
}
|
|
|
|
- user_dest = (struct btrfs_ioctl_space_info *)
|
|
+ user_dest = (struct btrfs_ioctl_space_info __user *)
|
|
(arg + sizeof(struct btrfs_ioctl_space_args));
|
|
|
|
if (copy_to_user(user_dest, dest_orig, alloc_size))
|
|
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
|
|
index 92841a7..3069096 100644
|
|
--- a/fs/btrfs/relocation.c
|
|
+++ b/fs/btrfs/relocation.c
|
|
@@ -1268,7 +1268,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
|
|
}
|
|
spin_unlock(&rc->reloc_root_tree.lock);
|
|
|
|
- BUG_ON((struct btrfs_root *)node->data != root);
|
|
+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
|
|
|
|
if (!del) {
|
|
spin_lock(&rc->reloc_root_tree.lock);
|
|
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
|
|
index 622f469..e8d2d55 100644
|
|
--- a/fs/cachefiles/bind.c
|
|
+++ b/fs/cachefiles/bind.c
|
|
@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
|
|
args);
|
|
|
|
/* start by checking things over */
|
|
- ASSERT(cache->fstop_percent >= 0 &&
|
|
- cache->fstop_percent < cache->fcull_percent &&
|
|
+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
|
|
cache->fcull_percent < cache->frun_percent &&
|
|
cache->frun_percent < 100);
|
|
|
|
- ASSERT(cache->bstop_percent >= 0 &&
|
|
- cache->bstop_percent < cache->bcull_percent &&
|
|
+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
|
|
cache->bcull_percent < cache->brun_percent &&
|
|
cache->brun_percent < 100);
|
|
|
|
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
|
|
index 0a1467b..6a53245 100644
|
|
--- a/fs/cachefiles/daemon.c
|
|
+++ b/fs/cachefiles/daemon.c
|
|
@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
|
|
if (n > buflen)
|
|
return -EMSGSIZE;
|
|
|
|
- if (copy_to_user(_buffer, buffer, n) != 0)
|
|
+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
|
|
return -EFAULT;
|
|
|
|
return n;
|
|
@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
|
|
if (test_bit(CACHEFILES_DEAD, &cache->flags))
|
|
return -EIO;
|
|
|
|
- if (datalen < 0 || datalen > PAGE_SIZE - 1)
|
|
+ if (datalen > PAGE_SIZE - 1)
|
|
return -EOPNOTSUPP;
|
|
|
|
/* drag the command string into the kernel so we can parse it */
|
|
@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
|
|
if (args[0] != '%' || args[1] != '\0')
|
|
return -EINVAL;
|
|
|
|
- if (fstop < 0 || fstop >= cache->fcull_percent)
|
|
+ if (fstop >= cache->fcull_percent)
|
|
return cachefiles_daemon_range_error(cache, args);
|
|
|
|
cache->fstop_percent = fstop;
|
|
@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
|
|
if (args[0] != '%' || args[1] != '\0')
|
|
return -EINVAL;
|
|
|
|
- if (bstop < 0 || bstop >= cache->bcull_percent)
|
|
+ if (bstop >= cache->bcull_percent)
|
|
return cachefiles_daemon_range_error(cache, args);
|
|
|
|
cache->bstop_percent = bstop;
|
|
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
|
|
index bd6bc1b..b627b53 100644
|
|
--- a/fs/cachefiles/internal.h
|
|
+++ b/fs/cachefiles/internal.h
|
|
@@ -57,7 +57,7 @@ struct cachefiles_cache {
|
|
wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
|
|
struct rb_root active_nodes; /* active nodes (can't be culled) */
|
|
rwlock_t active_lock; /* lock for active_nodes */
|
|
- atomic_t gravecounter; /* graveyard uniquifier */
|
|
+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
|
|
unsigned frun_percent; /* when to stop culling (% files) */
|
|
unsigned fcull_percent; /* when to start culling (% files) */
|
|
unsigned fstop_percent; /* when to stop allocating (% files) */
|
|
@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
|
|
* proc.c
|
|
*/
|
|
#ifdef CONFIG_CACHEFILES_HISTOGRAM
|
|
-extern atomic_t cachefiles_lookup_histogram[HZ];
|
|
-extern atomic_t cachefiles_mkdir_histogram[HZ];
|
|
-extern atomic_t cachefiles_create_histogram[HZ];
|
|
+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
|
|
+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
|
|
+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
|
|
|
|
extern int __init cachefiles_proc_init(void);
|
|
extern void cachefiles_proc_cleanup(void);
|
|
static inline
|
|
-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
|
|
+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
|
|
{
|
|
unsigned long jif = jiffies - start_jif;
|
|
if (jif >= HZ)
|
|
jif = HZ - 1;
|
|
- atomic_inc(&histogram[jif]);
|
|
+ atomic_inc_unchecked(&histogram[jif]);
|
|
}
|
|
|
|
#else
|
|
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
|
|
index 7f0771d..87d4f36 100644
|
|
--- a/fs/cachefiles/namei.c
|
|
+++ b/fs/cachefiles/namei.c
|
|
@@ -318,7 +318,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
|
|
/* first step is to make up a grave dentry in the graveyard */
|
|
sprintf(nbuffer, "%08x%08x",
|
|
(uint32_t) get_seconds(),
|
|
- (uint32_t) atomic_inc_return(&cache->gravecounter));
|
|
+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
|
|
|
|
/* do the multiway lock magic */
|
|
trap = lock_rename(cache->graveyard, dir);
|
|
diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
|
|
index eccd339..4c1d995 100644
|
|
--- a/fs/cachefiles/proc.c
|
|
+++ b/fs/cachefiles/proc.c
|
|
@@ -14,9 +14,9 @@
|
|
#include <linux/seq_file.h>
|
|
#include "internal.h"
|
|
|
|
-atomic_t cachefiles_lookup_histogram[HZ];
|
|
-atomic_t cachefiles_mkdir_histogram[HZ];
|
|
-atomic_t cachefiles_create_histogram[HZ];
|
|
+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
|
|
+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
|
|
+atomic_unchecked_t cachefiles_create_histogram[HZ];
|
|
|
|
/*
|
|
* display the latency histogram
|
|
@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
|
|
return 0;
|
|
default:
|
|
index = (unsigned long) v - 3;
|
|
- x = atomic_read(&cachefiles_lookup_histogram[index]);
|
|
- y = atomic_read(&cachefiles_mkdir_histogram[index]);
|
|
- z = atomic_read(&cachefiles_create_histogram[index]);
|
|
+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
|
|
+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
|
|
+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
|
|
if (x == 0 && y == 0 && z == 0)
|
|
return 0;
|
|
|
|
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
|
|
index b4d2438..0935840 100644
|
|
--- a/fs/cachefiles/rdwr.c
|
|
+++ b/fs/cachefiles/rdwr.c
|
|
@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
|
|
old_fs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
ret = file->f_op->write(
|
|
- file, (const void __user *) data, len, &pos);
|
|
+ file, (const void __force_user *) data, len, &pos);
|
|
set_fs(old_fs);
|
|
kunmap(page);
|
|
if (ret != len)
|
|
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
|
|
index 3e8094b..d656726 100644
|
|
--- a/fs/ceph/dir.c
|
|
+++ b/fs/ceph/dir.c
|
|
@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
|
|
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
|
|
struct ceph_mds_client *mdsc = fsc->mdsc;
|
|
unsigned frag = fpos_frag(filp->f_pos);
|
|
- int off = fpos_off(filp->f_pos);
|
|
+ unsigned int off = fpos_off(filp->f_pos);
|
|
int err;
|
|
u32 ftype;
|
|
struct ceph_mds_reply_info_parsed *rinfo;
|
|
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
|
|
index 2704646..c581c91 100644
|
|
--- a/fs/cifs/cifs_debug.c
|
|
+++ b/fs/cifs/cifs_debug.c
|
|
@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
|
|
|
|
if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
|
|
#ifdef CONFIG_CIFS_STATS2
|
|
- atomic_set(&totBufAllocCount, 0);
|
|
- atomic_set(&totSmBufAllocCount, 0);
|
|
+ atomic_set_unchecked(&totBufAllocCount, 0);
|
|
+ atomic_set_unchecked(&totSmBufAllocCount, 0);
|
|
#endif /* CONFIG_CIFS_STATS2 */
|
|
spin_lock(&cifs_tcp_ses_lock);
|
|
list_for_each(tmp1, &cifs_tcp_ses_list) {
|
|
@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
|
|
tcon = list_entry(tmp3,
|
|
struct cifs_tcon,
|
|
tcon_list);
|
|
- atomic_set(&tcon->num_smbs_sent, 0);
|
|
- atomic_set(&tcon->num_writes, 0);
|
|
- atomic_set(&tcon->num_reads, 0);
|
|
- atomic_set(&tcon->num_oplock_brks, 0);
|
|
- atomic_set(&tcon->num_opens, 0);
|
|
- atomic_set(&tcon->num_posixopens, 0);
|
|
- atomic_set(&tcon->num_posixmkdirs, 0);
|
|
- atomic_set(&tcon->num_closes, 0);
|
|
- atomic_set(&tcon->num_deletes, 0);
|
|
- atomic_set(&tcon->num_mkdirs, 0);
|
|
- atomic_set(&tcon->num_rmdirs, 0);
|
|
- atomic_set(&tcon->num_renames, 0);
|
|
- atomic_set(&tcon->num_t2renames, 0);
|
|
- atomic_set(&tcon->num_ffirst, 0);
|
|
- atomic_set(&tcon->num_fnext, 0);
|
|
- atomic_set(&tcon->num_fclose, 0);
|
|
- atomic_set(&tcon->num_hardlinks, 0);
|
|
- atomic_set(&tcon->num_symlinks, 0);
|
|
- atomic_set(&tcon->num_locks, 0);
|
|
+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
|
|
+ atomic_set_unchecked(&tcon->num_writes, 0);
|
|
+ atomic_set_unchecked(&tcon->num_reads, 0);
|
|
+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
|
|
+ atomic_set_unchecked(&tcon->num_opens, 0);
|
|
+ atomic_set_unchecked(&tcon->num_posixopens, 0);
|
|
+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
|
|
+ atomic_set_unchecked(&tcon->num_closes, 0);
|
|
+ atomic_set_unchecked(&tcon->num_deletes, 0);
|
|
+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
|
|
+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
|
|
+ atomic_set_unchecked(&tcon->num_renames, 0);
|
|
+ atomic_set_unchecked(&tcon->num_t2renames, 0);
|
|
+ atomic_set_unchecked(&tcon->num_ffirst, 0);
|
|
+ atomic_set_unchecked(&tcon->num_fnext, 0);
|
|
+ atomic_set_unchecked(&tcon->num_fclose, 0);
|
|
+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
|
|
+ atomic_set_unchecked(&tcon->num_symlinks, 0);
|
|
+ atomic_set_unchecked(&tcon->num_locks, 0);
|
|
}
|
|
}
|
|
}
|
|
@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
|
|
smBufAllocCount.counter, cifs_min_small);
|
|
#ifdef CONFIG_CIFS_STATS2
|
|
seq_printf(m, "Total Large %d Small %d Allocations\n",
|
|
- atomic_read(&totBufAllocCount),
|
|
- atomic_read(&totSmBufAllocCount));
|
|
+ atomic_read_unchecked(&totBufAllocCount),
|
|
+ atomic_read_unchecked(&totSmBufAllocCount));
|
|
#endif /* CONFIG_CIFS_STATS2 */
|
|
|
|
seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
|
|
@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
|
|
if (tcon->need_reconnect)
|
|
seq_puts(m, "\tDISCONNECTED ");
|
|
seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
|
|
- atomic_read(&tcon->num_smbs_sent),
|
|
- atomic_read(&tcon->num_oplock_brks));
|
|
+ atomic_read_unchecked(&tcon->num_smbs_sent),
|
|
+ atomic_read_unchecked(&tcon->num_oplock_brks));
|
|
seq_printf(m, "\nReads: %d Bytes: %lld",
|
|
- atomic_read(&tcon->num_reads),
|
|
+ atomic_read_unchecked(&tcon->num_reads),
|
|
(long long)(tcon->bytes_read));
|
|
seq_printf(m, "\nWrites: %d Bytes: %lld",
|
|
- atomic_read(&tcon->num_writes),
|
|
+ atomic_read_unchecked(&tcon->num_writes),
|
|
(long long)(tcon->bytes_written));
|
|
seq_printf(m, "\nFlushes: %d",
|
|
- atomic_read(&tcon->num_flushes));
|
|
+ atomic_read_unchecked(&tcon->num_flushes));
|
|
seq_printf(m, "\nLocks: %d HardLinks: %d "
|
|
"Symlinks: %d",
|
|
- atomic_read(&tcon->num_locks),
|
|
- atomic_read(&tcon->num_hardlinks),
|
|
- atomic_read(&tcon->num_symlinks));
|
|
+ atomic_read_unchecked(&tcon->num_locks),
|
|
+ atomic_read_unchecked(&tcon->num_hardlinks),
|
|
+ atomic_read_unchecked(&tcon->num_symlinks));
|
|
seq_printf(m, "\nOpens: %d Closes: %d "
|
|
"Deletes: %d",
|
|
- atomic_read(&tcon->num_opens),
|
|
- atomic_read(&tcon->num_closes),
|
|
- atomic_read(&tcon->num_deletes));
|
|
+ atomic_read_unchecked(&tcon->num_opens),
|
|
+ atomic_read_unchecked(&tcon->num_closes),
|
|
+ atomic_read_unchecked(&tcon->num_deletes));
|
|
seq_printf(m, "\nPosix Opens: %d "
|
|
"Posix Mkdirs: %d",
|
|
- atomic_read(&tcon->num_posixopens),
|
|
- atomic_read(&tcon->num_posixmkdirs));
|
|
+ atomic_read_unchecked(&tcon->num_posixopens),
|
|
+ atomic_read_unchecked(&tcon->num_posixmkdirs));
|
|
seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
|
|
- atomic_read(&tcon->num_mkdirs),
|
|
- atomic_read(&tcon->num_rmdirs));
|
|
+ atomic_read_unchecked(&tcon->num_mkdirs),
|
|
+ atomic_read_unchecked(&tcon->num_rmdirs));
|
|
seq_printf(m, "\nRenames: %d T2 Renames %d",
|
|
- atomic_read(&tcon->num_renames),
|
|
- atomic_read(&tcon->num_t2renames));
|
|
+ atomic_read_unchecked(&tcon->num_renames),
|
|
+ atomic_read_unchecked(&tcon->num_t2renames));
|
|
seq_printf(m, "\nFindFirst: %d FNext %d "
|
|
"FClose %d",
|
|
- atomic_read(&tcon->num_ffirst),
|
|
- atomic_read(&tcon->num_fnext),
|
|
- atomic_read(&tcon->num_fclose));
|
|
+ atomic_read_unchecked(&tcon->num_ffirst),
|
|
+ atomic_read_unchecked(&tcon->num_fnext),
|
|
+ atomic_read_unchecked(&tcon->num_fclose));
|
|
}
|
|
}
|
|
}
|
|
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
|
|
index c0f65e8..22c019f 100644
|
|
--- a/fs/cifs/cifsfs.c
|
|
+++ b/fs/cifs/cifsfs.c
|
|
@@ -1014,7 +1014,7 @@ cifs_init_request_bufs(void)
|
|
cifs_req_cachep = kmem_cache_create("cifs_request",
|
|
CIFSMaxBufSize +
|
|
MAX_CIFS_HDR_SIZE, 0,
|
|
- SLAB_HWCACHE_ALIGN, NULL);
|
|
+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
|
|
if (cifs_req_cachep == NULL)
|
|
return -ENOMEM;
|
|
|
|
@@ -1041,7 +1041,7 @@ cifs_init_request_bufs(void)
|
|
efficient to alloc 1 per page off the slab compared to 17K (5page)
|
|
alloc of large cifs buffers even when page debugging is on */
|
|
cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
|
|
- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
|
|
+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
|
|
NULL);
|
|
if (cifs_sm_req_cachep == NULL) {
|
|
mempool_destroy(cifs_req_poolp);
|
|
@@ -1126,8 +1126,8 @@ init_cifs(void)
|
|
atomic_set(&bufAllocCount, 0);
|
|
atomic_set(&smBufAllocCount, 0);
|
|
#ifdef CONFIG_CIFS_STATS2
|
|
- atomic_set(&totBufAllocCount, 0);
|
|
- atomic_set(&totSmBufAllocCount, 0);
|
|
+ atomic_set_unchecked(&totBufAllocCount, 0);
|
|
+ atomic_set_unchecked(&totSmBufAllocCount, 0);
|
|
#endif /* CONFIG_CIFS_STATS2 */
|
|
|
|
atomic_set(&midCount, 0);
|
|
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
|
|
index a5fcf19..083775f 100644
|
|
--- a/fs/cifs/cifsglob.h
|
|
+++ b/fs/cifs/cifsglob.h
|
|
@@ -440,28 +440,28 @@ struct cifs_tcon {
|
|
__u16 Flags; /* optional support bits */
|
|
enum statusEnum tidStatus;
|
|
#ifdef CONFIG_CIFS_STATS
|
|
- atomic_t num_smbs_sent;
|
|
- atomic_t num_writes;
|
|
- atomic_t num_reads;
|
|
- atomic_t num_flushes;
|
|
- atomic_t num_oplock_brks;
|
|
- atomic_t num_opens;
|
|
- atomic_t num_closes;
|
|
- atomic_t num_deletes;
|
|
- atomic_t num_mkdirs;
|
|
- atomic_t num_posixopens;
|
|
- atomic_t num_posixmkdirs;
|
|
- atomic_t num_rmdirs;
|
|
- atomic_t num_renames;
|
|
- atomic_t num_t2renames;
|
|
- atomic_t num_ffirst;
|
|
- atomic_t num_fnext;
|
|
- atomic_t num_fclose;
|
|
- atomic_t num_hardlinks;
|
|
- atomic_t num_symlinks;
|
|
- atomic_t num_locks;
|
|
- atomic_t num_acl_get;
|
|
- atomic_t num_acl_set;
|
|
+ atomic_unchecked_t num_smbs_sent;
|
|
+ atomic_unchecked_t num_writes;
|
|
+ atomic_unchecked_t num_reads;
|
|
+ atomic_unchecked_t num_flushes;
|
|
+ atomic_unchecked_t num_oplock_brks;
|
|
+ atomic_unchecked_t num_opens;
|
|
+ atomic_unchecked_t num_closes;
|
|
+ atomic_unchecked_t num_deletes;
|
|
+ atomic_unchecked_t num_mkdirs;
|
|
+ atomic_unchecked_t num_posixopens;
|
|
+ atomic_unchecked_t num_posixmkdirs;
|
|
+ atomic_unchecked_t num_rmdirs;
|
|
+ atomic_unchecked_t num_renames;
|
|
+ atomic_unchecked_t num_t2renames;
|
|
+ atomic_unchecked_t num_ffirst;
|
|
+ atomic_unchecked_t num_fnext;
|
|
+ atomic_unchecked_t num_fclose;
|
|
+ atomic_unchecked_t num_hardlinks;
|
|
+ atomic_unchecked_t num_symlinks;
|
|
+ atomic_unchecked_t num_locks;
|
|
+ atomic_unchecked_t num_acl_get;
|
|
+ atomic_unchecked_t num_acl_set;
|
|
#ifdef CONFIG_CIFS_STATS2
|
|
unsigned long long time_writes;
|
|
unsigned long long time_reads;
|
|
@@ -678,7 +678,7 @@ convert_delimiter(char *path, char delim)
|
|
}
|
|
|
|
#ifdef CONFIG_CIFS_STATS
|
|
-#define cifs_stats_inc atomic_inc
|
|
+#define cifs_stats_inc atomic_inc_unchecked
|
|
|
|
static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
|
|
unsigned int bytes)
|
|
@@ -1037,8 +1037,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
|
|
/* Various Debug counters */
|
|
GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
|
|
#ifdef CONFIG_CIFS_STATS2
|
|
-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
|
|
-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
|
|
+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
|
|
+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
|
|
#endif
|
|
GLOBAL_EXTERN atomic_t smBufAllocCount;
|
|
GLOBAL_EXTERN atomic_t midCount;
|
|
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
|
|
index 6b0e064..94e6c3c 100644
|
|
--- a/fs/cifs/link.c
|
|
+++ b/fs/cifs/link.c
|
|
@@ -600,7 +600,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
|
|
|
|
void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
|
|
{
|
|
- char *p = nd_get_link(nd);
|
|
+ const char *p = nd_get_link(nd);
|
|
if (!IS_ERR(p))
|
|
kfree(p);
|
|
}
|
|
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
|
|
index c29d1aa..58018da 100644
|
|
--- a/fs/cifs/misc.c
|
|
+++ b/fs/cifs/misc.c
|
|
@@ -156,7 +156,7 @@ cifs_buf_get(void)
|
|
memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
|
|
atomic_inc(&bufAllocCount);
|
|
#ifdef CONFIG_CIFS_STATS2
|
|
- atomic_inc(&totBufAllocCount);
|
|
+ atomic_inc_unchecked(&totBufAllocCount);
|
|
#endif /* CONFIG_CIFS_STATS2 */
|
|
}
|
|
|
|
@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
|
|
/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
|
|
atomic_inc(&smBufAllocCount);
|
|
#ifdef CONFIG_CIFS_STATS2
|
|
- atomic_inc(&totSmBufAllocCount);
|
|
+ atomic_inc_unchecked(&totSmBufAllocCount);
|
|
#endif /* CONFIG_CIFS_STATS2 */
|
|
|
|
}
|
|
diff --git a/fs/coda/cache.c b/fs/coda/cache.c
|
|
index 6901578..d402eb5 100644
|
|
--- a/fs/coda/cache.c
|
|
+++ b/fs/coda/cache.c
|
|
@@ -24,7 +24,7 @@
|
|
#include "coda_linux.h"
|
|
#include "coda_cache.h"
|
|
|
|
-static atomic_t permission_epoch = ATOMIC_INIT(0);
|
|
+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
|
|
|
|
/* replace or extend an acl cache hit */
|
|
void coda_cache_enter(struct inode *inode, int mask)
|
|
@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
|
|
struct coda_inode_info *cii = ITOC(inode);
|
|
|
|
spin_lock(&cii->c_lock);
|
|
- cii->c_cached_epoch = atomic_read(&permission_epoch);
|
|
+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
|
|
if (cii->c_uid != current_fsuid()) {
|
|
cii->c_uid = current_fsuid();
|
|
cii->c_cached_perm = mask;
|
|
@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
|
|
{
|
|
struct coda_inode_info *cii = ITOC(inode);
|
|
spin_lock(&cii->c_lock);
|
|
- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
|
|
+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
|
|
spin_unlock(&cii->c_lock);
|
|
}
|
|
|
|
/* remove all acl caches */
|
|
void coda_cache_clear_all(struct super_block *sb)
|
|
{
|
|
- atomic_inc(&permission_epoch);
|
|
+ atomic_inc_unchecked(&permission_epoch);
|
|
}
|
|
|
|
|
|
@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
|
|
spin_lock(&cii->c_lock);
|
|
hit = (mask & cii->c_cached_perm) == mask &&
|
|
cii->c_uid == current_fsuid() &&
|
|
- cii->c_cached_epoch == atomic_read(&permission_epoch);
|
|
+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
|
|
spin_unlock(&cii->c_lock);
|
|
|
|
return hit;
|
|
diff --git a/fs/compat.c b/fs/compat.c
|
|
index 56acdcb..a70d231 100644
|
|
--- a/fs/compat.c
|
|
+++ b/fs/compat.c
|
|
@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
|
|
|
|
set_fs(KERNEL_DS);
|
|
/* The __user pointer cast is valid because of the set_fs() */
|
|
- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
|
|
+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
|
|
set_fs(oldfs);
|
|
/* truncating is ok because it's a user address */
|
|
if (!ret)
|
|
@@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
|
|
goto out;
|
|
|
|
ret = -EINVAL;
|
|
- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
|
|
+ if (nr_segs > UIO_MAXIOV)
|
|
goto out;
|
|
if (nr_segs > fast_segs) {
|
|
ret = -ENOMEM;
|
|
@@ -1066,7 +1066,7 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
|
|
error = buf.error;
|
|
lastdirent = buf.previous;
|
|
if (lastdirent) {
|
|
- typeof(lastdirent->d_off) d_off = file->f_pos;
|
|
+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
|
|
if (__put_user_unaligned(d_off, &lastdirent->d_off))
|
|
error = -EFAULT;
|
|
else
|
|
diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
|
|
index 112e45a..b59845b 100644
|
|
--- a/fs/compat_binfmt_elf.c
|
|
+++ b/fs/compat_binfmt_elf.c
|
|
@@ -30,11 +30,13 @@
|
|
#undef elf_phdr
|
|
#undef elf_shdr
|
|
#undef elf_note
|
|
+#undef elf_dyn
|
|
#undef elf_addr_t
|
|
#define elfhdr elf32_hdr
|
|
#define elf_phdr elf32_phdr
|
|
#define elf_shdr elf32_shdr
|
|
#define elf_note elf32_note
|
|
+#define elf_dyn Elf32_Dyn
|
|
#define elf_addr_t Elf32_Addr
|
|
|
|
/*
|
|
diff --git a/fs/compat_ioctl.c.rej b/fs/compat_ioctl.c.rej
|
|
new file mode 100644
|
|
index 0000000..6ac40a2
|
|
--- /dev/null
|
|
+++ b/fs/compat_ioctl.c.rej
|
|
@@ -0,0 +1,40 @@
|
|
+--- fs/compat_ioctl.c 2012-05-21 11:33:34.635929678 +0200
|
|
++++ fs/compat_ioctl.c 2012-05-21 12:10:11.084048973 +0200
|
|
+@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsi
|
|
+
|
|
+ err = get_user(palp, &up->palette);
|
|
+ err |= get_user(length, &up->length);
|
|
++ if (err)
|
|
++ return -EFAULT;
|
|
+
|
|
+ up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
|
|
+ err = put_user(compat_ptr(palp), &up_native->palette);
|
|
+@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned
|
|
+ return -EFAULT;
|
|
+ if (__get_user(udata, &ss32->iomem_base))
|
|
+ return -EFAULT;
|
|
+- ss.iomem_base = compat_ptr(udata);
|
|
++ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
|
|
+ if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
|
|
+ __get_user(ss.port_high, &ss32->port_high))
|
|
+ return -EFAULT;
|
|
+@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(stru
|
|
+ copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
|
|
+ copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
|
|
+ copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
|
|
+- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
|
|
++ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
|
|
+ return -EFAULT;
|
|
+
|
|
+ return ioctl_preallocate(file, p);
|
|
+@@ -1610,8 +1612,8 @@ asmlinkage long compat_sys_ioctl(unsigne
|
|
+ static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
|
|
+ {
|
|
+ unsigned int a, b;
|
|
+- a = *(unsigned int *)p;
|
|
+- b = *(unsigned int *)q;
|
|
++ a = *(const unsigned int *)p;
|
|
++ b = *(const unsigned int *)q;
|
|
+ if (a > b)
|
|
+ return 1;
|
|
+ if (a < b)
|
|
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
|
|
index c91f6d1..de7ba5f 100644
|
|
--- a/fs/configfs/dir.c
|
|
+++ b/fs/configfs/dir.c
|
|
@@ -1576,7 +1576,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
|
|
}
|
|
for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
|
|
struct configfs_dirent *next;
|
|
- const char * name;
|
|
+ const unsigned char * name;
|
|
+ char d_name[sizeof(next->s_dentry->d_iname)];
|
|
int len;
|
|
struct inode *inode = NULL;
|
|
|
|
@@ -1586,7 +1587,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
|
|
continue;
|
|
|
|
name = configfs_get_name(next);
|
|
- len = strlen(name);
|
|
+ if (next->s_dentry && name == next->s_dentry->d_iname) {
|
|
+ len = next->s_dentry->d_name.len;
|
|
+ memcpy(d_name, name, len);
|
|
+ name = d_name;
|
|
+ } else
|
|
+ len = strlen(name);
|
|
|
|
/*
|
|
* We'll have a dentry and an inode for
|
|
diff --git a/fs/dcache.c b/fs/dcache.c
|
|
index 9f8c58d..57689a2 100644
|
|
--- a/fs/dcache.c
|
|
+++ b/fs/dcache.c
|
|
@@ -3108,7 +3108,7 @@ void __init vfs_caches_init(unsigned long mempages)
|
|
mempages -= reserve;
|
|
|
|
names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
|
|
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
|
|
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
|
|
|
|
dcache_init();
|
|
inode_init();
|
|
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
|
|
index b5b9b40..a1aefaa 100644
|
|
--- a/fs/ecryptfs/inode.c
|
|
+++ b/fs/ecryptfs/inode.c
|
|
@@ -686,7 +686,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
|
|
old_fs = get_fs();
|
|
set_fs(get_ds());
|
|
rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
|
|
- (char __user *)lower_buf,
|
|
+ (char __force_user *)lower_buf,
|
|
lower_bufsiz);
|
|
set_fs(old_fs);
|
|
if (rc < 0)
|
|
@@ -732,7 +732,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
|
|
}
|
|
old_fs = get_fs();
|
|
set_fs(get_ds());
|
|
- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
|
|
+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
|
|
set_fs(old_fs);
|
|
if (rc < 0) {
|
|
kfree(buf);
|
|
@@ -747,7 +747,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
|
|
static void
|
|
ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
|
|
{
|
|
- char *buf = nd_get_link(nd);
|
|
+ const char *buf = nd_get_link(nd);
|
|
if (!IS_ERR(buf)) {
|
|
/* Free the char* */
|
|
kfree(buf);
|
|
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
|
|
index c0038f65..47ab347 100644
|
|
--- a/fs/ecryptfs/miscdev.c
|
|
+++ b/fs/ecryptfs/miscdev.c
|
|
@@ -355,7 +355,7 @@ ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
|
|
goto out_unlock_msg_ctx;
|
|
i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
|
|
if (msg_ctx->msg) {
|
|
- if (copy_to_user(&buf[i], packet_length, packet_length_size))
|
|
+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
|
|
goto out_unlock_msg_ctx;
|
|
i += packet_length_size;
|
|
if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
|
|
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
|
|
index b2a34a1..162fa69 100644
|
|
--- a/fs/ecryptfs/read_write.c
|
|
+++ b/fs/ecryptfs/read_write.c
|
|
@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
|
|
return -EIO;
|
|
fs_save = get_fs();
|
|
set_fs(get_ds());
|
|
- rc = vfs_write(lower_file, data, size, &offset);
|
|
+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
|
|
set_fs(fs_save);
|
|
mark_inode_dirty_sync(ecryptfs_inode);
|
|
return rc;
|
|
@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
|
|
return -EIO;
|
|
fs_save = get_fs();
|
|
set_fs(get_ds());
|
|
- rc = vfs_read(lower_file, data, size, &offset);
|
|
+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
|
|
set_fs(fs_save);
|
|
return rc;
|
|
}
|
|
diff --git a/fs/exec.c b/fs/exec.c
|
|
index a4d05ce..90d4bdf 100644
|
|
--- a/fs/exec.c
|
|
+++ b/fs/exec.c
|
|
@@ -55,6 +55,13 @@
|
|
#include <linux/pipe_fs_i.h>
|
|
#include <linux/oom.h>
|
|
#include <linux/compat.h>
|
|
+#include <linux/random.h>
|
|
+#include <linux/seq_file.h>
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+#include <linux/kallsyms.h>
|
|
+#include <linux/kdebug.h>
|
|
+#endif
|
|
|
|
#include <asm/uaccess.h>
|
|
#include <asm/mmu_context.h>
|
|
@@ -66,6 +73,18 @@
|
|
|
|
#include <trace/events/sched.h>
|
|
|
|
+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
|
|
+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
|
|
+{
|
|
+ WARN_ONCE(1, "PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
|
|
+}
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
|
|
+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
|
|
+EXPORT_SYMBOL(pax_set_initial_flags_func);
|
|
+#endif
|
|
+
|
|
int core_uses_pid;
|
|
char core_pattern[CORENAME_MAX_SIZE] = "core";
|
|
unsigned int core_pipe_limit;
|
|
@@ -75,7 +94,7 @@ struct core_name {
|
|
char *corename;
|
|
int used, size;
|
|
};
|
|
-static atomic_t call_count = ATOMIC_INIT(1);
|
|
+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
|
|
|
|
/* The maximal length of core_pattern is also specified in sysctl.c */
|
|
|
|
@@ -191,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
|
|
int write)
|
|
{
|
|
struct page *page;
|
|
- int ret;
|
|
|
|
-#ifdef CONFIG_STACK_GROWSUP
|
|
- if (write) {
|
|
- ret = expand_downwards(bprm->vma, pos);
|
|
- if (ret < 0)
|
|
- return NULL;
|
|
- }
|
|
-#endif
|
|
- ret = get_user_pages(current, bprm->mm, pos,
|
|
- 1, write, 1, &page, NULL);
|
|
- if (ret <= 0)
|
|
+ if (0 > expand_downwards(bprm->vma, pos))
|
|
+ return NULL;
|
|
+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
|
|
return NULL;
|
|
|
|
if (write) {
|
|
@@ -277,6 +288,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
|
|
vma->vm_end = STACK_TOP_MAX;
|
|
vma->vm_start = vma->vm_end - PAGE_SIZE;
|
|
vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
|
|
+#endif
|
|
+
|
|
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
|
|
INIT_LIST_HEAD(&vma->anon_vma_chain);
|
|
|
|
@@ -291,6 +307,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
|
|
mm->stack_vm = mm->total_vm = 1;
|
|
up_write(&mm->mmap_sem);
|
|
bprm->p = vma->vm_end - sizeof(void *);
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDUSTACK
|
|
+ if (randomize_va_space)
|
|
+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
|
|
+#endif
|
|
+
|
|
return 0;
|
|
err:
|
|
up_write(&mm->mmap_sem);
|
|
@@ -420,14 +442,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
|
|
compat_uptr_t compat;
|
|
|
|
if (get_user(compat, argv.ptr.compat + nr))
|
|
- return ERR_PTR(-EFAULT);
|
|
+ return (const char __force_user *)ERR_PTR(-EFAULT);
|
|
|
|
return compat_ptr(compat);
|
|
}
|
|
#endif
|
|
|
|
if (get_user(native, argv.ptr.native + nr))
|
|
- return ERR_PTR(-EFAULT);
|
|
+ return (const char __force_user *)ERR_PTR(-EFAULT);
|
|
|
|
return native;
|
|
}
|
|
@@ -446,7 +468,7 @@ static int count(struct user_arg_ptr argv, int max)
|
|
if (!p)
|
|
break;
|
|
|
|
- if (IS_ERR(p))
|
|
+ if (IS_ERR((const char __force_kernel *)p))
|
|
return -EFAULT;
|
|
|
|
if (i++ >= max)
|
|
@@ -480,7 +502,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
|
|
|
|
ret = -EFAULT;
|
|
str = get_user_arg_ptr(argv, argc);
|
|
- if (IS_ERR(str))
|
|
+ if (IS_ERR((const char __force_kernel *)str))
|
|
goto out;
|
|
|
|
len = strnlen_user(str, MAX_ARG_STRLEN);
|
|
@@ -562,7 +584,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
|
|
int r;
|
|
mm_segment_t oldfs = get_fs();
|
|
struct user_arg_ptr argv = {
|
|
- .ptr.native = (const char __user *const __user *)__argv,
|
|
+ .ptr.native = (const char __force_user *const __force_user *)__argv,
|
|
};
|
|
|
|
set_fs(KERNEL_DS);
|
|
@@ -597,7 +619,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
|
|
unsigned long new_end = old_end - shift;
|
|
struct mmu_gather tlb;
|
|
|
|
- BUG_ON(new_start > new_end);
|
|
+ if (new_start >= new_end || new_start < mmap_min_addr)
|
|
+ return -ENOMEM;
|
|
|
|
/*
|
|
* ensure there are no vmas between where we want to go
|
|
@@ -606,6 +629,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
|
|
if (vma != find_vma(mm, new_start))
|
|
return -EFAULT;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ BUG_ON(pax_find_mirror_vma(vma));
|
|
+#endif
|
|
+
|
|
/*
|
|
* cover the whole range: [new_start, old_end)
|
|
*/
|
|
@@ -686,10 +713,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
|
|
stack_top = arch_align_stack(stack_top);
|
|
stack_top = PAGE_ALIGN(stack_top);
|
|
|
|
- if (unlikely(stack_top < mmap_min_addr) ||
|
|
- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
|
|
- return -ENOMEM;
|
|
-
|
|
stack_shift = vma->vm_end - stack_top;
|
|
|
|
bprm->p -= stack_shift;
|
|
@@ -701,8 +724,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
|
|
bprm->exec -= stack_shift;
|
|
|
|
down_write(&mm->mmap_sem);
|
|
+
|
|
+ /* Move stack pages down in memory. */
|
|
+ if (stack_shift) {
|
|
+ ret = shift_arg_pages(vma, stack_shift);
|
|
+ if (ret)
|
|
+ goto out_unlock;
|
|
+ }
|
|
+
|
|
vm_flags = VM_STACK_FLAGS;
|
|
|
|
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
|
|
+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
|
|
+ vm_flags &= ~VM_EXEC;
|
|
+
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+ if (mm->pax_flags & MF_PAX_MPROTECT)
|
|
+ vm_flags &= ~VM_MAYEXEC;
|
|
+#endif
|
|
+
|
|
+ }
|
|
+#endif
|
|
+
|
|
/*
|
|
* Adjust stack execute permissions; explicitly enable for
|
|
* EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
|
|
@@ -721,13 +764,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
|
|
goto out_unlock;
|
|
BUG_ON(prev != vma);
|
|
|
|
- /* Move stack pages down in memory. */
|
|
- if (stack_shift) {
|
|
- ret = shift_arg_pages(vma, stack_shift);
|
|
- if (ret)
|
|
- goto out_unlock;
|
|
- }
|
|
-
|
|
/* mprotect_fixup is overkill to remove the temporary stack flags */
|
|
vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
|
|
|
|
@@ -808,7 +844,7 @@ int kernel_read(struct file *file, loff_t offset,
|
|
old_fs = get_fs();
|
|
set_fs(get_ds());
|
|
/* The cast to a user pointer is valid due to the set_fs() */
|
|
- result = vfs_read(file, (void __user *)addr, count, &pos);
|
|
+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
|
|
set_fs(old_fs);
|
|
return result;
|
|
}
|
|
@@ -1282,7 +1318,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
|
|
}
|
|
rcu_read_unlock();
|
|
|
|
- if (p->fs->users > n_fs) {
|
|
+ if (atomic_read(&p->fs->users) > n_fs) {
|
|
bprm->unsafe |= LSM_UNSAFE_SHARE;
|
|
} else {
|
|
res = -EAGAIN;
|
|
@@ -1673,7 +1709,7 @@ static int expand_corename(struct core_name *cn)
|
|
{
|
|
char *old_corename = cn->corename;
|
|
|
|
- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
|
|
+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
|
|
cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
|
|
|
|
if (!cn->corename) {
|
|
@@ -1770,7 +1806,7 @@ static int format_corename(struct core_name *cn, long signr)
|
|
int pid_in_pattern = 0;
|
|
int err = 0;
|
|
|
|
- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
|
|
+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
|
|
cn->corename = kmalloc(cn->size, GFP_KERNEL);
|
|
cn->used = 0;
|
|
|
|
@@ -1867,6 +1903,238 @@ static int format_corename(struct core_name *cn, long signr)
|
|
return ispipe;
|
|
}
|
|
|
|
+int pax_check_flags(unsigned long *flags)
|
|
+{
|
|
+ int retval = 0;
|
|
+
|
|
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
|
|
+ if (*flags & MF_PAX_SEGMEXEC)
|
|
+ {
|
|
+ *flags &= ~MF_PAX_SEGMEXEC;
|
|
+ retval = -EINVAL;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ if ((*flags & MF_PAX_PAGEEXEC)
|
|
+
|
|
+#ifdef CONFIG_PAX_PAGEEXEC
|
|
+ && (*flags & MF_PAX_SEGMEXEC)
|
|
+#endif
|
|
+
|
|
+ )
|
|
+ {
|
|
+ *flags &= ~MF_PAX_PAGEEXEC;
|
|
+ retval = -EINVAL;
|
|
+ }
|
|
+
|
|
+ if ((*flags & MF_PAX_MPROTECT)
|
|
+
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
|
|
+#endif
|
|
+
|
|
+ )
|
|
+ {
|
|
+ *flags &= ~MF_PAX_MPROTECT;
|
|
+ retval = -EINVAL;
|
|
+ }
|
|
+
|
|
+ if ((*flags & MF_PAX_EMUTRAMP)
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUTRAMP
|
|
+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
|
|
+#endif
|
|
+
|
|
+ )
|
|
+ {
|
|
+ *flags &= ~MF_PAX_EMUTRAMP;
|
|
+ retval = -EINVAL;
|
|
+ }
|
|
+
|
|
+ return retval;
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL(pax_check_flags);
|
|
+
|
|
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
|
|
+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
|
|
+{
|
|
+ struct task_struct *tsk = current;
|
|
+ struct mm_struct *mm = current->mm;
|
|
+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
|
|
+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
|
|
+ char *path_exec = NULL;
|
|
+ char *path_fault = NULL;
|
|
+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
|
|
+
|
|
+ if (buffer_exec && buffer_fault) {
|
|
+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
|
|
+
|
|
+ down_read(&mm->mmap_sem);
|
|
+ vma = mm->mmap;
|
|
+ while (vma && (!vma_exec || !vma_fault)) {
|
|
+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
|
|
+ vma_exec = vma;
|
|
+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
|
|
+ vma_fault = vma;
|
|
+ vma = vma->vm_next;
|
|
+ }
|
|
+ if (vma_exec) {
|
|
+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
|
|
+ if (IS_ERR(path_exec))
|
|
+ path_exec = "<path too long>";
|
|
+ else {
|
|
+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
|
|
+ if (path_exec) {
|
|
+ *path_exec = 0;
|
|
+ path_exec = buffer_exec;
|
|
+ } else
|
|
+ path_exec = "<path too long>";
|
|
+ }
|
|
+ }
|
|
+ if (vma_fault) {
|
|
+ start = vma_fault->vm_start;
|
|
+ end = vma_fault->vm_end;
|
|
+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
|
|
+ if (vma_fault->vm_file) {
|
|
+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
|
|
+ if (IS_ERR(path_fault))
|
|
+ path_fault = "<path too long>";
|
|
+ else {
|
|
+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
|
|
+ if (path_fault) {
|
|
+ *path_fault = 0;
|
|
+ path_fault = buffer_fault;
|
|
+ } else
|
|
+ path_fault = "<path too long>";
|
|
+ }
|
|
+ } else
|
|
+ path_fault = "<anonymous mapping>";
|
|
+ }
|
|
+ up_read(&mm->mmap_sem);
|
|
+ }
|
|
+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
|
|
+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
|
|
+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
|
|
+ task_uid(tsk), task_euid(tsk), pc, sp);
|
|
+ free_page((unsigned long)buffer_exec);
|
|
+ free_page((unsigned long)buffer_fault);
|
|
+ pax_report_insns(regs, pc, sp);
|
|
+ do_coredump(SIGKILL, SIGKILL, regs);
|
|
+}
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+void pax_report_refcount_overflow(struct pt_regs *regs)
|
|
+{
|
|
+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
|
|
+ current->comm, task_pid_nr(current), current_uid(), current_euid());
|
|
+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
|
|
+ show_regs(regs);
|
|
+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
|
|
+}
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_USERCOPY
|
|
+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
|
|
+static noinline int check_stack_object(const void *obj, unsigned long len)
|
|
+{
|
|
+ const void * const stack = task_stack_page(current);
|
|
+ const void * const stackend = stack + THREAD_SIZE;
|
|
+
|
|
+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
|
|
+ const void *frame = NULL;
|
|
+ const void *oldframe;
|
|
+#endif
|
|
+
|
|
+ if (obj + len < obj)
|
|
+ return -1;
|
|
+
|
|
+ if (obj + len <= stack || stackend <= obj)
|
|
+ return 0;
|
|
+
|
|
+ if (obj < stack || stackend < obj + len)
|
|
+ return -1;
|
|
+
|
|
+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
|
|
+ oldframe = __builtin_frame_address(1);
|
|
+ if (oldframe)
|
|
+ frame = __builtin_frame_address(2);
|
|
+ /*
|
|
+ low ----------------------------------------------> high
|
|
+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
|
|
+ ^----------------^
|
|
+ allow copies only within here
|
|
+ */
|
|
+ while (stack <= frame && frame < stackend) {
|
|
+ /* if obj + len extends past the last frame, this
|
|
+ check won't pass and the next frame will be 0,
|
|
+ causing us to bail out and correctly report
|
|
+ the copy as invalid
|
|
+ */
|
|
+ if (obj + len <= frame)
|
|
+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
|
|
+ oldframe = frame;
|
|
+ frame = *(const void * const *)frame;
|
|
+ }
|
|
+ return -1;
|
|
+#else
|
|
+ return 1;
|
|
+#endif
|
|
+}
|
|
+
|
|
+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
|
|
+{
|
|
+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
|
|
+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
|
|
+ dump_stack();
|
|
+ do_group_exit(SIGKILL);
|
|
+}
|
|
+#endif
|
|
+
|
|
+void check_object_size(const void *ptr, unsigned long n, bool to)
|
|
+{
|
|
+
|
|
+#ifdef CONFIG_PAX_USERCOPY
|
|
+ const char *type;
|
|
+
|
|
+ if (!n)
|
|
+ return;
|
|
+
|
|
+ type = check_heap_object(ptr, n, to);
|
|
+ if (!type) {
|
|
+ if (check_stack_object(ptr, n) != -1)
|
|
+ return;
|
|
+ type = "<process stack>";
|
|
+ }
|
|
+
|
|
+ pax_report_usercopy(ptr, n, to, type);
|
|
+#endif
|
|
+
|
|
+}
|
|
+EXPORT_SYMBOL(check_object_size);
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
|
|
+void pax_track_stack(void)
|
|
+{
|
|
+ unsigned long sp = (unsigned long)&sp;
|
|
+ if (sp < current_thread_info()->lowest_stack &&
|
|
+ sp > (unsigned long)task_stack_page(current))
|
|
+ current_thread_info()->lowest_stack = sp;
|
|
+}
|
|
+EXPORT_SYMBOL(pax_track_stack);
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_SIZE_OVERFLOW
|
|
+void report_size_overflow(const char *file, unsigned int line, const char *func)
|
|
+{
|
|
+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
|
|
+ dump_stack();
|
|
+ do_group_exit(SIGKILL);
|
|
+}
|
|
+EXPORT_SYMBOL(report_size_overflow);
|
|
+#endif
|
|
+
|
|
static int zap_process(struct task_struct *start, int exit_code)
|
|
{
|
|
struct task_struct *t;
|
|
@@ -2070,17 +2338,17 @@ static void wait_for_dump_helpers(struct file *file)
|
|
pipe = file->f_path.dentry->d_inode->i_pipe;
|
|
|
|
pipe_lock(pipe);
|
|
- pipe->readers++;
|
|
- pipe->writers--;
|
|
+ atomic_inc(&pipe->readers);
|
|
+ atomic_dec(&pipe->writers);
|
|
|
|
- while ((pipe->readers > 1) && (!signal_pending(current))) {
|
|
+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
|
|
wake_up_interruptible_sync(&pipe->wait);
|
|
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
|
|
pipe_wait(pipe);
|
|
}
|
|
|
|
- pipe->readers--;
|
|
- pipe->writers++;
|
|
+ atomic_dec(&pipe->readers);
|
|
+ atomic_inc(&pipe->writers);
|
|
pipe_unlock(pipe);
|
|
|
|
}
|
|
@@ -2141,7 +2409,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
|
|
int retval = 0;
|
|
int flag = 0;
|
|
int ispipe;
|
|
- static atomic_t core_dump_count = ATOMIC_INIT(0);
|
|
+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
|
|
struct coredump_params cprm = {
|
|
.signr = signr,
|
|
.regs = regs,
|
|
@@ -2223,7 +2491,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
|
|
}
|
|
cprm.limit = RLIM_INFINITY;
|
|
|
|
- dump_count = atomic_inc_return(&core_dump_count);
|
|
+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
|
|
if (core_pipe_limit && (core_pipe_limit < dump_count)) {
|
|
printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
|
|
task_tgid_vnr(current), current->comm);
|
|
@@ -2293,7 +2561,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
|
|
filp_close(cprm.file, NULL);
|
|
fail_dropcount:
|
|
if (ispipe)
|
|
- atomic_dec(&core_dump_count);
|
|
+ atomic_dec_unchecked(&core_dump_count);
|
|
fail_unlock:
|
|
kfree(cn.corename);
|
|
fail_corename:
|
|
@@ -2312,7 +2580,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
|
|
*/
|
|
int dump_write(struct file *file, const void *addr, int nr)
|
|
{
|
|
- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
|
|
+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
|
|
}
|
|
EXPORT_SYMBOL(dump_write);
|
|
|
|
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
|
|
index 78cbec5..c853632 100644
|
|
--- a/fs/ext4/ext4.h
|
|
+++ b/fs/ext4/ext4.h
|
|
@@ -1232,19 +1232,19 @@ struct ext4_sb_info {
|
|
unsigned long s_mb_last_start;
|
|
|
|
/* stats for buddy allocator */
|
|
- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
|
|
- atomic_t s_bal_success; /* we found long enough chunks */
|
|
- atomic_t s_bal_allocated; /* in blocks */
|
|
- atomic_t s_bal_ex_scanned; /* total extents scanned */
|
|
- atomic_t s_bal_goals; /* goal hits */
|
|
- atomic_t s_bal_breaks; /* too long searches */
|
|
- atomic_t s_bal_2orders; /* 2^order hits */
|
|
+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
|
|
+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
|
|
+ atomic_unchecked_t s_bal_allocated; /* in blocks */
|
|
+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
|
|
+ atomic_unchecked_t s_bal_goals; /* goal hits */
|
|
+ atomic_unchecked_t s_bal_breaks; /* too long searches */
|
|
+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
|
|
spinlock_t s_bal_lock;
|
|
unsigned long s_mb_buddies_generated;
|
|
unsigned long long s_mb_generation_time;
|
|
- atomic_t s_mb_lost_chunks;
|
|
- atomic_t s_mb_preallocated;
|
|
- atomic_t s_mb_discarded;
|
|
+ atomic_unchecked_t s_mb_lost_chunks;
|
|
+ atomic_unchecked_t s_mb_preallocated;
|
|
+ atomic_unchecked_t s_mb_discarded;
|
|
atomic_t s_lock_busy;
|
|
|
|
/* locality groups */
|
|
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
|
|
index fd73b2d..78c11b8 100644
|
|
--- a/fs/ext4/mballoc.c
|
|
+++ b/fs/ext4/mballoc.c
|
|
@@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
|
|
BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
|
|
|
|
if (EXT4_SB(sb)->s_mb_stats)
|
|
- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
|
|
+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
|
|
|
|
break;
|
|
}
|
|
@@ -2045,7 +2045,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
|
|
ac->ac_status = AC_STATUS_CONTINUE;
|
|
ac->ac_flags |= EXT4_MB_HINT_FIRST;
|
|
cr = 3;
|
|
- atomic_inc(&sbi->s_mb_lost_chunks);
|
|
+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
|
|
goto repeat;
|
|
}
|
|
}
|
|
@@ -2549,25 +2549,25 @@ int ext4_mb_release(struct super_block *sb)
|
|
if (sbi->s_mb_stats) {
|
|
ext4_msg(sb, KERN_INFO,
|
|
"mballoc: %u blocks %u reqs (%u success)",
|
|
- atomic_read(&sbi->s_bal_allocated),
|
|
- atomic_read(&sbi->s_bal_reqs),
|
|
- atomic_read(&sbi->s_bal_success));
|
|
+ atomic_read_unchecked(&sbi->s_bal_allocated),
|
|
+ atomic_read_unchecked(&sbi->s_bal_reqs),
|
|
+ atomic_read_unchecked(&sbi->s_bal_success));
|
|
ext4_msg(sb, KERN_INFO,
|
|
"mballoc: %u extents scanned, %u goal hits, "
|
|
"%u 2^N hits, %u breaks, %u lost",
|
|
- atomic_read(&sbi->s_bal_ex_scanned),
|
|
- atomic_read(&sbi->s_bal_goals),
|
|
- atomic_read(&sbi->s_bal_2orders),
|
|
- atomic_read(&sbi->s_bal_breaks),
|
|
- atomic_read(&sbi->s_mb_lost_chunks));
|
|
+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
|
|
+ atomic_read_unchecked(&sbi->s_bal_goals),
|
|
+ atomic_read_unchecked(&sbi->s_bal_2orders),
|
|
+ atomic_read_unchecked(&sbi->s_bal_breaks),
|
|
+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
|
|
ext4_msg(sb, KERN_INFO,
|
|
"mballoc: %lu generated and it took %Lu",
|
|
sbi->s_mb_buddies_generated,
|
|
sbi->s_mb_generation_time);
|
|
ext4_msg(sb, KERN_INFO,
|
|
"mballoc: %u preallocated, %u discarded",
|
|
- atomic_read(&sbi->s_mb_preallocated),
|
|
- atomic_read(&sbi->s_mb_discarded));
|
|
+ atomic_read_unchecked(&sbi->s_mb_preallocated),
|
|
+ atomic_read_unchecked(&sbi->s_mb_discarded));
|
|
}
|
|
|
|
free_percpu(sbi->s_locality_groups);
|
|
@@ -3050,16 +3050,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
|
|
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
|
|
|
|
if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
|
|
- atomic_inc(&sbi->s_bal_reqs);
|
|
- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
|
|
+ atomic_inc_unchecked(&sbi->s_bal_reqs);
|
|
+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
|
|
if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
|
|
- atomic_inc(&sbi->s_bal_success);
|
|
- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
|
|
+ atomic_inc_unchecked(&sbi->s_bal_success);
|
|
+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
|
|
if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
|
|
ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
|
|
- atomic_inc(&sbi->s_bal_goals);
|
|
+ atomic_inc_unchecked(&sbi->s_bal_goals);
|
|
if (ac->ac_found > sbi->s_mb_max_to_scan)
|
|
- atomic_inc(&sbi->s_bal_breaks);
|
|
+ atomic_inc_unchecked(&sbi->s_bal_breaks);
|
|
}
|
|
|
|
if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
|
|
@@ -3468,7 +3468,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
|
|
trace_ext4_mb_new_inode_pa(ac, pa);
|
|
|
|
ext4_mb_use_inode_pa(ac, pa);
|
|
- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
|
|
+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
|
|
|
|
ei = EXT4_I(ac->ac_inode);
|
|
grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
|
|
@@ -3528,7 +3528,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
|
|
trace_ext4_mb_new_group_pa(ac, pa);
|
|
|
|
ext4_mb_use_group_pa(ac, pa);
|
|
- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
|
|
+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
|
|
|
|
grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
|
|
lg = ac->ac_lg;
|
|
@@ -3617,7 +3617,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
|
|
* from the bitmap and continue.
|
|
*/
|
|
}
|
|
- atomic_add(free, &sbi->s_mb_discarded);
|
|
+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
|
|
|
|
return err;
|
|
}
|
|
@@ -3635,7 +3635,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
|
|
ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
|
|
BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
|
|
mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
|
|
- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
|
|
+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
|
|
trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
|
|
|
|
return 0;
|
|
diff --git a/fs/fcntl.c b/fs/fcntl.c
|
|
index 75e7c1f..4b23577 100644
|
|
--- a/fs/fcntl.c
|
|
+++ b/fs/fcntl.c
|
|
@@ -266,7 +266,7 @@ pid_t f_getown(struct file *filp)
|
|
|
|
static int f_setown_ex(struct file *filp, unsigned long arg)
|
|
{
|
|
- struct f_owner_ex * __user owner_p = (void * __user)arg;
|
|
+ struct f_owner_ex __user *owner_p = (void __user *)arg;
|
|
struct f_owner_ex owner;
|
|
struct pid *pid;
|
|
int type;
|
|
@@ -306,7 +306,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
|
|
|
|
static int f_getown_ex(struct file *filp, unsigned long arg)
|
|
{
|
|
- struct f_owner_ex * __user owner_p = (void * __user)arg;
|
|
+ struct f_owner_ex __user *owner_p = (void __user *)arg;
|
|
struct f_owner_ex owner;
|
|
int ret = 0;
|
|
|
|
diff --git a/fs/fifo.c b/fs/fifo.c
|
|
index cf6f434..3d7942c 100644
|
|
--- a/fs/fifo.c
|
|
+++ b/fs/fifo.c
|
|
@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
|
|
*/
|
|
filp->f_op = &read_pipefifo_fops;
|
|
pipe->r_counter++;
|
|
- if (pipe->readers++ == 0)
|
|
+ if (atomic_inc_return(&pipe->readers) == 1)
|
|
wake_up_partner(inode);
|
|
|
|
- if (!pipe->writers) {
|
|
+ if (!atomic_read(&pipe->writers)) {
|
|
if ((filp->f_flags & O_NONBLOCK)) {
|
|
/* suppress POLLHUP until we have
|
|
* seen a writer */
|
|
@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
|
|
* errno=ENXIO when there is no process reading the FIFO.
|
|
*/
|
|
ret = -ENXIO;
|
|
- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
|
|
+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
|
|
goto err;
|
|
|
|
filp->f_op = &write_pipefifo_fops;
|
|
pipe->w_counter++;
|
|
- if (!pipe->writers++)
|
|
+ if (atomic_inc_return(&pipe->writers) == 1)
|
|
wake_up_partner(inode);
|
|
|
|
- if (!pipe->readers) {
|
|
+ if (!atomic_read(&pipe->readers)) {
|
|
if (wait_for_partner(inode, &pipe->r_counter))
|
|
goto err_wr;
|
|
}
|
|
@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
|
|
*/
|
|
filp->f_op = &rdwr_pipefifo_fops;
|
|
|
|
- pipe->readers++;
|
|
- pipe->writers++;
|
|
+ atomic_inc(&pipe->readers);
|
|
+ atomic_inc(&pipe->writers);
|
|
pipe->r_counter++;
|
|
pipe->w_counter++;
|
|
- if (pipe->readers == 1 || pipe->writers == 1)
|
|
+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
|
|
wake_up_partner(inode);
|
|
break;
|
|
|
|
@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
|
|
return 0;
|
|
|
|
err_rd:
|
|
- if (!--pipe->readers)
|
|
+ if (atomic_dec_and_test(&pipe->readers))
|
|
wake_up_interruptible(&pipe->wait);
|
|
ret = -ERESTARTSYS;
|
|
goto err;
|
|
|
|
err_wr:
|
|
- if (!--pipe->writers)
|
|
+ if (atomic_dec_and_test(&pipe->writers))
|
|
wake_up_interruptible(&pipe->wait);
|
|
ret = -ERESTARTSYS;
|
|
goto err;
|
|
|
|
err:
|
|
- if (!pipe->readers && !pipe->writers)
|
|
+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
|
|
free_pipe_info(inode);
|
|
|
|
err_nocleanup:
|
|
diff --git a/fs/fs_struct.c b/fs/fs_struct.c
|
|
index e159e68..9c8a9b1 100644
|
|
--- a/fs/fs_struct.c
|
|
+++ b/fs/fs_struct.c
|
|
@@ -111,7 +111,7 @@ void exit_fs(struct task_struct *tsk)
|
|
task_lock(tsk);
|
|
spin_lock(&fs->lock);
|
|
tsk->fs = NULL;
|
|
- kill = !--fs->users;
|
|
+ kill = !atomic_dec_return(&fs->users);
|
|
spin_unlock(&fs->lock);
|
|
task_unlock(tsk);
|
|
if (kill)
|
|
@@ -124,7 +124,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
|
|
struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
|
|
/* We don't need to lock fs - think why ;-) */
|
|
if (fs) {
|
|
- fs->users = 1;
|
|
+ atomic_set(&fs->users, 1);
|
|
fs->in_exec = 0;
|
|
spin_lock_init(&fs->lock);
|
|
seqcount_init(&fs->seq);
|
|
@@ -151,7 +151,7 @@ int unshare_fs_struct(void)
|
|
|
|
task_lock(current);
|
|
spin_lock(&fs->lock);
|
|
- kill = !--fs->users;
|
|
+ kill = !atomic_dec_return(&fs->users);
|
|
current->fs = new_fs;
|
|
spin_unlock(&fs->lock);
|
|
task_unlock(current);
|
|
@@ -171,7 +171,7 @@ EXPORT_SYMBOL(current_umask);
|
|
|
|
/* to be mentioned only in INIT_TASK */
|
|
struct fs_struct init_fs = {
|
|
- .users = 1,
|
|
+ .users = ATOMIC_INIT(1),
|
|
.lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
|
|
.seq = SEQCNT_ZERO,
|
|
.umask = 0022,
|
|
@@ -187,12 +187,12 @@ void daemonize_fs_struct(void)
|
|
task_lock(current);
|
|
|
|
spin_lock(&init_fs.lock);
|
|
- init_fs.users++;
|
|
+ atomic_inc(&init_fs.users);
|
|
spin_unlock(&init_fs.lock);
|
|
|
|
spin_lock(&fs->lock);
|
|
current->fs = &init_fs;
|
|
- kill = !--fs->users;
|
|
+ kill = !atomic_dec_return(&fs->users);
|
|
spin_unlock(&fs->lock);
|
|
|
|
task_unlock(current);
|
|
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
|
|
index 9905350..02eaec4 100644
|
|
--- a/fs/fscache/cookie.c
|
|
+++ b/fs/fscache/cookie.c
|
|
@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
|
|
parent ? (char *) parent->def->name : "<no-parent>",
|
|
def->name, netfs_data);
|
|
|
|
- fscache_stat(&fscache_n_acquires);
|
|
+ fscache_stat_unchecked(&fscache_n_acquires);
|
|
|
|
/* if there's no parent cookie, then we don't create one here either */
|
|
if (!parent) {
|
|
- fscache_stat(&fscache_n_acquires_null);
|
|
+ fscache_stat_unchecked(&fscache_n_acquires_null);
|
|
_leave(" [no parent]");
|
|
return NULL;
|
|
}
|
|
@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
|
|
/* allocate and initialise a cookie */
|
|
cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
|
|
if (!cookie) {
|
|
- fscache_stat(&fscache_n_acquires_oom);
|
|
+ fscache_stat_unchecked(&fscache_n_acquires_oom);
|
|
_leave(" [ENOMEM]");
|
|
return NULL;
|
|
}
|
|
@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
|
|
|
|
switch (cookie->def->type) {
|
|
case FSCACHE_COOKIE_TYPE_INDEX:
|
|
- fscache_stat(&fscache_n_cookie_index);
|
|
+ fscache_stat_unchecked(&fscache_n_cookie_index);
|
|
break;
|
|
case FSCACHE_COOKIE_TYPE_DATAFILE:
|
|
- fscache_stat(&fscache_n_cookie_data);
|
|
+ fscache_stat_unchecked(&fscache_n_cookie_data);
|
|
break;
|
|
default:
|
|
- fscache_stat(&fscache_n_cookie_special);
|
|
+ fscache_stat_unchecked(&fscache_n_cookie_special);
|
|
break;
|
|
}
|
|
|
|
@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
|
|
if (fscache_acquire_non_index_cookie(cookie) < 0) {
|
|
atomic_dec(&parent->n_children);
|
|
__fscache_cookie_put(cookie);
|
|
- fscache_stat(&fscache_n_acquires_nobufs);
|
|
+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
|
|
_leave(" = NULL");
|
|
return NULL;
|
|
}
|
|
}
|
|
|
|
- fscache_stat(&fscache_n_acquires_ok);
|
|
+ fscache_stat_unchecked(&fscache_n_acquires_ok);
|
|
_leave(" = %p", cookie);
|
|
return cookie;
|
|
}
|
|
@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
|
|
cache = fscache_select_cache_for_object(cookie->parent);
|
|
if (!cache) {
|
|
up_read(&fscache_addremove_sem);
|
|
- fscache_stat(&fscache_n_acquires_no_cache);
|
|
+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
|
|
_leave(" = -ENOMEDIUM [no cache]");
|
|
return -ENOMEDIUM;
|
|
}
|
|
@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
|
|
object = cache->ops->alloc_object(cache, cookie);
|
|
fscache_stat_d(&fscache_n_cop_alloc_object);
|
|
if (IS_ERR(object)) {
|
|
- fscache_stat(&fscache_n_object_no_alloc);
|
|
+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
|
|
ret = PTR_ERR(object);
|
|
goto error;
|
|
}
|
|
|
|
- fscache_stat(&fscache_n_object_alloc);
|
|
+ fscache_stat_unchecked(&fscache_n_object_alloc);
|
|
|
|
object->debug_id = atomic_inc_return(&fscache_object_debug_id);
|
|
|
|
@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
|
|
struct fscache_object *object;
|
|
struct hlist_node *_p;
|
|
|
|
- fscache_stat(&fscache_n_updates);
|
|
+ fscache_stat_unchecked(&fscache_n_updates);
|
|
|
|
if (!cookie) {
|
|
- fscache_stat(&fscache_n_updates_null);
|
|
+ fscache_stat_unchecked(&fscache_n_updates_null);
|
|
_leave(" [no cookie]");
|
|
return;
|
|
}
|
|
@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
|
|
struct fscache_object *object;
|
|
unsigned long event;
|
|
|
|
- fscache_stat(&fscache_n_relinquishes);
|
|
+ fscache_stat_unchecked(&fscache_n_relinquishes);
|
|
if (retire)
|
|
- fscache_stat(&fscache_n_relinquishes_retire);
|
|
+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
|
|
|
|
if (!cookie) {
|
|
- fscache_stat(&fscache_n_relinquishes_null);
|
|
+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
|
|
_leave(" [no cookie]");
|
|
return;
|
|
}
|
|
@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
|
|
|
|
/* wait for the cookie to finish being instantiated (or to fail) */
|
|
if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
|
|
- fscache_stat(&fscache_n_relinquishes_waitcrt);
|
|
+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
|
|
wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
|
|
fscache_wait_bit, TASK_UNINTERRUPTIBLE);
|
|
}
|
|
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
|
|
index f6aad48..88dcf26 100644
|
|
--- a/fs/fscache/internal.h
|
|
+++ b/fs/fscache/internal.h
|
|
@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
|
|
extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
|
|
extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
|
|
|
|
-extern atomic_t fscache_n_op_pend;
|
|
-extern atomic_t fscache_n_op_run;
|
|
-extern atomic_t fscache_n_op_enqueue;
|
|
-extern atomic_t fscache_n_op_deferred_release;
|
|
-extern atomic_t fscache_n_op_release;
|
|
-extern atomic_t fscache_n_op_gc;
|
|
-extern atomic_t fscache_n_op_cancelled;
|
|
-extern atomic_t fscache_n_op_rejected;
|
|
-
|
|
-extern atomic_t fscache_n_attr_changed;
|
|
-extern atomic_t fscache_n_attr_changed_ok;
|
|
-extern atomic_t fscache_n_attr_changed_nobufs;
|
|
-extern atomic_t fscache_n_attr_changed_nomem;
|
|
-extern atomic_t fscache_n_attr_changed_calls;
|
|
-
|
|
-extern atomic_t fscache_n_allocs;
|
|
-extern atomic_t fscache_n_allocs_ok;
|
|
-extern atomic_t fscache_n_allocs_wait;
|
|
-extern atomic_t fscache_n_allocs_nobufs;
|
|
-extern atomic_t fscache_n_allocs_intr;
|
|
-extern atomic_t fscache_n_allocs_object_dead;
|
|
-extern atomic_t fscache_n_alloc_ops;
|
|
-extern atomic_t fscache_n_alloc_op_waits;
|
|
-
|
|
-extern atomic_t fscache_n_retrievals;
|
|
-extern atomic_t fscache_n_retrievals_ok;
|
|
-extern atomic_t fscache_n_retrievals_wait;
|
|
-extern atomic_t fscache_n_retrievals_nodata;
|
|
-extern atomic_t fscache_n_retrievals_nobufs;
|
|
-extern atomic_t fscache_n_retrievals_intr;
|
|
-extern atomic_t fscache_n_retrievals_nomem;
|
|
-extern atomic_t fscache_n_retrievals_object_dead;
|
|
-extern atomic_t fscache_n_retrieval_ops;
|
|
-extern atomic_t fscache_n_retrieval_op_waits;
|
|
-
|
|
-extern atomic_t fscache_n_stores;
|
|
-extern atomic_t fscache_n_stores_ok;
|
|
-extern atomic_t fscache_n_stores_again;
|
|
-extern atomic_t fscache_n_stores_nobufs;
|
|
-extern atomic_t fscache_n_stores_oom;
|
|
-extern atomic_t fscache_n_store_ops;
|
|
-extern atomic_t fscache_n_store_calls;
|
|
-extern atomic_t fscache_n_store_pages;
|
|
-extern atomic_t fscache_n_store_radix_deletes;
|
|
-extern atomic_t fscache_n_store_pages_over_limit;
|
|
-
|
|
-extern atomic_t fscache_n_store_vmscan_not_storing;
|
|
-extern atomic_t fscache_n_store_vmscan_gone;
|
|
-extern atomic_t fscache_n_store_vmscan_busy;
|
|
-extern atomic_t fscache_n_store_vmscan_cancelled;
|
|
-
|
|
-extern atomic_t fscache_n_marks;
|
|
-extern atomic_t fscache_n_uncaches;
|
|
-
|
|
-extern atomic_t fscache_n_acquires;
|
|
-extern atomic_t fscache_n_acquires_null;
|
|
-extern atomic_t fscache_n_acquires_no_cache;
|
|
-extern atomic_t fscache_n_acquires_ok;
|
|
-extern atomic_t fscache_n_acquires_nobufs;
|
|
-extern atomic_t fscache_n_acquires_oom;
|
|
-
|
|
-extern atomic_t fscache_n_updates;
|
|
-extern atomic_t fscache_n_updates_null;
|
|
-extern atomic_t fscache_n_updates_run;
|
|
-
|
|
-extern atomic_t fscache_n_relinquishes;
|
|
-extern atomic_t fscache_n_relinquishes_null;
|
|
-extern atomic_t fscache_n_relinquishes_waitcrt;
|
|
-extern atomic_t fscache_n_relinquishes_retire;
|
|
-
|
|
-extern atomic_t fscache_n_cookie_index;
|
|
-extern atomic_t fscache_n_cookie_data;
|
|
-extern atomic_t fscache_n_cookie_special;
|
|
-
|
|
-extern atomic_t fscache_n_object_alloc;
|
|
-extern atomic_t fscache_n_object_no_alloc;
|
|
-extern atomic_t fscache_n_object_lookups;
|
|
-extern atomic_t fscache_n_object_lookups_negative;
|
|
-extern atomic_t fscache_n_object_lookups_positive;
|
|
-extern atomic_t fscache_n_object_lookups_timed_out;
|
|
-extern atomic_t fscache_n_object_created;
|
|
-extern atomic_t fscache_n_object_avail;
|
|
-extern atomic_t fscache_n_object_dead;
|
|
-
|
|
-extern atomic_t fscache_n_checkaux_none;
|
|
-extern atomic_t fscache_n_checkaux_okay;
|
|
-extern atomic_t fscache_n_checkaux_update;
|
|
-extern atomic_t fscache_n_checkaux_obsolete;
|
|
+extern atomic_unchecked_t fscache_n_op_pend;
|
|
+extern atomic_unchecked_t fscache_n_op_run;
|
|
+extern atomic_unchecked_t fscache_n_op_enqueue;
|
|
+extern atomic_unchecked_t fscache_n_op_deferred_release;
|
|
+extern atomic_unchecked_t fscache_n_op_release;
|
|
+extern atomic_unchecked_t fscache_n_op_gc;
|
|
+extern atomic_unchecked_t fscache_n_op_cancelled;
|
|
+extern atomic_unchecked_t fscache_n_op_rejected;
|
|
+
|
|
+extern atomic_unchecked_t fscache_n_attr_changed;
|
|
+extern atomic_unchecked_t fscache_n_attr_changed_ok;
|
|
+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
|
|
+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
|
|
+extern atomic_unchecked_t fscache_n_attr_changed_calls;
|
|
+
|
|
+extern atomic_unchecked_t fscache_n_allocs;
|
|
+extern atomic_unchecked_t fscache_n_allocs_ok;
|
|
+extern atomic_unchecked_t fscache_n_allocs_wait;
|
|
+extern atomic_unchecked_t fscache_n_allocs_nobufs;
|
|
+extern atomic_unchecked_t fscache_n_allocs_intr;
|
|
+extern atomic_unchecked_t fscache_n_allocs_object_dead;
|
|
+extern atomic_unchecked_t fscache_n_alloc_ops;
|
|
+extern atomic_unchecked_t fscache_n_alloc_op_waits;
|
|
+
|
|
+extern atomic_unchecked_t fscache_n_retrievals;
|
|
+extern atomic_unchecked_t fscache_n_retrievals_ok;
|
|
+extern atomic_unchecked_t fscache_n_retrievals_wait;
|
|
+extern atomic_unchecked_t fscache_n_retrievals_nodata;
|
|
+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
|
|
+extern atomic_unchecked_t fscache_n_retrievals_intr;
|
|
+extern atomic_unchecked_t fscache_n_retrievals_nomem;
|
|
+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
|
|
+extern atomic_unchecked_t fscache_n_retrieval_ops;
|
|
+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
|
|
+
|
|
+extern atomic_unchecked_t fscache_n_stores;
|
|
+extern atomic_unchecked_t fscache_n_stores_ok;
|
|
+extern atomic_unchecked_t fscache_n_stores_again;
|
|
+extern atomic_unchecked_t fscache_n_stores_nobufs;
|
|
+extern atomic_unchecked_t fscache_n_stores_oom;
|
|
+extern atomic_unchecked_t fscache_n_store_ops;
|
|
+extern atomic_unchecked_t fscache_n_store_calls;
|
|
+extern atomic_unchecked_t fscache_n_store_pages;
|
|
+extern atomic_unchecked_t fscache_n_store_radix_deletes;
|
|
+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
|
|
+
|
|
+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
|
|
+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
|
|
+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
|
|
+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
|
|
+
|
|
+extern atomic_unchecked_t fscache_n_marks;
|
|
+extern atomic_unchecked_t fscache_n_uncaches;
|
|
+
|
|
+extern atomic_unchecked_t fscache_n_acquires;
|
|
+extern atomic_unchecked_t fscache_n_acquires_null;
|
|
+extern atomic_unchecked_t fscache_n_acquires_no_cache;
|
|
+extern atomic_unchecked_t fscache_n_acquires_ok;
|
|
+extern atomic_unchecked_t fscache_n_acquires_nobufs;
|
|
+extern atomic_unchecked_t fscache_n_acquires_oom;
|
|
+
|
|
+extern atomic_unchecked_t fscache_n_updates;
|
|
+extern atomic_unchecked_t fscache_n_updates_null;
|
|
+extern atomic_unchecked_t fscache_n_updates_run;
|
|
+
|
|
+extern atomic_unchecked_t fscache_n_relinquishes;
|
|
+extern atomic_unchecked_t fscache_n_relinquishes_null;
|
|
+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
|
|
+extern atomic_unchecked_t fscache_n_relinquishes_retire;
|
|
+
|
|
+extern atomic_unchecked_t fscache_n_cookie_index;
|
|
+extern atomic_unchecked_t fscache_n_cookie_data;
|
|
+extern atomic_unchecked_t fscache_n_cookie_special;
|
|
+
|
|
+extern atomic_unchecked_t fscache_n_object_alloc;
|
|
+extern atomic_unchecked_t fscache_n_object_no_alloc;
|
|
+extern atomic_unchecked_t fscache_n_object_lookups;
|
|
+extern atomic_unchecked_t fscache_n_object_lookups_negative;
|
|
+extern atomic_unchecked_t fscache_n_object_lookups_positive;
|
|
+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
|
|
+extern atomic_unchecked_t fscache_n_object_created;
|
|
+extern atomic_unchecked_t fscache_n_object_avail;
|
|
+extern atomic_unchecked_t fscache_n_object_dead;
|
|
+
|
|
+extern atomic_unchecked_t fscache_n_checkaux_none;
|
|
+extern atomic_unchecked_t fscache_n_checkaux_okay;
|
|
+extern atomic_unchecked_t fscache_n_checkaux_update;
|
|
+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
|
|
|
|
extern atomic_t fscache_n_cop_alloc_object;
|
|
extern atomic_t fscache_n_cop_lookup_object;
|
|
@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
|
|
atomic_inc(stat);
|
|
}
|
|
|
|
+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
|
|
+{
|
|
+ atomic_inc_unchecked(stat);
|
|
+}
|
|
+
|
|
static inline void fscache_stat_d(atomic_t *stat)
|
|
{
|
|
atomic_dec(stat);
|
|
@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
|
|
|
|
#define __fscache_stat(stat) (NULL)
|
|
#define fscache_stat(stat) do {} while (0)
|
|
+#define fscache_stat_unchecked(stat) do {} while (0)
|
|
#define fscache_stat_d(stat) do {} while (0)
|
|
#endif
|
|
|
|
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
|
|
index b6b897c..0ffff9c 100644
|
|
--- a/fs/fscache/object.c
|
|
+++ b/fs/fscache/object.c
|
|
@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
|
|
/* update the object metadata on disk */
|
|
case FSCACHE_OBJECT_UPDATING:
|
|
clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
|
|
- fscache_stat(&fscache_n_updates_run);
|
|
+ fscache_stat_unchecked(&fscache_n_updates_run);
|
|
fscache_stat(&fscache_n_cop_update_object);
|
|
object->cache->ops->update_object(object);
|
|
fscache_stat_d(&fscache_n_cop_update_object);
|
|
@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
|
|
spin_lock(&object->lock);
|
|
object->state = FSCACHE_OBJECT_DEAD;
|
|
spin_unlock(&object->lock);
|
|
- fscache_stat(&fscache_n_object_dead);
|
|
+ fscache_stat_unchecked(&fscache_n_object_dead);
|
|
goto terminal_transit;
|
|
|
|
/* handle the parent cache of this object being withdrawn from
|
|
@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
|
|
spin_lock(&object->lock);
|
|
object->state = FSCACHE_OBJECT_DEAD;
|
|
spin_unlock(&object->lock);
|
|
- fscache_stat(&fscache_n_object_dead);
|
|
+ fscache_stat_unchecked(&fscache_n_object_dead);
|
|
goto terminal_transit;
|
|
|
|
/* complain about the object being woken up once it is
|
|
@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
|
|
parent->cookie->def->name, cookie->def->name,
|
|
object->cache->tag->name);
|
|
|
|
- fscache_stat(&fscache_n_object_lookups);
|
|
+ fscache_stat_unchecked(&fscache_n_object_lookups);
|
|
fscache_stat(&fscache_n_cop_lookup_object);
|
|
ret = object->cache->ops->lookup_object(object);
|
|
fscache_stat_d(&fscache_n_cop_lookup_object);
|
|
@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
|
|
if (ret == -ETIMEDOUT) {
|
|
/* probably stuck behind another object, so move this one to
|
|
* the back of the queue */
|
|
- fscache_stat(&fscache_n_object_lookups_timed_out);
|
|
+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
|
|
set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
|
|
}
|
|
|
|
@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
|
|
|
|
spin_lock(&object->lock);
|
|
if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
|
|
- fscache_stat(&fscache_n_object_lookups_negative);
|
|
+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
|
|
|
|
/* transit here to allow write requests to begin stacking up
|
|
* and read requests to begin returning ENODATA */
|
|
@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
|
|
* result, in which case there may be data available */
|
|
spin_lock(&object->lock);
|
|
if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
|
|
- fscache_stat(&fscache_n_object_lookups_positive);
|
|
+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
|
|
|
|
clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
|
|
|
|
@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
|
|
set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
|
|
} else {
|
|
ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
|
|
- fscache_stat(&fscache_n_object_created);
|
|
+ fscache_stat_unchecked(&fscache_n_object_created);
|
|
|
|
object->state = FSCACHE_OBJECT_AVAILABLE;
|
|
spin_unlock(&object->lock);
|
|
@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
|
|
fscache_enqueue_dependents(object);
|
|
|
|
fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
|
|
- fscache_stat(&fscache_n_object_avail);
|
|
+ fscache_stat_unchecked(&fscache_n_object_avail);
|
|
|
|
_leave("");
|
|
}
|
|
@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
|
|
enum fscache_checkaux result;
|
|
|
|
if (!object->cookie->def->check_aux) {
|
|
- fscache_stat(&fscache_n_checkaux_none);
|
|
+ fscache_stat_unchecked(&fscache_n_checkaux_none);
|
|
return FSCACHE_CHECKAUX_OKAY;
|
|
}
|
|
|
|
@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
|
|
switch (result) {
|
|
/* entry okay as is */
|
|
case FSCACHE_CHECKAUX_OKAY:
|
|
- fscache_stat(&fscache_n_checkaux_okay);
|
|
+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
|
|
break;
|
|
|
|
/* entry requires update */
|
|
case FSCACHE_CHECKAUX_NEEDS_UPDATE:
|
|
- fscache_stat(&fscache_n_checkaux_update);
|
|
+ fscache_stat_unchecked(&fscache_n_checkaux_update);
|
|
break;
|
|
|
|
/* entry requires deletion */
|
|
case FSCACHE_CHECKAUX_OBSOLETE:
|
|
- fscache_stat(&fscache_n_checkaux_obsolete);
|
|
+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
|
|
break;
|
|
|
|
default:
|
|
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
|
|
index 30afdfa..2256596 100644
|
|
--- a/fs/fscache/operation.c
|
|
+++ b/fs/fscache/operation.c
|
|
@@ -17,7 +17,7 @@
|
|
#include <linux/slab.h>
|
|
#include "internal.h"
|
|
|
|
-atomic_t fscache_op_debug_id;
|
|
+atomic_unchecked_t fscache_op_debug_id;
|
|
EXPORT_SYMBOL(fscache_op_debug_id);
|
|
|
|
/**
|
|
@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
|
|
ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
|
|
ASSERTCMP(atomic_read(&op->usage), >, 0);
|
|
|
|
- fscache_stat(&fscache_n_op_enqueue);
|
|
+ fscache_stat_unchecked(&fscache_n_op_enqueue);
|
|
switch (op->flags & FSCACHE_OP_TYPE) {
|
|
case FSCACHE_OP_ASYNC:
|
|
_debug("queue async");
|
|
@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
|
|
wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
|
|
if (op->processor)
|
|
fscache_enqueue_operation(op);
|
|
- fscache_stat(&fscache_n_op_run);
|
|
+ fscache_stat_unchecked(&fscache_n_op_run);
|
|
}
|
|
|
|
/*
|
|
@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
|
|
if (object->n_ops > 1) {
|
|
atomic_inc(&op->usage);
|
|
list_add_tail(&op->pend_link, &object->pending_ops);
|
|
- fscache_stat(&fscache_n_op_pend);
|
|
+ fscache_stat_unchecked(&fscache_n_op_pend);
|
|
} else if (!list_empty(&object->pending_ops)) {
|
|
atomic_inc(&op->usage);
|
|
list_add_tail(&op->pend_link, &object->pending_ops);
|
|
- fscache_stat(&fscache_n_op_pend);
|
|
+ fscache_stat_unchecked(&fscache_n_op_pend);
|
|
fscache_start_operations(object);
|
|
} else {
|
|
ASSERTCMP(object->n_in_progress, ==, 0);
|
|
@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
|
|
object->n_exclusive++; /* reads and writes must wait */
|
|
atomic_inc(&op->usage);
|
|
list_add_tail(&op->pend_link, &object->pending_ops);
|
|
- fscache_stat(&fscache_n_op_pend);
|
|
+ fscache_stat_unchecked(&fscache_n_op_pend);
|
|
ret = 0;
|
|
} else {
|
|
/* not allowed to submit ops in any other state */
|
|
@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
|
|
if (object->n_exclusive > 0) {
|
|
atomic_inc(&op->usage);
|
|
list_add_tail(&op->pend_link, &object->pending_ops);
|
|
- fscache_stat(&fscache_n_op_pend);
|
|
+ fscache_stat_unchecked(&fscache_n_op_pend);
|
|
} else if (!list_empty(&object->pending_ops)) {
|
|
atomic_inc(&op->usage);
|
|
list_add_tail(&op->pend_link, &object->pending_ops);
|
|
- fscache_stat(&fscache_n_op_pend);
|
|
+ fscache_stat_unchecked(&fscache_n_op_pend);
|
|
fscache_start_operations(object);
|
|
} else {
|
|
ASSERTCMP(object->n_exclusive, ==, 0);
|
|
@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
|
|
object->n_ops++;
|
|
atomic_inc(&op->usage);
|
|
list_add_tail(&op->pend_link, &object->pending_ops);
|
|
- fscache_stat(&fscache_n_op_pend);
|
|
+ fscache_stat_unchecked(&fscache_n_op_pend);
|
|
ret = 0;
|
|
} else if (object->state == FSCACHE_OBJECT_DYING ||
|
|
object->state == FSCACHE_OBJECT_LC_DYING ||
|
|
object->state == FSCACHE_OBJECT_WITHDRAWING) {
|
|
- fscache_stat(&fscache_n_op_rejected);
|
|
+ fscache_stat_unchecked(&fscache_n_op_rejected);
|
|
ret = -ENOBUFS;
|
|
} else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
|
|
fscache_report_unexpected_submission(object, op, ostate);
|
|
@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
|
|
|
|
ret = -EBUSY;
|
|
if (!list_empty(&op->pend_link)) {
|
|
- fscache_stat(&fscache_n_op_cancelled);
|
|
+ fscache_stat_unchecked(&fscache_n_op_cancelled);
|
|
list_del_init(&op->pend_link);
|
|
object->n_ops--;
|
|
if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
|
|
@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
|
|
if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
|
|
BUG();
|
|
|
|
- fscache_stat(&fscache_n_op_release);
|
|
+ fscache_stat_unchecked(&fscache_n_op_release);
|
|
|
|
if (op->release) {
|
|
op->release(op);
|
|
@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
|
|
* lock, and defer it otherwise */
|
|
if (!spin_trylock(&object->lock)) {
|
|
_debug("defer put");
|
|
- fscache_stat(&fscache_n_op_deferred_release);
|
|
+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
|
|
|
|
cache = object->cache;
|
|
spin_lock(&cache->op_gc_list_lock);
|
|
@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
|
|
|
|
_debug("GC DEFERRED REL OBJ%x OP%x",
|
|
object->debug_id, op->debug_id);
|
|
- fscache_stat(&fscache_n_op_gc);
|
|
+ fscache_stat_unchecked(&fscache_n_op_gc);
|
|
|
|
ASSERTCMP(atomic_read(&op->usage), ==, 0);
|
|
|
|
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
|
|
index 3f7a59b..cf196cc 100644
|
|
--- a/fs/fscache/page.c
|
|
+++ b/fs/fscache/page.c
|
|
@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
|
|
val = radix_tree_lookup(&cookie->stores, page->index);
|
|
if (!val) {
|
|
rcu_read_unlock();
|
|
- fscache_stat(&fscache_n_store_vmscan_not_storing);
|
|
+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
|
|
__fscache_uncache_page(cookie, page);
|
|
return true;
|
|
}
|
|
@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
|
|
spin_unlock(&cookie->stores_lock);
|
|
|
|
if (xpage) {
|
|
- fscache_stat(&fscache_n_store_vmscan_cancelled);
|
|
- fscache_stat(&fscache_n_store_radix_deletes);
|
|
+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
|
|
+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
|
|
ASSERTCMP(xpage, ==, page);
|
|
} else {
|
|
- fscache_stat(&fscache_n_store_vmscan_gone);
|
|
+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
|
|
}
|
|
|
|
wake_up_bit(&cookie->flags, 0);
|
|
@@ -107,7 +107,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
|
|
/* we might want to wait here, but that could deadlock the allocator as
|
|
* the work threads writing to the cache may all end up sleeping
|
|
* on memory allocation */
|
|
- fscache_stat(&fscache_n_store_vmscan_busy);
|
|
+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
|
|
return false;
|
|
}
|
|
EXPORT_SYMBOL(__fscache_maybe_release_page);
|
|
@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
|
|
FSCACHE_COOKIE_STORING_TAG);
|
|
if (!radix_tree_tag_get(&cookie->stores, page->index,
|
|
FSCACHE_COOKIE_PENDING_TAG)) {
|
|
- fscache_stat(&fscache_n_store_radix_deletes);
|
|
+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
|
|
xpage = radix_tree_delete(&cookie->stores, page->index);
|
|
}
|
|
spin_unlock(&cookie->stores_lock);
|
|
@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
|
|
|
|
_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
|
|
|
|
- fscache_stat(&fscache_n_attr_changed_calls);
|
|
+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
|
|
|
|
if (fscache_object_is_active(object)) {
|
|
fscache_stat(&fscache_n_cop_attr_changed);
|
|
@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
|
|
|
|
ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
|
|
|
|
- fscache_stat(&fscache_n_attr_changed);
|
|
+ fscache_stat_unchecked(&fscache_n_attr_changed);
|
|
|
|
op = kzalloc(sizeof(*op), GFP_KERNEL);
|
|
if (!op) {
|
|
- fscache_stat(&fscache_n_attr_changed_nomem);
|
|
+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
|
|
_leave(" = -ENOMEM");
|
|
return -ENOMEM;
|
|
}
|
|
@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
|
|
if (fscache_submit_exclusive_op(object, op) < 0)
|
|
goto nobufs;
|
|
spin_unlock(&cookie->lock);
|
|
- fscache_stat(&fscache_n_attr_changed_ok);
|
|
+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
|
|
fscache_put_operation(op);
|
|
_leave(" = 0");
|
|
return 0;
|
|
@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
|
|
nobufs:
|
|
spin_unlock(&cookie->lock);
|
|
kfree(op);
|
|
- fscache_stat(&fscache_n_attr_changed_nobufs);
|
|
+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
|
|
_leave(" = %d", -ENOBUFS);
|
|
return -ENOBUFS;
|
|
}
|
|
@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
|
|
/* allocate a retrieval operation and attempt to submit it */
|
|
op = kzalloc(sizeof(*op), GFP_NOIO);
|
|
if (!op) {
|
|
- fscache_stat(&fscache_n_retrievals_nomem);
|
|
+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
|
|
return NULL;
|
|
}
|
|
|
|
@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
|
|
return 0;
|
|
}
|
|
|
|
- fscache_stat(&fscache_n_retrievals_wait);
|
|
+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
|
|
|
|
jif = jiffies;
|
|
if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
|
|
fscache_wait_bit_interruptible,
|
|
TASK_INTERRUPTIBLE) != 0) {
|
|
- fscache_stat(&fscache_n_retrievals_intr);
|
|
+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
|
|
_leave(" = -ERESTARTSYS");
|
|
return -ERESTARTSYS;
|
|
}
|
|
@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
|
|
*/
|
|
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
|
|
struct fscache_retrieval *op,
|
|
- atomic_t *stat_op_waits,
|
|
- atomic_t *stat_object_dead)
|
|
+ atomic_unchecked_t *stat_op_waits,
|
|
+ atomic_unchecked_t *stat_object_dead)
|
|
{
|
|
int ret;
|
|
|
|
@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
|
|
goto check_if_dead;
|
|
|
|
_debug(">>> WT");
|
|
- fscache_stat(stat_op_waits);
|
|
+ fscache_stat_unchecked(stat_op_waits);
|
|
if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
|
|
fscache_wait_bit_interruptible,
|
|
TASK_INTERRUPTIBLE) < 0) {
|
|
@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
|
|
|
|
check_if_dead:
|
|
if (unlikely(fscache_object_is_dead(object))) {
|
|
- fscache_stat(stat_object_dead);
|
|
+ fscache_stat_unchecked(stat_object_dead);
|
|
return -ENOBUFS;
|
|
}
|
|
return 0;
|
|
@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
|
|
|
|
_enter("%p,%p,,,", cookie, page);
|
|
|
|
- fscache_stat(&fscache_n_retrievals);
|
|
+ fscache_stat_unchecked(&fscache_n_retrievals);
|
|
|
|
if (hlist_empty(&cookie->backing_objects))
|
|
goto nobufs;
|
|
@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
|
|
goto nobufs_unlock;
|
|
spin_unlock(&cookie->lock);
|
|
|
|
- fscache_stat(&fscache_n_retrieval_ops);
|
|
+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
|
|
|
|
/* pin the netfs read context in case we need to do the actual netfs
|
|
* read because we've encountered a cache read failure */
|
|
@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
|
|
|
|
error:
|
|
if (ret == -ENOMEM)
|
|
- fscache_stat(&fscache_n_retrievals_nomem);
|
|
+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
|
|
else if (ret == -ERESTARTSYS)
|
|
- fscache_stat(&fscache_n_retrievals_intr);
|
|
+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
|
|
else if (ret == -ENODATA)
|
|
- fscache_stat(&fscache_n_retrievals_nodata);
|
|
+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
|
|
else if (ret < 0)
|
|
- fscache_stat(&fscache_n_retrievals_nobufs);
|
|
+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
|
|
else
|
|
- fscache_stat(&fscache_n_retrievals_ok);
|
|
+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
|
|
|
|
fscache_put_retrieval(op);
|
|
_leave(" = %d", ret);
|
|
@@ -429,7 +429,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
|
|
spin_unlock(&cookie->lock);
|
|
kfree(op);
|
|
nobufs:
|
|
- fscache_stat(&fscache_n_retrievals_nobufs);
|
|
+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
|
|
_leave(" = -ENOBUFS");
|
|
return -ENOBUFS;
|
|
}
|
|
@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
|
|
|
|
_enter("%p,,%d,,,", cookie, *nr_pages);
|
|
|
|
- fscache_stat(&fscache_n_retrievals);
|
|
+ fscache_stat_unchecked(&fscache_n_retrievals);
|
|
|
|
if (hlist_empty(&cookie->backing_objects))
|
|
goto nobufs;
|
|
@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
|
|
goto nobufs_unlock;
|
|
spin_unlock(&cookie->lock);
|
|
|
|
- fscache_stat(&fscache_n_retrieval_ops);
|
|
+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
|
|
|
|
/* pin the netfs read context in case we need to do the actual netfs
|
|
* read because we've encountered a cache read failure */
|
|
@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
|
|
|
|
error:
|
|
if (ret == -ENOMEM)
|
|
- fscache_stat(&fscache_n_retrievals_nomem);
|
|
+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
|
|
else if (ret == -ERESTARTSYS)
|
|
- fscache_stat(&fscache_n_retrievals_intr);
|
|
+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
|
|
else if (ret == -ENODATA)
|
|
- fscache_stat(&fscache_n_retrievals_nodata);
|
|
+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
|
|
else if (ret < 0)
|
|
- fscache_stat(&fscache_n_retrievals_nobufs);
|
|
+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
|
|
else
|
|
- fscache_stat(&fscache_n_retrievals_ok);
|
|
+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
|
|
|
|
fscache_put_retrieval(op);
|
|
_leave(" = %d", ret);
|
|
@@ -545,7 +545,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
|
|
spin_unlock(&cookie->lock);
|
|
kfree(op);
|
|
nobufs:
|
|
- fscache_stat(&fscache_n_retrievals_nobufs);
|
|
+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
|
|
_leave(" = -ENOBUFS");
|
|
return -ENOBUFS;
|
|
}
|
|
@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
|
|
|
|
_enter("%p,%p,,,", cookie, page);
|
|
|
|
- fscache_stat(&fscache_n_allocs);
|
|
+ fscache_stat_unchecked(&fscache_n_allocs);
|
|
|
|
if (hlist_empty(&cookie->backing_objects))
|
|
goto nobufs;
|
|
@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
|
|
goto nobufs_unlock;
|
|
spin_unlock(&cookie->lock);
|
|
|
|
- fscache_stat(&fscache_n_alloc_ops);
|
|
+ fscache_stat_unchecked(&fscache_n_alloc_ops);
|
|
|
|
ret = fscache_wait_for_retrieval_activation(
|
|
object, op,
|
|
@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
|
|
|
|
error:
|
|
if (ret == -ERESTARTSYS)
|
|
- fscache_stat(&fscache_n_allocs_intr);
|
|
+ fscache_stat_unchecked(&fscache_n_allocs_intr);
|
|
else if (ret < 0)
|
|
- fscache_stat(&fscache_n_allocs_nobufs);
|
|
+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
|
|
else
|
|
- fscache_stat(&fscache_n_allocs_ok);
|
|
+ fscache_stat_unchecked(&fscache_n_allocs_ok);
|
|
|
|
fscache_put_retrieval(op);
|
|
_leave(" = %d", ret);
|
|
@@ -625,7 +625,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
|
|
spin_unlock(&cookie->lock);
|
|
kfree(op);
|
|
nobufs:
|
|
- fscache_stat(&fscache_n_allocs_nobufs);
|
|
+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
|
|
_leave(" = -ENOBUFS");
|
|
return -ENOBUFS;
|
|
}
|
|
@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
|
|
|
|
spin_lock(&cookie->stores_lock);
|
|
|
|
- fscache_stat(&fscache_n_store_calls);
|
|
+ fscache_stat_unchecked(&fscache_n_store_calls);
|
|
|
|
/* find a page to store */
|
|
page = NULL;
|
|
@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
|
|
page = results[0];
|
|
_debug("gang %d [%lx]", n, page->index);
|
|
if (page->index > op->store_limit) {
|
|
- fscache_stat(&fscache_n_store_pages_over_limit);
|
|
+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
|
|
goto superseded;
|
|
}
|
|
|
|
@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
|
|
spin_unlock(&cookie->stores_lock);
|
|
spin_unlock(&object->lock);
|
|
|
|
- fscache_stat(&fscache_n_store_pages);
|
|
+ fscache_stat_unchecked(&fscache_n_store_pages);
|
|
fscache_stat(&fscache_n_cop_write_page);
|
|
ret = object->cache->ops->write_page(op, page);
|
|
fscache_stat_d(&fscache_n_cop_write_page);
|
|
@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
|
|
ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
|
|
ASSERT(PageFsCache(page));
|
|
|
|
- fscache_stat(&fscache_n_stores);
|
|
+ fscache_stat_unchecked(&fscache_n_stores);
|
|
|
|
op = kzalloc(sizeof(*op), GFP_NOIO);
|
|
if (!op)
|
|
@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
|
|
spin_unlock(&cookie->stores_lock);
|
|
spin_unlock(&object->lock);
|
|
|
|
- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
|
|
+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
|
|
op->store_limit = object->store_limit;
|
|
|
|
if (fscache_submit_op(object, &op->op) < 0)
|
|
@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
|
|
|
|
spin_unlock(&cookie->lock);
|
|
radix_tree_preload_end();
|
|
- fscache_stat(&fscache_n_store_ops);
|
|
- fscache_stat(&fscache_n_stores_ok);
|
|
+ fscache_stat_unchecked(&fscache_n_store_ops);
|
|
+ fscache_stat_unchecked(&fscache_n_stores_ok);
|
|
|
|
/* the work queue now carries its own ref on the object */
|
|
fscache_put_operation(&op->op);
|
|
@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
|
|
return 0;
|
|
|
|
already_queued:
|
|
- fscache_stat(&fscache_n_stores_again);
|
|
+ fscache_stat_unchecked(&fscache_n_stores_again);
|
|
already_pending:
|
|
spin_unlock(&cookie->stores_lock);
|
|
spin_unlock(&object->lock);
|
|
spin_unlock(&cookie->lock);
|
|
radix_tree_preload_end();
|
|
kfree(op);
|
|
- fscache_stat(&fscache_n_stores_ok);
|
|
+ fscache_stat_unchecked(&fscache_n_stores_ok);
|
|
_leave(" = 0");
|
|
return 0;
|
|
|
|
@@ -851,14 +851,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
|
|
spin_unlock(&cookie->lock);
|
|
radix_tree_preload_end();
|
|
kfree(op);
|
|
- fscache_stat(&fscache_n_stores_nobufs);
|
|
+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
|
|
_leave(" = -ENOBUFS");
|
|
return -ENOBUFS;
|
|
|
|
nomem_free:
|
|
kfree(op);
|
|
nomem:
|
|
- fscache_stat(&fscache_n_stores_oom);
|
|
+ fscache_stat_unchecked(&fscache_n_stores_oom);
|
|
_leave(" = -ENOMEM");
|
|
return -ENOMEM;
|
|
}
|
|
@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
|
|
ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
|
|
ASSERTCMP(page, !=, NULL);
|
|
|
|
- fscache_stat(&fscache_n_uncaches);
|
|
+ fscache_stat_unchecked(&fscache_n_uncaches);
|
|
|
|
/* cache withdrawal may beat us to it */
|
|
if (!PageFsCache(page))
|
|
@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
|
|
unsigned long loop;
|
|
|
|
#ifdef CONFIG_FSCACHE_STATS
|
|
- atomic_add(pagevec->nr, &fscache_n_marks);
|
|
+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
|
|
#endif
|
|
|
|
for (loop = 0; loop < pagevec->nr; loop++) {
|
|
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
|
|
index 73c0bd7..adb2f79 100644
|
|
--- a/fs/fscache/stats.c
|
|
+++ b/fs/fscache/stats.c
|
|
@@ -18,95 +18,95 @@
|
|
/*
|
|
* operation counters
|
|
*/
|
|
-atomic_t fscache_n_op_pend;
|
|
-atomic_t fscache_n_op_run;
|
|
-atomic_t fscache_n_op_enqueue;
|
|
-atomic_t fscache_n_op_requeue;
|
|
-atomic_t fscache_n_op_deferred_release;
|
|
-atomic_t fscache_n_op_release;
|
|
-atomic_t fscache_n_op_gc;
|
|
-atomic_t fscache_n_op_cancelled;
|
|
-atomic_t fscache_n_op_rejected;
|
|
-
|
|
-atomic_t fscache_n_attr_changed;
|
|
-atomic_t fscache_n_attr_changed_ok;
|
|
-atomic_t fscache_n_attr_changed_nobufs;
|
|
-atomic_t fscache_n_attr_changed_nomem;
|
|
-atomic_t fscache_n_attr_changed_calls;
|
|
-
|
|
-atomic_t fscache_n_allocs;
|
|
-atomic_t fscache_n_allocs_ok;
|
|
-atomic_t fscache_n_allocs_wait;
|
|
-atomic_t fscache_n_allocs_nobufs;
|
|
-atomic_t fscache_n_allocs_intr;
|
|
-atomic_t fscache_n_allocs_object_dead;
|
|
-atomic_t fscache_n_alloc_ops;
|
|
-atomic_t fscache_n_alloc_op_waits;
|
|
-
|
|
-atomic_t fscache_n_retrievals;
|
|
-atomic_t fscache_n_retrievals_ok;
|
|
-atomic_t fscache_n_retrievals_wait;
|
|
-atomic_t fscache_n_retrievals_nodata;
|
|
-atomic_t fscache_n_retrievals_nobufs;
|
|
-atomic_t fscache_n_retrievals_intr;
|
|
-atomic_t fscache_n_retrievals_nomem;
|
|
-atomic_t fscache_n_retrievals_object_dead;
|
|
-atomic_t fscache_n_retrieval_ops;
|
|
-atomic_t fscache_n_retrieval_op_waits;
|
|
-
|
|
-atomic_t fscache_n_stores;
|
|
-atomic_t fscache_n_stores_ok;
|
|
-atomic_t fscache_n_stores_again;
|
|
-atomic_t fscache_n_stores_nobufs;
|
|
-atomic_t fscache_n_stores_oom;
|
|
-atomic_t fscache_n_store_ops;
|
|
-atomic_t fscache_n_store_calls;
|
|
-atomic_t fscache_n_store_pages;
|
|
-atomic_t fscache_n_store_radix_deletes;
|
|
-atomic_t fscache_n_store_pages_over_limit;
|
|
-
|
|
-atomic_t fscache_n_store_vmscan_not_storing;
|
|
-atomic_t fscache_n_store_vmscan_gone;
|
|
-atomic_t fscache_n_store_vmscan_busy;
|
|
-atomic_t fscache_n_store_vmscan_cancelled;
|
|
-
|
|
-atomic_t fscache_n_marks;
|
|
-atomic_t fscache_n_uncaches;
|
|
-
|
|
-atomic_t fscache_n_acquires;
|
|
-atomic_t fscache_n_acquires_null;
|
|
-atomic_t fscache_n_acquires_no_cache;
|
|
-atomic_t fscache_n_acquires_ok;
|
|
-atomic_t fscache_n_acquires_nobufs;
|
|
-atomic_t fscache_n_acquires_oom;
|
|
-
|
|
-atomic_t fscache_n_updates;
|
|
-atomic_t fscache_n_updates_null;
|
|
-atomic_t fscache_n_updates_run;
|
|
-
|
|
-atomic_t fscache_n_relinquishes;
|
|
-atomic_t fscache_n_relinquishes_null;
|
|
-atomic_t fscache_n_relinquishes_waitcrt;
|
|
-atomic_t fscache_n_relinquishes_retire;
|
|
-
|
|
-atomic_t fscache_n_cookie_index;
|
|
-atomic_t fscache_n_cookie_data;
|
|
-atomic_t fscache_n_cookie_special;
|
|
-
|
|
-atomic_t fscache_n_object_alloc;
|
|
-atomic_t fscache_n_object_no_alloc;
|
|
-atomic_t fscache_n_object_lookups;
|
|
-atomic_t fscache_n_object_lookups_negative;
|
|
-atomic_t fscache_n_object_lookups_positive;
|
|
-atomic_t fscache_n_object_lookups_timed_out;
|
|
-atomic_t fscache_n_object_created;
|
|
-atomic_t fscache_n_object_avail;
|
|
-atomic_t fscache_n_object_dead;
|
|
-
|
|
-atomic_t fscache_n_checkaux_none;
|
|
-atomic_t fscache_n_checkaux_okay;
|
|
-atomic_t fscache_n_checkaux_update;
|
|
-atomic_t fscache_n_checkaux_obsolete;
|
|
+atomic_unchecked_t fscache_n_op_pend;
|
|
+atomic_unchecked_t fscache_n_op_run;
|
|
+atomic_unchecked_t fscache_n_op_enqueue;
|
|
+atomic_unchecked_t fscache_n_op_requeue;
|
|
+atomic_unchecked_t fscache_n_op_deferred_release;
|
|
+atomic_unchecked_t fscache_n_op_release;
|
|
+atomic_unchecked_t fscache_n_op_gc;
|
|
+atomic_unchecked_t fscache_n_op_cancelled;
|
|
+atomic_unchecked_t fscache_n_op_rejected;
|
|
+
|
|
+atomic_unchecked_t fscache_n_attr_changed;
|
|
+atomic_unchecked_t fscache_n_attr_changed_ok;
|
|
+atomic_unchecked_t fscache_n_attr_changed_nobufs;
|
|
+atomic_unchecked_t fscache_n_attr_changed_nomem;
|
|
+atomic_unchecked_t fscache_n_attr_changed_calls;
|
|
+
|
|
+atomic_unchecked_t fscache_n_allocs;
|
|
+atomic_unchecked_t fscache_n_allocs_ok;
|
|
+atomic_unchecked_t fscache_n_allocs_wait;
|
|
+atomic_unchecked_t fscache_n_allocs_nobufs;
|
|
+atomic_unchecked_t fscache_n_allocs_intr;
|
|
+atomic_unchecked_t fscache_n_allocs_object_dead;
|
|
+atomic_unchecked_t fscache_n_alloc_ops;
|
|
+atomic_unchecked_t fscache_n_alloc_op_waits;
|
|
+
|
|
+atomic_unchecked_t fscache_n_retrievals;
|
|
+atomic_unchecked_t fscache_n_retrievals_ok;
|
|
+atomic_unchecked_t fscache_n_retrievals_wait;
|
|
+atomic_unchecked_t fscache_n_retrievals_nodata;
|
|
+atomic_unchecked_t fscache_n_retrievals_nobufs;
|
|
+atomic_unchecked_t fscache_n_retrievals_intr;
|
|
+atomic_unchecked_t fscache_n_retrievals_nomem;
|
|
+atomic_unchecked_t fscache_n_retrievals_object_dead;
|
|
+atomic_unchecked_t fscache_n_retrieval_ops;
|
|
+atomic_unchecked_t fscache_n_retrieval_op_waits;
|
|
+
|
|
+atomic_unchecked_t fscache_n_stores;
|
|
+atomic_unchecked_t fscache_n_stores_ok;
|
|
+atomic_unchecked_t fscache_n_stores_again;
|
|
+atomic_unchecked_t fscache_n_stores_nobufs;
|
|
+atomic_unchecked_t fscache_n_stores_oom;
|
|
+atomic_unchecked_t fscache_n_store_ops;
|
|
+atomic_unchecked_t fscache_n_store_calls;
|
|
+atomic_unchecked_t fscache_n_store_pages;
|
|
+atomic_unchecked_t fscache_n_store_radix_deletes;
|
|
+atomic_unchecked_t fscache_n_store_pages_over_limit;
|
|
+
|
|
+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
|
|
+atomic_unchecked_t fscache_n_store_vmscan_gone;
|
|
+atomic_unchecked_t fscache_n_store_vmscan_busy;
|
|
+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
|
|
+
|
|
+atomic_unchecked_t fscache_n_marks;
|
|
+atomic_unchecked_t fscache_n_uncaches;
|
|
+
|
|
+atomic_unchecked_t fscache_n_acquires;
|
|
+atomic_unchecked_t fscache_n_acquires_null;
|
|
+atomic_unchecked_t fscache_n_acquires_no_cache;
|
|
+atomic_unchecked_t fscache_n_acquires_ok;
|
|
+atomic_unchecked_t fscache_n_acquires_nobufs;
|
|
+atomic_unchecked_t fscache_n_acquires_oom;
|
|
+
|
|
+atomic_unchecked_t fscache_n_updates;
|
|
+atomic_unchecked_t fscache_n_updates_null;
|
|
+atomic_unchecked_t fscache_n_updates_run;
|
|
+
|
|
+atomic_unchecked_t fscache_n_relinquishes;
|
|
+atomic_unchecked_t fscache_n_relinquishes_null;
|
|
+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
|
|
+atomic_unchecked_t fscache_n_relinquishes_retire;
|
|
+
|
|
+atomic_unchecked_t fscache_n_cookie_index;
|
|
+atomic_unchecked_t fscache_n_cookie_data;
|
|
+atomic_unchecked_t fscache_n_cookie_special;
|
|
+
|
|
+atomic_unchecked_t fscache_n_object_alloc;
|
|
+atomic_unchecked_t fscache_n_object_no_alloc;
|
|
+atomic_unchecked_t fscache_n_object_lookups;
|
|
+atomic_unchecked_t fscache_n_object_lookups_negative;
|
|
+atomic_unchecked_t fscache_n_object_lookups_positive;
|
|
+atomic_unchecked_t fscache_n_object_lookups_timed_out;
|
|
+atomic_unchecked_t fscache_n_object_created;
|
|
+atomic_unchecked_t fscache_n_object_avail;
|
|
+atomic_unchecked_t fscache_n_object_dead;
|
|
+
|
|
+atomic_unchecked_t fscache_n_checkaux_none;
|
|
+atomic_unchecked_t fscache_n_checkaux_okay;
|
|
+atomic_unchecked_t fscache_n_checkaux_update;
|
|
+atomic_unchecked_t fscache_n_checkaux_obsolete;
|
|
|
|
atomic_t fscache_n_cop_alloc_object;
|
|
atomic_t fscache_n_cop_lookup_object;
|
|
@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
|
|
seq_puts(m, "FS-Cache statistics\n");
|
|
|
|
seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
|
|
- atomic_read(&fscache_n_cookie_index),
|
|
- atomic_read(&fscache_n_cookie_data),
|
|
- atomic_read(&fscache_n_cookie_special));
|
|
+ atomic_read_unchecked(&fscache_n_cookie_index),
|
|
+ atomic_read_unchecked(&fscache_n_cookie_data),
|
|
+ atomic_read_unchecked(&fscache_n_cookie_special));
|
|
|
|
seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
|
|
- atomic_read(&fscache_n_object_alloc),
|
|
- atomic_read(&fscache_n_object_no_alloc),
|
|
- atomic_read(&fscache_n_object_avail),
|
|
- atomic_read(&fscache_n_object_dead));
|
|
+ atomic_read_unchecked(&fscache_n_object_alloc),
|
|
+ atomic_read_unchecked(&fscache_n_object_no_alloc),
|
|
+ atomic_read_unchecked(&fscache_n_object_avail),
|
|
+ atomic_read_unchecked(&fscache_n_object_dead));
|
|
seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
|
|
- atomic_read(&fscache_n_checkaux_none),
|
|
- atomic_read(&fscache_n_checkaux_okay),
|
|
- atomic_read(&fscache_n_checkaux_update),
|
|
- atomic_read(&fscache_n_checkaux_obsolete));
|
|
+ atomic_read_unchecked(&fscache_n_checkaux_none),
|
|
+ atomic_read_unchecked(&fscache_n_checkaux_okay),
|
|
+ atomic_read_unchecked(&fscache_n_checkaux_update),
|
|
+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
|
|
|
|
seq_printf(m, "Pages : mrk=%u unc=%u\n",
|
|
- atomic_read(&fscache_n_marks),
|
|
- atomic_read(&fscache_n_uncaches));
|
|
+ atomic_read_unchecked(&fscache_n_marks),
|
|
+ atomic_read_unchecked(&fscache_n_uncaches));
|
|
|
|
seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
|
|
" oom=%u\n",
|
|
- atomic_read(&fscache_n_acquires),
|
|
- atomic_read(&fscache_n_acquires_null),
|
|
- atomic_read(&fscache_n_acquires_no_cache),
|
|
- atomic_read(&fscache_n_acquires_ok),
|
|
- atomic_read(&fscache_n_acquires_nobufs),
|
|
- atomic_read(&fscache_n_acquires_oom));
|
|
+ atomic_read_unchecked(&fscache_n_acquires),
|
|
+ atomic_read_unchecked(&fscache_n_acquires_null),
|
|
+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
|
|
+ atomic_read_unchecked(&fscache_n_acquires_ok),
|
|
+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
|
|
+ atomic_read_unchecked(&fscache_n_acquires_oom));
|
|
|
|
seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
|
|
- atomic_read(&fscache_n_object_lookups),
|
|
- atomic_read(&fscache_n_object_lookups_negative),
|
|
- atomic_read(&fscache_n_object_lookups_positive),
|
|
- atomic_read(&fscache_n_object_created),
|
|
- atomic_read(&fscache_n_object_lookups_timed_out));
|
|
+ atomic_read_unchecked(&fscache_n_object_lookups),
|
|
+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
|
|
+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
|
|
+ atomic_read_unchecked(&fscache_n_object_created),
|
|
+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
|
|
|
|
seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
|
|
- atomic_read(&fscache_n_updates),
|
|
- atomic_read(&fscache_n_updates_null),
|
|
- atomic_read(&fscache_n_updates_run));
|
|
+ atomic_read_unchecked(&fscache_n_updates),
|
|
+ atomic_read_unchecked(&fscache_n_updates_null),
|
|
+ atomic_read_unchecked(&fscache_n_updates_run));
|
|
|
|
seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
|
|
- atomic_read(&fscache_n_relinquishes),
|
|
- atomic_read(&fscache_n_relinquishes_null),
|
|
- atomic_read(&fscache_n_relinquishes_waitcrt),
|
|
- atomic_read(&fscache_n_relinquishes_retire));
|
|
+ atomic_read_unchecked(&fscache_n_relinquishes),
|
|
+ atomic_read_unchecked(&fscache_n_relinquishes_null),
|
|
+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
|
|
+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
|
|
|
|
seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
|
|
- atomic_read(&fscache_n_attr_changed),
|
|
- atomic_read(&fscache_n_attr_changed_ok),
|
|
- atomic_read(&fscache_n_attr_changed_nobufs),
|
|
- atomic_read(&fscache_n_attr_changed_nomem),
|
|
- atomic_read(&fscache_n_attr_changed_calls));
|
|
+ atomic_read_unchecked(&fscache_n_attr_changed),
|
|
+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
|
|
+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
|
|
+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
|
|
+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
|
|
|
|
seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
|
|
- atomic_read(&fscache_n_allocs),
|
|
- atomic_read(&fscache_n_allocs_ok),
|
|
- atomic_read(&fscache_n_allocs_wait),
|
|
- atomic_read(&fscache_n_allocs_nobufs),
|
|
- atomic_read(&fscache_n_allocs_intr));
|
|
+ atomic_read_unchecked(&fscache_n_allocs),
|
|
+ atomic_read_unchecked(&fscache_n_allocs_ok),
|
|
+ atomic_read_unchecked(&fscache_n_allocs_wait),
|
|
+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
|
|
+ atomic_read_unchecked(&fscache_n_allocs_intr));
|
|
seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
|
|
- atomic_read(&fscache_n_alloc_ops),
|
|
- atomic_read(&fscache_n_alloc_op_waits),
|
|
- atomic_read(&fscache_n_allocs_object_dead));
|
|
+ atomic_read_unchecked(&fscache_n_alloc_ops),
|
|
+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
|
|
+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
|
|
|
|
seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
|
|
" int=%u oom=%u\n",
|
|
- atomic_read(&fscache_n_retrievals),
|
|
- atomic_read(&fscache_n_retrievals_ok),
|
|
- atomic_read(&fscache_n_retrievals_wait),
|
|
- atomic_read(&fscache_n_retrievals_nodata),
|
|
- atomic_read(&fscache_n_retrievals_nobufs),
|
|
- atomic_read(&fscache_n_retrievals_intr),
|
|
- atomic_read(&fscache_n_retrievals_nomem));
|
|
+ atomic_read_unchecked(&fscache_n_retrievals),
|
|
+ atomic_read_unchecked(&fscache_n_retrievals_ok),
|
|
+ atomic_read_unchecked(&fscache_n_retrievals_wait),
|
|
+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
|
|
+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
|
|
+ atomic_read_unchecked(&fscache_n_retrievals_intr),
|
|
+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
|
|
seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
|
|
- atomic_read(&fscache_n_retrieval_ops),
|
|
- atomic_read(&fscache_n_retrieval_op_waits),
|
|
- atomic_read(&fscache_n_retrievals_object_dead));
|
|
+ atomic_read_unchecked(&fscache_n_retrieval_ops),
|
|
+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
|
|
+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
|
|
|
|
seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
|
|
- atomic_read(&fscache_n_stores),
|
|
- atomic_read(&fscache_n_stores_ok),
|
|
- atomic_read(&fscache_n_stores_again),
|
|
- atomic_read(&fscache_n_stores_nobufs),
|
|
- atomic_read(&fscache_n_stores_oom));
|
|
+ atomic_read_unchecked(&fscache_n_stores),
|
|
+ atomic_read_unchecked(&fscache_n_stores_ok),
|
|
+ atomic_read_unchecked(&fscache_n_stores_again),
|
|
+ atomic_read_unchecked(&fscache_n_stores_nobufs),
|
|
+ atomic_read_unchecked(&fscache_n_stores_oom));
|
|
seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
|
|
- atomic_read(&fscache_n_store_ops),
|
|
- atomic_read(&fscache_n_store_calls),
|
|
- atomic_read(&fscache_n_store_pages),
|
|
- atomic_read(&fscache_n_store_radix_deletes),
|
|
- atomic_read(&fscache_n_store_pages_over_limit));
|
|
+ atomic_read_unchecked(&fscache_n_store_ops),
|
|
+ atomic_read_unchecked(&fscache_n_store_calls),
|
|
+ atomic_read_unchecked(&fscache_n_store_pages),
|
|
+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
|
|
+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
|
|
|
|
seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
|
|
- atomic_read(&fscache_n_store_vmscan_not_storing),
|
|
- atomic_read(&fscache_n_store_vmscan_gone),
|
|
- atomic_read(&fscache_n_store_vmscan_busy),
|
|
- atomic_read(&fscache_n_store_vmscan_cancelled));
|
|
+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
|
|
+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
|
|
+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
|
|
+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
|
|
|
|
seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
|
|
- atomic_read(&fscache_n_op_pend),
|
|
- atomic_read(&fscache_n_op_run),
|
|
- atomic_read(&fscache_n_op_enqueue),
|
|
- atomic_read(&fscache_n_op_cancelled),
|
|
- atomic_read(&fscache_n_op_rejected));
|
|
+ atomic_read_unchecked(&fscache_n_op_pend),
|
|
+ atomic_read_unchecked(&fscache_n_op_run),
|
|
+ atomic_read_unchecked(&fscache_n_op_enqueue),
|
|
+ atomic_read_unchecked(&fscache_n_op_cancelled),
|
|
+ atomic_read_unchecked(&fscache_n_op_rejected));
|
|
seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
|
|
- atomic_read(&fscache_n_op_deferred_release),
|
|
- atomic_read(&fscache_n_op_release),
|
|
- atomic_read(&fscache_n_op_gc));
|
|
+ atomic_read_unchecked(&fscache_n_op_deferred_release),
|
|
+ atomic_read_unchecked(&fscache_n_op_release),
|
|
+ atomic_read_unchecked(&fscache_n_op_gc));
|
|
|
|
seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
|
|
atomic_read(&fscache_n_cop_alloc_object),
|
|
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
|
|
index 3426521..3b75162 100644
|
|
--- a/fs/fuse/cuse.c
|
|
+++ b/fs/fuse/cuse.c
|
|
@@ -587,10 +587,12 @@ static int __init cuse_init(void)
|
|
INIT_LIST_HEAD(&cuse_conntbl[i]);
|
|
|
|
/* inherit and extend fuse_dev_operations */
|
|
- cuse_channel_fops = fuse_dev_operations;
|
|
- cuse_channel_fops.owner = THIS_MODULE;
|
|
- cuse_channel_fops.open = cuse_channel_open;
|
|
- cuse_channel_fops.release = cuse_channel_release;
|
|
+ pax_open_kernel();
|
|
+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
|
|
+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
|
|
+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
|
|
+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
|
|
+ pax_close_kernel();
|
|
|
|
cuse_class = class_create(THIS_MODULE, "cuse");
|
|
if (IS_ERR(cuse_class))
|
|
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
|
|
index 69bf97c..d0683ce 100644
|
|
--- a/fs/fuse/dev.c
|
|
+++ b/fs/fuse/dev.c
|
|
@@ -1246,7 +1246,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
|
|
ret = 0;
|
|
pipe_lock(pipe);
|
|
|
|
- if (!pipe->readers) {
|
|
+ if (!atomic_read(&pipe->readers)) {
|
|
send_sig(SIGPIPE, current, 0);
|
|
if (!ret)
|
|
ret = -EPIPE;
|
|
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
|
|
index f31c136..f92d6d1 100644
|
|
--- a/fs/fuse/dir.c
|
|
+++ b/fs/fuse/dir.c
|
|
@@ -1190,7 +1190,7 @@ static char *read_link(struct dentry *dentry)
|
|
return link;
|
|
}
|
|
|
|
-static void free_link(char *link)
|
|
+static void free_link(const char *link)
|
|
{
|
|
if (!IS_ERR(link))
|
|
free_page((unsigned long) link);
|
|
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
|
|
index a9ba244..d9df391 100644
|
|
--- a/fs/gfs2/inode.c
|
|
+++ b/fs/gfs2/inode.c
|
|
@@ -1496,7 +1496,7 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
|
|
|
|
static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
|
|
{
|
|
- char *s = nd_get_link(nd);
|
|
+ const char *s = nd_get_link(nd);
|
|
if (!IS_ERR(s))
|
|
kfree(s);
|
|
}
|
|
diff --git a/fs/inode.c b/fs/inode.c
|
|
index 8de457e..d00f92e 100644
|
|
--- a/fs/inode.c
|
|
+++ b/fs/inode.c
|
|
@@ -860,8 +860,8 @@ unsigned int get_next_ino(void)
|
|
|
|
#ifdef CONFIG_SMP
|
|
if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
|
|
- static atomic_t shared_last_ino;
|
|
- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
|
|
+ static atomic_unchecked_t shared_last_ino;
|
|
+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
|
|
|
|
res = next - LAST_INO_BATCH;
|
|
}
|
|
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
|
|
index 4a6cf28..d3a29d3 100644
|
|
--- a/fs/jffs2/erase.c
|
|
+++ b/fs/jffs2/erase.c
|
|
@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
|
|
struct jffs2_unknown_node marker = {
|
|
.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
|
|
.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
|
|
- .totlen = cpu_to_je32(c->cleanmarker_size)
|
|
+ .totlen = cpu_to_je32(c->cleanmarker_size),
|
|
+ .hdr_crc = cpu_to_je32(0)
|
|
};
|
|
|
|
jffs2_prealloc_raw_node_refs(c, jeb, 1);
|
|
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
|
|
index 6bec5c0..ccc10fe 100644
|
|
--- a/fs/jffs2/wbuf.c
|
|
+++ b/fs/jffs2/wbuf.c
|
|
@@ -1022,7 +1022,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
|
|
{
|
|
.magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
|
|
.nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
|
|
- .totlen = constant_cpu_to_je32(8)
|
|
+ .totlen = constant_cpu_to_je32(8),
|
|
+ .hdr_crc = constant_cpu_to_je32(0)
|
|
};
|
|
|
|
/*
|
|
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
|
|
index 4a82950..bcaa0cb 100644
|
|
--- a/fs/jfs/super.c
|
|
+++ b/fs/jfs/super.c
|
|
@@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
|
|
|
|
jfs_inode_cachep =
|
|
kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
|
|
- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
|
|
+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
|
|
init_once);
|
|
if (jfs_inode_cachep == NULL)
|
|
return -ENOMEM;
|
|
diff --git a/fs/libfs.c b/fs/libfs.c
|
|
index 18d08f5..83cee1c 100644
|
|
--- a/fs/libfs.c
|
|
+++ b/fs/libfs.c
|
|
@@ -174,7 +174,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
|
|
|
|
spin_unlock(&next->d_lock);
|
|
spin_unlock(&dentry->d_lock);
|
|
- if (filldir(dirent, next->d_name.name,
|
|
+ name = next->d_name.name;
|
|
+ if (name == next->d_iname) {
|
|
+ memcpy(d_name, name, next->d_name.len);
|
|
+ name = d_name;
|
|
+ }
|
|
+ if (filldir(dirent, name,
|
|
next->d_name.len, filp->f_pos,
|
|
next->d_inode->i_ino,
|
|
dt_type(next->d_inode)) < 0)
|
|
diff --git a/fs/libfs.c.rej b/fs/libfs.c.rej
|
|
new file mode 100644
|
|
index 0000000..8470aa1
|
|
--- /dev/null
|
|
+++ b/fs/libfs.c.rej
|
|
@@ -0,0 +1,12 @@
|
|
+--- fs/libfs.c 2012-05-21 11:33:35.695929736 +0200
|
|
++++ fs/libfs.c 2012-05-21 12:10:11.168048978 +0200
|
|
+@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, v
|
|
+
|
|
+ for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
|
|
+ struct dentry *next;
|
|
++ char d_name[sizeof(next->d_iname)];
|
|
++ const unsigned char *name;
|
|
++
|
|
+ next = list_entry(p, struct dentry, d_u.d_child);
|
|
+ spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
|
|
+ if (!simple_positive(next)) {
|
|
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
|
|
index 8392cb8..80d6193 100644
|
|
--- a/fs/lockd/clntproc.c
|
|
+++ b/fs/lockd/clntproc.c
|
|
@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
|
|
/*
|
|
* Cookie counter for NLM requests
|
|
*/
|
|
-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
|
|
+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
|
|
|
|
void nlmclnt_next_cookie(struct nlm_cookie *c)
|
|
{
|
|
- u32 cookie = atomic_inc_return(&nlm_cookie);
|
|
+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
|
|
|
|
memcpy(c->data, &cookie, 4);
|
|
c->len=4;
|
|
diff --git a/fs/locks.c b/fs/locks.c
|
|
index d4f1d89..0114708 100644
|
|
--- a/fs/locks.c
|
|
+++ b/fs/locks.c
|
|
@@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
|
|
return;
|
|
|
|
if (filp->f_op && filp->f_op->flock) {
|
|
- struct file_lock fl = {
|
|
+ struct file_lock flock = {
|
|
.fl_pid = current->tgid,
|
|
.fl_file = filp,
|
|
.fl_flags = FL_FLOCK,
|
|
.fl_type = F_UNLCK,
|
|
.fl_end = OFFSET_MAX,
|
|
};
|
|
- filp->f_op->flock(filp, F_SETLKW, &fl);
|
|
- if (fl.fl_ops && fl.fl_ops->fl_release_private)
|
|
- fl.fl_ops->fl_release_private(&fl);
|
|
+ filp->f_op->flock(filp, F_SETLKW, &flock);
|
|
+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
|
|
+ flock.fl_ops->fl_release_private(&flock);
|
|
}
|
|
|
|
lock_flocks();
|
|
diff --git a/fs/namei.c b/fs/namei.c
|
|
index d7a1b84..bbb86df 100644
|
|
--- a/fs/namei.c
|
|
+++ b/fs/namei.c
|
|
@@ -675,7 +675,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
|
|
*p = dentry->d_inode->i_op->follow_link(dentry, nd);
|
|
error = PTR_ERR(*p);
|
|
if (!IS_ERR(*p)) {
|
|
- char *s = nd_get_link(nd);
|
|
+ const char *s = nd_get_link(nd);
|
|
error = 0;
|
|
if (s)
|
|
error = __vfs_follow_link(nd, s);
|
|
@@ -3382,6 +3382,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
|
|
|
|
int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
|
|
{
|
|
+ char tmpbuf[64];
|
|
+ const char *newlink;
|
|
int len;
|
|
|
|
len = PTR_ERR(link);
|
|
@@ -3391,7 +3393,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
|
|
len = strlen(link);
|
|
if (len > (unsigned) buflen)
|
|
len = buflen;
|
|
- if (copy_to_user(buffer, link, len))
|
|
+
|
|
+ if (len < sizeof(tmpbuf)) {
|
|
+ memcpy(tmpbuf, link, len);
|
|
+ newlink = tmpbuf;
|
|
+ } else
|
|
+ newlink = link;
|
|
+
|
|
+ if (copy_to_user(buffer, newlink, len))
|
|
len = -EFAULT;
|
|
out:
|
|
return len;
|
|
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
|
|
index 6064250..4f789ae 100644
|
|
--- a/fs/nfs/inode.c
|
|
+++ b/fs/nfs/inode.c
|
|
@@ -1007,16 +1007,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
|
|
return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
|
|
}
|
|
|
|
-static atomic_long_t nfs_attr_generation_counter;
|
|
+static atomic_long_unchecked_t nfs_attr_generation_counter;
|
|
|
|
static unsigned long nfs_read_attr_generation_counter(void)
|
|
{
|
|
- return atomic_long_read(&nfs_attr_generation_counter);
|
|
+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
|
|
}
|
|
|
|
unsigned long nfs_inc_attr_generation_counter(void)
|
|
{
|
|
- return atomic_long_inc_return(&nfs_attr_generation_counter);
|
|
+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
|
|
}
|
|
|
|
void nfs_fattr_init(struct nfs_fattr *fattr)
|
|
diff --git a/fs/nfs/inode.c.rej b/fs/nfs/inode.c.rej
|
|
new file mode 100644
|
|
index 0000000..3928e5c
|
|
--- /dev/null
|
|
+++ b/fs/nfs/inode.c.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- fs/nfs/inode.c 2012-05-21 11:33:35.907929747 +0200
|
|
++++ fs/nfs/inode.c 2012-05-21 12:10:11.184048978 +0200
|
|
+@@ -152,7 +152,7 @@ static void nfs_zap_caches_locked(struct
|
|
+ nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
|
|
+ nfsi->attrtimeo_timestamp = jiffies;
|
|
+
|
|
+- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
|
|
++ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
|
|
+ if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
|
|
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
|
|
+ else
|
|
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
|
|
index 36620e6..2d528ee 100644
|
|
--- a/fs/nfsd/vfs.c
|
|
+++ b/fs/nfsd/vfs.c
|
|
@@ -971,7 +971,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
|
|
} else {
|
|
oldfs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
|
|
+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
|
|
set_fs(oldfs);
|
|
}
|
|
|
|
@@ -1075,7 +1075,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
|
|
|
|
/* Write the data. */
|
|
oldfs = get_fs(); set_fs(KERNEL_DS);
|
|
- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
|
|
+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
|
|
set_fs(oldfs);
|
|
if (host_err < 0)
|
|
goto out_nfserr;
|
|
@@ -1617,7 +1617,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
|
|
*/
|
|
|
|
oldfs = get_fs(); set_fs(KERNEL_DS);
|
|
- host_err = inode->i_op->readlink(path.dentry, buf, *lenp);
|
|
+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
|
|
set_fs(oldfs);
|
|
|
|
if (host_err < 0)
|
|
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
|
|
index 97d91f0..2ab20a4 100644
|
|
--- a/fs/notify/fanotify/fanotify_user.c
|
|
+++ b/fs/notify/fanotify/fanotify_user.c
|
|
@@ -279,7 +279,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
|
|
goto out_close_fd;
|
|
|
|
ret = -EFAULT;
|
|
- if (copy_to_user(buf, &fanotify_event_metadata,
|
|
+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
|
|
+ copy_to_user(buf, &fanotify_event_metadata,
|
|
fanotify_event_metadata.event_len))
|
|
goto out_kill_access_response;
|
|
|
|
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
|
|
index c887b13..0fdf472 100644
|
|
--- a/fs/notify/notification.c
|
|
+++ b/fs/notify/notification.c
|
|
@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
|
|
* get set to 0 so it will never get 'freed'
|
|
*/
|
|
static struct fsnotify_event *q_overflow_event;
|
|
-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
|
|
+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
|
|
|
|
/**
|
|
* fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
|
|
@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
|
|
*/
|
|
u32 fsnotify_get_cookie(void)
|
|
{
|
|
- return atomic_inc_return(&fsnotify_sync_cookie);
|
|
+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
|
|
}
|
|
EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
|
|
|
|
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
|
|
index 99e3610..02c1068 100644
|
|
--- a/fs/ntfs/dir.c
|
|
+++ b/fs/ntfs/dir.c
|
|
@@ -1329,7 +1329,7 @@ static int ntfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
|
|
ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
|
|
~(s64)(ndir->itype.index.block_size - 1)));
|
|
/* Bounds checks. */
|
|
- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
|
|
+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
|
|
ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
|
|
"inode 0x%lx or driver bug.", vdir->i_ino);
|
|
goto err_out;
|
|
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
|
|
index 8639169..76697aa 100644
|
|
--- a/fs/ntfs/file.c
|
|
+++ b/fs/ntfs/file.c
|
|
@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
|
|
#endif /* NTFS_RW */
|
|
};
|
|
|
|
-const struct file_operations ntfs_empty_file_ops = {};
|
|
+const struct file_operations ntfs_empty_file_ops __read_only;
|
|
|
|
-const struct inode_operations ntfs_empty_inode_ops = {};
|
|
+const struct inode_operations ntfs_empty_inode_ops __read_only;
|
|
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
|
|
index 210c352..a174f83 100644
|
|
--- a/fs/ocfs2/localalloc.c
|
|
+++ b/fs/ocfs2/localalloc.c
|
|
@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
|
|
goto bail;
|
|
}
|
|
|
|
- atomic_inc(&osb->alloc_stats.moves);
|
|
+ atomic_inc_unchecked(&osb->alloc_stats.moves);
|
|
|
|
bail:
|
|
if (handle)
|
|
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
|
|
index d355e6e..578d905 100644
|
|
--- a/fs/ocfs2/ocfs2.h
|
|
+++ b/fs/ocfs2/ocfs2.h
|
|
@@ -235,11 +235,11 @@ enum ocfs2_vol_state
|
|
|
|
struct ocfs2_alloc_stats
|
|
{
|
|
- atomic_t moves;
|
|
- atomic_t local_data;
|
|
- atomic_t bitmap_data;
|
|
- atomic_t bg_allocs;
|
|
- atomic_t bg_extends;
|
|
+ atomic_unchecked_t moves;
|
|
+ atomic_unchecked_t local_data;
|
|
+ atomic_unchecked_t bitmap_data;
|
|
+ atomic_unchecked_t bg_allocs;
|
|
+ atomic_unchecked_t bg_extends;
|
|
};
|
|
|
|
enum ocfs2_local_alloc_state
|
|
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
|
|
index b7e74b5..19c6536 100644
|
|
--- a/fs/ocfs2/suballoc.c
|
|
+++ b/fs/ocfs2/suballoc.c
|
|
@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
|
|
mlog_errno(status);
|
|
goto bail;
|
|
}
|
|
- atomic_inc(&osb->alloc_stats.bg_extends);
|
|
+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
|
|
|
|
/* You should never ask for this much metadata */
|
|
BUG_ON(bits_wanted >
|
|
@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
|
|
mlog_errno(status);
|
|
goto bail;
|
|
}
|
|
- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
|
|
+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
|
|
|
|
*suballoc_loc = res.sr_bg_blkno;
|
|
*suballoc_bit_start = res.sr_bit_offset;
|
|
@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
|
|
trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
|
|
res->sr_bits);
|
|
|
|
- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
|
|
+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
|
|
|
|
BUG_ON(res->sr_bits != 1);
|
|
|
|
@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
|
|
mlog_errno(status);
|
|
goto bail;
|
|
}
|
|
- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
|
|
+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
|
|
|
|
BUG_ON(res.sr_bits != 1);
|
|
|
|
@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
|
|
cluster_start,
|
|
num_clusters);
|
|
if (!status)
|
|
- atomic_inc(&osb->alloc_stats.local_data);
|
|
+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
|
|
} else {
|
|
if (min_clusters > (osb->bitmap_cpg - 1)) {
|
|
/* The only paths asking for contiguousness
|
|
@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
|
|
ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
|
|
res.sr_bg_blkno,
|
|
res.sr_bit_offset);
|
|
- atomic_inc(&osb->alloc_stats.bitmap_data);
|
|
+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
|
|
*num_clusters = res.sr_bits;
|
|
}
|
|
}
|
|
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
|
|
index 68f4541..89cfe6a 100644
|
|
--- a/fs/ocfs2/super.c
|
|
+++ b/fs/ocfs2/super.c
|
|
@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
|
|
"%10s => GlobalAllocs: %d LocalAllocs: %d "
|
|
"SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
|
|
"Stats",
|
|
- atomic_read(&osb->alloc_stats.bitmap_data),
|
|
- atomic_read(&osb->alloc_stats.local_data),
|
|
- atomic_read(&osb->alloc_stats.bg_allocs),
|
|
- atomic_read(&osb->alloc_stats.moves),
|
|
- atomic_read(&osb->alloc_stats.bg_extends));
|
|
+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
|
|
+ atomic_read_unchecked(&osb->alloc_stats.local_data),
|
|
+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
|
|
+ atomic_read_unchecked(&osb->alloc_stats.moves),
|
|
+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
|
|
|
|
out += snprintf(buf + out, len - out,
|
|
"%10s => State: %u Descriptor: %llu Size: %u bits "
|
|
@@ -2116,11 +2116,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
|
|
spin_lock_init(&osb->osb_xattr_lock);
|
|
ocfs2_init_steal_slots(osb);
|
|
|
|
- atomic_set(&osb->alloc_stats.moves, 0);
|
|
- atomic_set(&osb->alloc_stats.local_data, 0);
|
|
- atomic_set(&osb->alloc_stats.bitmap_data, 0);
|
|
- atomic_set(&osb->alloc_stats.bg_allocs, 0);
|
|
- atomic_set(&osb->alloc_stats.bg_extends, 0);
|
|
+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
|
|
+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
|
|
+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
|
|
+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
|
|
+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
|
|
|
|
/* Copy the blockcheck stats from the superblock probe */
|
|
osb->osb_ecc_stats = *stats;
|
|
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
|
|
index 5d22872..523db20 100644
|
|
--- a/fs/ocfs2/symlink.c
|
|
+++ b/fs/ocfs2/symlink.c
|
|
@@ -142,7 +142,7 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
|
|
|
|
static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
|
|
{
|
|
- char *link = nd_get_link(nd);
|
|
+ const char *link = nd_get_link(nd);
|
|
if (!IS_ERR(link))
|
|
kfree(link);
|
|
}
|
|
diff --git a/fs/pipe.c b/fs/pipe.c
|
|
index abfb935..792ecd5 100644
|
|
--- a/fs/pipe.c
|
|
+++ b/fs/pipe.c
|
|
@@ -443,9 +443,9 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
|
|
}
|
|
if (bufs) /* More to do? */
|
|
continue;
|
|
- if (!pipe->writers)
|
|
+ if (!atomic_read(&pipe->writers))
|
|
break;
|
|
- if (!pipe->waiting_writers) {
|
|
+ if (!atomic_read(&pipe->waiting_writers)) {
|
|
/* syscall merging: Usually we must not sleep
|
|
* if O_NONBLOCK is set, or if we got some data.
|
|
* But if a writer sleeps in kernel space, then
|
|
@@ -509,7 +509,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
|
|
mutex_lock(&inode->i_mutex);
|
|
pipe = inode->i_pipe;
|
|
|
|
- if (!pipe->readers) {
|
|
+ if (!atomic_read(&pipe->readers)) {
|
|
send_sig(SIGPIPE, current, 0);
|
|
ret = -EPIPE;
|
|
goto out;
|
|
@@ -559,7 +559,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
|
|
for (;;) {
|
|
int bufs;
|
|
|
|
- if (!pipe->readers) {
|
|
+ if (!atomic_read(&pipe->readers)) {
|
|
send_sig(SIGPIPE, current, 0);
|
|
if (!ret)
|
|
ret = -EPIPE;
|
|
@@ -653,9 +653,9 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
|
|
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
|
|
do_wakeup = 0;
|
|
}
|
|
- pipe->waiting_writers++;
|
|
+ atomic_inc(&pipe->waiting_writers);
|
|
pipe_wait(pipe);
|
|
- pipe->waiting_writers--;
|
|
+ atomic_dec(&pipe->waiting_writers);
|
|
}
|
|
out:
|
|
mutex_unlock(&inode->i_mutex);
|
|
@@ -722,7 +722,7 @@ pipe_poll(struct file *filp, poll_table *wait)
|
|
mask = 0;
|
|
if (filp->f_mode & FMODE_READ) {
|
|
mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
|
|
- if (!pipe->writers && filp->f_version != pipe->w_counter)
|
|
+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
|
|
mask |= POLLHUP;
|
|
}
|
|
|
|
@@ -732,7 +732,7 @@ pipe_poll(struct file *filp, poll_table *wait)
|
|
* Most Unices do not set POLLERR for FIFOs but on Linux they
|
|
* behave exactly like pipes for poll().
|
|
*/
|
|
- if (!pipe->readers)
|
|
+ if (!atomic_read(&pipe->readers))
|
|
mask |= POLLERR;
|
|
}
|
|
|
|
@@ -746,10 +746,10 @@ pipe_release(struct inode *inode, int decr, int decw)
|
|
|
|
mutex_lock(&inode->i_mutex);
|
|
pipe = inode->i_pipe;
|
|
- pipe->readers -= decr;
|
|
- pipe->writers -= decw;
|
|
+ atomic_sub(decr, &pipe->readers);
|
|
+ atomic_sub(decw, &pipe->writers);
|
|
|
|
- if (!pipe->readers && !pipe->writers) {
|
|
+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
|
|
free_pipe_info(inode);
|
|
} else {
|
|
wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
|
|
@@ -839,7 +839,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
|
|
|
|
if (inode->i_pipe) {
|
|
ret = 0;
|
|
- inode->i_pipe->readers++;
|
|
+ atomic_inc(&inode->i_pipe->readers);
|
|
}
|
|
|
|
mutex_unlock(&inode->i_mutex);
|
|
@@ -856,7 +856,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
|
|
|
|
if (inode->i_pipe) {
|
|
ret = 0;
|
|
- inode->i_pipe->writers++;
|
|
+ atomic_inc(&inode->i_pipe->writers);
|
|
}
|
|
|
|
mutex_unlock(&inode->i_mutex);
|
|
@@ -877,9 +877,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
|
|
if (inode->i_pipe) {
|
|
ret = 0;
|
|
if (filp->f_mode & FMODE_READ)
|
|
- inode->i_pipe->readers++;
|
|
+ atomic_inc(&inode->i_pipe->readers);
|
|
if (filp->f_mode & FMODE_WRITE)
|
|
- inode->i_pipe->writers++;
|
|
+ atomic_inc(&inode->i_pipe->writers);
|
|
}
|
|
|
|
mutex_unlock(&inode->i_mutex);
|
|
@@ -1001,7 +1001,8 @@ static struct inode * get_pipe_inode(void)
|
|
goto fail_iput;
|
|
inode->i_pipe = pipe;
|
|
|
|
- pipe->readers = pipe->writers = 1;
|
|
+ atomic_set(&pipe->readers, 1);
|
|
+ atomic_set(&pipe->writers, 1);
|
|
inode->i_fop = &rdwr_pipefifo_fops;
|
|
|
|
/*
|
|
diff --git a/fs/proc/array.c b/fs/proc/array.c
|
|
index e7bb0fe..092b371 100644
|
|
--- a/fs/proc/array.c
|
|
+++ b/fs/proc/array.c
|
|
@@ -338,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
|
|
seq_putc(m, '\n');
|
|
}
|
|
|
|
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
|
|
+static inline void task_pax(struct seq_file *m, struct task_struct *p)
|
|
+{
|
|
+ if (p->mm)
|
|
+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
|
|
+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
|
|
+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
|
|
+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
|
|
+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
|
|
+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
|
|
+ else
|
|
+ seq_printf(m, "PaX:\t-----\n");
|
|
+}
|
|
+#endif
|
|
+
|
|
int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
|
|
struct pid *pid, struct task_struct *task)
|
|
{
|
|
@@ -355,6 +370,11 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
|
|
task_cpus_allowed(m, task);
|
|
cpuset_task_status_allowed(m, task);
|
|
task_context_switch_counts(m, task);
|
|
+
|
|
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
|
|
+ task_pax(m, task);
|
|
+#endif
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
diff --git a/fs/proc/base.c b/fs/proc/base.c
|
|
index 5ba3ab3..bfed511c 100644
|
|
--- a/fs/proc/base.c
|
|
+++ b/fs/proc/base.c
|
|
@@ -2767,7 +2767,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
|
|
static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
|
|
void *cookie)
|
|
{
|
|
- char *s = nd_get_link(nd);
|
|
+ const char *s = nd_get_link(nd);
|
|
if (!IS_ERR(s))
|
|
__putname(s);
|
|
}
|
|
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
|
|
index 86c67ee..4f6ca98 100644
|
|
--- a/fs/proc/kcore.c
|
|
+++ b/fs/proc/kcore.c
|
|
@@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
|
|
* the addresses in the elf_phdr on our list.
|
|
*/
|
|
start = kc_offset_to_vaddr(*fpos - elf_buflen);
|
|
- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
|
|
+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
|
|
+ if (tsz > buflen)
|
|
tsz = buflen;
|
|
-
|
|
+
|
|
while (buflen) {
|
|
struct kcore_list *m;
|
|
|
|
@@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
|
|
kfree(elf_buf);
|
|
} else {
|
|
if (kern_addr_valid(start)) {
|
|
- unsigned long n;
|
|
-
|
|
- n = copy_to_user(buffer, (char *)start, tsz);
|
|
- /*
|
|
- * We cannot distinguish between fault on source
|
|
- * and fault on destination. When this happens
|
|
- * we clear too and hope it will trigger the
|
|
- * EFAULT again.
|
|
- */
|
|
- if (n) {
|
|
- if (clear_user(buffer + tsz - n,
|
|
- n))
|
|
+ char *elf_buf;
|
|
+ mm_segment_t oldfs;
|
|
+
|
|
+ elf_buf = kmalloc(tsz, GFP_KERNEL);
|
|
+ if (!elf_buf)
|
|
+ return -ENOMEM;
|
|
+ oldfs = get_fs();
|
|
+ set_fs(KERNEL_DS);
|
|
+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
|
|
+ set_fs(oldfs);
|
|
+ if (copy_to_user(buffer, elf_buf, tsz)) {
|
|
+ kfree(elf_buf);
|
|
return -EFAULT;
|
|
+ }
|
|
}
|
|
+ set_fs(oldfs);
|
|
+ kfree(elf_buf);
|
|
} else {
|
|
if (clear_user(buffer, tsz))
|
|
return -EFAULT;
|
|
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
|
|
index 80e4645..53e5fcf 100644
|
|
--- a/fs/proc/meminfo.c
|
|
+++ b/fs/proc/meminfo.c
|
|
@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
|
|
vmi.used >> 10,
|
|
vmi.largest_chunk >> 10
|
|
#ifdef CONFIG_MEMORY_FAILURE
|
|
- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
|
|
+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
|
|
#endif
|
|
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
|
,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
|
|
diff --git a/fs/proc/nommu.c.rej b/fs/proc/nommu.c.rej
|
|
new file mode 100644
|
|
index 0000000..186828f
|
|
--- /dev/null
|
|
+++ b/fs/proc/nommu.c.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- fs/proc/nommu.c 2011-07-22 04:17:23.000000000 +0200
|
|
++++ fs/proc/nommu.c 2012-05-21 12:10:11.232048981 +0200
|
|
+@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
|
|
+ if (len < 1)
|
|
+ len = 1;
|
|
+ seq_printf(m, "%*c", len, ' ');
|
|
+- seq_path(m, &file->f_path, "");
|
|
++ seq_path(m, &file->f_path, "\n\\");
|
|
+ }
|
|
+
|
|
+ seq_putc(m, '\n');
|
|
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
|
|
index 324eb57..d44f967 100644
|
|
--- a/fs/proc/task_mmu.c
|
|
+++ b/fs/proc/task_mmu.c
|
|
@@ -52,8 +52,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
|
|
"VmExe:\t%8lu kB\n"
|
|
"VmLib:\t%8lu kB\n"
|
|
"VmPTE:\t%8lu kB\n"
|
|
- "VmSwap:\t%8lu kB\n",
|
|
- hiwater_vm << (PAGE_SHIFT-10),
|
|
+ "VmSwap:\t%8lu kB\n"
|
|
+
|
|
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
|
|
+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
|
|
+#endif
|
|
+
|
|
+ ,hiwater_vm << (PAGE_SHIFT-10),
|
|
(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
|
|
mm->locked_vm << (PAGE_SHIFT-10),
|
|
mm->pinned_vm << (PAGE_SHIFT-10),
|
|
@@ -62,7 +67,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
|
|
data << (PAGE_SHIFT-10),
|
|
mm->stack_vm << (PAGE_SHIFT-10), text, lib,
|
|
(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
|
|
- swap << (PAGE_SHIFT-10));
|
|
+ swap << (PAGE_SHIFT-10)
|
|
+
|
|
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
|
|
+ , mm->context.user_cs_base, mm->context.user_cs_limit
|
|
+#endif
|
|
+
|
|
+ );
|
|
}
|
|
|
|
unsigned long task_vsize(struct mm_struct *mm)
|
|
@@ -323,8 +334,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
|
|
* Thread stack in /proc/PID/task/TID/maps or
|
|
* the main process stack.
|
|
*/
|
|
- if (!is_pid || (vma->vm_start <= mm->start_stack &&
|
|
- vma->vm_end >= mm->start_stack)) {
|
|
+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
|
|
+ (vma->vm_start <= mm->start_stack &&
|
|
+ vma->vm_end >= mm->start_stack)) {
|
|
name = "[stack]";
|
|
} else {
|
|
/* Thread stack in /proc/PID/maps */
|
|
@@ -1238,7 +1250,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
|
|
|
|
if (file) {
|
|
seq_printf(m, " file=");
|
|
- seq_path(m, &file->f_path, "\n\t= ");
|
|
+ seq_path(m, &file->f_path, "\n\t\\= ");
|
|
} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
|
|
seq_printf(m, " heap");
|
|
} else {
|
|
diff --git a/fs/proc/task_mmu.c.rej b/fs/proc/task_mmu.c.rej
|
|
new file mode 100644
|
|
index 0000000..0a6886b
|
|
--- /dev/null
|
|
+++ b/fs/proc/task_mmu.c.rej
|
|
@@ -0,0 +1,39 @@
|
|
+--- fs/proc/task_mmu.c 2012-06-11 19:12:31.104367105 +0200
|
|
++++ fs/proc/task_mmu.c 2012-06-11 19:17:09.188382567 +0200
|
|
+@@ -242,20 +253,23 @@ show_map_vma(struct seq_file *m, struct
|
|
+ pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
|
|
+ }
|
|
+
|
|
+- /* We don't show the stack guard page in /proc/maps */
|
|
+ start = vma->vm_start;
|
|
+- if (stack_guard_page_start(vma, start))
|
|
+- start += PAGE_SIZE;
|
|
+ end = vma->vm_end;
|
|
+- if (stack_guard_page_end(vma, end))
|
|
+- end -= PAGE_SIZE;
|
|
+
|
|
+ seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
|
|
+ start,
|
|
+ end,
|
|
++
|
|
++#if 0
|
|
++ flags & VM_MAYREAD ? flags & VM_READ ? 'R' : '+' : flags & VM_READ ? 'r' : '-',
|
|
++ flags & VM_MAYWRITE ? flags & VM_WRITE ? 'W' : '+' : flags & VM_WRITE ? 'w' : '-',
|
|
++ flags & VM_MAYEXEC ? flags & VM_EXEC ? 'X' : '+' : flags & VM_EXEC ? 'x' : '-',
|
|
++#else
|
|
+ flags & VM_READ ? 'r' : '-',
|
|
+ flags & VM_WRITE ? 'w' : '-',
|
|
+ flags & VM_EXEC ? 'x' : '-',
|
|
++#endif
|
|
++
|
|
+ flags & VM_MAYSHARE ? 's' : 'p',
|
|
+ pgoff,
|
|
+ MAJOR(dev), MINOR(dev), ino, &len);
|
|
+@@ -266,7 +280,7 @@ show_map_vma(struct seq_file *m, struct
|
|
+ */
|
|
+ if (file) {
|
|
+ pad_len_spaces(m, len);
|
|
+- seq_path(m, &file->f_path, "\n");
|
|
++ seq_path(m, &file->f_path, "\n\\");
|
|
+ goto done;
|
|
+ }
|
|
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
|
|
index 1d2b545..473f497 100644
|
|
--- a/fs/proc/task_nommu.c
|
|
+++ b/fs/proc/task_nommu.c
|
|
@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
|
|
else
|
|
bytes += kobjsize(mm);
|
|
|
|
- if (current->fs && current->fs->users > 1)
|
|
+ if (current->fs && atomic_read(¤t->fs->users) > 1)
|
|
sbytes += kobjsize(current->fs);
|
|
else
|
|
bytes += kobjsize(current->fs);
|
|
diff --git a/fs/proc/task_nommu.c.rej b/fs/proc/task_nommu.c.rej
|
|
new file mode 100644
|
|
index 0000000..4757380
|
|
--- /dev/null
|
|
+++ b/fs/proc/task_nommu.c.rej
|
|
@@ -0,0 +1,10 @@
|
|
+--- fs/proc/task_nommu.c 2012-05-21 11:33:36.239929765 +0200
|
|
++++ fs/proc/task_nommu.c 2012-05-21 12:10:11.236048981 +0200
|
|
+@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_fil
|
|
+
|
|
+ if (file) {
|
|
+ pad_len_spaces(m, len);
|
|
+- seq_path(m, &file->f_path, "");
|
|
++ seq_path(m, &file->f_path, "\n\\");
|
|
+ } else if (mm) {
|
|
+ pid_t tid = vm_is_stack(priv->task, vma, is_pid);
|
|
diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
|
|
index d67908b..d13f6a6 100644
|
|
--- a/fs/quota/netlink.c
|
|
+++ b/fs/quota/netlink.c
|
|
@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
|
|
void quota_send_warning(short type, unsigned int id, dev_t dev,
|
|
const char warntype)
|
|
{
|
|
- static atomic_t seq;
|
|
+ static atomic_unchecked_t seq;
|
|
struct sk_buff *skb;
|
|
void *msg_head;
|
|
int ret;
|
|
@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
|
|
"VFS: Not enough memory to send quota warning.\n");
|
|
return;
|
|
}
|
|
- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
|
|
+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
|
|
"a_genl_family, 0, QUOTA_NL_C_WARNING);
|
|
if (!msg_head) {
|
|
printk(KERN_ERR
|
|
diff --git a/fs/readdir.c b/fs/readdir.c
|
|
index cc0a822..2d5cc3a 100644
|
|
--- a/fs/readdir.c
|
|
+++ b/fs/readdir.c
|
|
@@ -299,7 +299,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
|
|
error = buf.error;
|
|
lastdirent = buf.previous;
|
|
if (lastdirent) {
|
|
- typeof(lastdirent->d_off) d_off = file->f_pos;
|
|
+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
|
|
if (__put_user(d_off, &lastdirent->d_off))
|
|
error = -EFAULT;
|
|
else
|
|
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
|
|
index 2b7882b..1c5ef48 100644
|
|
--- a/fs/reiserfs/do_balan.c
|
|
+++ b/fs/reiserfs/do_balan.c
|
|
@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
|
|
return;
|
|
}
|
|
|
|
- atomic_inc(&(fs_generation(tb->tb_sb)));
|
|
+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
|
|
do_balance_starts(tb);
|
|
|
|
/* balance leaf returns 0 except if combining L R and S into
|
|
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
|
|
index 2c1ade6..8c59d8d 100644
|
|
--- a/fs/reiserfs/procfs.c
|
|
+++ b/fs/reiserfs/procfs.c
|
|
@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
|
|
"SMALL_TAILS " : "NO_TAILS ",
|
|
replay_only(sb) ? "REPLAY_ONLY " : "",
|
|
convert_reiserfs(sb) ? "CONV " : "",
|
|
- atomic_read(&r->s_generation_counter),
|
|
+ atomic_read_unchecked(&r->s_generation_counter),
|
|
SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
|
|
SF(s_do_balance), SF(s_unneeded_left_neighbor),
|
|
SF(s_good_search_by_key_reada), SF(s_bmaps),
|
|
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
|
|
index a59d271..e12d1cf 100644
|
|
--- a/fs/reiserfs/reiserfs.h
|
|
+++ b/fs/reiserfs/reiserfs.h
|
|
@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
|
|
/* Comment? -Hans */
|
|
wait_queue_head_t s_wait;
|
|
/* To be obsoleted soon by per buffer seals.. -Hans */
|
|
- atomic_t s_generation_counter; // increased by one every time the
|
|
+ atomic_unchecked_t s_generation_counter; // increased by one every time the
|
|
// tree gets re-balanced
|
|
unsigned long s_properties; /* File system properties. Currently holds
|
|
on-disk FS format */
|
|
@@ -1973,7 +1973,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
|
|
#define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
|
|
|
|
#define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
|
|
-#define get_generation(s) atomic_read (&fs_generation(s))
|
|
+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
|
|
#define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
|
|
#define __fs_changed(gen,s) (gen != get_generation (s))
|
|
#define fs_changed(gen,s) \
|
|
diff --git a/fs/seq_file.c b/fs/seq_file.c
|
|
index 74717b4..fa54f8f 100644
|
|
--- a/fs/seq_file.c
|
|
+++ b/fs/seq_file.c
|
|
@@ -567,7 +567,7 @@ static void single_stop(struct seq_file *p, void *v)
|
|
int single_open(struct file *file, int (*show)(struct seq_file *, void *),
|
|
void *data)
|
|
{
|
|
- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
|
|
+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
|
|
int res = -ENOMEM;
|
|
|
|
if (op) {
|
|
diff --git a/fs/splice.c b/fs/splice.c
|
|
index 67c5210..51790fa 100644
|
|
--- a/fs/splice.c
|
|
+++ b/fs/splice.c
|
|
@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
|
|
pipe_lock(pipe);
|
|
|
|
for (;;) {
|
|
- if (!pipe->readers) {
|
|
+ if (!atomic_read(&pipe->readers)) {
|
|
send_sig(SIGPIPE, current, 0);
|
|
if (!ret)
|
|
ret = -EPIPE;
|
|
@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
|
|
do_wakeup = 0;
|
|
}
|
|
|
|
- pipe->waiting_writers++;
|
|
+ atomic_inc(&pipe->waiting_writers);
|
|
pipe_wait(pipe);
|
|
- pipe->waiting_writers--;
|
|
+ atomic_dec(&pipe->waiting_writers);
|
|
}
|
|
|
|
pipe_unlock(pipe);
|
|
@@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
|
|
old_fs = get_fs();
|
|
set_fs(get_ds());
|
|
/* The cast to a user pointer is valid due to the set_fs() */
|
|
- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
|
|
+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
|
|
set_fs(old_fs);
|
|
|
|
return res;
|
|
@@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
|
|
old_fs = get_fs();
|
|
set_fs(get_ds());
|
|
/* The cast to a user pointer is valid due to the set_fs() */
|
|
- res = vfs_write(file, (const char __user *)buf, count, &pos);
|
|
+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
|
|
set_fs(old_fs);
|
|
|
|
return res;
|
|
@@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
|
|
goto err;
|
|
|
|
this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
|
|
- vec[i].iov_base = (void __user *) page_address(page);
|
|
+ vec[i].iov_base = (void __force_user *) page_address(page);
|
|
vec[i].iov_len = this_len;
|
|
spd.pages[i] = page;
|
|
spd.nr_pages++;
|
|
@@ -851,10 +851,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
|
|
int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
|
|
{
|
|
while (!pipe->nrbufs) {
|
|
- if (!pipe->writers)
|
|
+ if (!atomic_read(&pipe->writers))
|
|
return 0;
|
|
|
|
- if (!pipe->waiting_writers && sd->num_spliced)
|
|
+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
|
|
return 0;
|
|
|
|
if (sd->flags & SPLICE_F_NONBLOCK)
|
|
@@ -1191,7 +1191,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
|
|
* out of the pipe right after the splice_to_pipe(). So set
|
|
* PIPE_READERS appropriately.
|
|
*/
|
|
- pipe->readers = 1;
|
|
+ atomic_set(&pipe->readers, 1);
|
|
|
|
current->splice_pipe = pipe;
|
|
}
|
|
@@ -1744,9 +1744,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
|
|
ret = -ERESTARTSYS;
|
|
break;
|
|
}
|
|
- if (!pipe->writers)
|
|
+ if (!atomic_read(&pipe->writers))
|
|
break;
|
|
- if (!pipe->waiting_writers) {
|
|
+ if (!atomic_read(&pipe->waiting_writers)) {
|
|
if (flags & SPLICE_F_NONBLOCK) {
|
|
ret = -EAGAIN;
|
|
break;
|
|
@@ -1778,7 +1778,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
|
|
pipe_lock(pipe);
|
|
|
|
while (pipe->nrbufs >= pipe->buffers) {
|
|
- if (!pipe->readers) {
|
|
+ if (!atomic_read(&pipe->readers)) {
|
|
send_sig(SIGPIPE, current, 0);
|
|
ret = -EPIPE;
|
|
break;
|
|
@@ -1791,9 +1791,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
|
|
ret = -ERESTARTSYS;
|
|
break;
|
|
}
|
|
- pipe->waiting_writers++;
|
|
+ atomic_inc(&pipe->waiting_writers);
|
|
pipe_wait(pipe);
|
|
- pipe->waiting_writers--;
|
|
+ atomic_dec(&pipe->waiting_writers);
|
|
}
|
|
|
|
pipe_unlock(pipe);
|
|
@@ -1829,14 +1829,14 @@ static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
|
|
pipe_double_lock(ipipe, opipe);
|
|
|
|
do {
|
|
- if (!opipe->readers) {
|
|
+ if (!atomic_read(&opipe->readers)) {
|
|
send_sig(SIGPIPE, current, 0);
|
|
if (!ret)
|
|
ret = -EPIPE;
|
|
break;
|
|
}
|
|
|
|
- if (!ipipe->nrbufs && !ipipe->writers)
|
|
+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
|
|
break;
|
|
|
|
/*
|
|
@@ -1933,7 +1933,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
|
|
pipe_double_lock(ipipe, opipe);
|
|
|
|
do {
|
|
- if (!opipe->readers) {
|
|
+ if (!atomic_read(&opipe->readers)) {
|
|
send_sig(SIGPIPE, current, 0);
|
|
if (!ret)
|
|
ret = -EPIPE;
|
|
@@ -1978,7 +1978,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
|
|
* return EAGAIN if we have the potential of some data in the
|
|
* future, otherwise just return 0
|
|
*/
|
|
- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
|
|
+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
|
|
ret = -EAGAIN;
|
|
|
|
pipe_unlock(ipipe);
|
|
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
|
|
index 00012e3..8392349 100644
|
|
--- a/fs/sysfs/file.c
|
|
+++ b/fs/sysfs/file.c
|
|
@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
|
|
|
|
struct sysfs_open_dirent {
|
|
atomic_t refcnt;
|
|
- atomic_t event;
|
|
+ atomic_unchecked_t event;
|
|
wait_queue_head_t poll;
|
|
struct list_head buffers; /* goes through sysfs_buffer.list */
|
|
};
|
|
@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
|
|
if (!sysfs_get_active(attr_sd))
|
|
return -ENODEV;
|
|
|
|
- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
|
|
+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
|
|
count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
|
|
|
|
sysfs_put_active(attr_sd);
|
|
@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
|
|
return -ENOMEM;
|
|
|
|
atomic_set(&new_od->refcnt, 0);
|
|
- atomic_set(&new_od->event, 1);
|
|
+ atomic_set_unchecked(&new_od->event, 1);
|
|
init_waitqueue_head(&new_od->poll);
|
|
INIT_LIST_HEAD(&new_od->buffers);
|
|
goto retry;
|
|
@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
|
|
|
|
sysfs_put_active(attr_sd);
|
|
|
|
- if (buffer->event != atomic_read(&od->event))
|
|
+ if (buffer->event != atomic_read_unchecked(&od->event))
|
|
goto trigger;
|
|
|
|
return DEFAULT_POLLMASK;
|
|
@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
|
|
|
|
od = sd->s_attr.open;
|
|
if (od) {
|
|
- atomic_inc(&od->event);
|
|
+ atomic_inc_unchecked(&od->event);
|
|
wake_up_interruptible(&od->poll);
|
|
}
|
|
|
|
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
|
|
index a7ac78f..02158e1 100644
|
|
--- a/fs/sysfs/symlink.c
|
|
+++ b/fs/sysfs/symlink.c
|
|
@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
|
|
|
|
static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
|
|
{
|
|
- char *page = nd_get_link(nd);
|
|
+ const char *page = nd_get_link(nd);
|
|
if (!IS_ERR(page))
|
|
free_page((unsigned long)page);
|
|
}
|
|
diff --git a/fs/udf/misc.c b/fs/udf/misc.c
|
|
index c175b4d..8f36a16 100644
|
|
--- a/fs/udf/misc.c
|
|
+++ b/fs/udf/misc.c
|
|
@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
|
|
|
|
u8 udf_tag_checksum(const struct tag *t)
|
|
{
|
|
- u8 *data = (u8 *)t;
|
|
+ const u8 *data = (const u8 *)t;
|
|
u8 checksum = 0;
|
|
int i;
|
|
for (i = 0; i < sizeof(struct tag); ++i)
|
|
diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
|
|
index 69d06b0..c0996e5 100644
|
|
--- a/fs/xattr_acl.c
|
|
+++ b/fs/xattr_acl.c
|
|
@@ -17,8 +17,8 @@
|
|
struct posix_acl *
|
|
posix_acl_from_xattr(const void *value, size_t size)
|
|
{
|
|
- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
|
|
- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
|
|
+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
|
|
+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
|
|
int count;
|
|
struct posix_acl *acl;
|
|
struct posix_acl_entry *acl_e;
|
|
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
|
|
index 85e7e32..5344e52 100644
|
|
--- a/fs/xfs/xfs_bmap.c
|
|
+++ b/fs/xfs/xfs_bmap.c
|
|
@@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
|
|
int nmap,
|
|
int ret_nmap);
|
|
#else
|
|
-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
|
|
+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
|
|
#endif /* DEBUG */
|
|
|
|
STATIC int
|
|
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
|
|
index 79d05e8..e3e5861 100644
|
|
--- a/fs/xfs/xfs_dir2_sf.c
|
|
+++ b/fs/xfs/xfs_dir2_sf.c
|
|
@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
|
|
}
|
|
|
|
ino = xfs_dir2_sfe_get_ino(sfp, sfep);
|
|
- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
|
|
+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
|
|
+ char name[sfep->namelen];
|
|
+ memcpy(name, sfep->name, sfep->namelen);
|
|
+ if (filldir(dirent, name, sfep->namelen,
|
|
+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
|
|
+ *offset = off & 0x7fffffff;
|
|
+ return 0;
|
|
+ }
|
|
+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
|
|
off & 0x7fffffff, ino, DT_UNKNOWN)) {
|
|
*offset = off & 0x7fffffff;
|
|
return 0;
|
|
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
|
|
index 6a6c1fd..6a0cc70 100644
|
|
--- a/fs/xfs/xfs_ioctl.c
|
|
+++ b/fs/xfs/xfs_ioctl.c
|
|
@@ -128,7 +128,7 @@ xfs_find_handle(
|
|
}
|
|
|
|
error = -EFAULT;
|
|
- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
|
|
+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
|
|
copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
|
|
goto out_put;
|
|
|
|
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
|
|
index 23c79ca..53d0b81 100644
|
|
--- a/fs/xfs/xfs_iops.c
|
|
+++ b/fs/xfs/xfs_iops.c
|
|
@@ -397,7 +397,7 @@ xfs_vn_put_link(
|
|
struct nameidata *nd,
|
|
void *p)
|
|
{
|
|
- char *s = nd_get_link(nd);
|
|
+ const char *s = nd_get_link(nd);
|
|
|
|
if (!IS_ERR(s))
|
|
kfree(s);
|
|
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
|
|
index f1c8ca6..b5c1cc7 100644
|
|
--- a/include/acpi/acpi_bus.h
|
|
+++ b/include/acpi/acpi_bus.h
|
|
@@ -107,7 +107,7 @@ struct acpi_device_ops {
|
|
acpi_op_bind bind;
|
|
acpi_op_unbind unbind;
|
|
acpi_op_notify notify;
|
|
-};
|
|
+} __no_const;
|
|
|
|
#define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
|
|
|
|
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
|
|
index b7babf0..3ba8aee 100644
|
|
--- a/include/asm-generic/atomic-long.h
|
|
+++ b/include/asm-generic/atomic-long.h
|
|
@@ -22,6 +22,12 @@
|
|
|
|
typedef atomic64_t atomic_long_t;
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+typedef atomic64_unchecked_t atomic_long_unchecked_t;
|
|
+#else
|
|
+typedef atomic64_t atomic_long_unchecked_t;
|
|
+#endif
|
|
+
|
|
#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
|
|
|
|
static inline long atomic_long_read(atomic_long_t *l)
|
|
@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
|
|
return (long)atomic64_read(v);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
|
|
+{
|
|
+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
|
|
+
|
|
+ return (long)atomic64_read_unchecked(v);
|
|
+}
|
|
+#endif
|
|
+
|
|
static inline void atomic_long_set(atomic_long_t *l, long i)
|
|
{
|
|
atomic64_t *v = (atomic64_t *)l;
|
|
@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
|
|
atomic64_set(v, i);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
|
|
+{
|
|
+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
|
|
+
|
|
+ atomic64_set_unchecked(v, i);
|
|
+}
|
|
+#endif
|
|
+
|
|
static inline void atomic_long_inc(atomic_long_t *l)
|
|
{
|
|
atomic64_t *v = (atomic64_t *)l;
|
|
@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
|
|
atomic64_inc(v);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
|
|
+{
|
|
+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
|
|
+
|
|
+ atomic64_inc_unchecked(v);
|
|
+}
|
|
+#endif
|
|
+
|
|
static inline void atomic_long_dec(atomic_long_t *l)
|
|
{
|
|
atomic64_t *v = (atomic64_t *)l;
|
|
@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
|
|
atomic64_dec(v);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
|
|
+{
|
|
+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
|
|
+
|
|
+ atomic64_dec_unchecked(v);
|
|
+}
|
|
+#endif
|
|
+
|
|
static inline void atomic_long_add(long i, atomic_long_t *l)
|
|
{
|
|
atomic64_t *v = (atomic64_t *)l;
|
|
@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
|
|
atomic64_add(i, v);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
|
|
+{
|
|
+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
|
|
+
|
|
+ atomic64_add_unchecked(i, v);
|
|
+}
|
|
+#endif
|
|
+
|
|
static inline void atomic_long_sub(long i, atomic_long_t *l)
|
|
{
|
|
atomic64_t *v = (atomic64_t *)l;
|
|
@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
|
|
atomic64_sub(i, v);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
|
|
+{
|
|
+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
|
|
+
|
|
+ atomic64_sub_unchecked(i, v);
|
|
+}
|
|
+#endif
|
|
+
|
|
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
|
|
{
|
|
atomic64_t *v = (atomic64_t *)l;
|
|
@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
|
|
return (long)atomic64_inc_return(v);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
|
|
+{
|
|
+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
|
|
+
|
|
+ return (long)atomic64_inc_return_unchecked(v);
|
|
+}
|
|
+#endif
|
|
+
|
|
static inline long atomic_long_dec_return(atomic_long_t *l)
|
|
{
|
|
atomic64_t *v = (atomic64_t *)l;
|
|
@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
|
|
|
|
typedef atomic_t atomic_long_t;
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+typedef atomic_unchecked_t atomic_long_unchecked_t;
|
|
+#else
|
|
+typedef atomic_t atomic_long_unchecked_t;
|
|
+#endif
|
|
+
|
|
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
|
|
static inline long atomic_long_read(atomic_long_t *l)
|
|
{
|
|
@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
|
|
return (long)atomic_read(v);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
|
|
+{
|
|
+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
|
|
+
|
|
+ return (long)atomic_read_unchecked(v);
|
|
+}
|
|
+#endif
|
|
+
|
|
static inline void atomic_long_set(atomic_long_t *l, long i)
|
|
{
|
|
atomic_t *v = (atomic_t *)l;
|
|
@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
|
|
atomic_set(v, i);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
|
|
+{
|
|
+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
|
|
+
|
|
+ atomic_set_unchecked(v, i);
|
|
+}
|
|
+#endif
|
|
+
|
|
static inline void atomic_long_inc(atomic_long_t *l)
|
|
{
|
|
atomic_t *v = (atomic_t *)l;
|
|
@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
|
|
atomic_inc(v);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
|
|
+{
|
|
+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
|
|
+
|
|
+ atomic_inc_unchecked(v);
|
|
+}
|
|
+#endif
|
|
+
|
|
static inline void atomic_long_dec(atomic_long_t *l)
|
|
{
|
|
atomic_t *v = (atomic_t *)l;
|
|
@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
|
|
atomic_dec(v);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
|
|
+{
|
|
+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
|
|
+
|
|
+ atomic_dec_unchecked(v);
|
|
+}
|
|
+#endif
|
|
+
|
|
static inline void atomic_long_add(long i, atomic_long_t *l)
|
|
{
|
|
atomic_t *v = (atomic_t *)l;
|
|
@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
|
|
atomic_add(i, v);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
|
|
+{
|
|
+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
|
|
+
|
|
+ atomic_add_unchecked(i, v);
|
|
+}
|
|
+#endif
|
|
+
|
|
static inline void atomic_long_sub(long i, atomic_long_t *l)
|
|
{
|
|
atomic_t *v = (atomic_t *)l;
|
|
@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
|
|
atomic_sub(i, v);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
|
|
+{
|
|
+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
|
|
+
|
|
+ atomic_sub_unchecked(i, v);
|
|
+}
|
|
+#endif
|
|
+
|
|
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
|
|
{
|
|
atomic_t *v = (atomic_t *)l;
|
|
@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
|
|
return (long)atomic_inc_return(v);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
|
|
+{
|
|
+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
|
|
+
|
|
+ return (long)atomic_inc_return_unchecked(v);
|
|
+}
|
|
+#endif
|
|
+
|
|
static inline long atomic_long_dec_return(atomic_long_t *l)
|
|
{
|
|
atomic_t *v = (atomic_t *)l;
|
|
@@ -255,4 +393,55 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
|
|
|
|
#endif /* BITS_PER_LONG == 64 */
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+static inline void pax_refcount_needs_these_functions(void)
|
|
+{
|
|
+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
|
|
+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
|
|
+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
|
|
+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
|
|
+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
|
|
+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
|
|
+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
|
|
+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
|
|
+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
|
|
+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
|
|
+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
|
|
+#ifdef CONFIG_X86
|
|
+ atomic_clear_mask_unchecked(0, NULL);
|
|
+ atomic_set_mask_unchecked(0, NULL);
|
|
+#endif
|
|
+
|
|
+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
|
|
+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
|
|
+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
|
|
+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
|
|
+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
|
|
+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
|
|
+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
|
|
+}
|
|
+#else
|
|
+#define atomic_read_unchecked(v) atomic_read(v)
|
|
+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
|
|
+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
|
|
+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
|
|
+#define atomic_inc_unchecked(v) atomic_inc(v)
|
|
+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
|
|
+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
|
|
+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
|
|
+#define atomic_dec_unchecked(v) atomic_dec(v)
|
|
+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
|
|
+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
|
|
+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
|
|
+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
|
|
+
|
|
+#define atomic_long_read_unchecked(v) atomic_long_read(v)
|
|
+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
|
|
+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
|
|
+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
|
|
+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
|
|
+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
|
|
+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
|
|
+#endif
|
|
+
|
|
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
|
|
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
|
|
index 1ced641..c896ee8 100644
|
|
--- a/include/asm-generic/atomic.h
|
|
+++ b/include/asm-generic/atomic.h
|
|
@@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
|
|
* Atomically clears the bits set in @mask from @v
|
|
*/
|
|
#ifndef atomic_clear_mask
|
|
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
|
|
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
|
|
{
|
|
unsigned long flags;
|
|
|
|
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
|
|
index b18ce4f..2ee2843 100644
|
|
--- a/include/asm-generic/atomic64.h
|
|
+++ b/include/asm-generic/atomic64.h
|
|
@@ -16,6 +16,8 @@ typedef struct {
|
|
long long counter;
|
|
} atomic64_t;
|
|
|
|
+typedef atomic64_t atomic64_unchecked_t;
|
|
+
|
|
#define ATOMIC64_INIT(i) { (i) }
|
|
|
|
extern long long atomic64_read(const atomic64_t *v);
|
|
@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
|
|
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
|
|
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
|
|
|
|
+#define atomic64_read_unchecked(v) atomic64_read(v)
|
|
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
|
|
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
|
|
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
|
|
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
|
|
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
|
|
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
|
|
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
|
|
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
|
|
+
|
|
#endif /* _ASM_GENERIC_ATOMIC64_H */
|
|
diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
|
|
index 1bfcfe5..e04c5c9 100644
|
|
--- a/include/asm-generic/cache.h
|
|
+++ b/include/asm-generic/cache.h
|
|
@@ -6,7 +6,7 @@
|
|
* cache lines need to provide their own cache.h.
|
|
*/
|
|
|
|
-#define L1_CACHE_SHIFT 5
|
|
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
|
|
+#define L1_CACHE_SHIFT 5UL
|
|
+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
|
|
|
|
#endif /* __ASM_GENERIC_CACHE_H */
|
|
diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
|
|
index 0d68a1e..b74a761 100644
|
|
--- a/include/asm-generic/emergency-restart.h
|
|
+++ b/include/asm-generic/emergency-restart.h
|
|
@@ -1,7 +1,7 @@
|
|
#ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
|
|
#define _ASM_GENERIC_EMERGENCY_RESTART_H
|
|
|
|
-static inline void machine_emergency_restart(void)
|
|
+static inline __noreturn void machine_emergency_restart(void)
|
|
{
|
|
machine_restart(NULL);
|
|
}
|
|
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
|
|
index 0232ccb..13d9165 100644
|
|
--- a/include/asm-generic/kmap_types.h
|
|
+++ b/include/asm-generic/kmap_types.h
|
|
@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
|
|
KMAP_D(17) KM_NMI,
|
|
KMAP_D(18) KM_NMI_PTE,
|
|
KMAP_D(19) KM_KDB,
|
|
+KMAP_D(20) KM_CLEARPAGE,
|
|
/*
|
|
* Remember to update debug_kmap_atomic() when adding new kmap types!
|
|
*/
|
|
-KMAP_D(20) KM_TYPE_NR
|
|
+KMAP_D(21) KM_TYPE_NR
|
|
};
|
|
|
|
#undef KMAP_D
|
|
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
|
|
index 9ceb03b..2efbcbd 100644
|
|
--- a/include/asm-generic/local.h
|
|
+++ b/include/asm-generic/local.h
|
|
@@ -39,6 +39,7 @@ typedef struct
|
|
#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
|
|
#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
|
|
#define local_inc_return(l) atomic_long_inc_return(&(l)->a)
|
|
+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
|
|
|
|
#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
|
|
#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
|
|
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
|
|
index 725612b..9cc513a 100644
|
|
--- a/include/asm-generic/pgtable-nopmd.h
|
|
+++ b/include/asm-generic/pgtable-nopmd.h
|
|
@@ -1,14 +1,19 @@
|
|
#ifndef _PGTABLE_NOPMD_H
|
|
#define _PGTABLE_NOPMD_H
|
|
|
|
-#ifndef __ASSEMBLY__
|
|
-
|
|
#include <asm-generic/pgtable-nopud.h>
|
|
|
|
-struct mm_struct;
|
|
-
|
|
#define __PAGETABLE_PMD_FOLDED
|
|
|
|
+#define PMD_SHIFT PUD_SHIFT
|
|
+#define PTRS_PER_PMD 1
|
|
+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
|
|
+#define PMD_MASK (~(PMD_SIZE-1))
|
|
+
|
|
+#ifndef __ASSEMBLY__
|
|
+
|
|
+struct mm_struct;
|
|
+
|
|
/*
|
|
* Having the pmd type consist of a pud gets the size right, and allows
|
|
* us to conceptually access the pud entry that this pmd is folded into
|
|
@@ -16,11 +21,6 @@ struct mm_struct;
|
|
*/
|
|
typedef struct { pud_t pud; } pmd_t;
|
|
|
|
-#define PMD_SHIFT PUD_SHIFT
|
|
-#define PTRS_PER_PMD 1
|
|
-#define PMD_SIZE (1UL << PMD_SHIFT)
|
|
-#define PMD_MASK (~(PMD_SIZE-1))
|
|
-
|
|
/*
|
|
* The "pud_xxx()" functions here are trivial for a folded two-level
|
|
* setup: the pmd is never bad, and a pmd always exists (as it's folded
|
|
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
|
|
index 810431d..0ec4804f 100644
|
|
--- a/include/asm-generic/pgtable-nopud.h
|
|
+++ b/include/asm-generic/pgtable-nopud.h
|
|
@@ -1,10 +1,15 @@
|
|
#ifndef _PGTABLE_NOPUD_H
|
|
#define _PGTABLE_NOPUD_H
|
|
|
|
-#ifndef __ASSEMBLY__
|
|
-
|
|
#define __PAGETABLE_PUD_FOLDED
|
|
|
|
+#define PUD_SHIFT PGDIR_SHIFT
|
|
+#define PTRS_PER_PUD 1
|
|
+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
|
|
+#define PUD_MASK (~(PUD_SIZE-1))
|
|
+
|
|
+#ifndef __ASSEMBLY__
|
|
+
|
|
/*
|
|
* Having the pud type consist of a pgd gets the size right, and allows
|
|
* us to conceptually access the pgd entry that this pud is folded into
|
|
@@ -12,11 +17,6 @@
|
|
*/
|
|
typedef struct { pgd_t pgd; } pud_t;
|
|
|
|
-#define PUD_SHIFT PGDIR_SHIFT
|
|
-#define PTRS_PER_PUD 1
|
|
-#define PUD_SIZE (1UL << PUD_SHIFT)
|
|
-#define PUD_MASK (~(PUD_SIZE-1))
|
|
-
|
|
/*
|
|
* The "pgd_xxx()" functions here are trivial for a folded two-level
|
|
* setup: the pud is never bad, and a pud always exists (as it's folded
|
|
@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
|
|
#define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
|
|
|
|
#define pgd_populate(mm, pgd, pud) do { } while (0)
|
|
+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
|
|
/*
|
|
* (puds are folded into pgds so this doesn't get actually called,
|
|
* but the define is needed for a generic inline function.)
|
|
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
|
|
index 055b565..537b390 100644
|
|
--- a/include/asm-generic/pgtable.h
|
|
+++ b/include/asm-generic/pgtable.h
|
|
@@ -540,6 +540,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
|
|
#endif
|
|
}
|
|
|
|
+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
|
|
+static inline unsigned long pax_open_kernel(void) { return 0; }
|
|
+#endif
|
|
+
|
|
+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
|
|
+static inline unsigned long pax_close_kernel(void) { return 0; }
|
|
+#endif
|
|
+
|
|
#endif /* CONFIG_MMU */
|
|
|
|
#endif /* !__ASSEMBLY__ */
|
|
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
|
|
index 6af2a3e..fcb2647 100644
|
|
--- a/include/asm-generic/vmlinux.lds.h
|
|
+++ b/include/asm-generic/vmlinux.lds.h
|
|
@@ -218,6 +218,7 @@
|
|
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
|
|
VMLINUX_SYMBOL(__start_rodata) = .; \
|
|
*(.rodata) *(.rodata.*) \
|
|
+ *(.data..read_only) \
|
|
*(__vermagic) /* Kernel version magic */ \
|
|
. = ALIGN(8); \
|
|
VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
|
|
@@ -721,17 +722,18 @@
|
|
* section in the linker script will go there too. @phdr should have
|
|
* a leading colon.
|
|
*
|
|
- * Note that this macros defines __per_cpu_load as an absolute symbol.
|
|
+ * Note that this macros defines per_cpu_load as an absolute symbol.
|
|
* If there is no need to put the percpu section at a predetermined
|
|
* address, use PERCPU_SECTION.
|
|
*/
|
|
#define PERCPU_VADDR(cacheline, vaddr, phdr) \
|
|
- VMLINUX_SYMBOL(__per_cpu_load) = .; \
|
|
- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
|
|
+ per_cpu_load = .; \
|
|
+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
|
|
- LOAD_OFFSET) { \
|
|
+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
|
|
PERCPU_INPUT(cacheline) \
|
|
} phdr \
|
|
- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
|
|
+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
|
|
|
|
/**
|
|
* PERCPU_SECTION - define output section for percpu area, simple version
|
|
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
|
|
index 3bf1490..ac85a95 100644
|
|
--- a/include/drm/drmP.h
|
|
+++ b/include/drm/drmP.h
|
|
@@ -72,6 +72,7 @@
|
|
#include <linux/workqueue.h>
|
|
#include <linux/poll.h>
|
|
#include <asm/pgalloc.h>
|
|
+#include <asm/local.h>
|
|
#include "drm.h"
|
|
|
|
#include <linux/idr.h>
|
|
@@ -1074,7 +1075,7 @@ struct drm_device {
|
|
|
|
/** \name Usage Counters */
|
|
/*@{ */
|
|
- int open_count; /**< Outstanding files open */
|
|
+ local_t open_count; /**< Outstanding files open */
|
|
atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
|
|
atomic_t vma_count; /**< Outstanding vma areas open */
|
|
int buf_use; /**< Buffers in use -- cannot alloc */
|
|
@@ -1085,7 +1086,7 @@ struct drm_device {
|
|
/*@{ */
|
|
unsigned long counters;
|
|
enum drm_stat_type types[15];
|
|
- atomic_t counts[15];
|
|
+ atomic_unchecked_t counts[15];
|
|
/*@} */
|
|
|
|
struct list_head filelist;
|
|
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
|
|
index 37515d1..34fa8b0 100644
|
|
--- a/include/drm/drm_crtc_helper.h
|
|
+++ b/include/drm/drm_crtc_helper.h
|
|
@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
|
|
|
|
/* disable crtc when not in use - more explicit than dpms off */
|
|
void (*disable)(struct drm_crtc *crtc);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct drm_encoder_helper_funcs {
|
|
void (*dpms)(struct drm_encoder *encoder, int mode);
|
|
@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
|
|
struct drm_connector *connector);
|
|
/* disable encoder when not in use - more explicit than dpms off */
|
|
void (*disable)(struct drm_encoder *encoder);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct drm_connector_helper_funcs {
|
|
int (*get_modes)(struct drm_connector *connector);
|
|
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
|
|
index d6d1da4..fdd1ac5 100644
|
|
--- a/include/drm/ttm/ttm_memory.h
|
|
+++ b/include/drm/ttm/ttm_memory.h
|
|
@@ -48,7 +48,7 @@
|
|
|
|
struct ttm_mem_shrink {
|
|
int (*do_shrink) (struct ttm_mem_shrink *);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/**
|
|
* struct ttm_mem_global - Global memory accounting structure.
|
|
diff --git a/include/linux/a.out.h b/include/linux/a.out.h
|
|
index e86dfca..40cc55f 100644
|
|
--- a/include/linux/a.out.h
|
|
+++ b/include/linux/a.out.h
|
|
@@ -39,6 +39,14 @@ enum machine_type {
|
|
M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
|
|
};
|
|
|
|
+/* Constants for the N_FLAGS field */
|
|
+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
|
|
+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
|
|
+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
|
|
+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
|
|
+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
|
|
+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
|
|
+
|
|
#if !defined (N_MAGIC)
|
|
#define N_MAGIC(exec) ((exec).a_info & 0xffff)
|
|
#endif
|
|
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
|
|
index 06fd4bb..1caec0d 100644
|
|
--- a/include/linux/atmdev.h
|
|
+++ b/include/linux/atmdev.h
|
|
@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
|
|
#endif
|
|
|
|
struct k_atm_aal_stats {
|
|
-#define __HANDLE_ITEM(i) atomic_t i
|
|
+#define __HANDLE_ITEM(i) atomic_unchecked_t i
|
|
__AAL_STAT_ITEMS
|
|
#undef __HANDLE_ITEM
|
|
};
|
|
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
|
|
index 424b381..5de2d1e 100644
|
|
--- a/include/linux/binfmts.h
|
|
+++ b/include/linux/binfmts.h
|
|
@@ -87,6 +87,7 @@ struct linux_binfmt {
|
|
int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
|
|
int (*load_shlib)(struct file *);
|
|
int (*core_dump)(struct coredump_params *cprm);
|
|
+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
|
|
unsigned long min_coredump; /* minimal dump size */
|
|
};
|
|
|
|
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
|
|
index 6619eb2..757c75c 100644
|
|
--- a/include/linux/blkdev.h
|
|
+++ b/include/linux/blkdev.h
|
|
@@ -1413,7 +1413,7 @@ struct block_device_operations {
|
|
/* this callback is with swap_lock and sometimes page table lock held */
|
|
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
|
|
struct module *owner;
|
|
-};
|
|
+} __do_const;
|
|
|
|
extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
|
|
unsigned long);
|
|
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
|
|
index 4d1a074..88f929a 100644
|
|
--- a/include/linux/blktrace_api.h
|
|
+++ b/include/linux/blktrace_api.h
|
|
@@ -162,7 +162,7 @@ struct blk_trace {
|
|
struct dentry *dir;
|
|
struct dentry *dropped_file;
|
|
struct dentry *msg_file;
|
|
- atomic_t dropped;
|
|
+ atomic_unchecked_t dropped;
|
|
};
|
|
|
|
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
|
|
diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
|
|
index 83195fb..0b0f77d 100644
|
|
--- a/include/linux/byteorder/little_endian.h
|
|
+++ b/include/linux/byteorder/little_endian.h
|
|
@@ -42,51 +42,51 @@
|
|
|
|
static inline __le64 __cpu_to_le64p(const __u64 *p)
|
|
{
|
|
- return (__force __le64)*p;
|
|
+ return (__force const __le64)*p;
|
|
}
|
|
static inline __u64 __le64_to_cpup(const __le64 *p)
|
|
{
|
|
- return (__force __u64)*p;
|
|
+ return (__force const __u64)*p;
|
|
}
|
|
static inline __le32 __cpu_to_le32p(const __u32 *p)
|
|
{
|
|
- return (__force __le32)*p;
|
|
+ return (__force const __le32)*p;
|
|
}
|
|
static inline __u32 __le32_to_cpup(const __le32 *p)
|
|
{
|
|
- return (__force __u32)*p;
|
|
+ return (__force const __u32)*p;
|
|
}
|
|
static inline __le16 __cpu_to_le16p(const __u16 *p)
|
|
{
|
|
- return (__force __le16)*p;
|
|
+ return (__force const __le16)*p;
|
|
}
|
|
static inline __u16 __le16_to_cpup(const __le16 *p)
|
|
{
|
|
- return (__force __u16)*p;
|
|
+ return (__force const __u16)*p;
|
|
}
|
|
static inline __be64 __cpu_to_be64p(const __u64 *p)
|
|
{
|
|
- return (__force __be64)__swab64p(p);
|
|
+ return (__force const __be64)__swab64p(p);
|
|
}
|
|
static inline __u64 __be64_to_cpup(const __be64 *p)
|
|
{
|
|
- return __swab64p((__u64 *)p);
|
|
+ return __swab64p((const __u64 *)p);
|
|
}
|
|
static inline __be32 __cpu_to_be32p(const __u32 *p)
|
|
{
|
|
- return (__force __be32)__swab32p(p);
|
|
+ return (__force const __be32)__swab32p(p);
|
|
}
|
|
static inline __u32 __be32_to_cpup(const __be32 *p)
|
|
{
|
|
- return __swab32p((__u32 *)p);
|
|
+ return __swab32p((const __u32 *)p);
|
|
}
|
|
static inline __be16 __cpu_to_be16p(const __u16 *p)
|
|
{
|
|
- return (__force __be16)__swab16p(p);
|
|
+ return (__force const __be16)__swab16p(p);
|
|
}
|
|
static inline __u16 __be16_to_cpup(const __be16 *p)
|
|
{
|
|
- return __swab16p((__u16 *)p);
|
|
+ return __swab16p((const __u16 *)p);
|
|
}
|
|
#define __cpu_to_le64s(x) do { (void)(x); } while (0)
|
|
#define __le64_to_cpus(x) do { (void)(x); } while (0)
|
|
diff --git a/include/linux/cache.h b/include/linux/cache.h
|
|
index 4c57065..4307975 100644
|
|
--- a/include/linux/cache.h
|
|
+++ b/include/linux/cache.h
|
|
@@ -16,6 +16,10 @@
|
|
#define __read_mostly
|
|
#endif
|
|
|
|
+#ifndef __read_only
|
|
+#define __read_only __read_mostly
|
|
+#endif
|
|
+
|
|
#ifndef ____cacheline_aligned
|
|
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
|
|
#endif
|
|
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
|
|
index 42e55de..1cd0e66 100644
|
|
--- a/include/linux/cleancache.h
|
|
+++ b/include/linux/cleancache.h
|
|
@@ -31,7 +31,7 @@ struct cleancache_ops {
|
|
void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
|
|
void (*invalidate_inode)(int, struct cleancache_filekey);
|
|
void (*invalidate_fs)(int);
|
|
-};
|
|
+} __no_const;
|
|
|
|
extern struct cleancache_ops
|
|
cleancache_register_ops(struct cleancache_ops *ops);
|
|
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
|
|
index 91b1aa8..664e779 100644
|
|
--- a/include/linux/compiler-gcc4.h
|
|
+++ b/include/linux/compiler-gcc4.h
|
|
@@ -48,6 +48,20 @@
|
|
#endif
|
|
|
|
#if __GNUC_MINOR__ >= 5
|
|
+
|
|
+#ifdef CONSTIFY_PLUGIN
|
|
+#define __no_const __attribute__((no_const))
|
|
+#define __do_const __attribute__((do_const))
|
|
+#endif
|
|
+
|
|
+#ifdef SIZE_OVERFLOW_PLUGIN
|
|
+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
|
|
+#endif
|
|
+
|
|
+#ifdef LATENT_ENTROPY_PLUGIN
|
|
+#define __latent_entropy __attribute__((latent_entropy))
|
|
+#endif
|
|
+
|
|
/*
|
|
* Mark a position in code as unreachable. This can be used to
|
|
* suppress control flow warnings after asm blocks that transfer
|
|
@@ -63,6 +77,11 @@
|
|
#define __noclone __attribute__((__noclone__))
|
|
|
|
#endif
|
|
+
|
|
+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
|
|
+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
|
|
+#define __bos0(ptr) __bos((ptr), 0)
|
|
+#define __bos1(ptr) __bos((ptr), 1)
|
|
#endif
|
|
|
|
#if __GNUC_MINOR__ > 0
|
|
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
|
|
index 923d093..1fef491 100644
|
|
--- a/include/linux/compiler.h
|
|
+++ b/include/linux/compiler.h
|
|
@@ -5,31 +5,62 @@
|
|
|
|
#ifdef __CHECKER__
|
|
# define __user __attribute__((noderef, address_space(1)))
|
|
+# define __force_user __force __user
|
|
# define __kernel __attribute__((address_space(0)))
|
|
+# define __force_kernel __force __kernel
|
|
# define __safe __attribute__((safe))
|
|
# define __force __attribute__((force))
|
|
# define __nocast __attribute__((nocast))
|
|
# define __iomem __attribute__((noderef, address_space(2)))
|
|
+# define __force_iomem __force __iomem
|
|
# define __acquires(x) __attribute__((context(x,0,1)))
|
|
# define __releases(x) __attribute__((context(x,1,0)))
|
|
# define __acquire(x) __context__(x,1)
|
|
# define __release(x) __context__(x,-1)
|
|
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
|
|
# define __percpu __attribute__((noderef, address_space(3)))
|
|
+# define __force_percpu __force __percpu
|
|
#ifdef CONFIG_SPARSE_RCU_POINTER
|
|
# define __rcu __attribute__((noderef, address_space(4)))
|
|
+# define __force_rcu __force __rcu
|
|
#else
|
|
# define __rcu
|
|
+# define __force_rcu
|
|
#endif
|
|
extern void __chk_user_ptr(const volatile void __user *);
|
|
extern void __chk_io_ptr(const volatile void __iomem *);
|
|
+#elif defined(CHECKER_PLUGIN)
|
|
+//# define __user
|
|
+//# define __force_user
|
|
+//# define __kernel
|
|
+//# define __force_kernel
|
|
+# define __safe
|
|
+# define __force
|
|
+# define __nocast
|
|
+# define __iomem
|
|
+# define __force_iomem
|
|
+# define __chk_user_ptr(x) (void)0
|
|
+# define __chk_io_ptr(x) (void)0
|
|
+# define __builtin_warning(x, y...) (1)
|
|
+# define __acquires(x)
|
|
+# define __releases(x)
|
|
+# define __acquire(x) (void)0
|
|
+# define __release(x) (void)0
|
|
+# define __cond_lock(x,c) (c)
|
|
+# define __percpu
|
|
+# define __force_percpu
|
|
+# define __rcu
|
|
+# define __force_rcu
|
|
#else
|
|
# define __user
|
|
+# define __force_user
|
|
# define __kernel
|
|
+# define __force_kernel
|
|
# define __safe
|
|
# define __force
|
|
# define __nocast
|
|
# define __iomem
|
|
+# define __force_iomem
|
|
# define __chk_user_ptr(x) (void)0
|
|
# define __chk_io_ptr(x) (void)0
|
|
# define __builtin_warning(x, y...) (1)
|
|
@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
|
|
# define __release(x) (void)0
|
|
# define __cond_lock(x,c) (c)
|
|
# define __percpu
|
|
+# define __force_percpu
|
|
# define __rcu
|
|
+# define __force_rcu
|
|
#endif
|
|
|
|
#ifdef __KERNEL__
|
|
@@ -264,6 +297,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
|
|
# define __attribute_const__ /* unimplemented */
|
|
#endif
|
|
|
|
+#ifndef __no_const
|
|
+# define __no_const
|
|
+#endif
|
|
+
|
|
+#ifndef __do_const
|
|
+# define __do_const
|
|
+#endif
|
|
+
|
|
+#ifndef __size_overflow
|
|
+# define __size_overflow(...)
|
|
+#endif
|
|
+
|
|
+#ifndef __latent_entropy
|
|
+# define __latent_entropy
|
|
+#endif
|
|
+
|
|
/*
|
|
* Tell gcc if a function is cold. The compiler will assume any path
|
|
* directly leading to the call is unlikely.
|
|
@@ -273,6 +322,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
|
|
#define __cold
|
|
#endif
|
|
|
|
+#ifndef __alloc_size
|
|
+#define __alloc_size(...)
|
|
+#endif
|
|
+
|
|
+#ifndef __bos
|
|
+#define __bos(ptr, arg)
|
|
+#endif
|
|
+
|
|
+#ifndef __bos0
|
|
+#define __bos0(ptr)
|
|
+#endif
|
|
+
|
|
+#ifndef __bos1
|
|
+#define __bos1(ptr)
|
|
+#endif
|
|
+
|
|
/* Simple shorthand for a section definition */
|
|
#ifndef __section
|
|
# define __section(S) __attribute__ ((__section__(#S)))
|
|
@@ -308,6 +373,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
|
|
* use is to mediate communication between process-level code and irq/NMI
|
|
* handlers, all running on the same CPU.
|
|
*/
|
|
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
|
|
+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
|
|
+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
|
|
|
|
#endif /* __LINUX_COMPILER_H */
|
|
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
|
|
index b92eadf..b4ecdc1 100644
|
|
--- a/include/linux/crypto.h
|
|
+++ b/include/linux/crypto.h
|
|
@@ -373,7 +373,7 @@ struct cipher_tfm {
|
|
const u8 *key, unsigned int keylen);
|
|
void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
|
|
void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct hash_tfm {
|
|
int (*init)(struct hash_desc *desc);
|
|
@@ -394,13 +394,13 @@ struct compress_tfm {
|
|
int (*cot_decompress)(struct crypto_tfm *tfm,
|
|
const u8 *src, unsigned int slen,
|
|
u8 *dst, unsigned int *dlen);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct rng_tfm {
|
|
int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
|
|
unsigned int dlen);
|
|
int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
|
|
-};
|
|
+} __no_const;
|
|
|
|
#define crt_ablkcipher crt_u.ablkcipher
|
|
#define crt_aead crt_u.aead
|
|
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
|
|
index 7925bf0..d5143d2 100644
|
|
--- a/include/linux/decompress/mm.h
|
|
+++ b/include/linux/decompress/mm.h
|
|
@@ -77,7 +77,7 @@ static void free(void *where)
|
|
* warnings when not needed (indeed large_malloc / large_free are not
|
|
* needed by inflate */
|
|
|
|
-#define malloc(a) kmalloc(a, GFP_KERNEL)
|
|
+#define malloc(a) kmalloc((a), GFP_KERNEL)
|
|
#define free(a) kfree(a)
|
|
|
|
#define large_malloc(a) vmalloc(a)
|
|
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
|
|
index 94af418..b1ca7a2 100644
|
|
--- a/include/linux/dma-mapping.h
|
|
+++ b/include/linux/dma-mapping.h
|
|
@@ -54,7 +54,7 @@ struct dma_map_ops {
|
|
u64 (*get_required_mask)(struct device *dev);
|
|
#endif
|
|
int is_phys;
|
|
-};
|
|
+} __do_const;
|
|
|
|
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
|
|
|
|
diff --git a/include/linux/efi.h.rej b/include/linux/efi.h.rej
|
|
new file mode 100644
|
|
index 0000000..e4f1b70
|
|
--- /dev/null
|
|
+++ b/include/linux/efi.h.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- include/linux/efi.h 2012-05-21 11:33:37.395929829 +0200
|
|
++++ include/linux/efi.h 2012-05-21 12:10:11.384048989 +0200
|
|
+@@ -635,7 +635,7 @@ struct efivar_operations {
|
|
+ efi_get_variable_t *get_variable;
|
|
+ efi_get_next_variable_t *get_next_variable;
|
|
+ efi_set_variable_t *set_variable;
|
|
+-};
|
|
++} __no_const;
|
|
+
|
|
+ struct efivars {
|
|
+ /*
|
|
diff --git a/include/linux/elf.h b/include/linux/elf.h
|
|
index 999b4f5..57753b4 100644
|
|
--- a/include/linux/elf.h
|
|
+++ b/include/linux/elf.h
|
|
@@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
|
|
#define PT_GNU_EH_FRAME 0x6474e550
|
|
|
|
#define PT_GNU_STACK (PT_LOOS + 0x474e551)
|
|
+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
|
|
+
|
|
+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
|
|
+
|
|
+/* Constants for the e_flags field */
|
|
+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
|
|
+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
|
|
+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
|
|
+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
|
|
+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
|
|
+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
|
|
|
|
/*
|
|
* Extended Numbering
|
|
@@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
|
|
#define DT_DEBUG 21
|
|
#define DT_TEXTREL 22
|
|
#define DT_JMPREL 23
|
|
+#define DT_FLAGS 30
|
|
+ #define DF_TEXTREL 0x00000004
|
|
#define DT_ENCODING 32
|
|
#define OLD_DT_LOOS 0x60000000
|
|
#define DT_LOOS 0x6000000d
|
|
@@ -243,6 +256,19 @@ typedef struct elf64_hdr {
|
|
#define PF_W 0x2
|
|
#define PF_X 0x1
|
|
|
|
+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
|
|
+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
|
|
+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
|
|
+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
|
|
+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
|
|
+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
|
|
+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
|
|
+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
|
|
+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
|
|
+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
|
|
+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
|
|
+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
|
|
+
|
|
typedef struct elf32_phdr{
|
|
Elf32_Word p_type;
|
|
Elf32_Off p_offset;
|
|
@@ -335,6 +361,8 @@ typedef struct elf64_shdr {
|
|
#define EI_OSABI 7
|
|
#define EI_PAD 8
|
|
|
|
+#define EI_PAX 14
|
|
+
|
|
#define ELFMAG0 0x7f /* EI_MAG */
|
|
#define ELFMAG1 'E'
|
|
#define ELFMAG2 'L'
|
|
@@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
|
|
#define elf_note elf32_note
|
|
#define elf_addr_t Elf32_Off
|
|
#define Elf_Half Elf32_Half
|
|
+#define elf_dyn Elf32_Dyn
|
|
|
|
#else
|
|
|
|
@@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
|
|
#define elf_note elf64_note
|
|
#define elf_addr_t Elf64_Off
|
|
#define Elf_Half Elf64_Half
|
|
+#define elf_dyn Elf64_Dyn
|
|
|
|
#endif
|
|
|
|
diff --git a/include/linux/filter.h b/include/linux/filter.h
|
|
index f2e5315..13d571c 100644
|
|
--- a/include/linux/filter.h
|
|
+++ b/include/linux/filter.h
|
|
@@ -145,6 +145,7 @@ struct compat_sock_fprog {
|
|
|
|
struct sk_buff;
|
|
struct sock;
|
|
+struct bpf_jit_work;
|
|
|
|
struct sk_filter
|
|
{
|
|
@@ -152,6 +153,9 @@ struct sk_filter
|
|
unsigned int len; /* Number of filter blocks */
|
|
unsigned int (*bpf_func)(const struct sk_buff *skb,
|
|
const struct sock_filter *filter);
|
|
+#ifdef CONFIG_BPF_JIT
|
|
+ struct bpf_jit_work *work;
|
|
+#endif
|
|
struct rcu_head rcu;
|
|
struct sock_filter insns[0];
|
|
};
|
|
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
|
|
index 66e013b..6128774 100644
|
|
--- a/include/linux/firewire.h
|
|
+++ b/include/linux/firewire.h
|
|
@@ -415,7 +415,7 @@ struct fw_iso_context {
|
|
union {
|
|
fw_iso_callback_t sc;
|
|
fw_iso_mc_callback_t mc;
|
|
- } callback;
|
|
+ } __no_const callback;
|
|
void *callback_data;
|
|
};
|
|
|
|
diff --git a/include/linux/fs.h b/include/linux/fs.h
|
|
index a5fb99d..263e5b8 100644
|
|
--- a/include/linux/fs.h
|
|
+++ b/include/linux/fs.h
|
|
@@ -1647,7 +1647,8 @@ struct file_operations {
|
|
int (*setlease)(struct file *, long, struct file_lock **);
|
|
long (*fallocate)(struct file *file, int mode, loff_t offset,
|
|
loff_t len);
|
|
-};
|
|
+} __do_const;
|
|
+typedef struct file_operations __no_const file_operations_no_const;
|
|
|
|
struct inode_operations {
|
|
struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
|
|
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
|
|
index 003dc0f..3c4ea97 100644
|
|
--- a/include/linux/fs_struct.h
|
|
+++ b/include/linux/fs_struct.h
|
|
@@ -6,7 +6,7 @@
|
|
#include <linux/seqlock.h>
|
|
|
|
struct fs_struct {
|
|
- int users;
|
|
+ atomic_t users;
|
|
spinlock_t lock;
|
|
seqcount_t seq;
|
|
int umask;
|
|
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
|
|
index ce31408..b1ad003 100644
|
|
--- a/include/linux/fscache-cache.h
|
|
+++ b/include/linux/fscache-cache.h
|
|
@@ -102,7 +102,7 @@ struct fscache_operation {
|
|
fscache_operation_release_t release;
|
|
};
|
|
|
|
-extern atomic_t fscache_op_debug_id;
|
|
+extern atomic_unchecked_t fscache_op_debug_id;
|
|
extern void fscache_op_work_func(struct work_struct *work);
|
|
|
|
extern void fscache_enqueue_operation(struct fscache_operation *);
|
|
@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
|
|
{
|
|
INIT_WORK(&op->work, fscache_op_work_func);
|
|
atomic_set(&op->usage, 1);
|
|
- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
|
|
+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
|
|
op->processor = processor;
|
|
op->release = release;
|
|
INIT_LIST_HEAD(&op->pend_link);
|
|
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
|
|
index fac26cb..8026d20 100644
|
|
--- a/include/linux/fsnotify.h
|
|
+++ b/include/linux/fsnotify.h
|
|
@@ -317,7 +317,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
|
|
*/
|
|
static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
|
|
{
|
|
- return kstrdup(name, GFP_KERNEL);
|
|
+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
|
|
}
|
|
|
|
/*
|
|
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
|
|
index 91d0e0a3..035666b 100644
|
|
--- a/include/linux/fsnotify_backend.h
|
|
+++ b/include/linux/fsnotify_backend.h
|
|
@@ -105,6 +105,7 @@ struct fsnotify_ops {
|
|
void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
|
|
void (*free_event_priv)(struct fsnotify_event_private_data *priv);
|
|
};
|
|
+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
|
|
|
|
/*
|
|
* A group is a "thing" that wants to receive notification about filesystem
|
|
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
|
|
index 16cad53..68724dc 100644
|
|
--- a/include/linux/ftrace_event.h
|
|
+++ b/include/linux/ftrace_event.h
|
|
@@ -99,7 +99,7 @@ struct trace_event_functions {
|
|
trace_print_func raw;
|
|
trace_print_func hex;
|
|
trace_print_func binary;
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct trace_event {
|
|
struct hlist_node node;
|
|
@@ -265,7 +265,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
|
|
extern int trace_add_event_call(struct ftrace_event_call *call);
|
|
extern void trace_remove_event_call(struct ftrace_event_call *call);
|
|
|
|
-#define is_signed_type(type) (((type)(-1)) < 0)
|
|
+#define is_signed_type(type) (((type)(-1)) < (type)1)
|
|
|
|
int trace_set_clr_event(const char *system, const char *event, int set);
|
|
|
|
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
|
|
index 017a7fb..33a8507 100644
|
|
--- a/include/linux/genhd.h
|
|
+++ b/include/linux/genhd.h
|
|
@@ -185,7 +185,7 @@ struct gendisk {
|
|
struct kobject *slave_dir;
|
|
|
|
struct timer_rand_state *random;
|
|
- atomic_t sync_io; /* RAID */
|
|
+ atomic_unchecked_t sync_io; /* RAID */
|
|
struct disk_events *ev;
|
|
#ifdef CONFIG_BLK_DEV_INTEGRITY
|
|
struct blk_integrity *integrity;
|
|
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
|
|
index 9c9bd08..8c987785 100644
|
|
--- a/include/linux/gfp.h
|
|
+++ b/include/linux/gfp.h
|
|
@@ -39,6 +39,12 @@ struct vm_area_struct;
|
|
#define ___GFP_OTHER_NODE 0x800000u
|
|
#define ___GFP_WRITE 0x1000000u
|
|
|
|
+#ifdef CONFIG_PAX_USERCOPY_SLABS
|
|
+#define ___GFP_USERCOPY 0x2000000u
|
|
+#else
|
|
+#define ___GFP_USERCOPY 0
|
|
+#endif
|
|
+
|
|
/*
|
|
* GFP bitmasks..
|
|
*
|
|
@@ -90,6 +96,7 @@ struct vm_area_struct;
|
|
#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
|
|
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
|
|
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
|
|
+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
|
|
|
|
/*
|
|
* This may seem redundant, but it's a way of annotating false positives vs.
|
|
@@ -151,6 +158,8 @@ struct vm_area_struct;
|
|
/* 4GB DMA on some platforms */
|
|
#define GFP_DMA32 __GFP_DMA32
|
|
|
|
+#define GFP_USERCOPY __GFP_USERCOPY
|
|
+
|
|
/* Convert GFP flags to their corresponding migrate type */
|
|
static inline int allocflags_to_migratetype(gfp_t gfp_flags)
|
|
{
|
|
diff --git a/include/linux/gfp.h.rej b/include/linux/gfp.h.rej
|
|
new file mode 100644
|
|
index 0000000..46a5762
|
|
--- /dev/null
|
|
+++ b/include/linux/gfp.h.rej
|
|
@@ -0,0 +1,10 @@
|
|
+diff a/include/linux/gfp.h b/include/linux/gfp.h (rejected hunks)
|
|
+@@ -97,7 +104,7 @@ struct vm_area_struct;
|
|
+ */
|
|
+ #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
|
|
+
|
|
+-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
|
|
++#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
|
|
+ #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
|
|
+
|
|
+ /* This equals 0, but use constants in case they ever change */
|
|
diff --git a/include/linux/hid.h b/include/linux/hid.h
|
|
index 117bcf8..548da22 100644
|
|
--- a/include/linux/hid.h
|
|
+++ b/include/linux/hid.h
|
|
@@ -703,7 +703,7 @@ struct hid_ll_driver {
|
|
unsigned int code, int value);
|
|
|
|
int (*parse)(struct hid_device *hdev);
|
|
-};
|
|
+} __no_const;
|
|
|
|
#define PM_HINT_FULLON 1<<5
|
|
#define PM_HINT_NORMAL 1<<1
|
|
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
|
|
index 2a144e6..e1ab821 100644
|
|
--- a/include/linux/highmem.h
|
|
+++ b/include/linux/highmem.h
|
|
@@ -243,6 +243,18 @@ static inline void clear_highpage(struct page *page)
|
|
kunmap_atomic(kaddr);
|
|
}
|
|
|
|
+static inline void sanitize_highpage(struct page *page)
|
|
+{
|
|
+ void *kaddr;
|
|
+ unsigned long flags;
|
|
+
|
|
+ local_irq_save(flags);
|
|
+ kaddr = kmap_atomic(page);
|
|
+ clear_page(kaddr);
|
|
+ kunmap_atomic(kaddr);
|
|
+ local_irq_restore(flags);
|
|
+}
|
|
+
|
|
static inline void zero_user_segments(struct page *page,
|
|
unsigned start1, unsigned end1,
|
|
unsigned start2, unsigned end2)
|
|
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
|
|
index 195d8b3..e20cfab 100644
|
|
--- a/include/linux/i2c.h
|
|
+++ b/include/linux/i2c.h
|
|
@@ -365,6 +365,7 @@ struct i2c_algorithm {
|
|
/* To determine what the adapter supports */
|
|
u32 (*functionality) (struct i2c_adapter *);
|
|
};
|
|
+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
|
|
|
|
/*
|
|
* i2c_adapter is the structure used to identify a physical i2c bus along
|
|
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
|
|
index d23c3c2..eb63c81 100644
|
|
--- a/include/linux/i2o.h
|
|
+++ b/include/linux/i2o.h
|
|
@@ -565,7 +565,7 @@ struct i2o_controller {
|
|
struct i2o_device *exec; /* Executive */
|
|
#if BITS_PER_LONG == 64
|
|
spinlock_t context_list_lock; /* lock for context_list */
|
|
- atomic_t context_list_counter; /* needed for unique contexts */
|
|
+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
|
|
struct list_head context_list; /* list of context id's
|
|
and pointers */
|
|
#endif
|
|
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
|
|
index b159b10..458a073 100644
|
|
--- a/include/linux/if_team.h
|
|
+++ b/include/linux/if_team.h
|
|
@@ -64,6 +64,7 @@ struct team_mode_ops {
|
|
void (*port_leave)(struct team *team, struct team_port *port);
|
|
void (*port_change_mac)(struct team *team, struct team_port *port);
|
|
};
|
|
+typedef struct team_mode_ops __no_const team_mode_ops_no_const;
|
|
|
|
enum team_option_type {
|
|
TEAM_OPTION_TYPE_U32,
|
|
diff --git a/include/linux/if_team.h.rej b/include/linux/if_team.h.rej
|
|
new file mode 100644
|
|
index 0000000..1641669
|
|
--- /dev/null
|
|
+++ b/include/linux/if_team.h.rej
|
|
@@ -0,0 +1,10 @@
|
|
+--- include/linux/if_team.h 2012-03-19 10:39:10.972049232 +0100
|
|
++++ include/linux/if_team.h 2012-05-21 12:10:11.452048993 +0200
|
|
+@@ -113,7 +114,7 @@ struct team {
|
|
+ struct list_head option_list;
|
|
+
|
|
+ const struct team_mode *mode;
|
|
+- struct team_mode_ops ops;
|
|
++ team_mode_ops_no_const ops;
|
|
+ long mode_priv[TEAM_MODE_PRIV_LONGS];
|
|
+ };
|
|
diff --git a/include/linux/init.h b/include/linux/init.h
|
|
index 6b95109..bcbdd68 100644
|
|
--- a/include/linux/init.h
|
|
+++ b/include/linux/init.h
|
|
@@ -39,9 +39,15 @@
|
|
* Also note, that this data cannot be "const".
|
|
*/
|
|
|
|
+#ifdef MODULE
|
|
+#define add_latent_entropy
|
|
+#else
|
|
+#define add_latent_entropy __latent_entropy
|
|
+#endif
|
|
+
|
|
/* These are for everybody (although not all archs will actually
|
|
discard it in modules) */
|
|
-#define __init __section(.init.text) __cold notrace
|
|
+#define __init __section(.init.text) __cold notrace add_latent_entropy
|
|
#define __initdata __section(.init.data)
|
|
#define __initconst __section(.init.rodata)
|
|
#define __exitdata __section(.exit.data)
|
|
@@ -83,7 +89,7 @@
|
|
#define __exit __section(.exit.text) __exitused __cold notrace
|
|
|
|
/* Used for HOTPLUG */
|
|
-#define __devinit __section(.devinit.text) __cold notrace
|
|
+#define __devinit __section(.devinit.text) __cold notrace add_latent_entropy
|
|
#define __devinitdata __section(.devinit.data)
|
|
#define __devinitconst __section(.devinit.rodata)
|
|
#define __devexit __section(.devexit.text) __exitused __cold notrace
|
|
@@ -91,7 +97,7 @@
|
|
#define __devexitconst __section(.devexit.rodata)
|
|
|
|
/* Used for HOTPLUG_CPU */
|
|
-#define __cpuinit __section(.cpuinit.text) __cold notrace
|
|
+#define __cpuinit __section(.cpuinit.text) __cold notrace add_latent_entropy
|
|
#define __cpuinitdata __section(.cpuinit.data)
|
|
#define __cpuinitconst __section(.cpuinit.rodata)
|
|
#define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
|
|
@@ -99,7 +105,7 @@
|
|
#define __cpuexitconst __section(.cpuexit.rodata)
|
|
|
|
/* Used for MEMORY_HOTPLUG */
|
|
-#define __meminit __section(.meminit.text) __cold notrace
|
|
+#define __meminit __section(.meminit.text) __cold notrace add_latent_entropy
|
|
#define __meminitdata __section(.meminit.data)
|
|
#define __meminitconst __section(.meminit.rodata)
|
|
#define __memexit __section(.memexit.text) __exitused __cold notrace
|
|
@@ -294,13 +300,13 @@ void __init parse_early_options(char *cmdline);
|
|
|
|
/* Each module must use one module_init(). */
|
|
#define module_init(initfn) \
|
|
- static inline initcall_t __inittest(void) \
|
|
+ static inline __used initcall_t __inittest(void) \
|
|
{ return initfn; } \
|
|
int init_module(void) __attribute__((alias(#initfn)));
|
|
|
|
/* This is only required if you want to be unloadable. */
|
|
#define module_exit(exitfn) \
|
|
- static inline exitcall_t __exittest(void) \
|
|
+ static inline __used exitcall_t __exittest(void) \
|
|
{ return exitfn; } \
|
|
void cleanup_module(void) __attribute__((alias(#exitfn)));
|
|
|
|
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
|
|
index b11e298..a2acf7b 100644
|
|
--- a/include/linux/init_task.h
|
|
+++ b/include/linux/init_task.h
|
|
@@ -144,6 +144,12 @@ extern struct task_group root_task_group;
|
|
|
|
#define INIT_TASK_COMM "swapper"
|
|
|
|
+#ifdef CONFIG_X86
|
|
+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
|
|
+#else
|
|
+#define INIT_TASK_THREAD_INFO
|
|
+#endif
|
|
+
|
|
/*
|
|
* INIT_TASK is used to set up the first task table, touch at
|
|
* your own risk!. Base=0, limit=0x1fffff (=2MB)
|
|
@@ -183,6 +189,7 @@ extern struct task_group root_task_group;
|
|
RCU_INIT_POINTER(.cred, &init_cred), \
|
|
.comm = INIT_TASK_COMM, \
|
|
.thread = INIT_THREAD, \
|
|
+ INIT_TASK_THREAD_INFO \
|
|
.fs = &init_fs, \
|
|
.files = &init_files, \
|
|
.signal = &init_signals, \
|
|
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
|
|
index e6ca56d..8583707 100644
|
|
--- a/include/linux/intel-iommu.h
|
|
+++ b/include/linux/intel-iommu.h
|
|
@@ -296,7 +296,7 @@ struct iommu_flush {
|
|
u8 fm, u64 type);
|
|
void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
|
|
unsigned int size_order, u64 type);
|
|
-};
|
|
+} __no_const;
|
|
|
|
enum {
|
|
SR_DMAR_FECTL_REG,
|
|
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
|
|
index 01b2063..b8a454d 100644
|
|
--- a/include/linux/interrupt.h
|
|
+++ b/include/linux/interrupt.h
|
|
@@ -468,7 +468,7 @@ enum
|
|
/* map softirq index to softirq name. update 'softirq_to_name' in
|
|
* kernel/softirq.c when adding a new softirq.
|
|
*/
|
|
-extern char *softirq_to_name[NR_SOFTIRQS];
|
|
+extern const char * const softirq_to_name[NR_SOFTIRQS];
|
|
|
|
/* softirq mask and active fields moved to irq_cpustat_t in
|
|
* asm/hardirq.h to get better cache usage. KAO
|
|
@@ -476,12 +476,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
|
|
|
|
struct softirq_action
|
|
{
|
|
- void (*action)(struct softirq_action *);
|
|
+ void (*action)(void);
|
|
};
|
|
|
|
asmlinkage void do_softirq(void);
|
|
asmlinkage void __do_softirq(void);
|
|
-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
|
|
+extern void open_softirq(int nr, void (*action)(void));
|
|
extern void softirq_init(void);
|
|
extern void __raise_softirq_irqoff(unsigned int nr);
|
|
|
|
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
|
|
index c4d2fc1..5df9c19 100644
|
|
--- a/include/linux/kgdb.h
|
|
+++ b/include/linux/kgdb.h
|
|
@@ -53,7 +53,7 @@ extern int kgdb_connected;
|
|
extern int kgdb_io_module_registered;
|
|
|
|
extern atomic_t kgdb_setting_breakpoint;
|
|
-extern atomic_t kgdb_cpu_doing_single_step;
|
|
+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
|
|
|
|
extern struct task_struct *kgdb_usethread;
|
|
extern struct task_struct *kgdb_contthread;
|
|
@@ -252,7 +252,7 @@ struct kgdb_arch {
|
|
void (*disable_hw_break)(struct pt_regs *regs);
|
|
void (*remove_all_hw_break)(void);
|
|
void (*correct_hw_break)(void);
|
|
-};
|
|
+} __do_const;
|
|
|
|
/**
|
|
* struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
|
|
@@ -277,7 +277,7 @@ struct kgdb_io {
|
|
void (*pre_exception) (void);
|
|
void (*post_exception) (void);
|
|
int is_console;
|
|
-};
|
|
+} __do_const;
|
|
|
|
extern struct kgdb_arch arch_kgdb_ops;
|
|
|
|
diff --git a/include/linux/kref.h b/include/linux/kref.h
|
|
index 6f515f2..cfa132f 100644
|
|
--- a/include/linux/kref.h
|
|
+++ b/include/linux/kref.h
|
|
@@ -64,7 +64,7 @@ static inline void kref_get(struct kref *kref)
|
|
static inline int kref_sub(struct kref *kref, unsigned int count,
|
|
void (*release)(struct kref *kref))
|
|
{
|
|
- WARN_ON(release == NULL);
|
|
+ BUG_ON(release == NULL);
|
|
|
|
if (atomic_sub_and_test((int) count, &kref->refcount)) {
|
|
release(kref);
|
|
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
|
|
index c6fb815..5d5f90e 100644
|
|
--- a/include/linux/kvm_host.h
|
|
+++ b/include/linux/kvm_host.h
|
|
@@ -322,7 +322,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
|
|
void vcpu_load(struct kvm_vcpu *vcpu);
|
|
void vcpu_put(struct kvm_vcpu *vcpu);
|
|
|
|
-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
|
|
+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
|
|
struct module *module);
|
|
void kvm_exit(void);
|
|
|
|
@@ -486,7 +486,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
|
|
struct kvm_guest_debug *dbg);
|
|
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
|
|
|
|
-int kvm_arch_init(void *opaque);
|
|
+int kvm_arch_init(const void *opaque);
|
|
void kvm_arch_exit(void);
|
|
|
|
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
|
|
diff --git a/include/linux/libata.h b/include/linux/libata.h
|
|
index 35e7f71..b9cdb75 100644
|
|
--- a/include/linux/libata.h
|
|
+++ b/include/linux/libata.h
|
|
@@ -923,7 +923,7 @@ struct ata_port_operations {
|
|
* fields must be pointers.
|
|
*/
|
|
const struct ata_port_operations *inherits;
|
|
-};
|
|
+} __do_const;
|
|
|
|
struct ata_port_info {
|
|
unsigned long flags;
|
|
diff --git a/include/linux/mca.h b/include/linux/mca.h
|
|
index 3797270..7765ede 100644
|
|
--- a/include/linux/mca.h
|
|
+++ b/include/linux/mca.h
|
|
@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
|
|
int region);
|
|
void * (*mca_transform_memory)(struct mca_device *,
|
|
void *memory);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct mca_bus {
|
|
u64 default_dma_mask;
|
|
diff --git a/include/linux/memory.h b/include/linux/memory.h
|
|
index ff9a9f8..c715deb 100644
|
|
--- a/include/linux/memory.h
|
|
+++ b/include/linux/memory.h
|
|
@@ -143,7 +143,7 @@ struct memory_accessor {
|
|
size_t count);
|
|
ssize_t (*write)(struct memory_accessor *, const char *buf,
|
|
off_t offset, size_t count);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/*
|
|
* Kernel text modification mutex, used for code patching. Users of this lock
|
|
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
|
|
index ee96cd5..7823c3a 100644
|
|
--- a/include/linux/mfd/abx500.h
|
|
+++ b/include/linux/mfd/abx500.h
|
|
@@ -455,6 +455,7 @@ struct abx500_ops {
|
|
int (*event_registers_startup_state_get) (struct device *, u8 *);
|
|
int (*startup_irq_enabled) (struct device *, unsigned int);
|
|
};
|
|
+typedef struct abx500_ops __no_const abx500_ops_no_const;
|
|
|
|
int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
|
|
void abx500_remove_ops(struct device *dev);
|
|
diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
|
|
index 9b07725..3d55001 100644
|
|
--- a/include/linux/mfd/abx500/ux500_chargalg.h
|
|
+++ b/include/linux/mfd/abx500/ux500_chargalg.h
|
|
@@ -19,7 +19,7 @@ struct ux500_charger_ops {
|
|
int (*enable) (struct ux500_charger *, int, int, int);
|
|
int (*kick_wd) (struct ux500_charger *);
|
|
int (*update_curr) (struct ux500_charger *, int);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/**
|
|
* struct ux500_charger - power supply ux500 charger sub class
|
|
diff --git a/include/linux/mm.h b/include/linux/mm.h
|
|
index e2309a7..6bd668b 100644
|
|
--- a/include/linux/mm.h
|
|
+++ b/include/linux/mm.h
|
|
@@ -119,7 +119,14 @@ extern unsigned int kobjsize(const void *objp);
|
|
|
|
#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
|
|
#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
|
|
+
|
|
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
|
|
+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
|
|
+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
|
|
+#else
|
|
#define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
|
|
+#endif
|
|
+
|
|
#define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
|
|
#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
|
|
|
|
@@ -1047,34 +1054,6 @@ int set_page_dirty(struct page *page);
|
|
int set_page_dirty_lock(struct page *page);
|
|
int clear_page_dirty_for_io(struct page *page);
|
|
|
|
-/* Is the vma a continuation of the stack vma above it? */
|
|
-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
|
|
-{
|
|
- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
|
|
-}
|
|
-
|
|
-static inline int stack_guard_page_start(struct vm_area_struct *vma,
|
|
- unsigned long addr)
|
|
-{
|
|
- return (vma->vm_flags & VM_GROWSDOWN) &&
|
|
- (vma->vm_start == addr) &&
|
|
- !vma_growsdown(vma->vm_prev, addr);
|
|
-}
|
|
-
|
|
-/* Is the vma a continuation of the stack vma below it? */
|
|
-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
|
|
-{
|
|
- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
|
|
-}
|
|
-
|
|
-static inline int stack_guard_page_end(struct vm_area_struct *vma,
|
|
- unsigned long addr)
|
|
-{
|
|
- return (vma->vm_flags & VM_GROWSUP) &&
|
|
- (vma->vm_end == addr) &&
|
|
- !vma_growsup(vma->vm_next, addr);
|
|
-}
|
|
-
|
|
extern pid_t
|
|
vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
|
|
|
|
@@ -1178,6 +1157,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
|
|
}
|
|
#endif
|
|
|
|
+#ifdef CONFIG_MMU
|
|
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
|
|
+#else
|
|
+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
|
|
+{
|
|
+ return __pgprot(0);
|
|
+}
|
|
+#endif
|
|
+
|
|
int vma_wants_writenotify(struct vm_area_struct *vma);
|
|
|
|
extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
|
|
@@ -1196,8 +1184,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
|
|
{
|
|
return 0;
|
|
}
|
|
+
|
|
+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
|
|
+ unsigned long address)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
#else
|
|
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
|
|
+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
|
|
#endif
|
|
|
|
#ifdef __PAGETABLE_PMD_FOLDED
|
|
@@ -1206,8 +1201,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
|
|
{
|
|
return 0;
|
|
}
|
|
+
|
|
+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
|
|
+ unsigned long address)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
#else
|
|
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
|
|
+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
|
|
#endif
|
|
|
|
int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
@@ -1225,11 +1227,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
|
|
NULL: pud_offset(pgd, address);
|
|
}
|
|
|
|
+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
|
|
+{
|
|
+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
|
|
+ NULL: pud_offset(pgd, address);
|
|
+}
|
|
+
|
|
static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
|
|
{
|
|
return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
|
|
NULL: pmd_offset(pud, address);
|
|
}
|
|
+
|
|
+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
|
|
+{
|
|
+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
|
|
+ NULL: pmd_offset(pud, address);
|
|
+}
|
|
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
|
|
|
|
#if USE_SPLIT_PTLOCKS
|
|
@@ -1439,6 +1453,7 @@ extern unsigned long do_mmap(struct file *, unsigned long,
|
|
unsigned long, unsigned long,
|
|
unsigned long, unsigned long);
|
|
extern int do_munmap(struct mm_struct *, unsigned long, size_t);
|
|
+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
|
|
|
|
/* These take the mm semaphore themselves */
|
|
extern unsigned long vm_brk(unsigned long, unsigned long);
|
|
@@ -1501,6 +1516,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
|
|
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
|
|
struct vm_area_struct **pprev);
|
|
|
|
+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
|
|
+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
|
|
+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
|
|
+
|
|
/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
|
|
NULL if none. Assume start_addr < end_addr. */
|
|
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
|
|
@@ -1529,15 +1548,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
|
|
return vma;
|
|
}
|
|
|
|
-#ifdef CONFIG_MMU
|
|
-pgprot_t vm_get_page_prot(unsigned long vm_flags);
|
|
-#else
|
|
-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
|
|
-{
|
|
- return __pgprot(0);
|
|
-}
|
|
-#endif
|
|
-
|
|
struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
|
|
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
|
|
unsigned long pfn, unsigned long size, pgprot_t);
|
|
@@ -1670,7 +1680,7 @@ extern int unpoison_memory(unsigned long pfn);
|
|
extern int sysctl_memory_failure_early_kill;
|
|
extern int sysctl_memory_failure_recovery;
|
|
extern void shake_page(struct page *p, int access);
|
|
-extern atomic_long_t mce_bad_pages;
|
|
+extern atomic_long_unchecked_t mce_bad_pages;
|
|
extern int soft_offline_page(struct page *page, int flags);
|
|
|
|
extern void dump_page(struct page *page);
|
|
@@ -1701,5 +1711,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
|
|
static inline bool page_is_guard(struct page *page) { return false; }
|
|
#endif /* CONFIG_DEBUG_PAGEALLOC */
|
|
|
|
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
|
|
+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
|
|
+#else
|
|
+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
|
|
+#endif
|
|
+
|
|
#endif /* __KERNEL__ */
|
|
#endif /* _LINUX_MM_H */
|
|
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
|
|
index cb33f1a..087e450 100644
|
|
--- a/include/linux/mm_types.h
|
|
+++ b/include/linux/mm_types.h
|
|
@@ -267,6 +267,8 @@ struct vm_area_struct {
|
|
#ifdef CONFIG_NUMA
|
|
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
|
|
#endif
|
|
+
|
|
+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
|
|
};
|
|
|
|
struct core_thread {
|
|
@@ -342,7 +344,7 @@ struct mm_struct {
|
|
unsigned long def_flags;
|
|
unsigned long nr_ptes; /* Page table pages */
|
|
unsigned long start_code, end_code, start_data, end_data;
|
|
- unsigned long start_brk, brk, start_stack;
|
|
+ unsigned long brk_gap, start_brk, brk, start_stack;
|
|
unsigned long arg_start, arg_end, env_start, env_end;
|
|
|
|
unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
|
|
@@ -393,6 +395,24 @@ struct mm_struct {
|
|
#ifdef CONFIG_CPUMASK_OFFSTACK
|
|
struct cpumask cpumask_allocation;
|
|
#endif
|
|
+
|
|
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
|
|
+ unsigned long pax_flags;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_DLRESOLVE
|
|
+ unsigned long call_dl_resolve;
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
|
|
+ unsigned long call_syscall;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_ASLR
|
|
+ unsigned long delta_mmap; /* randomized offset */
|
|
+ unsigned long delta_stack; /* randomized offset */
|
|
+#endif
|
|
+
|
|
};
|
|
|
|
static inline void mm_init_cpumask(struct mm_struct *mm)
|
|
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
|
|
index ee2baf0..e24a58c 100644
|
|
--- a/include/linux/mmu_notifier.h
|
|
+++ b/include/linux/mmu_notifier.h
|
|
@@ -256,12 +256,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
|
|
*/
|
|
#define ptep_clear_flush_notify(__vma, __address, __ptep) \
|
|
({ \
|
|
- pte_t __pte; \
|
|
+ pte_t ___pte; \
|
|
struct vm_area_struct *___vma = __vma; \
|
|
unsigned long ___address = __address; \
|
|
- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
|
|
+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
|
|
mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
|
|
- __pte; \
|
|
+ ___pte; \
|
|
})
|
|
|
|
#define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
|
|
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
|
|
index 53b654f..c8901e4 100644
|
|
--- a/include/linux/mmzone.h
|
|
+++ b/include/linux/mmzone.h
|
|
@@ -424,7 +424,7 @@ struct zone {
|
|
unsigned long flags; /* zone flags, see below */
|
|
|
|
/* Zone statistics */
|
|
- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
|
|
+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
|
|
|
|
/*
|
|
* The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
|
|
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
|
|
index 839767f..515567e 100644
|
|
--- a/include/linux/mod_devicetable.h
|
|
+++ b/include/linux/mod_devicetable.h
|
|
@@ -12,7 +12,7 @@
|
|
typedef unsigned long kernel_ulong_t;
|
|
#endif
|
|
|
|
-#define PCI_ANY_ID (~0)
|
|
+#define PCI_ANY_ID ((__u16)~0)
|
|
|
|
struct pci_device_id {
|
|
__u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
|
|
@@ -138,7 +138,7 @@ struct usb_device_id {
|
|
#define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
|
|
#define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
|
|
|
|
-#define HID_ANY_ID (~0)
|
|
+#define HID_ANY_ID (~0U)
|
|
|
|
struct hid_device_id {
|
|
__u16 bus;
|
|
diff --git a/include/linux/module.h b/include/linux/module.h
|
|
index fbcafe2..e5d9587 100644
|
|
--- a/include/linux/module.h
|
|
+++ b/include/linux/module.h
|
|
@@ -17,6 +17,7 @@
|
|
#include <linux/moduleparam.h>
|
|
#include <linux/tracepoint.h>
|
|
#include <linux/export.h>
|
|
+#include <linux/fs.h>
|
|
|
|
#include <linux/percpu.h>
|
|
#include <asm/module.h>
|
|
@@ -273,19 +274,16 @@ struct module
|
|
int (*init)(void);
|
|
|
|
/* If this is non-NULL, vfree after init() returns */
|
|
- void *module_init;
|
|
+ void *module_init_rx, *module_init_rw;
|
|
|
|
/* Here is the actual code + data, vfree'd on unload. */
|
|
- void *module_core;
|
|
+ void *module_core_rx, *module_core_rw;
|
|
|
|
/* Here are the sizes of the init and core sections */
|
|
- unsigned int init_size, core_size;
|
|
+ unsigned int init_size_rw, core_size_rw;
|
|
|
|
/* The size of the executable code in each section. */
|
|
- unsigned int init_text_size, core_text_size;
|
|
-
|
|
- /* Size of RO sections of the module (text+rodata) */
|
|
- unsigned int init_ro_size, core_ro_size;
|
|
+ unsigned int init_size_rx, core_size_rx;
|
|
|
|
/* Arch-specific module values */
|
|
struct mod_arch_specific arch;
|
|
@@ -341,6 +339,10 @@ struct module
|
|
#ifdef CONFIG_EVENT_TRACING
|
|
struct ftrace_event_call **trace_events;
|
|
unsigned int num_trace_events;
|
|
+ struct file_operations trace_id;
|
|
+ struct file_operations trace_enable;
|
|
+ struct file_operations trace_format;
|
|
+ struct file_operations trace_filter;
|
|
#endif
|
|
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
|
|
unsigned int num_ftrace_callsites;
|
|
@@ -388,16 +390,46 @@ bool is_module_address(unsigned long addr);
|
|
bool is_module_percpu_address(unsigned long addr);
|
|
bool is_module_text_address(unsigned long addr);
|
|
|
|
+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
|
|
+{
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ if (ktla_ktva(addr) >= (unsigned long)start &&
|
|
+ ktla_ktva(addr) < (unsigned long)start + size)
|
|
+ return 1;
|
|
+#endif
|
|
+
|
|
+ return ((void *)addr >= start && (void *)addr < start + size);
|
|
+}
|
|
+
|
|
+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
|
|
+{
|
|
+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
|
|
+}
|
|
+
|
|
+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
|
|
+{
|
|
+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
|
|
+}
|
|
+
|
|
+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
|
|
+{
|
|
+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
|
|
+}
|
|
+
|
|
+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
|
|
+{
|
|
+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
|
|
+}
|
|
+
|
|
static inline int within_module_core(unsigned long addr, struct module *mod)
|
|
{
|
|
- return (unsigned long)mod->module_core <= addr &&
|
|
- addr < (unsigned long)mod->module_core + mod->core_size;
|
|
+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
|
|
}
|
|
|
|
static inline int within_module_init(unsigned long addr, struct module *mod)
|
|
{
|
|
- return (unsigned long)mod->module_init <= addr &&
|
|
- addr < (unsigned long)mod->module_init + mod->init_size;
|
|
+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
|
|
}
|
|
|
|
/* Search for module by name: must hold module_mutex. */
|
|
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
|
|
index b2be02e..72d2f78 100644
|
|
--- a/include/linux/moduleloader.h
|
|
+++ b/include/linux/moduleloader.h
|
|
@@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
|
|
|
|
/* Allocator used for allocating struct module, core sections and init
|
|
sections. Returns NULL on failure. */
|
|
-void *module_alloc(unsigned long size);
|
|
+void *module_alloc(unsigned long size) __size_overflow(1);
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+void *module_alloc_exec(unsigned long size) __size_overflow(1);
|
|
+#else
|
|
+#define module_alloc_exec(x) module_alloc(x)
|
|
+#endif
|
|
|
|
/* Free memory returned from module_alloc. */
|
|
void module_free(struct module *mod, void *module_region);
|
|
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+void module_free_exec(struct module *mod, void *module_region);
|
|
+#else
|
|
+#define module_free_exec(x, y) module_free((x), (y))
|
|
+#endif
|
|
+
|
|
/* Apply the given relocation to the (simplified) ELF. Return -error
|
|
or 0. */
|
|
int apply_relocate(Elf_Shdr *sechdrs,
|
|
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
|
|
index 944bc18..042d291 100644
|
|
--- a/include/linux/moduleparam.h
|
|
+++ b/include/linux/moduleparam.h
|
|
@@ -286,7 +286,7 @@ static inline void __kernel_param_unlock(void)
|
|
* @len is usually just sizeof(string).
|
|
*/
|
|
#define module_param_string(name, string, len, perm) \
|
|
- static const struct kparam_string __param_string_##name \
|
|
+ static const struct kparam_string __param_string_##name __used \
|
|
= { len, string }; \
|
|
__module_param_call(MODULE_PARAM_PREFIX, name, \
|
|
¶m_ops_string, \
|
|
@@ -424,7 +424,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
|
|
*/
|
|
#define module_param_array_named(name, array, type, nump, perm) \
|
|
param_check_##type(name, &(array)[0]); \
|
|
- static const struct kparam_array __param_arr_##name \
|
|
+ static const struct kparam_array __param_arr_##name __used \
|
|
= { .max = ARRAY_SIZE(array), .num = nump, \
|
|
.ops = ¶m_ops_##type, \
|
|
.elemsize = sizeof(array[0]), .elem = array }; \
|
|
diff --git a/include/linux/namei.h b/include/linux/namei.h
|
|
index ffc0213..2c1f2cb 100644
|
|
--- a/include/linux/namei.h
|
|
+++ b/include/linux/namei.h
|
|
@@ -24,7 +24,7 @@ struct nameidata {
|
|
unsigned seq;
|
|
int last_type;
|
|
unsigned depth;
|
|
- char *saved_names[MAX_NESTED_LINKS + 1];
|
|
+ const char *saved_names[MAX_NESTED_LINKS + 1];
|
|
|
|
/* Intent data */
|
|
union {
|
|
@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
|
|
extern struct dentry *lock_rename(struct dentry *, struct dentry *);
|
|
extern void unlock_rename(struct dentry *, struct dentry *);
|
|
|
|
-static inline void nd_set_link(struct nameidata *nd, char *path)
|
|
+static inline void nd_set_link(struct nameidata *nd, const char *path)
|
|
{
|
|
nd->saved_names[nd->depth] = path;
|
|
}
|
|
|
|
-static inline char *nd_get_link(struct nameidata *nd)
|
|
+static inline const char *nd_get_link(const struct nameidata *nd)
|
|
{
|
|
return nd->saved_names[nd->depth];
|
|
}
|
|
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
|
|
index 5b46501..fe4eead 100644
|
|
--- a/include/linux/netdevice.h
|
|
+++ b/include/linux/netdevice.h
|
|
@@ -1003,6 +1003,7 @@ struct net_device_ops {
|
|
int (*ndo_neigh_construct)(struct neighbour *n);
|
|
void (*ndo_neigh_destroy)(struct neighbour *n);
|
|
};
|
|
+typedef struct net_device_ops __no_const net_device_ops_no_const;
|
|
|
|
/*
|
|
* The DEVICE structure.
|
|
@@ -1064,7 +1065,7 @@ struct net_device {
|
|
int iflink;
|
|
|
|
struct net_device_stats stats;
|
|
- atomic_long_t rx_dropped; /* dropped packets by core network
|
|
+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
|
|
* Do not use this in drivers.
|
|
*/
|
|
|
|
diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
|
|
index c65a18a..0c05f3a 100644
|
|
--- a/include/linux/of_pdt.h
|
|
+++ b/include/linux/of_pdt.h
|
|
@@ -32,7 +32,7 @@ struct of_pdt_ops {
|
|
|
|
/* return 0 on success; fill in 'len' with number of bytes in path */
|
|
int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
|
|
-};
|
|
+} __no_const;
|
|
|
|
extern void *prom_early_alloc(unsigned long size);
|
|
|
|
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
|
|
index a4c5624..79d6d88 100644
|
|
--- a/include/linux/oprofile.h
|
|
+++ b/include/linux/oprofile.h
|
|
@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
|
|
int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
|
|
char const * name, ulong * val);
|
|
|
|
-/** Create a file for read-only access to an atomic_t. */
|
|
+/** Create a file for read-only access to an atomic_unchecked_t. */
|
|
int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
|
|
- char const * name, atomic_t * val);
|
|
+ char const * name, atomic_unchecked_t * val);
|
|
|
|
/** create a directory */
|
|
struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
|
|
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
|
|
index efe0889..e23ff9b 100644
|
|
--- a/include/linux/perf_event.h
|
|
+++ b/include/linux/perf_event.h
|
|
@@ -884,8 +884,8 @@ struct perf_event {
|
|
|
|
enum perf_event_active_state state;
|
|
unsigned int attach_state;
|
|
- local64_t count;
|
|
- atomic64_t child_count;
|
|
+ local64_t count; /* PaX: fix it one day */
|
|
+ atomic64_unchecked_t child_count;
|
|
|
|
/*
|
|
* These are the total time in nanoseconds that the event
|
|
@@ -936,8 +936,8 @@ struct perf_event {
|
|
* These accumulate total time (in nanoseconds) that children
|
|
* events have been enabled and running, respectively.
|
|
*/
|
|
- atomic64_t child_total_time_enabled;
|
|
- atomic64_t child_total_time_running;
|
|
+ atomic64_unchecked_t child_total_time_enabled;
|
|
+ atomic64_unchecked_t child_total_time_running;
|
|
|
|
/*
|
|
* Protect attach/detach and child_list:
|
|
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
|
|
index e1ac1ce..0675fed 100644
|
|
--- a/include/linux/pipe_fs_i.h
|
|
+++ b/include/linux/pipe_fs_i.h
|
|
@@ -45,9 +45,9 @@ struct pipe_buffer {
|
|
struct pipe_inode_info {
|
|
wait_queue_head_t wait;
|
|
unsigned int nrbufs, curbuf, buffers;
|
|
- unsigned int readers;
|
|
- unsigned int writers;
|
|
- unsigned int waiting_writers;
|
|
+ atomic_t readers;
|
|
+ atomic_t writers;
|
|
+ atomic_t waiting_writers;
|
|
unsigned int r_counter;
|
|
unsigned int w_counter;
|
|
struct page *tmp_page;
|
|
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
|
|
index f271860..6b3bec5 100644
|
|
--- a/include/linux/pm_runtime.h
|
|
+++ b/include/linux/pm_runtime.h
|
|
@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
|
|
|
|
static inline void pm_runtime_mark_last_busy(struct device *dev)
|
|
{
|
|
- ACCESS_ONCE(dev->power.last_busy) = jiffies;
|
|
+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
|
|
}
|
|
|
|
#else /* !CONFIG_PM_RUNTIME */
|
|
diff --git a/include/linux/poison.h b/include/linux/poison.h
|
|
index 2110a81..13a11bb 100644
|
|
--- a/include/linux/poison.h
|
|
+++ b/include/linux/poison.h
|
|
@@ -19,8 +19,8 @@
|
|
* under normal circumstances, used to verify that nobody uses
|
|
* non-initialized list entries.
|
|
*/
|
|
-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
|
|
-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
|
|
+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
|
|
+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
|
|
|
|
/********** include/linux/timer.h **********/
|
|
/*
|
|
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
|
|
index 87a03c7..03fd5cbe 100644
|
|
--- a/include/linux/preempt.h
|
|
+++ b/include/linux/preempt.h
|
|
@@ -132,7 +132,7 @@ struct preempt_ops {
|
|
void (*sched_in)(struct preempt_notifier *notifier, int cpu);
|
|
void (*sched_out)(struct preempt_notifier *notifier,
|
|
struct task_struct *next);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/**
|
|
* preempt_notifier - key for installing preemption notifiers
|
|
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
|
|
index 85c5073..9508b22 100644
|
|
--- a/include/linux/proc_fs.h
|
|
+++ b/include/linux/proc_fs.h
|
|
@@ -258,7 +258,7 @@ union proc_op {
|
|
int (*proc_show)(struct seq_file *m,
|
|
struct pid_namespace *ns, struct pid *pid,
|
|
struct task_struct *task);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct ctl_table_header;
|
|
struct ctl_table;
|
|
diff --git a/include/linux/random.h b/include/linux/random.h
|
|
index 54b1fd3..81fb5da 100644
|
|
--- a/include/linux/random.h
|
|
+++ b/include/linux/random.h
|
|
@@ -53,6 +53,10 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
|
|
unsigned int value);
|
|
extern void add_interrupt_randomness(int irq, int irq_flags);
|
|
|
|
+#ifdef CONFIG_PAX_LATENT_ENTROPY
|
|
+extern void transfer_latent_entropy(void);
|
|
+#endif
|
|
+
|
|
extern void get_random_bytes(void *buf, int nbytes);
|
|
extern void get_random_bytes_arch(void *buf, int nbytes);
|
|
void generate_random_uuid(unsigned char uuid_out[16]);
|
|
@@ -70,12 +74,17 @@ void srandom32(u32 seed);
|
|
|
|
u32 prandom32(struct rnd_state *);
|
|
|
|
+static inline unsigned long pax_get_random_long(void)
|
|
+{
|
|
+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
|
|
+}
|
|
+
|
|
/*
|
|
* Handle minimum values for seeds
|
|
*/
|
|
static inline u32 __seed(u32 x, u32 m)
|
|
{
|
|
- return (x < m) ? x + m : x;
|
|
+ return (x <= m) ? x + m + 1 : x;
|
|
}
|
|
|
|
/**
|
|
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
|
|
index e0879a7..a12f962 100644
|
|
--- a/include/linux/reboot.h
|
|
+++ b/include/linux/reboot.h
|
|
@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
|
|
* Architecture-specific implementations of sys_reboot commands.
|
|
*/
|
|
|
|
-extern void machine_restart(char *cmd);
|
|
-extern void machine_halt(void);
|
|
-extern void machine_power_off(void);
|
|
+extern void machine_restart(char *cmd) __noreturn;
|
|
+extern void machine_halt(void) __noreturn;
|
|
+extern void machine_power_off(void) __noreturn;
|
|
|
|
extern void machine_shutdown(void);
|
|
struct pt_regs;
|
|
@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
|
|
*/
|
|
|
|
extern void kernel_restart_prepare(char *cmd);
|
|
-extern void kernel_restart(char *cmd);
|
|
-extern void kernel_halt(void);
|
|
-extern void kernel_power_off(void);
|
|
+extern void kernel_restart(char *cmd) __noreturn;
|
|
+extern void kernel_halt(void) __noreturn;
|
|
+extern void kernel_power_off(void) __noreturn;
|
|
|
|
extern int C_A_D; /* for sysctl */
|
|
void ctrl_alt_del(void);
|
|
@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
|
|
* Emergency restart, callable from an interrupt handler.
|
|
*/
|
|
|
|
-extern void emergency_restart(void);
|
|
+extern void emergency_restart(void) __noreturn;
|
|
#include <asm/emergency-restart.h>
|
|
|
|
#endif
|
|
diff --git a/include/linux/relay.h b/include/linux/relay.h
|
|
index 91cacc3..b55ff74 100644
|
|
--- a/include/linux/relay.h
|
|
+++ b/include/linux/relay.h
|
|
@@ -160,7 +160,7 @@ struct rchan_callbacks
|
|
* The callback should return 0 if successful, negative if not.
|
|
*/
|
|
int (*remove_buf_file)(struct dentry *dentry);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/*
|
|
* CONFIG_RELAY kernel API, kernel/relay.c
|
|
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
|
|
index 7dec42c..73a251d 100644
|
|
--- a/include/linux/rfkill.h
|
|
+++ b/include/linux/rfkill.h
|
|
@@ -147,6 +147,7 @@ struct rfkill_ops {
|
|
void (*query)(struct rfkill *rfkill, void *data);
|
|
int (*set_block)(void *data, bool blocked);
|
|
};
|
|
+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
|
|
|
|
#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
|
|
/**
|
|
diff --git a/include/linux/rio.h b/include/linux/rio.h
|
|
index 4d50611..c6858a2 100644
|
|
--- a/include/linux/rio.h
|
|
+++ b/include/linux/rio.h
|
|
@@ -315,7 +315,7 @@ struct rio_ops {
|
|
int mbox, void *buffer, size_t len);
|
|
int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
|
|
void *(*get_inb_message)(struct rio_mport *mport, int mbox);
|
|
-};
|
|
+} __no_const;
|
|
|
|
#define RIO_RESOURCE_MEM 0x00000100
|
|
#define RIO_RESOURCE_DOORBELL 0x00000200
|
|
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
|
|
index 2f69ff4..65b7009 100644
|
|
--- a/include/linux/rmap.h
|
|
+++ b/include/linux/rmap.h
|
|
@@ -140,9 +140,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
|
|
void anon_vma_init(void); /* create anon_vma_cachep */
|
|
int anon_vma_prepare(struct vm_area_struct *);
|
|
void unlink_anon_vmas(struct vm_area_struct *);
|
|
-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
|
|
+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
|
|
void anon_vma_moveto_tail(struct vm_area_struct *);
|
|
-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
|
|
+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
|
|
|
|
static inline void anon_vma_merge(struct vm_area_struct *vma,
|
|
struct vm_area_struct *next)
|
|
diff --git a/include/linux/sched.h b/include/linux/sched.h
|
|
index 3fd25d8..d04575f 100644
|
|
--- a/include/linux/sched.h
|
|
+++ b/include/linux/sched.h
|
|
@@ -100,6 +100,7 @@ struct bio_list;
|
|
struct fs_struct;
|
|
struct perf_event_context;
|
|
struct blk_plug;
|
|
+struct linux_binprm;
|
|
|
|
/*
|
|
* List of flags we want to share for kernel threads,
|
|
@@ -386,10 +387,13 @@ struct user_namespace;
|
|
#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
|
|
|
|
extern int sysctl_max_map_count;
|
|
+extern unsigned long sysctl_heap_stack_gap;
|
|
|
|
#include <linux/aio.h>
|
|
|
|
#ifdef CONFIG_MMU
|
|
+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
|
|
+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
|
|
extern void arch_pick_mmap_layout(struct mm_struct *mm);
|
|
extern unsigned long
|
|
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
|
|
@@ -1401,8 +1405,8 @@ struct task_struct {
|
|
struct list_head thread_node;
|
|
|
|
struct completion *vfork_done; /* for vfork() */
|
|
- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
|
|
- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
|
|
+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
|
|
+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
|
|
|
|
cputime_t utime, stime, utimescaled, stimescaled;
|
|
cputime_t gtime;
|
|
@@ -1441,6 +1445,10 @@ struct task_struct {
|
|
#endif
|
|
/* CPU-specific state of this task */
|
|
struct thread_struct thread;
|
|
+/* thread_info moved to task_struct */
|
|
+#ifdef CONFIG_X86
|
|
+ struct thread_info tinfo;
|
|
+#endif
|
|
/* filesystem information */
|
|
struct fs_struct *fs;
|
|
/* open file information */
|
|
@@ -1634,6 +1642,51 @@ struct task_struct {
|
|
#endif
|
|
};
|
|
|
|
+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
|
|
+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
|
|
+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
|
|
+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
|
|
+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
|
|
+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
|
|
+
|
|
+#ifdef CONFIG_PAX_SOFTMODE
|
|
+extern int pax_softmode;
|
|
+#endif
|
|
+
|
|
+extern int pax_check_flags(unsigned long *);
|
|
+
|
|
+/* if tsk != current then task_lock must be held on it */
|
|
+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
|
|
+static inline unsigned long pax_get_flags(struct task_struct *tsk)
|
|
+{
|
|
+ if (likely(tsk->mm))
|
|
+ return tsk->mm->pax_flags;
|
|
+ else
|
|
+ return 0UL;
|
|
+}
|
|
+
|
|
+/* if tsk != current then task_lock must be held on it */
|
|
+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
|
|
+{
|
|
+ if (likely(tsk->mm)) {
|
|
+ tsk->mm->pax_flags = flags;
|
|
+ return 0;
|
|
+ }
|
|
+ return -EINVAL;
|
|
+}
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
|
|
+extern void pax_set_initial_flags(struct linux_binprm *bprm);
|
|
+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
|
|
+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
|
|
+#endif
|
|
+
|
|
+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
|
|
+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
|
|
+extern void pax_report_refcount_overflow(struct pt_regs *regs);
|
|
+extern void check_object_size(const void *ptr, unsigned long n, bool to);
|
|
+
|
|
/* Future-safe accessor for struct task_struct's cpus_allowed. */
|
|
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
|
|
|
|
@@ -2206,7 +2259,9 @@ void yield(void);
|
|
extern struct exec_domain default_exec_domain;
|
|
|
|
union thread_union {
|
|
+#ifndef CONFIG_X86
|
|
struct thread_info thread_info;
|
|
+#endif
|
|
unsigned long stack[THREAD_SIZE/sizeof(long)];
|
|
};
|
|
|
|
@@ -2382,7 +2437,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
|
|
extern void exit_itimers(struct signal_struct *);
|
|
extern void flush_itimer_signals(void);
|
|
|
|
-extern void do_group_exit(int);
|
|
+extern __noreturn void do_group_exit(int);
|
|
|
|
extern void daemonize(const char *, ...);
|
|
extern int allow_signal(int);
|
|
@@ -2583,9 +2638,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
|
|
|
|
#endif
|
|
|
|
-static inline int object_is_on_stack(void *obj)
|
|
+static inline int object_starts_on_stack(void *obj)
|
|
{
|
|
- void *stack = task_stack_page(current);
|
|
+ const void *stack = task_stack_page(current);
|
|
|
|
return (obj >= stack) && (obj < (stack + THREAD_SIZE));
|
|
}
|
|
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
|
|
index 899fbb4..1cb4138 100644
|
|
--- a/include/linux/screen_info.h
|
|
+++ b/include/linux/screen_info.h
|
|
@@ -43,7 +43,8 @@ struct screen_info {
|
|
__u16 pages; /* 0x32 */
|
|
__u16 vesa_attributes; /* 0x34 */
|
|
__u32 capabilities; /* 0x36 */
|
|
- __u8 _reserved[6]; /* 0x3a */
|
|
+ __u16 vesapm_size; /* 0x3a */
|
|
+ __u8 _reserved[4]; /* 0x3c */
|
|
} __attribute__((packed));
|
|
|
|
#define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
|
|
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
|
|
index e156ce1..109b800 100644
|
|
--- a/include/linux/seq_file.h
|
|
+++ b/include/linux/seq_file.h
|
|
@@ -35,6 +35,7 @@ struct seq_operations {
|
|
void * (*next) (struct seq_file *m, void *v, loff_t *pos);
|
|
int (*show) (struct seq_file *m, void *v);
|
|
};
|
|
+typedef struct seq_operations __no_const seq_operations_no_const;
|
|
|
|
#define SEQ_SKIP 1
|
|
|
|
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
|
|
index 4424db2..483358e 100644
|
|
--- a/include/linux/skbuff.h
|
|
+++ b/include/linux/skbuff.h
|
|
@@ -673,7 +673,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
|
|
*/
|
|
static inline int skb_queue_empty(const struct sk_buff_head *list)
|
|
{
|
|
- return list->next == (struct sk_buff *)list;
|
|
+ return list->next == (const struct sk_buff *)list;
|
|
}
|
|
|
|
/**
|
|
@@ -686,7 +686,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
|
|
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
|
|
const struct sk_buff *skb)
|
|
{
|
|
- return skb->next == (struct sk_buff *)list;
|
|
+ return skb->next == (const struct sk_buff *)list;
|
|
}
|
|
|
|
/**
|
|
@@ -699,7 +699,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
|
|
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
|
|
const struct sk_buff *skb)
|
|
{
|
|
- return skb->prev == (struct sk_buff *)list;
|
|
+ return skb->prev == (const struct sk_buff *)list;
|
|
}
|
|
|
|
/**
|
|
@@ -1612,7 +1612,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
|
|
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
|
|
*/
|
|
#ifndef NET_SKB_PAD
|
|
-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
|
|
+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
|
|
#endif
|
|
|
|
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
|
|
diff --git a/include/linux/slab.h b/include/linux/slab.h
|
|
index 67d5d94..bbd740b 100644
|
|
--- a/include/linux/slab.h
|
|
+++ b/include/linux/slab.h
|
|
@@ -11,12 +11,20 @@
|
|
|
|
#include <linux/gfp.h>
|
|
#include <linux/types.h>
|
|
+#include <linux/err.h>
|
|
|
|
/*
|
|
* Flags to pass to kmem_cache_create().
|
|
* The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
|
|
*/
|
|
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
|
|
+
|
|
+#ifdef CONFIG_PAX_USERCOPY_SLABS
|
|
+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
|
|
+#else
|
|
+#define SLAB_USERCOPY 0x00000000UL
|
|
+#endif
|
|
+
|
|
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
|
|
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
|
|
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
|
|
@@ -87,10 +95,13 @@
|
|
* ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
|
|
* Both make kfree a no-op.
|
|
*/
|
|
-#define ZERO_SIZE_PTR ((void *)16)
|
|
+#define ZERO_SIZE_PTR \
|
|
+({ \
|
|
+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
|
|
+ (void *)(-MAX_ERRNO-1L); \
|
|
+})
|
|
|
|
-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
|
|
- (unsigned long)ZERO_SIZE_PTR)
|
|
+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
|
|
|
|
/*
|
|
* struct kmem_cache related prototypes
|
|
@@ -161,6 +172,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
|
|
void kfree(const void *);
|
|
void kzfree(const void *);
|
|
size_t ksize(const void *);
|
|
+const char *check_heap_object(const void *ptr, unsigned long n, bool to);
|
|
+bool is_usercopy_object(const void *ptr);
|
|
|
|
/*
|
|
* Allocator specific definitions. These are mainly used to establish optimized
|
|
@@ -298,7 +311,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
|
|
*/
|
|
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
|
|
(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
|
|
-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
|
|
+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
|
|
#define kmalloc_track_caller(size, flags) \
|
|
__kmalloc_track_caller(size, flags, _RET_IP_)
|
|
#else
|
|
@@ -317,7 +330,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
|
|
*/
|
|
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
|
|
(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
|
|
-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
|
|
+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
|
|
#define kmalloc_node_track_caller(size, flags, node) \
|
|
__kmalloc_node_track_caller(size, flags, node, \
|
|
_RET_IP_)
|
|
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
|
|
index fbd1117..0a3d314 100644
|
|
--- a/include/linux/slab_def.h
|
|
+++ b/include/linux/slab_def.h
|
|
@@ -66,10 +66,10 @@ struct kmem_cache {
|
|
unsigned long node_allocs;
|
|
unsigned long node_frees;
|
|
unsigned long node_overflow;
|
|
- atomic_t allochit;
|
|
- atomic_t allocmiss;
|
|
- atomic_t freehit;
|
|
- atomic_t freemiss;
|
|
+ atomic_unchecked_t allochit;
|
|
+ atomic_unchecked_t allocmiss;
|
|
+ atomic_unchecked_t freehit;
|
|
+ atomic_unchecked_t freemiss;
|
|
|
|
/*
|
|
* If debugging is enabled, then the allocator can add additional
|
|
@@ -103,11 +103,16 @@ struct cache_sizes {
|
|
#ifdef CONFIG_ZONE_DMA
|
|
struct kmem_cache *cs_dmacachep;
|
|
#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_USERCOPY_SLABS
|
|
+ struct kmem_cache *cs_usercopycachep;
|
|
+#endif
|
|
+
|
|
};
|
|
extern struct cache_sizes malloc_sizes[];
|
|
|
|
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
|
|
-void *__kmalloc(size_t size, gfp_t flags);
|
|
+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
|
|
|
|
#ifdef CONFIG_TRACING
|
|
extern void *kmem_cache_alloc_trace(size_t size,
|
|
@@ -150,6 +155,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
|
|
cachep = malloc_sizes[i].cs_dmacachep;
|
|
else
|
|
#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_USERCOPY_SLABS
|
|
+ if (flags & GFP_USERCOPY)
|
|
+ cachep = malloc_sizes[i].cs_usercopycachep;
|
|
+ else
|
|
+#endif
|
|
+
|
|
cachep = malloc_sizes[i].cs_cachep;
|
|
|
|
ret = kmem_cache_alloc_trace(size, cachep, flags);
|
|
@@ -160,7 +172,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
|
|
}
|
|
|
|
#ifdef CONFIG_NUMA
|
|
-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
|
|
+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
|
|
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
|
|
|
|
#ifdef CONFIG_TRACING
|
|
@@ -203,6 +215,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
|
|
cachep = malloc_sizes[i].cs_dmacachep;
|
|
else
|
|
#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_USERCOPY_SLABS
|
|
+ if (flags & GFP_USERCOPY)
|
|
+ cachep = malloc_sizes[i].cs_usercopycachep;
|
|
+ else
|
|
+#endif
|
|
+
|
|
cachep = malloc_sizes[i].cs_cachep;
|
|
|
|
return kmem_cache_alloc_node_trace(size, cachep, flags, node);
|
|
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
|
|
index 0ec00b3..22b4715 100644
|
|
--- a/include/linux/slob_def.h
|
|
+++ b/include/linux/slob_def.h
|
|
@@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
|
|
return kmem_cache_alloc_node(cachep, flags, -1);
|
|
}
|
|
|
|
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
|
|
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
|
|
|
|
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
|
|
{
|
|
@@ -29,7 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
|
|
return __kmalloc_node(size, flags, -1);
|
|
}
|
|
|
|
-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
|
|
+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
|
|
{
|
|
return kmalloc(size, flags);
|
|
}
|
|
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
|
|
index c2f8c8b..d992a41 100644
|
|
--- a/include/linux/slub_def.h
|
|
+++ b/include/linux/slub_def.h
|
|
@@ -92,7 +92,7 @@ struct kmem_cache {
|
|
struct kmem_cache_order_objects max;
|
|
struct kmem_cache_order_objects min;
|
|
gfp_t allocflags; /* gfp flags to use on each alloc */
|
|
- int refcount; /* Refcount for slab cache destroy */
|
|
+ atomic_t refcount; /* Refcount for slab cache destroy */
|
|
void (*ctor)(void *);
|
|
int inuse; /* Offset to metadata */
|
|
int align; /* Alignment */
|
|
@@ -153,7 +153,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
|
|
* Sorry that the following has to be that ugly but some versions of GCC
|
|
* have trouble with constant propagation and loops.
|
|
*/
|
|
-static __always_inline int kmalloc_index(size_t size)
|
|
+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
|
|
{
|
|
if (!size)
|
|
return 0;
|
|
@@ -218,7 +218,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
|
|
}
|
|
|
|
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
|
|
-void *__kmalloc(size_t size, gfp_t flags);
|
|
+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
|
|
|
|
static __always_inline void *
|
|
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
|
|
@@ -259,7 +259,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
|
|
}
|
|
#endif
|
|
|
|
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
|
|
+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
|
|
{
|
|
unsigned int order = get_order(size);
|
|
return kmalloc_order_trace(size, flags, order);
|
|
@@ -284,7 +284,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
|
|
}
|
|
|
|
#ifdef CONFIG_NUMA
|
|
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
|
|
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
|
|
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
|
|
|
|
#ifdef CONFIG_TRACING
|
|
diff --git a/include/linux/sonet.h b/include/linux/sonet.h
|
|
index de8832d..0147b46 100644
|
|
--- a/include/linux/sonet.h
|
|
+++ b/include/linux/sonet.h
|
|
@@ -61,7 +61,7 @@ struct sonet_stats {
|
|
#include <linux/atomic.h>
|
|
|
|
struct k_sonet_stats {
|
|
-#define __HANDLE_ITEM(i) atomic_t i
|
|
+#define __HANDLE_ITEM(i) atomic_unchecked_t i
|
|
__SONET_ITEMS
|
|
#undef __HANDLE_ITEM
|
|
};
|
|
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
|
|
index 523547e..2cb7140 100644
|
|
--- a/include/linux/sunrpc/clnt.h
|
|
+++ b/include/linux/sunrpc/clnt.h
|
|
@@ -174,9 +174,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
|
|
{
|
|
switch (sap->sa_family) {
|
|
case AF_INET:
|
|
- return ntohs(((struct sockaddr_in *)sap)->sin_port);
|
|
+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
|
|
case AF_INET6:
|
|
- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
|
|
+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
|
|
}
|
|
return 0;
|
|
}
|
|
@@ -209,7 +209,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
|
|
static inline bool __rpc_copy_addr4(struct sockaddr *dst,
|
|
const struct sockaddr *src)
|
|
{
|
|
- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
|
|
+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
|
|
struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
|
|
|
|
dsin->sin_family = ssin->sin_family;
|
|
@@ -312,7 +312,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
|
|
if (sa->sa_family != AF_INET6)
|
|
return 0;
|
|
|
|
- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
|
|
+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
|
|
}
|
|
|
|
#endif /* __KERNEL__ */
|
|
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
|
|
index dc0c3cc..8503fb6 100644
|
|
--- a/include/linux/sunrpc/sched.h
|
|
+++ b/include/linux/sunrpc/sched.h
|
|
@@ -106,6 +106,7 @@ struct rpc_call_ops {
|
|
void (*rpc_count_stats)(struct rpc_task *, void *);
|
|
void (*rpc_release)(void *);
|
|
};
|
|
+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
|
|
|
|
struct rpc_task_setup {
|
|
struct rpc_task *task;
|
|
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
|
|
index 0b8e3e6..33e0a01 100644
|
|
--- a/include/linux/sunrpc/svc_rdma.h
|
|
+++ b/include/linux/sunrpc/svc_rdma.h
|
|
@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
|
|
extern unsigned int svcrdma_max_requests;
|
|
extern unsigned int svcrdma_max_req_size;
|
|
|
|
-extern atomic_t rdma_stat_recv;
|
|
-extern atomic_t rdma_stat_read;
|
|
-extern atomic_t rdma_stat_write;
|
|
-extern atomic_t rdma_stat_sq_starve;
|
|
-extern atomic_t rdma_stat_rq_starve;
|
|
-extern atomic_t rdma_stat_rq_poll;
|
|
-extern atomic_t rdma_stat_rq_prod;
|
|
-extern atomic_t rdma_stat_sq_poll;
|
|
-extern atomic_t rdma_stat_sq_prod;
|
|
+extern atomic_unchecked_t rdma_stat_recv;
|
|
+extern atomic_unchecked_t rdma_stat_read;
|
|
+extern atomic_unchecked_t rdma_stat_write;
|
|
+extern atomic_unchecked_t rdma_stat_sq_starve;
|
|
+extern atomic_unchecked_t rdma_stat_rq_starve;
|
|
+extern atomic_unchecked_t rdma_stat_rq_poll;
|
|
+extern atomic_unchecked_t rdma_stat_rq_prod;
|
|
+extern atomic_unchecked_t rdma_stat_sq_poll;
|
|
+extern atomic_unchecked_t rdma_stat_sq_prod;
|
|
|
|
#define RPCRDMA_VERSION 1
|
|
|
|
diff --git a/include/linux/sysctl.h.rej b/include/linux/sysctl.h.rej
|
|
new file mode 100644
|
|
index 0000000..910abfd
|
|
--- /dev/null
|
|
+++ b/include/linux/sysctl.h.rej
|
|
@@ -0,0 +1,14 @@
|
|
+diff a/include/linux/sysctl.h b/include/linux/sysctl.h (rejected hunks)
|
|
+@@ -156,7 +156,11 @@ enum
|
|
+ KERN_BOOT_REASON = 77, /* int: identify reason system was booted */
|
|
+ };
|
|
+
|
|
+-
|
|
++#ifdef CONFIG_PAX_SOFTMODE
|
|
++enum {
|
|
++ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
|
|
++};
|
|
++#endif
|
|
+
|
|
+ /* CTL_VM names: */
|
|
+ enum
|
|
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
|
|
index ff7dc08..893e1bd 100644
|
|
--- a/include/linux/tty_ldisc.h
|
|
+++ b/include/linux/tty_ldisc.h
|
|
@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
|
|
|
|
struct module *owner;
|
|
|
|
- int refcount;
|
|
+ atomic_t refcount;
|
|
};
|
|
|
|
struct tty_ldisc {
|
|
diff --git a/include/linux/types.h b/include/linux/types.h
|
|
index 7f480db..175c256 100644
|
|
--- a/include/linux/types.h
|
|
+++ b/include/linux/types.h
|
|
@@ -220,10 +220,26 @@ typedef struct {
|
|
int counter;
|
|
} atomic_t;
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+typedef struct {
|
|
+ int counter;
|
|
+} atomic_unchecked_t;
|
|
+#else
|
|
+typedef atomic_t atomic_unchecked_t;
|
|
+#endif
|
|
+
|
|
#ifdef CONFIG_64BIT
|
|
typedef struct {
|
|
long counter;
|
|
} atomic64_t;
|
|
+
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+typedef struct {
|
|
+ long counter;
|
|
+} atomic64_unchecked_t;
|
|
+#else
|
|
+typedef atomic64_t atomic64_unchecked_t;
|
|
+#endif
|
|
#endif
|
|
|
|
struct list_head {
|
|
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
|
|
index 5ca0951..ab496a5 100644
|
|
--- a/include/linux/uaccess.h
|
|
+++ b/include/linux/uaccess.h
|
|
@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
|
|
long ret; \
|
|
mm_segment_t old_fs = get_fs(); \
|
|
\
|
|
- set_fs(KERNEL_DS); \
|
|
pagefault_disable(); \
|
|
- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
|
|
- pagefault_enable(); \
|
|
+ set_fs(KERNEL_DS); \
|
|
+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
|
|
set_fs(old_fs); \
|
|
+ pagefault_enable(); \
|
|
ret; \
|
|
})
|
|
|
|
diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
|
|
index 99c1b4d..bb94261 100644
|
|
--- a/include/linux/unaligned/access_ok.h
|
|
+++ b/include/linux/unaligned/access_ok.h
|
|
@@ -6,32 +6,32 @@
|
|
|
|
static inline u16 get_unaligned_le16(const void *p)
|
|
{
|
|
- return le16_to_cpup((__le16 *)p);
|
|
+ return le16_to_cpup((const __le16 *)p);
|
|
}
|
|
|
|
static inline u32 get_unaligned_le32(const void *p)
|
|
{
|
|
- return le32_to_cpup((__le32 *)p);
|
|
+ return le32_to_cpup((const __le32 *)p);
|
|
}
|
|
|
|
static inline u64 get_unaligned_le64(const void *p)
|
|
{
|
|
- return le64_to_cpup((__le64 *)p);
|
|
+ return le64_to_cpup((const __le64 *)p);
|
|
}
|
|
|
|
static inline u16 get_unaligned_be16(const void *p)
|
|
{
|
|
- return be16_to_cpup((__be16 *)p);
|
|
+ return be16_to_cpup((const __be16 *)p);
|
|
}
|
|
|
|
static inline u32 get_unaligned_be32(const void *p)
|
|
{
|
|
- return be32_to_cpup((__be32 *)p);
|
|
+ return be32_to_cpup((const __be32 *)p);
|
|
}
|
|
|
|
static inline u64 get_unaligned_be64(const void *p)
|
|
{
|
|
- return be64_to_cpup((__be64 *)p);
|
|
+ return be64_to_cpup((const __be64 *)p);
|
|
}
|
|
|
|
static inline void put_unaligned_le16(u16 val, void *p)
|
|
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
|
|
index 547e59c..db6ad19 100644
|
|
--- a/include/linux/usb/renesas_usbhs.h
|
|
+++ b/include/linux/usb/renesas_usbhs.h
|
|
@@ -39,7 +39,7 @@ enum {
|
|
*/
|
|
struct renesas_usbhs_driver_callback {
|
|
int (*notify_hotplug)(struct platform_device *pdev);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/*
|
|
* callback functions for platform
|
|
@@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
|
|
* VBUS control is needed for Host
|
|
*/
|
|
int (*set_vbus)(struct platform_device *pdev, int enable);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/*
|
|
* parameters for renesas usbhs
|
|
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
|
|
index 6f8fbcf..a0e9cbd 100644
|
|
--- a/include/linux/vermagic.h
|
|
+++ b/include/linux/vermagic.h
|
|
@@ -25,9 +25,28 @@
|
|
#define MODULE_ARCH_VERMAGIC ""
|
|
#endif
|
|
|
|
+#ifdef CONFIG_PAX_REFCOUNT
|
|
+#define MODULE_PAX_REFCOUNT "REFCOUNT "
|
|
+#else
|
|
+#define MODULE_PAX_REFCOUNT ""
|
|
+#endif
|
|
+
|
|
+#ifdef CONSTIFY_PLUGIN
|
|
+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
|
|
+#else
|
|
+#define MODULE_CONSTIFY_PLUGIN ""
|
|
+#endif
|
|
+
|
|
+#ifdef STACKLEAK_PLUGIN
|
|
+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
|
|
+#else
|
|
+#define MODULE_STACKLEAK_PLUGIN ""
|
|
+#endif
|
|
+
|
|
#define VERMAGIC_STRING \
|
|
UTS_RELEASE " " \
|
|
MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
|
|
MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
|
|
- MODULE_ARCH_VERMAGIC
|
|
+ MODULE_ARCH_VERMAGIC \
|
|
+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN
|
|
|
|
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
|
|
index 500421f..2553845 100644
|
|
--- a/include/linux/vmalloc.h
|
|
+++ b/include/linux/vmalloc.h
|
|
@@ -125,8 +125,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
|
|
extern void free_vm_area(struct vm_struct *area);
|
|
|
|
/* for /dev/kmem */
|
|
-extern long vread(char *buf, char *addr, unsigned long count);
|
|
-extern long vwrite(char *buf, char *addr, unsigned long count);
|
|
+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
|
|
+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
|
|
|
|
/*
|
|
* Internals. Dont't use..
|
|
diff --git a/include/linux/vmalloc.h.rej b/include/linux/vmalloc.h.rej
|
|
new file mode 100644
|
|
index 0000000..0e3c8fa
|
|
--- /dev/null
|
|
+++ b/include/linux/vmalloc.h.rej
|
|
@@ -0,0 +1,23 @@
|
|
+--- include/linux/vmalloc.h 2012-03-19 10:39:11.388049209 +0100
|
|
++++ include/linux/vmalloc.h 2012-05-22 15:28:31.531384607 +0200
|
|
+@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining
|
|
+ #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
|
|
+ #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
|
|
+ #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
|
|
++
|
|
++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
|
|
++#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
|
|
++#endif
|
|
++
|
|
+ /* bits [20..32] reserved for arch specific ioremap internals */
|
|
+
|
|
+ /*
|
|
+@@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned lo
|
|
+ extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
|
|
+ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
|
|
+ unsigned long start, unsigned long end, gfp_t gfp_mask,
|
|
+- pgprot_t prot, int node, void *caller);
|
|
++ pgprot_t prot, int node, void *caller) __size_overflow(1);
|
|
+ extern void vfree(const void *addr);
|
|
+
|
|
+ extern void *vmap(struct page **pages, unsigned int count,
|
|
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
|
|
index 2d59889..664e8528 100644
|
|
--- a/include/linux/vmstat.h
|
|
+++ b/include/linux/vmstat.h
|
|
@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
|
|
/*
|
|
* Zone based page accounting with per cpu differentials.
|
|
*/
|
|
-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
|
|
+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
|
|
|
|
static inline void zone_page_state_add(long x, struct zone *zone,
|
|
enum zone_stat_item item)
|
|
{
|
|
- atomic_long_add(x, &zone->vm_stat[item]);
|
|
- atomic_long_add(x, &vm_stat[item]);
|
|
+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
|
|
+ atomic_long_add_unchecked(x, &vm_stat[item]);
|
|
}
|
|
|
|
static inline unsigned long global_page_state(enum zone_stat_item item)
|
|
{
|
|
- long x = atomic_long_read(&vm_stat[item]);
|
|
+ long x = atomic_long_read_unchecked(&vm_stat[item]);
|
|
#ifdef CONFIG_SMP
|
|
if (x < 0)
|
|
x = 0;
|
|
@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
|
|
static inline unsigned long zone_page_state(struct zone *zone,
|
|
enum zone_stat_item item)
|
|
{
|
|
- long x = atomic_long_read(&zone->vm_stat[item]);
|
|
+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
|
|
#ifdef CONFIG_SMP
|
|
if (x < 0)
|
|
x = 0;
|
|
@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
|
|
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
|
|
enum zone_stat_item item)
|
|
{
|
|
- long x = atomic_long_read(&zone->vm_stat[item]);
|
|
+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
|
|
|
|
#ifdef CONFIG_SMP
|
|
int cpu;
|
|
@@ -220,8 +220,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
|
|
|
|
static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
|
|
{
|
|
- atomic_long_inc(&zone->vm_stat[item]);
|
|
- atomic_long_inc(&vm_stat[item]);
|
|
+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
|
|
+ atomic_long_inc_unchecked(&vm_stat[item]);
|
|
}
|
|
|
|
static inline void __inc_zone_page_state(struct page *page,
|
|
@@ -232,8 +232,8 @@ static inline void __inc_zone_page_state(struct page *page,
|
|
|
|
static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
|
|
{
|
|
- atomic_long_dec(&zone->vm_stat[item]);
|
|
- atomic_long_dec(&vm_stat[item]);
|
|
+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
|
|
+ atomic_long_dec_unchecked(&vm_stat[item]);
|
|
}
|
|
|
|
static inline void __dec_zone_page_state(struct page *page,
|
|
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
|
|
index e5d1220..ef6e4069 100644
|
|
--- a/include/linux/xattr.h
|
|
+++ b/include/linux/xattr.h
|
|
@@ -57,6 +57,11 @@
|
|
#define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
|
|
#define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
|
|
|
|
+/* User namespace */
|
|
+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
|
|
+#define XATTR_PAX_FLAGS_SUFFIX "flags"
|
|
+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
|
|
+
|
|
#ifdef __KERNEL__
|
|
|
|
#include <linux/types.h>
|
|
diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
|
|
index 4aeff96..b378cdc 100644
|
|
--- a/include/media/saa7146_vv.h
|
|
+++ b/include/media/saa7146_vv.h
|
|
@@ -163,7 +163,7 @@ struct saa7146_ext_vv
|
|
int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
|
|
|
|
/* the extension can override this */
|
|
- struct v4l2_ioctl_ops ops;
|
|
+ v4l2_ioctl_ops_no_const ops;
|
|
/* pointer to the saa7146 core ops */
|
|
const struct v4l2_ioctl_ops *core_ops;
|
|
|
|
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
|
|
index 96d2221..2292f89 100644
|
|
--- a/include/media/v4l2-dev.h
|
|
+++ b/include/media/v4l2-dev.h
|
|
@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
|
|
|
|
|
|
struct v4l2_file_operations {
|
|
- struct module *owner;
|
|
+ struct module * const owner;
|
|
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
|
|
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
|
|
unsigned int (*poll) (struct file *, struct poll_table_struct *);
|
|
@@ -71,6 +71,7 @@ struct v4l2_file_operations {
|
|
int (*open) (struct file *);
|
|
int (*release) (struct file *);
|
|
};
|
|
+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
|
|
|
|
/*
|
|
* Newer version of video_device, handled by videodev2.c
|
|
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
|
|
index 3cb939c..f23c6bb 100644
|
|
--- a/include/media/v4l2-ioctl.h
|
|
+++ b/include/media/v4l2-ioctl.h
|
|
@@ -281,7 +281,7 @@ struct v4l2_ioctl_ops {
|
|
long (*vidioc_default) (struct file *file, void *fh,
|
|
bool valid_prio, int cmd, void *arg);
|
|
};
|
|
-
|
|
+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
|
|
|
|
/* v4l debugging and diagnostics */
|
|
|
|
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
|
|
index 6db8ecf..8c23861 100644
|
|
--- a/include/net/caif/caif_hsi.h
|
|
+++ b/include/net/caif/caif_hsi.h
|
|
@@ -98,7 +98,7 @@ struct cfhsi_drv {
|
|
void (*rx_done_cb) (struct cfhsi_drv *drv);
|
|
void (*wake_up_cb) (struct cfhsi_drv *drv);
|
|
void (*wake_down_cb) (struct cfhsi_drv *drv);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/* Structure implemented by HSI device. */
|
|
struct cfhsi_dev {
|
|
diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
|
|
index 9e5425b..8136ffc 100644
|
|
--- a/include/net/caif/cfctrl.h
|
|
+++ b/include/net/caif/cfctrl.h
|
|
@@ -52,7 +52,7 @@ struct cfctrl_rsp {
|
|
void (*radioset_rsp)(void);
|
|
void (*reject_rsp)(struct cflayer *layer, u8 linkid,
|
|
struct cflayer *client_layer);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/* Link Setup Parameters for CAIF-Links. */
|
|
struct cfctrl_link_param {
|
|
@@ -101,8 +101,8 @@ struct cfctrl_request_info {
|
|
struct cfctrl {
|
|
struct cfsrvl serv;
|
|
struct cfctrl_rsp res;
|
|
- atomic_t req_seq_no;
|
|
- atomic_t rsp_seq_no;
|
|
+ atomic_unchecked_t req_seq_no;
|
|
+ atomic_unchecked_t rsp_seq_no;
|
|
struct list_head list;
|
|
/* Protects from simultaneous access to first_req list */
|
|
spinlock_t info_list_lock;
|
|
diff --git a/include/net/flow.h b/include/net/flow.h
|
|
index 3fe9261..1dc2920 100644
|
|
--- a/include/net/flow.h
|
|
+++ b/include/net/flow.h
|
|
@@ -227,6 +227,6 @@ extern struct flow_cache_object *flow_cache_lookup(
|
|
|
|
extern void flow_cache_flush(void);
|
|
extern void flow_cache_flush_deferred(void);
|
|
-extern atomic_t flow_cache_genid;
|
|
+extern atomic_unchecked_t flow_cache_genid;
|
|
|
|
#endif
|
|
diff --git a/include/net/inetpeer.h.rej b/include/net/inetpeer.h.rej
|
|
new file mode 100644
|
|
index 0000000..7a07598
|
|
--- /dev/null
|
|
+++ b/include/net/inetpeer.h.rej
|
|
@@ -0,0 +1,26 @@
|
|
+--- include/net/inetpeer.h 2012-07-21 01:28:45.278708106 +0200
|
|
++++ include/net/inetpeer.h 2012-07-21 01:28:56.938708446 +0200
|
|
+@@ -51,8 +51,8 @@ struct inet_peer {
|
|
+ */
|
|
+ union {
|
|
+ struct {
|
|
+- atomic_t rid; /* Frag reception counter */
|
|
+- atomic_t ip_id_count; /* IP ID for the next packet */
|
|
++ atomic_unchecked_t rid; /* Frag reception counter */
|
|
++ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
|
|
+ __u32 tcp_ts;
|
|
+ __u32 tcp_ts_stamp;
|
|
+ };
|
|
+@@ -118,11 +118,11 @@ static inline int inet_getid(struct inet
|
|
+ more++;
|
|
+ inet_peer_refcheck(p);
|
|
+ do {
|
|
+- old = atomic_read(&p->ip_id_count);
|
|
++ old = atomic_read_unchecked(&p->ip_id_count);
|
|
+ new = old + more;
|
|
+ if (!new)
|
|
+ new = 1;
|
|
+- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
|
|
++ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
|
|
+ return new;
|
|
+ }
|
|
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
|
|
index 2124004..3713897 100644
|
|
--- a/include/net/ip_fib.h
|
|
+++ b/include/net/ip_fib.h
|
|
@@ -144,7 +144,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
|
|
|
|
#define FIB_RES_SADDR(net, res) \
|
|
((FIB_RES_NH(res).nh_saddr_genid == \
|
|
- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
|
|
+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
|
|
FIB_RES_NH(res).nh_saddr : \
|
|
fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
|
|
#define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
|
|
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
|
|
index 2389959..2965e05 100644
|
|
--- a/include/net/ip_vs.h
|
|
+++ b/include/net/ip_vs.h
|
|
@@ -510,7 +510,7 @@ struct ip_vs_conn {
|
|
struct ip_vs_conn *control; /* Master control connection */
|
|
atomic_t n_control; /* Number of controlled ones */
|
|
struct ip_vs_dest *dest; /* real server */
|
|
- atomic_t in_pkts; /* incoming packet counter */
|
|
+ atomic_unchecked_t in_pkts; /* incoming packet counter */
|
|
|
|
/* packet transmitter for different forwarding methods. If it
|
|
mangles the packet, it must return NF_DROP or better NF_STOLEN,
|
|
@@ -648,7 +648,7 @@ struct ip_vs_dest {
|
|
__be16 port; /* port number of the server */
|
|
union nf_inet_addr addr; /* IP address of the server */
|
|
volatile unsigned flags; /* dest status flags */
|
|
- atomic_t conn_flags; /* flags to copy to conn */
|
|
+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
|
|
atomic_t weight; /* server weight */
|
|
|
|
atomic_t refcnt; /* reference counter */
|
|
diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
|
|
index 69b610a..fe3962c 100644
|
|
--- a/include/net/irda/ircomm_core.h
|
|
+++ b/include/net/irda/ircomm_core.h
|
|
@@ -51,7 +51,7 @@ typedef struct {
|
|
int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
|
|
int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
|
|
struct ircomm_info *);
|
|
-} call_t;
|
|
+} __no_const call_t;
|
|
|
|
struct ircomm_cb {
|
|
irda_queue_t queue;
|
|
diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
|
|
index 59ba38bc..d515662 100644
|
|
--- a/include/net/irda/ircomm_tty.h
|
|
+++ b/include/net/irda/ircomm_tty.h
|
|
@@ -35,6 +35,7 @@
|
|
#include <linux/termios.h>
|
|
#include <linux/timer.h>
|
|
#include <linux/tty.h> /* struct tty_struct */
|
|
+#include <asm/local.h>
|
|
|
|
#include <net/irda/irias_object.h>
|
|
#include <net/irda/ircomm_core.h>
|
|
@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
|
|
unsigned short close_delay;
|
|
unsigned short closing_wait; /* time to wait before closing */
|
|
|
|
- int open_count;
|
|
- int blocked_open; /* # of blocked opens */
|
|
+ local_t open_count;
|
|
+ local_t blocked_open; /* # of blocked opens */
|
|
|
|
/* Protect concurent access to :
|
|
* o self->open_count
|
|
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
|
|
index cc7c197..9f2da2a 100644
|
|
--- a/include/net/iucv/af_iucv.h
|
|
+++ b/include/net/iucv/af_iucv.h
|
|
@@ -141,7 +141,7 @@ struct iucv_sock {
|
|
struct iucv_sock_list {
|
|
struct hlist_head head;
|
|
rwlock_t lock;
|
|
- atomic_t autobind_name;
|
|
+ atomic_unchecked_t autobind_name;
|
|
};
|
|
|
|
unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
|
|
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
|
|
index 34c996f..bb3b4d4 100644
|
|
--- a/include/net/neighbour.h
|
|
+++ b/include/net/neighbour.h
|
|
@@ -123,7 +123,7 @@ struct neigh_ops {
|
|
void (*error_report)(struct neighbour *, struct sk_buff *);
|
|
int (*output)(struct neighbour *, struct sk_buff *);
|
|
int (*connected_output)(struct neighbour *, struct sk_buff *);
|
|
-};
|
|
+} __do_const;
|
|
|
|
struct pneigh_entry {
|
|
struct pneigh_entry *next;
|
|
diff --git a/include/net/netlink.h b/include/net/netlink.h
|
|
index c0f13ad..a33df64 100644
|
|
--- a/include/net/netlink.h
|
|
+++ b/include/net/netlink.h
|
|
@@ -548,7 +548,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
|
|
static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
|
|
{
|
|
if (mark)
|
|
- skb_trim(skb, (unsigned char *) mark - skb->data);
|
|
+ skb_trim(skb, (const unsigned char *) mark - skb->data);
|
|
}
|
|
|
|
/**
|
|
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
|
|
index 8869426..dfb5ab1 100644
|
|
--- a/include/net/netns/ipv4.h
|
|
+++ b/include/net/netns/ipv4.h
|
|
@@ -59,8 +59,8 @@ struct netns_ipv4 {
|
|
int sysctl_fwmark_reflect;
|
|
int sysctl_tcp_fwmark_accept;
|
|
|
|
- atomic_t rt_genid;
|
|
- atomic_t dev_addr_genid;
|
|
+ atomic_unchecked_t rt_genid;
|
|
+ atomic_unchecked_t dev_addr_genid;
|
|
|
|
#ifdef CONFIG_IP_MROUTE
|
|
#ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
|
|
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
|
|
index 43c8bbcdb..96d0164 100644
|
|
--- a/include/net/sctp/sctp.h
|
|
+++ b/include/net/sctp/sctp.h
|
|
@@ -318,9 +318,9 @@ do { \
|
|
|
|
#else /* SCTP_DEBUG */
|
|
|
|
-#define SCTP_DEBUG_PRINTK(whatever...)
|
|
-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
|
|
-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
|
|
+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
|
|
+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
|
|
+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
|
|
#define SCTP_ENABLE_DEBUG
|
|
#define SCTP_DISABLE_DEBUG
|
|
#define SCTP_ASSERT(expr, str, func)
|
|
diff --git a/include/net/sock.h b/include/net/sock.h
|
|
index bdc23ff..ceaa2a8 100644
|
|
--- a/include/net/sock.h
|
|
+++ b/include/net/sock.h
|
|
@@ -304,7 +304,7 @@ struct sock {
|
|
#ifdef CONFIG_RPS
|
|
__u32 sk_rxhash;
|
|
#endif
|
|
- atomic_t sk_drops;
|
|
+ atomic_unchecked_t sk_drops;
|
|
int sk_rcvbuf;
|
|
|
|
struct sk_filter __rcu *sk_filter;
|
|
@@ -1706,7 +1706,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
|
|
}
|
|
|
|
static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
|
|
- char __user *from, char *to,
|
|
+ char __user *from, unsigned char *to,
|
|
int copy, int offset)
|
|
{
|
|
if (skb->ip_summed == CHECKSUM_NONE) {
|
|
diff --git a/include/net/tcp.h b/include/net/tcp.h
|
|
index c870ea1..a890d78 100644
|
|
--- a/include/net/tcp.h
|
|
+++ b/include/net/tcp.h
|
|
@@ -1438,7 +1438,7 @@ struct tcp_seq_afinfo {
|
|
char *name;
|
|
sa_family_t family;
|
|
const struct file_operations *seq_fops;
|
|
- struct seq_operations seq_ops;
|
|
+ seq_operations_no_const seq_ops;
|
|
};
|
|
|
|
struct tcp_iter_state {
|
|
diff --git a/include/net/udp.h b/include/net/udp.h
|
|
index d0bf5b6..3ca4896 100644
|
|
--- a/include/net/udp.h
|
|
+++ b/include/net/udp.h
|
|
@@ -245,7 +245,7 @@ struct udp_seq_afinfo {
|
|
sa_family_t family;
|
|
struct udp_table *udp_table;
|
|
const struct file_operations *seq_fops;
|
|
- struct seq_operations seq_ops;
|
|
+ seq_operations_no_const seq_ops;
|
|
};
|
|
|
|
struct udp_iter_state {
|
|
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
|
|
index 9f7e94b..4d5314a 100644
|
|
--- a/include/net/xfrm.h
|
|
+++ b/include/net/xfrm.h
|
|
@@ -508,7 +508,7 @@ struct xfrm_policy {
|
|
struct timer_list timer;
|
|
|
|
struct flow_cache_object flo;
|
|
- atomic_t genid;
|
|
+ atomic_unchecked_t genid;
|
|
u32 priority;
|
|
u32 index;
|
|
struct xfrm_mark mark;
|
|
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
|
|
index 1a046b1..ee0bef0 100644
|
|
--- a/include/rdma/iw_cm.h
|
|
+++ b/include/rdma/iw_cm.h
|
|
@@ -122,7 +122,7 @@ struct iw_cm_verbs {
|
|
int backlog);
|
|
|
|
int (*destroy_listen)(struct iw_cm_id *cm_id);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/**
|
|
* iw_create_cm_id - Create an IW CM identifier.
|
|
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
|
|
index 8f9dfba..610ab6c 100644
|
|
--- a/include/scsi/libfc.h
|
|
+++ b/include/scsi/libfc.h
|
|
@@ -756,6 +756,7 @@ struct libfc_function_template {
|
|
*/
|
|
void (*disc_stop_final) (struct fc_lport *);
|
|
};
|
|
+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
|
|
|
|
/**
|
|
* struct fc_disc - Discovery context
|
|
@@ -861,7 +862,7 @@ struct fc_lport {
|
|
struct fc_vport *vport;
|
|
|
|
/* Operational Information */
|
|
- struct libfc_function_template tt;
|
|
+ libfc_function_template_no_const tt;
|
|
u8 link_up;
|
|
u8 qfull;
|
|
enum fc_lport_state state;
|
|
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
|
|
index ef54805..3310956 100644
|
|
--- a/include/scsi/scsi_device.h
|
|
+++ b/include/scsi/scsi_device.h
|
|
@@ -167,9 +167,9 @@ struct scsi_device {
|
|
unsigned int max_device_blocked; /* what device_blocked counts down from */
|
|
#define SCSI_DEFAULT_DEVICE_BLOCKED 3
|
|
|
|
- atomic_t iorequest_cnt;
|
|
- atomic_t iodone_cnt;
|
|
- atomic_t ioerr_cnt;
|
|
+ atomic_unchecked_t iorequest_cnt;
|
|
+ atomic_unchecked_t iodone_cnt;
|
|
+ atomic_unchecked_t ioerr_cnt;
|
|
|
|
struct device sdev_gendev,
|
|
sdev_dev;
|
|
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
|
|
index 719faf1..d1154d4 100644
|
|
--- a/include/scsi/scsi_transport_fc.h
|
|
+++ b/include/scsi/scsi_transport_fc.h
|
|
@@ -739,7 +739,7 @@ struct fc_function_template {
|
|
unsigned long show_host_system_hostname:1;
|
|
|
|
unsigned long disable_target_scan:1;
|
|
-};
|
|
+} __do_const;
|
|
|
|
|
|
/**
|
|
diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
|
|
index 030b87c..98a6954 100644
|
|
--- a/include/sound/ak4xxx-adda.h
|
|
+++ b/include/sound/ak4xxx-adda.h
|
|
@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
|
|
void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
|
|
unsigned char val);
|
|
void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
|
|
-};
|
|
+} __no_const;
|
|
|
|
#define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
|
|
|
|
diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
|
|
index 8c05e47a..2b5df97 100644
|
|
--- a/include/sound/hwdep.h
|
|
+++ b/include/sound/hwdep.h
|
|
@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
|
|
struct snd_hwdep_dsp_status *status);
|
|
int (*dsp_load)(struct snd_hwdep *hw,
|
|
struct snd_hwdep_dsp_image *image);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct snd_hwdep {
|
|
struct snd_card *card;
|
|
diff --git a/include/sound/info.h b/include/sound/info.h
|
|
index 9ca1a49..aba1728 100644
|
|
--- a/include/sound/info.h
|
|
+++ b/include/sound/info.h
|
|
@@ -44,7 +44,7 @@ struct snd_info_entry_text {
|
|
struct snd_info_buffer *buffer);
|
|
void (*write)(struct snd_info_entry *entry,
|
|
struct snd_info_buffer *buffer);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct snd_info_entry_ops {
|
|
int (*open)(struct snd_info_entry *entry,
|
|
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
|
|
index 8f76344..3844827 100644
|
|
--- a/include/sound/pcm.h
|
|
+++ b/include/sound/pcm.h
|
|
@@ -86,6 +86,7 @@ struct snd_pcm_ops {
|
|
int (*ack)(struct snd_pcm_substream *substream);
|
|
int (*restart)(struct snd_pcm_substream *substream);
|
|
};
|
|
+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
|
|
|
|
/*
|
|
*
|
|
diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
|
|
index af1b49e..a5d55a5 100644
|
|
--- a/include/sound/sb16_csp.h
|
|
+++ b/include/sound/sb16_csp.h
|
|
@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
|
|
int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
|
|
int (*csp_stop) (struct snd_sb_csp * p);
|
|
int (*csp_qsound_transfer) (struct snd_sb_csp * p);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/*
|
|
* CSP private data
|
|
diff --git a/include/sound/soc.h.rej b/include/sound/soc.h.rej
|
|
new file mode 100644
|
|
index 0000000..b207cdf
|
|
--- /dev/null
|
|
+++ b/include/sound/soc.h.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- include/sound/soc.h 2012-05-21 11:33:39.179929925 +0200
|
|
++++ include/sound/soc.h 2012-05-21 12:10:11.664049005 +0200
|
|
+@@ -711,7 +711,7 @@ struct snd_soc_platform_driver {
|
|
+ /* platform IO - used for platform DAPM */
|
|
+ unsigned int (*read)(struct snd_soc_platform *, unsigned int);
|
|
+ int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
|
|
+-};
|
|
++} __do_const;
|
|
+
|
|
+ struct snd_soc_platform {
|
|
+ const char *name;
|
|
diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
|
|
index 4119966..1a4671c 100644
|
|
--- a/include/sound/ymfpci.h
|
|
+++ b/include/sound/ymfpci.h
|
|
@@ -358,7 +358,7 @@ struct snd_ymfpci {
|
|
spinlock_t reg_lock;
|
|
spinlock_t voice_lock;
|
|
wait_queue_head_t interrupt_sleep;
|
|
- atomic_t interrupt_sleep_count;
|
|
+ atomic_unchecked_t interrupt_sleep_count;
|
|
struct snd_info_entry *proc_entry;
|
|
const struct firmware *dsp_microcode;
|
|
const struct firmware *controller_microcode;
|
|
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
|
|
index 9d0603c..fcfff17 100644
|
|
--- a/include/target/target_core_base.h
|
|
+++ b/include/target/target_core_base.h
|
|
@@ -449,7 +449,7 @@ struct t10_reservation_ops {
|
|
int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
|
|
int (*t10_pr_register)(struct se_cmd *);
|
|
int (*t10_pr_clear)(struct se_cmd *);
|
|
-};
|
|
+} __no_const;
|
|
|
|
struct t10_reservation {
|
|
/* Reservation effects all target ports */
|
|
@@ -579,7 +579,7 @@ struct se_cmd {
|
|
atomic_t t_se_count;
|
|
atomic_t t_task_cdbs_left;
|
|
atomic_t t_task_cdbs_ex_left;
|
|
- atomic_t t_task_cdbs_sent;
|
|
+ atomic_unchecked_t t_task_cdbs_sent;
|
|
unsigned int transport_state;
|
|
#define CMD_T_ABORTED (1 << 0)
|
|
#define CMD_T_ACTIVE (1 << 1)
|
|
@@ -807,7 +807,7 @@ struct se_device {
|
|
spinlock_t stats_lock;
|
|
/* Active commands on this virtual SE device */
|
|
atomic_t simple_cmds;
|
|
- atomic_t dev_ordered_id;
|
|
+ atomic_unchecked_t dev_ordered_id;
|
|
atomic_t execute_tasks;
|
|
atomic_t dev_ordered_sync;
|
|
atomic_t dev_qf_count;
|
|
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
|
|
index 0296174..2208301 100644
|
|
--- a/include/trace/events/irq.h
|
|
+++ b/include/trace/events/irq.h
|
|
@@ -36,7 +36,7 @@ struct softirq_action;
|
|
*/
|
|
TRACE_EVENT(irq_handler_entry,
|
|
|
|
- TP_PROTO(int irq, struct irqaction *action),
|
|
+ TP_PROTO(int irq, const struct irqaction *action),
|
|
|
|
TP_ARGS(irq, action),
|
|
|
|
@@ -69,7 +69,7 @@ TRACE_EVENT(irq_handler_entry,
|
|
*/
|
|
TRACE_EVENT(irq_handler_exit,
|
|
|
|
- TP_PROTO(int irq, struct irqaction *action, int ret),
|
|
+ TP_PROTO(int irq, const struct irqaction *action, int ret),
|
|
|
|
TP_ARGS(irq, action, ret),
|
|
|
|
diff --git a/include/video/udlfb.h b/include/video/udlfb.h
|
|
index f9466fa..f4e2b81 100644
|
|
--- a/include/video/udlfb.h
|
|
+++ b/include/video/udlfb.h
|
|
@@ -53,10 +53,10 @@ struct dlfb_data {
|
|
u32 pseudo_palette[256];
|
|
int blank_mode; /*one of FB_BLANK_ */
|
|
/* blit-only rendering path metrics, exposed through sysfs */
|
|
- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
|
|
- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
|
|
- atomic_t bytes_sent; /* to usb, after compression including overhead */
|
|
- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
|
|
+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
|
|
+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
|
|
+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
|
|
+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
|
|
};
|
|
|
|
#define NR_USB_REQUEST_I2C_SUB_IO 0x02
|
|
diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
|
|
index 0993a22..32ba2fe 100644
|
|
--- a/include/video/uvesafb.h
|
|
+++ b/include/video/uvesafb.h
|
|
@@ -177,6 +177,7 @@ struct uvesafb_par {
|
|
u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
|
|
u8 pmi_setpal; /* PMI for palette changes */
|
|
u16 *pmi_base; /* protected mode interface location */
|
|
+ u8 *pmi_code; /* protected mode code location */
|
|
void *pmi_start;
|
|
void *pmi_pal;
|
|
u8 *vbe_state_orig; /*
|
|
diff --git a/init/Kconfig b/init/Kconfig
|
|
index 770d720..4460460 100644
|
|
--- a/init/Kconfig
|
|
+++ b/init/Kconfig
|
|
@@ -1262,7 +1262,7 @@ config SLUB_DEBUG
|
|
|
|
config COMPAT_BRK
|
|
bool "Disable heap randomization"
|
|
- default y
|
|
+ default n
|
|
help
|
|
Randomizing heap placement makes heap exploits harder, but it
|
|
also breaks ancient binaries (including anything libc5 based).
|
|
diff --git a/init/do_mounts.c b/init/do_mounts.c
|
|
index 42b0707..c06eef4 100644
|
|
--- a/init/do_mounts.c
|
|
+++ b/init/do_mounts.c
|
|
@@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
|
|
static int __init do_mount_root(char *name, char *fs, int flags, void *data)
|
|
{
|
|
struct super_block *s;
|
|
- int err = sys_mount(name, "/root", fs, flags, data);
|
|
+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
|
|
if (err)
|
|
return err;
|
|
|
|
- sys_chdir((const char __user __force *)"/root");
|
|
+ sys_chdir((const char __force_user *)"/root");
|
|
s = current->fs->pwd.dentry->d_sb;
|
|
ROOT_DEV = s->s_dev;
|
|
printk(KERN_INFO
|
|
@@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
|
|
va_start(args, fmt);
|
|
vsprintf(buf, fmt, args);
|
|
va_end(args);
|
|
- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
|
|
+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
|
|
if (fd >= 0) {
|
|
sys_ioctl(fd, FDEJECT, 0);
|
|
sys_close(fd);
|
|
}
|
|
printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
|
|
- fd = sys_open("/dev/console", O_RDWR, 0);
|
|
+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
|
|
if (fd >= 0) {
|
|
sys_ioctl(fd, TCGETS, (long)&termios);
|
|
termios.c_lflag &= ~ICANON;
|
|
sys_ioctl(fd, TCSETSF, (long)&termios);
|
|
- sys_read(fd, &c, 1);
|
|
+ sys_read(fd, (char __user *)&c, 1);
|
|
termios.c_lflag |= ICANON;
|
|
sys_ioctl(fd, TCSETSF, (long)&termios);
|
|
sys_close(fd);
|
|
@@ -555,6 +555,6 @@ void __init prepare_namespace(void)
|
|
mount_root();
|
|
out:
|
|
devtmpfs_mount("dev");
|
|
- sys_mount(".", "/", NULL, MS_MOVE, NULL);
|
|
- sys_chroot((const char __user __force *)".");
|
|
+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
|
|
+ sys_chroot((const char __force_user *)".");
|
|
}
|
|
diff --git a/init/do_mounts.h b/init/do_mounts.h
|
|
index f5b978a..69dbfe8 100644
|
|
--- a/init/do_mounts.h
|
|
+++ b/init/do_mounts.h
|
|
@@ -15,15 +15,15 @@ extern int root_mountflags;
|
|
|
|
static inline int create_dev(char *name, dev_t dev)
|
|
{
|
|
- sys_unlink(name);
|
|
- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
|
|
+ sys_unlink((char __force_user *)name);
|
|
+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
|
|
}
|
|
|
|
#if BITS_PER_LONG == 32
|
|
static inline u32 bstat(char *name)
|
|
{
|
|
struct stat64 stat;
|
|
- if (sys_stat64(name, &stat) != 0)
|
|
+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
|
|
return 0;
|
|
if (!S_ISBLK(stat.st_mode))
|
|
return 0;
|
|
@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
|
|
static inline u32 bstat(char *name)
|
|
{
|
|
struct stat stat;
|
|
- if (sys_newstat(name, &stat) != 0)
|
|
+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
|
|
return 0;
|
|
if (!S_ISBLK(stat.st_mode))
|
|
return 0;
|
|
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
|
|
index 9047330..de0d1fb 100644
|
|
--- a/init/do_mounts_initrd.c
|
|
+++ b/init/do_mounts_initrd.c
|
|
@@ -43,13 +43,13 @@ static void __init handle_initrd(void)
|
|
create_dev("/dev/root.old", Root_RAM0);
|
|
/* mount initrd on rootfs' /root */
|
|
mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
|
|
- sys_mkdir("/old", 0700);
|
|
- root_fd = sys_open("/", 0, 0);
|
|
- old_fd = sys_open("/old", 0, 0);
|
|
+ sys_mkdir((const char __force_user *)"/old", 0700);
|
|
+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
|
|
+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
|
|
/* move initrd over / and chdir/chroot in initrd root */
|
|
- sys_chdir("/root");
|
|
- sys_mount(".", "/", NULL, MS_MOVE, NULL);
|
|
- sys_chroot(".");
|
|
+ sys_chdir((const char __force_user *)"/root");
|
|
+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
|
|
+ sys_chroot((const char __force_user *)".");
|
|
|
|
/*
|
|
* In case that a resume from disk is carried out by linuxrc or one of
|
|
@@ -66,15 +66,15 @@ static void __init handle_initrd(void)
|
|
|
|
/* move initrd to rootfs' /old */
|
|
sys_fchdir(old_fd);
|
|
- sys_mount("/", ".", NULL, MS_MOVE, NULL);
|
|
+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
|
|
/* switch root and cwd back to / of rootfs */
|
|
sys_fchdir(root_fd);
|
|
- sys_chroot(".");
|
|
+ sys_chroot((const char __force_user *)".");
|
|
sys_close(old_fd);
|
|
sys_close(root_fd);
|
|
|
|
if (new_decode_dev(real_root_dev) == Root_RAM0) {
|
|
- sys_chdir("/old");
|
|
+ sys_chdir((const char __force_user *)"/old");
|
|
return;
|
|
}
|
|
|
|
@@ -82,17 +82,17 @@ static void __init handle_initrd(void)
|
|
mount_root();
|
|
|
|
printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
|
|
- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
|
|
+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
|
|
if (!error)
|
|
printk("okay\n");
|
|
else {
|
|
- int fd = sys_open("/dev/root.old", O_RDWR, 0);
|
|
+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
|
|
if (error == -ENOENT)
|
|
printk("/initrd does not exist. Ignored.\n");
|
|
else
|
|
printk("failed\n");
|
|
printk(KERN_NOTICE "Unmounting old root\n");
|
|
- sys_umount("/old", MNT_DETACH);
|
|
+ sys_umount((char __force_user *)"/old", MNT_DETACH);
|
|
printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
|
|
if (fd < 0) {
|
|
error = fd;
|
|
@@ -115,11 +115,11 @@ int __init initrd_load(void)
|
|
* mounted in the normal path.
|
|
*/
|
|
if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
|
|
- sys_unlink("/initrd.image");
|
|
+ sys_unlink((const char __force_user *)"/initrd.image");
|
|
handle_initrd();
|
|
return 1;
|
|
}
|
|
}
|
|
- sys_unlink("/initrd.image");
|
|
+ sys_unlink((const char __force_user *)"/initrd.image");
|
|
return 0;
|
|
}
|
|
diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
|
|
index 32c4799..c27ee74 100644
|
|
--- a/init/do_mounts_md.c
|
|
+++ b/init/do_mounts_md.c
|
|
@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
|
|
partitioned ? "_d" : "", minor,
|
|
md_setup_args[ent].device_names);
|
|
|
|
- fd = sys_open(name, 0, 0);
|
|
+ fd = sys_open((char __force_user *)name, 0, 0);
|
|
if (fd < 0) {
|
|
printk(KERN_ERR "md: open failed - cannot start "
|
|
"array %s\n", name);
|
|
@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
|
|
* array without it
|
|
*/
|
|
sys_close(fd);
|
|
- fd = sys_open(name, 0, 0);
|
|
+ fd = sys_open((char __force_user *)name, 0, 0);
|
|
sys_ioctl(fd, BLKRRPART, 0);
|
|
}
|
|
sys_close(fd);
|
|
@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
|
|
|
|
wait_for_device_probe();
|
|
|
|
- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
|
|
+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
|
|
if (fd >= 0) {
|
|
sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
|
|
sys_close(fd);
|
|
diff --git a/init/initramfs.c b/init/initramfs.c
|
|
index 8216c30..25e8e32 100644
|
|
--- a/init/initramfs.c
|
|
+++ b/init/initramfs.c
|
|
@@ -74,7 +74,7 @@ static void __init free_hash(void)
|
|
}
|
|
}
|
|
|
|
-static long __init do_utime(char __user *filename, time_t mtime)
|
|
+static long __init do_utime(__force char __user *filename, time_t mtime)
|
|
{
|
|
struct timespec t[2];
|
|
|
|
@@ -109,7 +109,7 @@ static void __init dir_utime(void)
|
|
struct dir_entry *de, *tmp;
|
|
list_for_each_entry_safe(de, tmp, &dir_list, list) {
|
|
list_del(&de->list);
|
|
- do_utime(de->name, de->mtime);
|
|
+ do_utime((char __force_user *)de->name, de->mtime);
|
|
kfree(de->name);
|
|
kfree(de);
|
|
}
|
|
@@ -271,7 +271,7 @@ static int __init maybe_link(void)
|
|
if (nlink >= 2) {
|
|
char *old = find_link(major, minor, ino, mode, collected);
|
|
if (old)
|
|
- return (sys_link(old, collected) < 0) ? -1 : 1;
|
|
+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
|
|
}
|
|
return 0;
|
|
}
|
|
@@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
|
|
{
|
|
struct stat st;
|
|
|
|
- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
|
|
+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
|
|
if (S_ISDIR(st.st_mode))
|
|
- sys_rmdir(path);
|
|
+ sys_rmdir((char __force_user *)path);
|
|
else
|
|
- sys_unlink(path);
|
|
+ sys_unlink((char __force_user *)path);
|
|
}
|
|
}
|
|
|
|
@@ -305,7 +305,7 @@ static int __init do_name(void)
|
|
int openflags = O_WRONLY|O_CREAT;
|
|
if (ml != 1)
|
|
openflags |= O_TRUNC;
|
|
- wfd = sys_open(collected, openflags, mode);
|
|
+ wfd = sys_open((char __force_user *)collected, openflags, mode);
|
|
|
|
if (wfd >= 0) {
|
|
sys_fchown(wfd, uid, gid);
|
|
@@ -317,17 +317,17 @@ static int __init do_name(void)
|
|
}
|
|
}
|
|
} else if (S_ISDIR(mode)) {
|
|
- sys_mkdir(collected, mode);
|
|
- sys_chown(collected, uid, gid);
|
|
- sys_chmod(collected, mode);
|
|
+ sys_mkdir((char __force_user *)collected, mode);
|
|
+ sys_chown((char __force_user *)collected, uid, gid);
|
|
+ sys_chmod((char __force_user *)collected, mode);
|
|
dir_add(collected, mtime);
|
|
} else if (S_ISBLK(mode) || S_ISCHR(mode) ||
|
|
S_ISFIFO(mode) || S_ISSOCK(mode)) {
|
|
if (maybe_link() == 0) {
|
|
- sys_mknod(collected, mode, rdev);
|
|
- sys_chown(collected, uid, gid);
|
|
- sys_chmod(collected, mode);
|
|
- do_utime(collected, mtime);
|
|
+ sys_mknod((char __force_user *)collected, mode, rdev);
|
|
+ sys_chown((char __force_user *)collected, uid, gid);
|
|
+ sys_chmod((char __force_user *)collected, mode);
|
|
+ do_utime((char __force_user *)collected, mtime);
|
|
}
|
|
}
|
|
return 0;
|
|
@@ -336,15 +336,15 @@ static int __init do_name(void)
|
|
static int __init do_copy(void)
|
|
{
|
|
if (count >= body_len) {
|
|
- sys_write(wfd, victim, body_len);
|
|
+ sys_write(wfd, (char __force_user *)victim, body_len);
|
|
sys_close(wfd);
|
|
- do_utime(vcollected, mtime);
|
|
+ do_utime((char __force_user *)vcollected, mtime);
|
|
kfree(vcollected);
|
|
eat(body_len);
|
|
state = SkipIt;
|
|
return 0;
|
|
} else {
|
|
- sys_write(wfd, victim, count);
|
|
+ sys_write(wfd, (char __force_user *)victim, count);
|
|
body_len -= count;
|
|
eat(count);
|
|
return 1;
|
|
@@ -355,9 +355,9 @@ static int __init do_symlink(void)
|
|
{
|
|
collected[N_ALIGN(name_len) + body_len] = '\0';
|
|
clean_path(collected, 0);
|
|
- sys_symlink(collected + N_ALIGN(name_len), collected);
|
|
- sys_lchown(collected, uid, gid);
|
|
- do_utime(collected, mtime);
|
|
+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
|
|
+ sys_lchown((char __force_user *)collected, uid, gid);
|
|
+ do_utime((char __force_user *)collected, mtime);
|
|
state = SkipIt;
|
|
next_state = Reset;
|
|
return 0;
|
|
diff --git a/init/main.c b/init/main.c
|
|
index f91924b..5646c8d 100644
|
|
--- a/init/main.c
|
|
+++ b/init/main.c
|
|
@@ -149,6 +149,49 @@ static int __init set_reset_devices(char *str)
|
|
|
|
__setup("reset_devices", set_reset_devices);
|
|
|
|
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+extern char pax_enter_kernel_user[];
|
|
+extern char pax_exit_kernel_user[];
|
|
+extern pgdval_t clone_pgd_mask;
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
|
|
+static int __init setup_pax_nouderef(char *str)
|
|
+{
|
|
+#ifdef CONFIG_X86_32
|
|
+ unsigned int cpu;
|
|
+ struct desc_struct *gdt;
|
|
+
|
|
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
|
|
+ gdt = get_cpu_gdt_table(cpu);
|
|
+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
|
|
+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
|
|
+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
|
|
+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
|
|
+ }
|
|
+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
|
|
+#else
|
|
+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
|
|
+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
|
|
+ clone_pgd_mask = ~(pgdval_t)0UL;
|
|
+#endif
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+early_param("pax_nouderef", setup_pax_nouderef);
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_SOFTMODE
|
|
+int pax_softmode;
|
|
+
|
|
+static int __init setup_pax_softmode(char *str)
|
|
+{
|
|
+ get_option(&str, &pax_softmode);
|
|
+ return 1;
|
|
+}
|
|
+__setup("pax_softmode=", setup_pax_softmode);
|
|
+#endif
|
|
+
|
|
static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
|
|
const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
|
|
static const char *panic_later, *panic_param;
|
|
@@ -683,6 +726,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
|
|
{
|
|
int count = preempt_count();
|
|
int ret;
|
|
+ const char *msg1 = "", *msg2 = "";
|
|
|
|
if (initcall_debug)
|
|
ret = do_one_initcall_debug(fn);
|
|
@@ -695,15 +739,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
|
|
sprintf(msgbuf, "error code %d ", ret);
|
|
|
|
if (preempt_count() != count) {
|
|
- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
|
|
+ msg1 = " preemption imbalance";
|
|
preempt_count() = count;
|
|
}
|
|
if (irqs_disabled()) {
|
|
- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
|
|
+ msg2 = " disabled interrupts";
|
|
local_irq_enable();
|
|
}
|
|
- if (msgbuf[0]) {
|
|
- printk("initcall %pF returned with %s\n", fn, msgbuf);
|
|
+ if (msgbuf[0] || *msg1 || *msg2) {
|
|
+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
|
|
}
|
|
|
|
return ret;
|
|
@@ -756,8 +800,14 @@ static void __init do_initcall_level(int level)
|
|
level, level,
|
|
repair_env_string);
|
|
|
|
- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
|
|
+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
|
|
do_one_initcall(*fn);
|
|
+
|
|
+#ifdef CONFIG_PAX_LATENT_ENTROPY
|
|
+ transfer_latent_entropy();
|
|
+#endif
|
|
+
|
|
+ }
|
|
}
|
|
|
|
static void __init do_initcalls(void)
|
|
@@ -792,8 +842,14 @@ static void __init do_pre_smp_initcalls(void)
|
|
{
|
|
initcall_t *fn;
|
|
|
|
- for (fn = __initcall_start; fn < __initcall0_start; fn++)
|
|
+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
|
|
do_one_initcall(*fn);
|
|
+
|
|
+#ifdef CONFIG_PAX_LATENT_ENTROPY
|
|
+ transfer_latent_entropy();
|
|
+#endif
|
|
+
|
|
+ }
|
|
}
|
|
|
|
static void run_init_process(const char *init_filename)
|
|
@@ -875,7 +931,7 @@ static int __init kernel_init(void * unused)
|
|
do_basic_setup();
|
|
|
|
/* Open the /dev/console on the rootfs, this should never fail */
|
|
- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
|
|
+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
|
|
printk(KERN_WARNING "Warning: unable to open an initial console.\n");
|
|
|
|
(void) sys_dup(0);
|
|
@@ -888,7 +944,7 @@ static int __init kernel_init(void * unused)
|
|
if (!ramdisk_execute_command)
|
|
ramdisk_execute_command = "/init";
|
|
|
|
- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
|
|
+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
|
|
ramdisk_execute_command = NULL;
|
|
prepare_namespace();
|
|
}
|
|
diff --git a/ipc/msg.c b/ipc/msg.c
|
|
index 25f1a61..58f7ac1 100644
|
|
--- a/ipc/msg.c
|
|
+++ b/ipc/msg.c
|
|
@@ -311,18 +311,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
|
|
return security_msg_queue_associate(msq, msgflg);
|
|
}
|
|
|
|
+static struct ipc_ops msg_ops = {
|
|
+ .getnew = newque,
|
|
+ .associate = msg_security,
|
|
+ .more_checks = NULL
|
|
+};
|
|
+
|
|
SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
|
|
{
|
|
struct ipc_namespace *ns;
|
|
- struct ipc_ops msg_ops;
|
|
struct ipc_params msg_params;
|
|
|
|
ns = current->nsproxy->ipc_ns;
|
|
|
|
- msg_ops.getnew = newque;
|
|
- msg_ops.associate = msg_security;
|
|
- msg_ops.more_checks = NULL;
|
|
-
|
|
msg_params.key = key;
|
|
msg_params.flg = msgflg;
|
|
|
|
diff --git a/ipc/sem.c b/ipc/sem.c
|
|
index 5215a81..cfc0cac 100644
|
|
--- a/ipc/sem.c
|
|
+++ b/ipc/sem.c
|
|
@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
|
|
return 0;
|
|
}
|
|
|
|
+static struct ipc_ops sem_ops = {
|
|
+ .getnew = newary,
|
|
+ .associate = sem_security,
|
|
+ .more_checks = sem_more_checks
|
|
+};
|
|
+
|
|
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
|
|
{
|
|
struct ipc_namespace *ns;
|
|
- struct ipc_ops sem_ops;
|
|
struct ipc_params sem_params;
|
|
|
|
ns = current->nsproxy->ipc_ns;
|
|
@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
|
|
if (nsems < 0 || nsems > ns->sc_semmsl)
|
|
return -EINVAL;
|
|
|
|
- sem_ops.getnew = newary;
|
|
- sem_ops.associate = sem_security;
|
|
- sem_ops.more_checks = sem_more_checks;
|
|
-
|
|
sem_params.key = key;
|
|
sem_params.flg = semflg;
|
|
sem_params.u.nsems = nsems;
|
|
diff --git a/ipc/shm.c b/ipc/shm.c
|
|
index a02ef57..e19f5df 100644
|
|
--- a/ipc/shm.c
|
|
+++ b/ipc/shm.c
|
|
@@ -561,18 +561,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
|
|
return 0;
|
|
}
|
|
|
|
+static struct ipc_ops shm_ops = {
|
|
+ .getnew = newseg,
|
|
+ .associate = shm_security,
|
|
+ .more_checks = shm_more_checks
|
|
+};
|
|
+
|
|
SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
|
|
{
|
|
struct ipc_namespace *ns;
|
|
- struct ipc_ops shm_ops;
|
|
struct ipc_params shm_params;
|
|
|
|
ns = current->nsproxy->ipc_ns;
|
|
|
|
- shm_ops.getnew = newseg;
|
|
- shm_ops.associate = shm_security;
|
|
- shm_ops.more_checks = shm_more_checks;
|
|
-
|
|
shm_params.key = key;
|
|
shm_params.flg = shmflg;
|
|
shm_params.u.size = size;
|
|
@@ -990,6 +991,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
|
|
f_mode = FMODE_READ | FMODE_WRITE;
|
|
}
|
|
if (shmflg & SHM_EXEC) {
|
|
+
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
|
|
+ goto out;
|
|
+#endif
|
|
+
|
|
prot |= PROT_EXEC;
|
|
acc_mode |= S_IXUGO;
|
|
}
|
|
diff --git a/kernel/acct.c b/kernel/acct.c
|
|
index 02e6167..54824f7 100644
|
|
--- a/kernel/acct.c
|
|
+++ b/kernel/acct.c
|
|
@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
|
|
*/
|
|
flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
|
|
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
|
|
- file->f_op->write(file, (char *)&ac,
|
|
+ file->f_op->write(file, (char __force_user *)&ac,
|
|
sizeof(acct_t), &file->f_pos);
|
|
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
|
|
set_fs(fs);
|
|
diff --git a/kernel/audit.c b/kernel/audit.c
|
|
index b4efae8..a80d715 100644
|
|
--- a/kernel/audit.c
|
|
+++ b/kernel/audit.c
|
|
@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
|
|
3) suppressed due to audit_rate_limit
|
|
4) suppressed due to audit_backlog_limit
|
|
*/
|
|
-static atomic_t audit_lost = ATOMIC_INIT(0);
|
|
+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
|
|
|
|
/* The netlink socket. */
|
|
static struct sock *audit_sock;
|
|
@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
|
|
unsigned long now;
|
|
int print;
|
|
|
|
- atomic_inc(&audit_lost);
|
|
+ atomic_inc_unchecked(&audit_lost);
|
|
|
|
print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
|
|
|
|
@@ -690,7 +690,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
|
|
status_set.pid = audit_pid;
|
|
status_set.rate_limit = audit_rate_limit;
|
|
status_set.backlog_limit = audit_backlog_limit;
|
|
- status_set.lost = atomic_read(&audit_lost);
|
|
+ status_set.lost = atomic_read_unchecked(&audit_lost);
|
|
status_set.backlog = skb_queue_len(&audit_skb_queue);
|
|
audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
|
|
&status_set, sizeof(status_set));
|
|
diff --git a/kernel/audit.c.rej b/kernel/audit.c.rej
|
|
new file mode 100644
|
|
index 0000000..7a429c2
|
|
--- /dev/null
|
|
+++ b/kernel/audit.c.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- kernel/audit.c 2012-05-21 11:33:39.347929934 +0200
|
|
++++ kernel/audit.c 2012-05-21 12:10:11.704049007 +0200
|
|
+@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
|
|
+ printk(KERN_WARNING
|
|
+ "audit: audit_lost=%d audit_rate_limit=%d "
|
|
+ "audit_backlog_limit=%d\n",
|
|
+- atomic_read(&audit_lost),
|
|
++ atomic_read_unchecked(&audit_lost),
|
|
+ audit_rate_limit,
|
|
+ audit_backlog_limit);
|
|
+ audit_panic(message);
|
|
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
|
|
index fb34250..e139b139 100644
|
|
--- a/kernel/auditsc.c
|
|
+++ b/kernel/auditsc.c
|
|
@@ -2298,7 +2298,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
|
|
}
|
|
|
|
/* global counter which is incremented every time something logs in */
|
|
-static atomic_t session_id = ATOMIC_INIT(0);
|
|
+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
|
|
|
|
/**
|
|
* audit_set_loginuid - set current task's audit_context loginuid
|
|
@@ -2322,7 +2322,7 @@ int audit_set_loginuid(uid_t loginuid)
|
|
return -EPERM;
|
|
#endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
|
|
|
|
- sessionid = atomic_inc_return(&session_id);
|
|
+ sessionid = atomic_inc_return_unchecked(&session_id);
|
|
if (context && context->in_syscall) {
|
|
struct audit_buffer *ab;
|
|
|
|
diff --git a/kernel/capability.c b/kernel/capability.c
|
|
index 3f1adb6..4b292a4 100644
|
|
--- a/kernel/capability.c
|
|
+++ b/kernel/capability.c
|
|
@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
|
|
* before modification is attempted and the application
|
|
* fails.
|
|
*/
|
|
+ if (tocopy > ARRAY_SIZE(kdata))
|
|
+ return -EFAULT;
|
|
+
|
|
if (copy_to_user(dataptr, kdata, tocopy
|
|
* sizeof(struct __user_cap_data_struct))) {
|
|
return -EFAULT;
|
|
diff --git a/kernel/compat.c b/kernel/compat.c
|
|
index d2c67aa..7fe61b2 100644
|
|
--- a/kernel/compat.c
|
|
+++ b/kernel/compat.c
|
|
@@ -220,7 +220,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
|
|
mm_segment_t oldfs;
|
|
long ret;
|
|
|
|
- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
|
|
+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
|
|
oldfs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
ret = hrtimer_nanosleep_restart(restart);
|
|
@@ -252,7 +252,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
|
|
oldfs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
ret = hrtimer_nanosleep(&tu,
|
|
- rmtp ? (struct timespec __user *)&rmt : NULL,
|
|
+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
|
|
HRTIMER_MODE_REL, CLOCK_MONOTONIC);
|
|
set_fs(oldfs);
|
|
|
|
@@ -361,7 +361,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
|
|
mm_segment_t old_fs = get_fs();
|
|
|
|
set_fs(KERNEL_DS);
|
|
- ret = sys_sigpending((old_sigset_t __user *) &s);
|
|
+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
|
|
set_fs(old_fs);
|
|
if (ret == 0)
|
|
ret = put_user(s, set);
|
|
@@ -451,7 +451,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
|
|
mm_segment_t old_fs = get_fs();
|
|
|
|
set_fs(KERNEL_DS);
|
|
- ret = sys_old_getrlimit(resource, &r);
|
|
+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
|
|
set_fs(old_fs);
|
|
|
|
if (!ret) {
|
|
@@ -523,7 +523,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
|
|
mm_segment_t old_fs = get_fs();
|
|
|
|
set_fs(KERNEL_DS);
|
|
- ret = sys_getrusage(who, (struct rusage __user *) &r);
|
|
+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
|
|
set_fs(old_fs);
|
|
|
|
if (ret)
|
|
@@ -550,8 +550,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
|
|
set_fs (KERNEL_DS);
|
|
ret = sys_wait4(pid,
|
|
(stat_addr ?
|
|
- (unsigned int __user *) &status : NULL),
|
|
- options, (struct rusage __user *) &r);
|
|
+ (unsigned int __force_user *) &status : NULL),
|
|
+ options, (struct rusage __force_user *) &r);
|
|
set_fs (old_fs);
|
|
|
|
if (ret > 0) {
|
|
@@ -576,8 +576,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
|
|
memset(&info, 0, sizeof(info));
|
|
|
|
set_fs(KERNEL_DS);
|
|
- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
|
|
- uru ? (struct rusage __user *)&ru : NULL);
|
|
+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
|
|
+ uru ? (struct rusage __force_user *)&ru : NULL);
|
|
set_fs(old_fs);
|
|
|
|
if ((ret < 0) || (info.si_signo == 0))
|
|
@@ -707,8 +707,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
|
|
oldfs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
err = sys_timer_settime(timer_id, flags,
|
|
- (struct itimerspec __user *) &newts,
|
|
- (struct itimerspec __user *) &oldts);
|
|
+ (struct itimerspec __force_user *) &newts,
|
|
+ (struct itimerspec __force_user *) &oldts);
|
|
set_fs(oldfs);
|
|
if (!err && old && put_compat_itimerspec(old, &oldts))
|
|
return -EFAULT;
|
|
@@ -725,7 +725,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
|
|
oldfs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
err = sys_timer_gettime(timer_id,
|
|
- (struct itimerspec __user *) &ts);
|
|
+ (struct itimerspec __force_user *) &ts);
|
|
set_fs(oldfs);
|
|
if (!err && put_compat_itimerspec(setting, &ts))
|
|
return -EFAULT;
|
|
@@ -744,7 +744,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
|
|
oldfs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
err = sys_clock_settime(which_clock,
|
|
- (struct timespec __user *) &ts);
|
|
+ (struct timespec __force_user *) &ts);
|
|
set_fs(oldfs);
|
|
return err;
|
|
}
|
|
@@ -759,7 +759,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
|
|
oldfs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
err = sys_clock_gettime(which_clock,
|
|
- (struct timespec __user *) &ts);
|
|
+ (struct timespec __force_user *) &ts);
|
|
set_fs(oldfs);
|
|
if (!err && put_compat_timespec(&ts, tp))
|
|
return -EFAULT;
|
|
@@ -779,7 +779,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
|
|
|
|
oldfs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
|
|
+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
|
|
set_fs(oldfs);
|
|
|
|
err = compat_put_timex(utp, &txc);
|
|
@@ -799,7 +799,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
|
|
oldfs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
err = sys_clock_getres(which_clock,
|
|
- (struct timespec __user *) &ts);
|
|
+ (struct timespec __force_user *) &ts);
|
|
set_fs(oldfs);
|
|
if (!err && tp && put_compat_timespec(&ts, tp))
|
|
return -EFAULT;
|
|
@@ -811,9 +811,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
|
|
long err;
|
|
mm_segment_t oldfs;
|
|
struct timespec tu;
|
|
- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
|
|
+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
|
|
|
|
- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
|
|
+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
|
|
oldfs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
err = clock_nanosleep_restart(restart);
|
|
@@ -845,8 +845,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
|
|
oldfs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
err = sys_clock_nanosleep(which_clock, flags,
|
|
- (struct timespec __user *) &in,
|
|
- (struct timespec __user *) &out);
|
|
+ (struct timespec __force_user *) &in,
|
|
+ (struct timespec __force_user *) &out);
|
|
set_fs(oldfs);
|
|
|
|
if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
|
|
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
|
|
index 35b94ac..6a5a607 100644
|
|
--- a/kernel/debug/debug_core.c
|
|
+++ b/kernel/debug/debug_core.c
|
|
@@ -128,7 +128,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
|
|
*/
|
|
static atomic_t masters_in_kgdb;
|
|
static atomic_t slaves_in_kgdb;
|
|
-static atomic_t kgdb_break_tasklet_var;
|
|
+static atomic_unchecked_t kgdb_break_tasklet_var;
|
|
atomic_t kgdb_setting_breakpoint;
|
|
|
|
struct task_struct *kgdb_usethread;
|
|
@@ -138,7 +138,7 @@ int kgdb_single_step;
|
|
static pid_t kgdb_sstep_pid;
|
|
|
|
/* to keep track of the CPU which is doing the single stepping*/
|
|
-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
|
|
+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
|
|
|
|
/*
|
|
* If you are debugging a problem where roundup (the collection of
|
|
@@ -546,7 +546,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
|
|
* kernel will only try for the value of sstep_tries before
|
|
* giving up and continuing on.
|
|
*/
|
|
- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
|
|
+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
|
|
(kgdb_info[cpu].task &&
|
|
kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
|
|
atomic_set(&kgdb_active, -1);
|
|
@@ -640,8 +640,8 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
|
|
}
|
|
|
|
kgdb_restore:
|
|
- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
|
|
- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
|
|
+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
|
|
+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
|
|
if (kgdb_info[sstep_cpu].task)
|
|
kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
|
|
else
|
|
@@ -873,18 +873,18 @@ static void kgdb_unregister_callbacks(void)
|
|
static void kgdb_tasklet_bpt(unsigned long ing)
|
|
{
|
|
kgdb_breakpoint();
|
|
- atomic_set(&kgdb_break_tasklet_var, 0);
|
|
+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
|
|
}
|
|
|
|
static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
|
|
|
|
void kgdb_schedule_breakpoint(void)
|
|
{
|
|
- if (atomic_read(&kgdb_break_tasklet_var) ||
|
|
+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
|
|
atomic_read(&kgdb_active) != -1 ||
|
|
atomic_read(&kgdb_setting_breakpoint))
|
|
return;
|
|
- atomic_inc(&kgdb_break_tasklet_var);
|
|
+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
|
|
tasklet_schedule(&kgdb_tasklet_breakpoint);
|
|
}
|
|
EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
|
|
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
|
|
index d1342c2..2c72f70 100644
|
|
--- a/kernel/debug/kdb/kdb_main.c
|
|
+++ b/kernel/debug/kdb/kdb_main.c
|
|
@@ -1983,7 +1983,7 @@ static int kdb_lsmod(int argc, const char **argv)
|
|
list_for_each_entry(mod, kdb_modules, list) {
|
|
|
|
kdb_printf("%-20s%8u 0x%p ", mod->name,
|
|
- mod->core_size, (void *)mod);
|
|
+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
|
|
#ifdef CONFIG_MODULE_UNLOAD
|
|
kdb_printf("%4ld ", module_refcount(mod));
|
|
#endif
|
|
@@ -1993,7 +1993,7 @@ static int kdb_lsmod(int argc, const char **argv)
|
|
kdb_printf(" (Loading)");
|
|
else
|
|
kdb_printf(" (Live)");
|
|
- kdb_printf(" 0x%p", mod->module_core);
|
|
+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
|
|
|
|
#ifdef CONFIG_MODULE_UNLOAD
|
|
{
|
|
diff --git a/kernel/events/core.c b/kernel/events/core.c
|
|
index 946dba5..574b92d 100644
|
|
--- a/kernel/events/core.c
|
|
+++ b/kernel/events/core.c
|
|
@@ -182,7 +182,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
|
|
return 0;
|
|
}
|
|
|
|
-static atomic64_t perf_event_id;
|
|
+static atomic64_unchecked_t perf_event_id;
|
|
|
|
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
|
|
enum event_type_t event_type);
|
|
@@ -2727,7 +2727,7 @@ static void __perf_event_read(void *info)
|
|
|
|
static inline u64 perf_event_count(struct perf_event *event)
|
|
{
|
|
- return local64_read(&event->count) + atomic64_read(&event->child_count);
|
|
+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
|
|
}
|
|
|
|
static u64 perf_event_read(struct perf_event *event)
|
|
@@ -3073,9 +3073,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
|
|
mutex_lock(&event->child_mutex);
|
|
total += perf_event_read(event);
|
|
*enabled += event->total_time_enabled +
|
|
- atomic64_read(&event->child_total_time_enabled);
|
|
+ atomic64_read_unchecked(&event->child_total_time_enabled);
|
|
*running += event->total_time_running +
|
|
- atomic64_read(&event->child_total_time_running);
|
|
+ atomic64_read_unchecked(&event->child_total_time_running);
|
|
|
|
list_for_each_entry(child, &event->child_list, child_list) {
|
|
total += perf_event_read(child);
|
|
@@ -3487,10 +3487,10 @@ void perf_event_update_userpage(struct perf_event *event)
|
|
userpg->offset -= local64_read(&event->hw.prev_count);
|
|
|
|
userpg->time_enabled = enabled +
|
|
- atomic64_read(&event->child_total_time_enabled);
|
|
+ atomic64_read_unchecked(&event->child_total_time_enabled);
|
|
|
|
userpg->time_running = running +
|
|
- atomic64_read(&event->child_total_time_running);
|
|
+ atomic64_read_unchecked(&event->child_total_time_running);
|
|
|
|
arch_perf_update_userpage(userpg, now);
|
|
|
|
@@ -4012,11 +4012,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
|
|
values[n++] = perf_event_count(event);
|
|
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
|
|
values[n++] = enabled +
|
|
- atomic64_read(&event->child_total_time_enabled);
|
|
+ atomic64_read_unchecked(&event->child_total_time_enabled);
|
|
}
|
|
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
|
|
values[n++] = running +
|
|
- atomic64_read(&event->child_total_time_running);
|
|
+ atomic64_read_unchecked(&event->child_total_time_running);
|
|
}
|
|
if (read_format & PERF_FORMAT_ID)
|
|
values[n++] = primary_event_id(event);
|
|
@@ -4694,12 +4694,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
|
|
* need to add enough zero bytes after the string to handle
|
|
* the 64bit alignment we do later.
|
|
*/
|
|
- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
|
|
+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
|
|
if (!buf) {
|
|
name = strncpy(tmp, "//enomem", sizeof(tmp));
|
|
goto got_name;
|
|
}
|
|
- name = d_path(&file->f_path, buf, PATH_MAX);
|
|
+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
|
|
if (IS_ERR(name)) {
|
|
name = strncpy(tmp, "//toolong", sizeof(tmp));
|
|
goto got_name;
|
|
@@ -6127,7 +6127,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
|
|
event->parent = parent_event;
|
|
|
|
event->ns = get_pid_ns(current->nsproxy->pid_ns);
|
|
- event->id = atomic64_inc_return(&perf_event_id);
|
|
+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
|
|
|
|
event->state = PERF_EVENT_STATE_INACTIVE;
|
|
|
|
@@ -6711,10 +6711,10 @@ static void sync_child_event(struct perf_event *child_event,
|
|
/*
|
|
* Add back the child's count to the parent's count:
|
|
*/
|
|
- atomic64_add(child_val, &parent_event->child_count);
|
|
- atomic64_add(child_event->total_time_enabled,
|
|
+ atomic64_add_unchecked(child_val, &parent_event->child_count);
|
|
+ atomic64_add_unchecked(child_event->total_time_enabled,
|
|
&parent_event->child_total_time_enabled);
|
|
- atomic64_add(child_event->total_time_running,
|
|
+ atomic64_add_unchecked(child_event->total_time_running,
|
|
&parent_event->child_total_time_running);
|
|
|
|
/*
|
|
diff --git a/kernel/exit.c b/kernel/exit.c
|
|
index f28427b..db16445 100644
|
|
--- a/kernel/exit.c
|
|
+++ b/kernel/exit.c
|
|
@@ -383,7 +383,7 @@ int allow_signal(int sig)
|
|
* know it'll be handled, so that they don't get converted to
|
|
* SIGKILL or just silently dropped.
|
|
*/
|
|
- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
|
|
+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
|
|
recalc_sigpending();
|
|
spin_unlock_irq(¤t->sighand->siglock);
|
|
return 0;
|
|
@@ -1106,7 +1106,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
|
|
* Take down every thread in the group. This is called by fatal signals
|
|
* as well as by sys_exit_group (below).
|
|
*/
|
|
-void
|
|
+__noreturn void
|
|
do_group_exit(int exit_code)
|
|
{
|
|
struct signal_struct *sig = current->signal;
|
|
diff --git a/kernel/fork.c b/kernel/fork.c
|
|
index c48862f..c35da73 100644
|
|
--- a/kernel/fork.c
|
|
+++ b/kernel/fork.c
|
|
@@ -313,7 +313,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
|
|
*stackend = STACK_END_MAGIC; /* for overflow detection */
|
|
|
|
#ifdef CONFIG_CC_STACKPROTECTOR
|
|
- tsk->stack_canary = get_random_int();
|
|
+ tsk->stack_canary = pax_get_random_long();
|
|
#endif
|
|
|
|
/*
|
|
@@ -337,13 +337,78 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
|
|
}
|
|
|
|
#ifdef CONFIG_MMU
|
|
+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
|
|
+{
|
|
+ struct vm_area_struct *tmp;
|
|
+ unsigned long charge;
|
|
+ struct mempolicy *pol;
|
|
+ struct file *file;
|
|
+
|
|
+ charge = 0;
|
|
+ if (mpnt->vm_flags & VM_ACCOUNT) {
|
|
+ unsigned long len;
|
|
+ len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
|
|
+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
|
|
+ goto fail_nomem;
|
|
+ charge = len;
|
|
+ }
|
|
+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
|
|
+ if (!tmp)
|
|
+ goto fail_nomem;
|
|
+ *tmp = *mpnt;
|
|
+ tmp->vm_mm = mm;
|
|
+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
|
|
+ pol = mpol_dup(vma_policy(mpnt));
|
|
+ if (IS_ERR(pol))
|
|
+ goto fail_nomem_policy;
|
|
+ vma_set_policy(tmp, pol);
|
|
+ if (anon_vma_fork(tmp, mpnt))
|
|
+ goto fail_nomem_anon_vma_fork;
|
|
+ tmp->vm_flags &= ~VM_LOCKED;
|
|
+ tmp->vm_next = tmp->vm_prev = NULL;
|
|
+ tmp->vm_mirror = NULL;
|
|
+ file = tmp->vm_file;
|
|
+ if (file) {
|
|
+ struct inode *inode = file->f_path.dentry->d_inode;
|
|
+ struct address_space *mapping = file->f_mapping;
|
|
+
|
|
+ get_file(file);
|
|
+ if (tmp->vm_flags & VM_DENYWRITE)
|
|
+ atomic_dec(&inode->i_writecount);
|
|
+ mutex_lock(&mapping->i_mmap_mutex);
|
|
+ if (tmp->vm_flags & VM_SHARED)
|
|
+ mapping->i_mmap_writable++;
|
|
+ flush_dcache_mmap_lock(mapping);
|
|
+ /* insert tmp into the share list, just after mpnt */
|
|
+ vma_prio_tree_add(tmp, mpnt);
|
|
+ flush_dcache_mmap_unlock(mapping);
|
|
+ mutex_unlock(&mapping->i_mmap_mutex);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Clear hugetlb-related page reserves for children. This only
|
|
+ * affects MAP_PRIVATE mappings. Faults generated by the child
|
|
+ * are not guaranteed to succeed, even if read-only
|
|
+ */
|
|
+ if (is_vm_hugetlb_page(tmp))
|
|
+ reset_vma_resv_huge_pages(tmp);
|
|
+
|
|
+ return tmp;
|
|
+
|
|
+fail_nomem_anon_vma_fork:
|
|
+ mpol_put(pol);
|
|
+fail_nomem_policy:
|
|
+ kmem_cache_free(vm_area_cachep, tmp);
|
|
+fail_nomem:
|
|
+ vm_unacct_memory(charge);
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
|
|
{
|
|
struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
|
|
struct rb_node **rb_link, *rb_parent;
|
|
int retval;
|
|
- unsigned long charge;
|
|
- struct mempolicy *pol;
|
|
|
|
down_write(&oldmm->mmap_sem);
|
|
flush_cache_dup_mm(oldmm);
|
|
@@ -355,8 +420,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
|
|
mm->locked_vm = 0;
|
|
mm->mmap = NULL;
|
|
mm->mmap_cache = NULL;
|
|
- mm->free_area_cache = oldmm->mmap_base;
|
|
- mm->cached_hole_size = ~0UL;
|
|
+ mm->free_area_cache = oldmm->free_area_cache;
|
|
+ mm->cached_hole_size = oldmm->cached_hole_size;
|
|
mm->map_count = 0;
|
|
cpumask_clear(mm_cpumask(mm));
|
|
mm->mm_rb = RB_ROOT;
|
|
@@ -372,8 +437,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
|
|
|
|
prev = NULL;
|
|
for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
|
|
- struct file *file;
|
|
-
|
|
if (mpnt->vm_flags & VM_DONTCOPY) {
|
|
long pages = vma_pages(mpnt);
|
|
mm->total_vm -= pages;
|
|
@@ -381,56 +444,13 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
|
|
-pages);
|
|
continue;
|
|
}
|
|
- charge = 0;
|
|
- if (mpnt->vm_flags & VM_ACCOUNT) {
|
|
- unsigned long len;
|
|
- len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
|
|
- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
|
|
- goto fail_nomem;
|
|
- charge = len;
|
|
- }
|
|
- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
|
|
- if (!tmp)
|
|
- goto fail_nomem;
|
|
- *tmp = *mpnt;
|
|
- INIT_LIST_HEAD(&tmp->anon_vma_chain);
|
|
- pol = mpol_dup(vma_policy(mpnt));
|
|
- retval = PTR_ERR(pol);
|
|
- if (IS_ERR(pol))
|
|
- goto fail_nomem_policy;
|
|
- vma_set_policy(tmp, pol);
|
|
- tmp->vm_mm = mm;
|
|
- if (anon_vma_fork(tmp, mpnt))
|
|
- goto fail_nomem_anon_vma_fork;
|
|
- tmp->vm_flags &= ~VM_LOCKED;
|
|
- tmp->vm_next = tmp->vm_prev = NULL;
|
|
- file = tmp->vm_file;
|
|
- if (file) {
|
|
- struct inode *inode = file->f_path.dentry->d_inode;
|
|
- struct address_space *mapping = file->f_mapping;
|
|
-
|
|
- get_file(file);
|
|
- if (tmp->vm_flags & VM_DENYWRITE)
|
|
- atomic_dec(&inode->i_writecount);
|
|
- mutex_lock(&mapping->i_mmap_mutex);
|
|
- if (tmp->vm_flags & VM_SHARED)
|
|
- mapping->i_mmap_writable++;
|
|
- flush_dcache_mmap_lock(mapping);
|
|
- /* insert tmp into the share list, just after mpnt */
|
|
- vma_prio_tree_add(tmp, mpnt);
|
|
- flush_dcache_mmap_unlock(mapping);
|
|
- mutex_unlock(&mapping->i_mmap_mutex);
|
|
+ tmp = dup_vma(mm, oldmm, mpnt);
|
|
+ if (!tmp) {
|
|
+ retval = -ENOMEM;
|
|
+ goto out;
|
|
}
|
|
|
|
/*
|
|
- * Clear hugetlb-related page reserves for children. This only
|
|
- * affects MAP_PRIVATE mappings. Faults generated by the child
|
|
- * are not guaranteed to succeed, even if read-only
|
|
- */
|
|
- if (is_vm_hugetlb_page(tmp))
|
|
- reset_vma_resv_huge_pages(tmp);
|
|
-
|
|
- /*
|
|
* Link in the new vma and copy the page table entries.
|
|
*/
|
|
*pprev = tmp;
|
|
@@ -451,6 +471,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
|
|
if (retval)
|
|
goto out;
|
|
}
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
|
|
+ struct vm_area_struct *mpnt_m;
|
|
+
|
|
+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
|
|
+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
|
|
+
|
|
+ if (!mpnt->vm_mirror)
|
|
+ continue;
|
|
+
|
|
+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
|
|
+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
|
|
+ mpnt->vm_mirror = mpnt_m;
|
|
+ } else {
|
|
+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
|
|
+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
|
|
+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
|
|
+ mpnt->vm_mirror->vm_mirror = mpnt;
|
|
+ }
|
|
+ }
|
|
+ BUG_ON(mpnt_m);
|
|
+ }
|
|
+#endif
|
|
+
|
|
/* a new mm has just been created */
|
|
arch_dup_mmap(oldmm, mm);
|
|
retval = 0;
|
|
@@ -459,14 +504,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
|
|
flush_tlb_mm(oldmm);
|
|
up_write(&oldmm->mmap_sem);
|
|
return retval;
|
|
-fail_nomem_anon_vma_fork:
|
|
- mpol_put(pol);
|
|
-fail_nomem_policy:
|
|
- kmem_cache_free(vm_area_cachep, tmp);
|
|
-fail_nomem:
|
|
- retval = -ENOMEM;
|
|
- vm_unacct_memory(charge);
|
|
- goto out;
|
|
}
|
|
|
|
static inline int mm_alloc_pgd(struct mm_struct *mm)
|
|
@@ -921,7 +958,7 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
|
|
spin_unlock(&fs->lock);
|
|
return -EAGAIN;
|
|
}
|
|
- fs->users++;
|
|
+ atomic_inc(&fs->users);
|
|
spin_unlock(&fs->lock);
|
|
return 0;
|
|
}
|
|
@@ -1765,7 +1802,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
|
|
return 0;
|
|
|
|
/* don't need lock here; in the worst case we'll do useless copy */
|
|
- if (fs->users == 1)
|
|
+ if (atomic_read(&fs->users) == 1)
|
|
return 0;
|
|
|
|
*new_fsp = copy_fs_struct(fs);
|
|
@@ -1854,7 +1891,7 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
|
|
fs = current->fs;
|
|
spin_lock(&fs->lock);
|
|
current->fs = new_fs;
|
|
- if (--fs->users)
|
|
+ if (atomic_dec_return(&fs->users))
|
|
new_fs = NULL;
|
|
else
|
|
new_fs = fs;
|
|
diff --git a/kernel/futex.c b/kernel/futex.c
|
|
index 1ae5049..982fcbf 100644
|
|
--- a/kernel/futex.c
|
|
+++ b/kernel/futex.c
|
|
@@ -243,6 +243,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
|
|
struct page *page, *page_head;
|
|
int err, ro = 0;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
|
|
+ return -EFAULT;
|
|
+#endif
|
|
+
|
|
/*
|
|
* The futex address must be "naturally" aligned.
|
|
*/
|
|
@@ -2859,6 +2864,7 @@ static int __init futex_init(void)
|
|
{
|
|
u32 curval;
|
|
int i;
|
|
+ mm_segment_t oldfs;
|
|
|
|
/*
|
|
* This will fail and we want it. Some arch implementations do
|
|
@@ -2870,8 +2876,11 @@ static int __init futex_init(void)
|
|
* implementation, the non-functional ones will return
|
|
* -ENOSYS.
|
|
*/
|
|
+ oldfs = get_fs();
|
|
+ set_fs(USER_DS);
|
|
if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
|
|
futex_cmpxchg_enabled = 1;
|
|
+ set_fs(oldfs);
|
|
|
|
for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
|
|
plist_head_init(&futex_queues[i].chain);
|
|
diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
|
|
index 9b22d03..6295b62 100644
|
|
--- a/kernel/gcov/base.c
|
|
+++ b/kernel/gcov/base.c
|
|
@@ -102,11 +102,6 @@ void gcov_enable_events(void)
|
|
}
|
|
|
|
#ifdef CONFIG_MODULES
|
|
-static inline int within(void *addr, void *start, unsigned long size)
|
|
-{
|
|
- return ((addr >= start) && (addr < start + size));
|
|
-}
|
|
-
|
|
/* Update list and generate events when modules are unloaded. */
|
|
static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
|
|
void *data)
|
|
@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
|
|
prev = NULL;
|
|
/* Remove entries located in module from linked list. */
|
|
for (info = gcov_info_head; info; info = info->next) {
|
|
- if (within(info, mod->module_core, mod->core_size)) {
|
|
+ if (within_module_core_rw((unsigned long)info, mod)) {
|
|
if (prev)
|
|
prev->next = info->next;
|
|
else
|
|
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
|
|
index db4dc6b..c7cc9a7 100644
|
|
--- a/kernel/hrtimer.c
|
|
+++ b/kernel/hrtimer.c
|
|
@@ -1448,7 +1448,7 @@ void hrtimer_peek_ahead_timers(void)
|
|
local_irq_restore(flags);
|
|
}
|
|
|
|
-static void run_hrtimer_softirq(struct softirq_action *h)
|
|
+static void run_hrtimer_softirq(void)
|
|
{
|
|
hrtimer_peek_ahead_timers();
|
|
}
|
|
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
|
|
index 4304919..408c4c0 100644
|
|
--- a/kernel/jump_label.c
|
|
+++ b/kernel/jump_label.c
|
|
@@ -13,6 +13,7 @@
|
|
#include <linux/sort.h>
|
|
#include <linux/err.h>
|
|
#include <linux/static_key.h>
|
|
+#include <linux/mm.h>
|
|
|
|
#ifdef HAVE_JUMP_LABEL
|
|
|
|
@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
|
|
|
|
size = (((unsigned long)stop - (unsigned long)start)
|
|
/ sizeof(struct jump_entry));
|
|
+ pax_open_kernel();
|
|
sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
static void jump_label_update(struct static_key *key, int enable);
|
|
@@ -356,10 +359,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
|
|
struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
|
|
struct jump_entry *iter;
|
|
|
|
+ pax_open_kernel();
|
|
for (iter = iter_start; iter < iter_stop; iter++) {
|
|
if (within_module_init(iter->code, mod))
|
|
iter->code = 0;
|
|
}
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
static int
|
|
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
|
|
index 2169fee..56966b7 100644
|
|
--- a/kernel/kallsyms.c
|
|
+++ b/kernel/kallsyms.c
|
|
@@ -53,12 +53,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
|
|
|
|
static inline int is_kernel_inittext(unsigned long addr)
|
|
{
|
|
+ if (system_state != SYSTEM_BOOTING)
|
|
+ return 0;
|
|
+
|
|
if (addr >= (unsigned long)_sinittext
|
|
&& addr <= (unsigned long)_einittext)
|
|
return 1;
|
|
return 0;
|
|
}
|
|
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+#ifdef CONFIG_MODULES
|
|
+static inline int is_module_text(unsigned long addr)
|
|
+{
|
|
+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
|
|
+ return 1;
|
|
+
|
|
+ addr = ktla_ktva(addr);
|
|
+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
|
|
+}
|
|
+#else
|
|
+static inline int is_module_text(unsigned long addr)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+#endif
|
|
+
|
|
static inline int is_kernel_text(unsigned long addr)
|
|
{
|
|
if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
|
|
@@ -69,13 +90,28 @@ static inline int is_kernel_text(unsigned long addr)
|
|
|
|
static inline int is_kernel(unsigned long addr)
|
|
{
|
|
+
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
|
|
+ return 1;
|
|
+
|
|
+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
|
|
+#else
|
|
if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
|
|
+#endif
|
|
+
|
|
return 1;
|
|
return in_gate_area_no_mm(addr);
|
|
}
|
|
|
|
static int is_ksym_addr(unsigned long addr)
|
|
{
|
|
+
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ if (is_module_text(addr))
|
|
+ return 0;
|
|
+#endif
|
|
+
|
|
if (all_var)
|
|
return is_kernel(addr);
|
|
|
|
@@ -470,7 +506,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
|
|
|
|
static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
|
|
{
|
|
- iter->name[0] = '\0';
|
|
iter->nameoff = get_symbol_offset(new_pos);
|
|
iter->pos = new_pos;
|
|
}
|
|
@@ -556,7 +591,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
|
|
struct kallsym_iter *iter;
|
|
int ret;
|
|
|
|
- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
|
|
+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
|
|
if (!iter)
|
|
return -ENOMEM;
|
|
reset_iter(iter, 0);
|
|
diff --git a/kernel/kexec.c b/kernel/kexec.c
|
|
index aef7893..619faa8 100644
|
|
--- a/kernel/kexec.c
|
|
+++ b/kernel/kexec.c
|
|
@@ -1050,7 +1050,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
|
|
unsigned long flags)
|
|
{
|
|
struct compat_kexec_segment in;
|
|
- struct kexec_segment out, __user *ksegments;
|
|
+ struct kexec_segment out;
|
|
+ struct kexec_segment __user *ksegments;
|
|
unsigned long i, result;
|
|
|
|
/* Don't allow clients that don't understand the native
|
|
diff --git a/kernel/kmod.c b/kernel/kmod.c
|
|
index f2490e1..14134b9 100644
|
|
--- a/kernel/kmod.c
|
|
+++ b/kernel/kmod.c
|
|
@@ -267,7 +267,7 @@ static int wait_for_helper(void *data)
|
|
*
|
|
* Thus the __user pointer cast is valid here.
|
|
*/
|
|
- sys_wait4(pid, (int __user *)&ret, 0, NULL);
|
|
+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
|
|
|
|
/*
|
|
* If ret is 0, either ____call_usermodehelper failed and the
|
|
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
|
|
index c62b854..cb67968 100644
|
|
--- a/kernel/kprobes.c
|
|
+++ b/kernel/kprobes.c
|
|
@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
|
|
* kernel image and loaded module images reside. This is required
|
|
* so x86_64 can correctly handle the %rip-relative fixups.
|
|
*/
|
|
- kip->insns = module_alloc(PAGE_SIZE);
|
|
+ kip->insns = module_alloc_exec(PAGE_SIZE);
|
|
if (!kip->insns) {
|
|
kfree(kip);
|
|
return NULL;
|
|
@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
|
|
*/
|
|
if (!list_is_singular(&kip->list)) {
|
|
list_del(&kip->list);
|
|
- module_free(NULL, kip->insns);
|
|
+ module_free_exec(NULL, kip->insns);
|
|
kfree(kip);
|
|
}
|
|
return 1;
|
|
@@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
|
|
{
|
|
int i, err = 0;
|
|
unsigned long offset = 0, size = 0;
|
|
- char *modname, namebuf[128];
|
|
+ char *modname, namebuf[KSYM_NAME_LEN];
|
|
const char *symbol_name;
|
|
void *addr;
|
|
struct kprobe_blackpoint *kb;
|
|
@@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
|
|
const char *sym = NULL;
|
|
unsigned int i = *(loff_t *) v;
|
|
unsigned long offset = 0;
|
|
- char *modname, namebuf[128];
|
|
+ char *modname, namebuf[KSYM_NAME_LEN];
|
|
|
|
head = &kprobe_table[i];
|
|
preempt_disable();
|
|
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
|
|
index fdcf7ec..944fac1 100644
|
|
--- a/kernel/lockdep.c
|
|
+++ b/kernel/lockdep.c
|
|
@@ -590,6 +590,10 @@ static int static_obj(void *obj)
|
|
end = (unsigned long) &_end,
|
|
addr = (unsigned long) obj;
|
|
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ start = ktla_ktva(start);
|
|
+#endif
|
|
+
|
|
/*
|
|
* static variable?
|
|
*/
|
|
@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
|
|
if (!static_obj(lock->key)) {
|
|
debug_locks_off();
|
|
printk("INFO: trying to register non-static key.\n");
|
|
+ printk("lock:%pS key:%pS.\n", lock, lock->key);
|
|
printk("the code is fine but needs lockdep annotation.\n");
|
|
printk("turning off the locking correctness validator.\n");
|
|
dump_stack();
|
|
@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
|
|
if (!class)
|
|
return 0;
|
|
}
|
|
- atomic_inc((atomic_t *)&class->ops);
|
|
+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
|
|
if (very_verbose(class)) {
|
|
printk("\nacquire class [%p] %s", class->key, class->name);
|
|
if (class->name_version > 1)
|
|
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
|
|
index 91c32a0..b2c71c5 100644
|
|
--- a/kernel/lockdep_proc.c
|
|
+++ b/kernel/lockdep_proc.c
|
|
@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
|
|
|
|
static void print_name(struct seq_file *m, struct lock_class *class)
|
|
{
|
|
- char str[128];
|
|
+ char str[KSYM_NAME_LEN];
|
|
const char *name = class->name;
|
|
|
|
if (!name) {
|
|
diff --git a/kernel/module.c b/kernel/module.c
|
|
index 5e39896..4199198 100644
|
|
--- a/kernel/module.c
|
|
+++ b/kernel/module.c
|
|
@@ -114,7 +114,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
|
|
|
|
/* Bounds of module allocation, for speeding __module_address.
|
|
* Protected by module_mutex. */
|
|
-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
|
|
+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
|
|
+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
|
|
|
|
int register_module_notifier(struct notifier_block * nb)
|
|
{
|
|
@@ -278,7 +279,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
|
|
return true;
|
|
|
|
list_for_each_entry_rcu(mod, &modules, list) {
|
|
- struct symsearch arr[] = {
|
|
+ struct symsearch modarr[] = {
|
|
{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
|
|
NOT_GPL_ONLY, false },
|
|
{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
|
|
@@ -300,7 +301,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
|
|
#endif
|
|
};
|
|
|
|
- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
|
|
+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
|
|
return true;
|
|
}
|
|
return false;
|
|
@@ -432,7 +433,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
|
|
static int percpu_modalloc(struct module *mod,
|
|
unsigned long size, unsigned long align)
|
|
{
|
|
- if (align > PAGE_SIZE) {
|
|
+ if (align-1 >= PAGE_SIZE) {
|
|
printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
|
|
mod->name, align, PAGE_SIZE);
|
|
align = PAGE_SIZE;
|
|
@@ -1032,7 +1033,7 @@ struct module_attribute module_uevent =
|
|
static ssize_t show_coresize(struct module_attribute *mattr,
|
|
struct module_kobject *mk, char *buffer)
|
|
{
|
|
- return sprintf(buffer, "%u\n", mk->mod->core_size);
|
|
+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
|
|
}
|
|
|
|
static struct module_attribute modinfo_coresize =
|
|
@@ -1041,7 +1042,7 @@ static struct module_attribute modinfo_coresize =
|
|
static ssize_t show_initsize(struct module_attribute *mattr,
|
|
struct module_kobject *mk, char *buffer)
|
|
{
|
|
- return sprintf(buffer, "%u\n", mk->mod->init_size);
|
|
+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
|
|
}
|
|
|
|
static struct module_attribute modinfo_initsize =
|
|
@@ -1721,21 +1722,21 @@ static void set_section_ro_nx(void *base,
|
|
|
|
static void unset_module_core_ro_nx(struct module *mod)
|
|
{
|
|
- set_page_attributes(mod->module_core + mod->core_text_size,
|
|
- mod->module_core + mod->core_size,
|
|
+ set_page_attributes(mod->module_core_rw,
|
|
+ mod->module_core_rw + mod->core_size_rw,
|
|
set_memory_x);
|
|
- set_page_attributes(mod->module_core,
|
|
- mod->module_core + mod->core_ro_size,
|
|
+ set_page_attributes(mod->module_core_rx,
|
|
+ mod->module_core_rx + mod->core_size_rx,
|
|
set_memory_rw);
|
|
}
|
|
|
|
static void unset_module_init_ro_nx(struct module *mod)
|
|
{
|
|
- set_page_attributes(mod->module_init + mod->init_text_size,
|
|
- mod->module_init + mod->init_size,
|
|
+ set_page_attributes(mod->module_init_rw,
|
|
+ mod->module_init_rw + mod->init_size_rw,
|
|
set_memory_x);
|
|
- set_page_attributes(mod->module_init,
|
|
- mod->module_init + mod->init_ro_size,
|
|
+ set_page_attributes(mod->module_init_rx,
|
|
+ mod->module_init_rx + mod->init_size_rx,
|
|
set_memory_rw);
|
|
}
|
|
|
|
@@ -1746,14 +1747,14 @@ void set_all_modules_text_rw(void)
|
|
|
|
mutex_lock(&module_mutex);
|
|
list_for_each_entry_rcu(mod, &modules, list) {
|
|
- if ((mod->module_core) && (mod->core_text_size)) {
|
|
- set_page_attributes(mod->module_core,
|
|
- mod->module_core + mod->core_text_size,
|
|
+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
|
|
+ set_page_attributes(mod->module_core_rx,
|
|
+ mod->module_core_rx + mod->core_size_rx,
|
|
set_memory_rw);
|
|
}
|
|
- if ((mod->module_init) && (mod->init_text_size)) {
|
|
- set_page_attributes(mod->module_init,
|
|
- mod->module_init + mod->init_text_size,
|
|
+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
|
|
+ set_page_attributes(mod->module_init_rx,
|
|
+ mod->module_init_rx + mod->init_size_rx,
|
|
set_memory_rw);
|
|
}
|
|
}
|
|
@@ -1767,14 +1768,14 @@ void set_all_modules_text_ro(void)
|
|
|
|
mutex_lock(&module_mutex);
|
|
list_for_each_entry_rcu(mod, &modules, list) {
|
|
- if ((mod->module_core) && (mod->core_text_size)) {
|
|
- set_page_attributes(mod->module_core,
|
|
- mod->module_core + mod->core_text_size,
|
|
+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
|
|
+ set_page_attributes(mod->module_core_rx,
|
|
+ mod->module_core_rx + mod->core_size_rx,
|
|
set_memory_ro);
|
|
}
|
|
- if ((mod->module_init) && (mod->init_text_size)) {
|
|
- set_page_attributes(mod->module_init,
|
|
- mod->module_init + mod->init_text_size,
|
|
+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
|
|
+ set_page_attributes(mod->module_init_rx,
|
|
+ mod->module_init_rx + mod->init_size_rx,
|
|
set_memory_ro);
|
|
}
|
|
}
|
|
@@ -1820,16 +1821,19 @@ static void free_module(struct module *mod)
|
|
|
|
/* This may be NULL, but that's OK */
|
|
unset_module_init_ro_nx(mod);
|
|
- module_free(mod, mod->module_init);
|
|
+ module_free(mod, mod->module_init_rw);
|
|
+ module_free_exec(mod, mod->module_init_rx);
|
|
kfree(mod->args);
|
|
percpu_modfree(mod);
|
|
|
|
/* Free lock-classes: */
|
|
- lockdep_free_key_range(mod->module_core, mod->core_size);
|
|
+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
|
|
+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
|
|
|
|
/* Finally, free the core (containing the module structure) */
|
|
unset_module_core_ro_nx(mod);
|
|
- module_free(mod, mod->module_core);
|
|
+ module_free_exec(mod, mod->module_core_rx);
|
|
+ module_free(mod, mod->module_core_rw);
|
|
|
|
#ifdef CONFIG_MPU
|
|
update_protections(current->mm);
|
|
@@ -1922,7 +1926,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
|
|
ksym = resolve_symbol_wait(mod, info, name);
|
|
/* Ok if resolved. */
|
|
if (ksym && !IS_ERR(ksym)) {
|
|
+ pax_open_kernel();
|
|
sym[i].st_value = ksym->value;
|
|
+ pax_close_kernel();
|
|
break;
|
|
}
|
|
|
|
@@ -1941,7 +1947,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
|
|
secbase = (unsigned long)mod_percpu(mod);
|
|
else
|
|
secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
|
|
+ pax_open_kernel();
|
|
sym[i].st_value += secbase;
|
|
+ pax_close_kernel();
|
|
break;
|
|
}
|
|
}
|
|
@@ -2049,22 +2057,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
|
|
|| s->sh_entsize != ~0UL
|
|
|| strstarts(sname, ".init"))
|
|
continue;
|
|
- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
|
|
+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
|
|
+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
|
|
+ else
|
|
+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
|
|
pr_debug("\t%s\n", sname);
|
|
}
|
|
- switch (m) {
|
|
- case 0: /* executable */
|
|
- mod->core_size = debug_align(mod->core_size);
|
|
- mod->core_text_size = mod->core_size;
|
|
- break;
|
|
- case 1: /* RO: text and ro-data */
|
|
- mod->core_size = debug_align(mod->core_size);
|
|
- mod->core_ro_size = mod->core_size;
|
|
- break;
|
|
- case 3: /* whole core */
|
|
- mod->core_size = debug_align(mod->core_size);
|
|
- break;
|
|
- }
|
|
}
|
|
|
|
pr_debug("Init section allocation order:\n");
|
|
@@ -2078,23 +2076,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
|
|
|| s->sh_entsize != ~0UL
|
|
|| !strstarts(sname, ".init"))
|
|
continue;
|
|
- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
|
|
- | INIT_OFFSET_MASK);
|
|
+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
|
|
+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
|
|
+ else
|
|
+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
|
|
+ s->sh_entsize |= INIT_OFFSET_MASK;
|
|
pr_debug("\t%s\n", sname);
|
|
}
|
|
- switch (m) {
|
|
- case 0: /* executable */
|
|
- mod->init_size = debug_align(mod->init_size);
|
|
- mod->init_text_size = mod->init_size;
|
|
- break;
|
|
- case 1: /* RO: text and ro-data */
|
|
- mod->init_size = debug_align(mod->init_size);
|
|
- mod->init_ro_size = mod->init_size;
|
|
- break;
|
|
- case 3: /* whole init */
|
|
- mod->init_size = debug_align(mod->init_size);
|
|
- break;
|
|
- }
|
|
}
|
|
}
|
|
|
|
@@ -2266,7 +2254,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
|
|
|
|
/* Put symbol section at end of init part of module. */
|
|
symsect->sh_flags |= SHF_ALLOC;
|
|
- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
|
|
+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
|
|
info->index.sym) | INIT_OFFSET_MASK;
|
|
pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
|
|
|
|
@@ -2286,13 +2274,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
|
|
}
|
|
|
|
/* Append room for core symbols at end of core part. */
|
|
- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
|
|
- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
|
|
- mod->core_size += strtab_size;
|
|
+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
|
|
+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
|
|
+ mod->core_size_rx += strtab_size;
|
|
|
|
/* Put string table section at end of init part of module. */
|
|
strsect->sh_flags |= SHF_ALLOC;
|
|
- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
|
|
+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
|
|
info->index.str) | INIT_OFFSET_MASK;
|
|
pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
|
|
}
|
|
@@ -2310,12 +2298,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
|
|
/* Make sure we get permanent strtab: don't use info->strtab. */
|
|
mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
|
|
|
|
+ pax_open_kernel();
|
|
+
|
|
/* Set types up while we still have access to sections. */
|
|
for (i = 0; i < mod->num_symtab; i++)
|
|
mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
|
|
|
|
- mod->core_symtab = dst = mod->module_core + info->symoffs;
|
|
- mod->core_strtab = s = mod->module_core + info->stroffs;
|
|
+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
|
|
+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
|
|
src = mod->symtab;
|
|
*s++ = 0;
|
|
for (ndst = i = 0; i < mod->num_symtab; i++) {
|
|
@@ -2328,6 +2318,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
|
|
}
|
|
}
|
|
mod->core_num_syms = ndst;
|
|
+
|
|
+ pax_close_kernel();
|
|
}
|
|
#else
|
|
static inline void layout_symtab(struct module *mod, struct load_info *info)
|
|
@@ -2361,17 +2353,33 @@ void * __weak module_alloc(unsigned long size)
|
|
return size == 0 ? NULL : vmalloc_exec(size);
|
|
}
|
|
|
|
-static void *module_alloc_update_bounds(unsigned long size)
|
|
+static void *module_alloc_update_bounds_rw(unsigned long size)
|
|
{
|
|
void *ret = module_alloc(size);
|
|
|
|
if (ret) {
|
|
mutex_lock(&module_mutex);
|
|
/* Update module bounds. */
|
|
- if ((unsigned long)ret < module_addr_min)
|
|
- module_addr_min = (unsigned long)ret;
|
|
- if ((unsigned long)ret + size > module_addr_max)
|
|
- module_addr_max = (unsigned long)ret + size;
|
|
+ if ((unsigned long)ret < module_addr_min_rw)
|
|
+ module_addr_min_rw = (unsigned long)ret;
|
|
+ if ((unsigned long)ret + size > module_addr_max_rw)
|
|
+ module_addr_max_rw = (unsigned long)ret + size;
|
|
+ mutex_unlock(&module_mutex);
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void *module_alloc_update_bounds_rx(unsigned long size)
|
|
+{
|
|
+ void *ret = module_alloc_exec(size);
|
|
+
|
|
+ if (ret) {
|
|
+ mutex_lock(&module_mutex);
|
|
+ /* Update module bounds. */
|
|
+ if ((unsigned long)ret < module_addr_min_rx)
|
|
+ module_addr_min_rx = (unsigned long)ret;
|
|
+ if ((unsigned long)ret + size > module_addr_max_rx)
|
|
+ module_addr_max_rx = (unsigned long)ret + size;
|
|
mutex_unlock(&module_mutex);
|
|
}
|
|
return ret;
|
|
@@ -2548,8 +2556,14 @@ static struct module *setup_load_info(struct load_info *info)
|
|
static int check_modinfo(struct module *mod, struct load_info *info)
|
|
{
|
|
const char *modmagic = get_modinfo(info, "vermagic");
|
|
+ const char *license = get_modinfo(info, "license");
|
|
int err;
|
|
|
|
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
|
|
+ if (!license || !license_is_gpl_compatible(license))
|
|
+ return -ENOEXEC;
|
|
+#endif
|
|
+
|
|
/* This is allowed: modprobe --force will invalidate it. */
|
|
if (!modmagic) {
|
|
err = try_to_force_load(mod, "bad vermagic");
|
|
@@ -2572,7 +2586,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
|
|
}
|
|
|
|
/* Set up license info based on the info section */
|
|
- set_license(mod, get_modinfo(info, "license"));
|
|
+ set_license(mod, license);
|
|
|
|
return 0;
|
|
}
|
|
@@ -2666,7 +2680,7 @@ static int move_module(struct module *mod, struct load_info *info)
|
|
void *ptr;
|
|
|
|
/* Do the allocs. */
|
|
- ptr = module_alloc_update_bounds(mod->core_size);
|
|
+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
|
|
/*
|
|
* The pointer to this block is stored in the module structure
|
|
* which is inside the block. Just mark it as not being a
|
|
@@ -2676,23 +2690,50 @@ static int move_module(struct module *mod, struct load_info *info)
|
|
if (!ptr)
|
|
return -ENOMEM;
|
|
|
|
- memset(ptr, 0, mod->core_size);
|
|
- mod->module_core = ptr;
|
|
+ memset(ptr, 0, mod->core_size_rw);
|
|
+ mod->module_core_rw = ptr;
|
|
|
|
- ptr = module_alloc_update_bounds(mod->init_size);
|
|
+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
|
|
/*
|
|
* The pointer to this block is stored in the module structure
|
|
* which is inside the block. This block doesn't need to be
|
|
* scanned as it contains data and code that will be freed
|
|
* after the module is initialized.
|
|
*/
|
|
- kmemleak_ignore(ptr);
|
|
- if (!ptr && mod->init_size) {
|
|
- module_free(mod, mod->module_core);
|
|
+ kmemleak_not_leak(ptr);
|
|
+ if (!ptr && mod->init_size_rw) {
|
|
+ module_free(mod, mod->module_core_rw);
|
|
return -ENOMEM;
|
|
}
|
|
- memset(ptr, 0, mod->init_size);
|
|
- mod->module_init = ptr;
|
|
+ memset(ptr, 0, mod->init_size_rw);
|
|
+ mod->module_init_rw = ptr;
|
|
+
|
|
+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
|
|
+ kmemleak_not_leak(ptr);
|
|
+ if (!ptr) {
|
|
+ module_free(mod, mod->module_init_rw);
|
|
+ module_free(mod, mod->module_core_rw);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ pax_open_kernel();
|
|
+ memset(ptr, 0, mod->core_size_rx);
|
|
+ pax_close_kernel();
|
|
+ mod->module_core_rx = ptr;
|
|
+
|
|
+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
|
|
+ kmemleak_not_leak(ptr);
|
|
+ if (!ptr && mod->init_size_rx) {
|
|
+ module_free_exec(mod, mod->module_core_rx);
|
|
+ module_free(mod, mod->module_init_rw);
|
|
+ module_free(mod, mod->module_core_rw);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ pax_open_kernel();
|
|
+ memset(ptr, 0, mod->init_size_rx);
|
|
+ pax_close_kernel();
|
|
+ mod->module_init_rx = ptr;
|
|
|
|
/* Transfer each section which specifies SHF_ALLOC */
|
|
pr_debug("final section addresses:\n");
|
|
@@ -2703,16 +2744,45 @@ static int move_module(struct module *mod, struct load_info *info)
|
|
if (!(shdr->sh_flags & SHF_ALLOC))
|
|
continue;
|
|
|
|
- if (shdr->sh_entsize & INIT_OFFSET_MASK)
|
|
- dest = mod->module_init
|
|
- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
|
|
- else
|
|
- dest = mod->module_core + shdr->sh_entsize;
|
|
+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
|
|
+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
|
|
+ dest = mod->module_init_rw
|
|
+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
|
|
+ else
|
|
+ dest = mod->module_init_rx
|
|
+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
|
|
+ } else {
|
|
+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
|
|
+ dest = mod->module_core_rw + shdr->sh_entsize;
|
|
+ else
|
|
+ dest = mod->module_core_rx + shdr->sh_entsize;
|
|
+ }
|
|
+
|
|
+ if (shdr->sh_type != SHT_NOBITS) {
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+#ifdef CONFIG_X86_64
|
|
+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
|
|
+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
|
|
+#endif
|
|
+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
|
|
+ pax_open_kernel();
|
|
+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
|
|
+ pax_close_kernel();
|
|
+ } else
|
|
+#endif
|
|
|
|
- if (shdr->sh_type != SHT_NOBITS)
|
|
memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
|
|
+ }
|
|
/* Update sh_addr to point to copy in image. */
|
|
- shdr->sh_addr = (unsigned long)dest;
|
|
+
|
|
+#ifdef CONFIG_PAX_KERNEXEC
|
|
+ if (shdr->sh_flags & SHF_EXECINSTR)
|
|
+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
|
|
+ else
|
|
+#endif
|
|
+
|
|
+ shdr->sh_addr = (unsigned long)dest;
|
|
pr_debug("\t0x%lx %s\n",
|
|
(long)shdr->sh_addr, info->secstrings + shdr->sh_name);
|
|
}
|
|
@@ -2767,12 +2837,12 @@ static void flush_module_icache(const struct module *mod)
|
|
* Do it before processing of module parameters, so the module
|
|
* can provide parameter accessor functions of its own.
|
|
*/
|
|
- if (mod->module_init)
|
|
- flush_icache_range((unsigned long)mod->module_init,
|
|
- (unsigned long)mod->module_init
|
|
- + mod->init_size);
|
|
- flush_icache_range((unsigned long)mod->module_core,
|
|
- (unsigned long)mod->module_core + mod->core_size);
|
|
+ if (mod->module_init_rx)
|
|
+ flush_icache_range((unsigned long)mod->module_init_rx,
|
|
+ (unsigned long)mod->module_init_rx
|
|
+ + mod->init_size_rx);
|
|
+ flush_icache_range((unsigned long)mod->module_core_rx,
|
|
+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
|
|
|
|
set_fs(old_fs);
|
|
}
|
|
@@ -2842,8 +2912,10 @@ static struct module *layout_and_allocate(struct load_info *info)
|
|
static void module_deallocate(struct module *mod, struct load_info *info)
|
|
{
|
|
percpu_modfree(mod);
|
|
- module_free(mod, mod->module_init);
|
|
- module_free(mod, mod->module_core);
|
|
+ module_free_exec(mod, mod->module_init_rx);
|
|
+ module_free_exec(mod, mod->module_core_rx);
|
|
+ module_free(mod, mod->module_init_rw);
|
|
+ module_free(mod, mod->module_core_rw);
|
|
}
|
|
|
|
int __weak module_finalize(const Elf_Ehdr *hdr,
|
|
@@ -3092,11 +3164,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
|
|
mod->strtab = mod->core_strtab;
|
|
#endif
|
|
unset_module_init_ro_nx(mod);
|
|
- module_free(mod, mod->module_init);
|
|
- mod->module_init = NULL;
|
|
- mod->init_size = 0;
|
|
- mod->init_ro_size = 0;
|
|
- mod->init_text_size = 0;
|
|
+ module_free(mod, mod->module_init_rw);
|
|
+ module_free_exec(mod, mod->module_init_rx);
|
|
+ mod->module_init_rw = NULL;
|
|
+ mod->module_init_rx = NULL;
|
|
+ mod->init_size_rw = 0;
|
|
+ mod->init_size_rx = 0;
|
|
mutex_unlock(&module_mutex);
|
|
|
|
return 0;
|
|
@@ -3127,10 +3200,16 @@ static const char *get_ksymbol(struct module *mod,
|
|
unsigned long nextval;
|
|
|
|
/* At worse, next value is at end of module */
|
|
- if (within_module_init(addr, mod))
|
|
- nextval = (unsigned long)mod->module_init+mod->init_text_size;
|
|
+ if (within_module_init_rx(addr, mod))
|
|
+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
|
|
+ else if (within_module_init_rw(addr, mod))
|
|
+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
|
|
+ else if (within_module_core_rx(addr, mod))
|
|
+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
|
|
+ else if (within_module_core_rw(addr, mod))
|
|
+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
|
|
else
|
|
- nextval = (unsigned long)mod->module_core+mod->core_text_size;
|
|
+ return NULL;
|
|
|
|
/* Scan for closest preceding symbol, and next symbol. (ELF
|
|
starts real symbols at 1). */
|
|
@@ -3365,7 +3444,7 @@ static int m_show(struct seq_file *m, void *p)
|
|
char buf[8];
|
|
|
|
seq_printf(m, "%s %u",
|
|
- mod->name, mod->init_size + mod->core_size);
|
|
+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
|
|
print_unload_info(m, mod);
|
|
|
|
/* Informative for users. */
|
|
@@ -3374,7 +3453,7 @@ static int m_show(struct seq_file *m, void *p)
|
|
mod->state == MODULE_STATE_COMING ? "Loading":
|
|
"Live");
|
|
/* Used by oprofile and other similar tools. */
|
|
- seq_printf(m, " 0x%pK", mod->module_core);
|
|
+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
|
|
|
|
/* Taints info */
|
|
if (mod->taints)
|
|
@@ -3469,12 +3548,12 @@ struct module *__module_address(unsigned long addr)
|
|
{
|
|
struct module *mod;
|
|
|
|
- if (addr < module_addr_min || addr > module_addr_max)
|
|
+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
|
|
+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
|
|
return NULL;
|
|
|
|
list_for_each_entry_rcu(mod, &modules, list)
|
|
- if (within_module_core(addr, mod)
|
|
- || within_module_init(addr, mod))
|
|
+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
|
|
return mod;
|
|
return NULL;
|
|
}
|
|
@@ -3508,11 +3587,20 @@ bool is_module_text_address(unsigned long addr)
|
|
*/
|
|
struct module *__module_text_address(unsigned long addr)
|
|
{
|
|
- struct module *mod = __module_address(addr);
|
|
+ struct module *mod;
|
|
+
|
|
+#ifdef CONFIG_X86_32
|
|
+ addr = ktla_ktva(addr);
|
|
+#endif
|
|
+
|
|
+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
|
|
+ return NULL;
|
|
+
|
|
+ mod = __module_address(addr);
|
|
+
|
|
if (mod) {
|
|
/* Make sure it's within the text section. */
|
|
- if (!within(addr, mod->module_init, mod->init_text_size)
|
|
- && !within(addr, mod->module_core, mod->core_text_size))
|
|
+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
|
|
mod = NULL;
|
|
}
|
|
return mod;
|
|
diff --git a/kernel/module.c.rej b/kernel/module.c.rej
|
|
new file mode 100644
|
|
index 0000000..d5a4f7d
|
|
--- /dev/null
|
|
+++ b/kernel/module.c.rej
|
|
@@ -0,0 +1,26 @@
|
|
+diff a/kernel/module.c b/kernel/module.c (rejected hunks)
|
|
+@@ -3320,16 +3392,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
|
|
+ #endif
|
|
+
|
|
+ /* Set RO and NX regions for core */
|
|
+- set_section_ro_nx(mod->module_core,
|
|
+- mod->core_text_size,
|
|
+- mod->core_ro_size,
|
|
+- mod->core_size);
|
|
++ set_section_ro_nx(mod->module_core_rx,
|
|
++ mod->core_size_rx,
|
|
++ mod->core_size_rx,
|
|
++ mod->core_size_rx);
|
|
+
|
|
+ /* Set RO and NX regions for init */
|
|
+- set_section_ro_nx(mod->module_init,
|
|
+- mod->init_text_size,
|
|
+- mod->init_ro_size,
|
|
+- mod->init_size);
|
|
++ set_section_ro_nx(mod->module_init_rx,
|
|
++ mod->init_size_rx,
|
|
++ mod->init_size_rx,
|
|
++ mod->init_size_rx);
|
|
+
|
|
+ do_mod_ctors(mod);
|
|
+ /* Start the module */
|
|
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
|
|
index 7e3443f..b2a1e6b 100644
|
|
--- a/kernel/mutex-debug.c
|
|
+++ b/kernel/mutex-debug.c
|
|
@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
|
|
}
|
|
|
|
void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
|
|
- struct thread_info *ti)
|
|
+ struct task_struct *task)
|
|
{
|
|
SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
|
|
|
|
/* Mark the current thread as blocked on the lock: */
|
|
- ti->task->blocked_on = waiter;
|
|
+ task->blocked_on = waiter;
|
|
}
|
|
|
|
void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
|
|
- struct thread_info *ti)
|
|
+ struct task_struct *task)
|
|
{
|
|
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
|
|
- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
|
|
- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
|
|
- ti->task->blocked_on = NULL;
|
|
+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
|
|
+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
|
|
+ task->blocked_on = NULL;
|
|
|
|
list_del_init(&waiter->list);
|
|
waiter->task = NULL;
|
|
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
|
|
index 0799fd3..d06ae3b 100644
|
|
--- a/kernel/mutex-debug.h
|
|
+++ b/kernel/mutex-debug.h
|
|
@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
|
|
extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
|
|
extern void debug_mutex_add_waiter(struct mutex *lock,
|
|
struct mutex_waiter *waiter,
|
|
- struct thread_info *ti);
|
|
+ struct task_struct *task);
|
|
extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
|
|
- struct thread_info *ti);
|
|
+ struct task_struct *task);
|
|
extern void debug_mutex_unlock(struct mutex *lock);
|
|
extern void debug_mutex_init(struct mutex *lock, const char *name,
|
|
struct lock_class_key *key);
|
|
diff --git a/kernel/mutex.c b/kernel/mutex.c
|
|
index a307cc9..27fd2e9 100644
|
|
--- a/kernel/mutex.c
|
|
+++ b/kernel/mutex.c
|
|
@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
|
|
spin_lock_mutex(&lock->wait_lock, flags);
|
|
|
|
debug_mutex_lock_common(lock, &waiter);
|
|
- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
|
|
+ debug_mutex_add_waiter(lock, &waiter, task);
|
|
|
|
/* add waiting tasks to the end of the waitqueue (FIFO): */
|
|
list_add_tail(&waiter.list, &lock->wait_list);
|
|
@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
|
|
* TASK_UNINTERRUPTIBLE case.)
|
|
*/
|
|
if (unlikely(signal_pending_state(state, task))) {
|
|
- mutex_remove_waiter(lock, &waiter,
|
|
- task_thread_info(task));
|
|
+ mutex_remove_waiter(lock, &waiter, task);
|
|
mutex_release(&lock->dep_map, 1, ip);
|
|
spin_unlock_mutex(&lock->wait_lock, flags);
|
|
|
|
@@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
|
|
done:
|
|
lock_acquired(&lock->dep_map, ip);
|
|
/* got the lock - rejoice! */
|
|
- mutex_remove_waiter(lock, &waiter, current_thread_info());
|
|
+ mutex_remove_waiter(lock, &waiter, task);
|
|
mutex_set_owner(lock);
|
|
|
|
/* set it to 0 if there are no waiters left: */
|
|
diff --git a/kernel/panic.c b/kernel/panic.c
|
|
index 5c20d28..b381bcc 100644
|
|
--- a/kernel/panic.c
|
|
+++ b/kernel/panic.c
|
|
@@ -486,7 +486,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
|
|
*/
|
|
void __stack_chk_fail(void)
|
|
{
|
|
- panic("stack-protector: Kernel stack is corrupted in: %p\n",
|
|
+ dump_stack();
|
|
+ panic("stack-protector: Kernel stack is corrupted in: %pS\n",
|
|
__builtin_return_address(0));
|
|
}
|
|
EXPORT_SYMBOL(__stack_chk_fail);
|
|
diff --git a/kernel/pid.c b/kernel/pid.c
|
|
index 7acf590..82cb575 100644
|
|
--- a/kernel/pid.c
|
|
+++ b/kernel/pid.c
|
|
@@ -45,7 +45,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
|
|
|
|
int pid_max = PID_MAX_DEFAULT;
|
|
|
|
-#define RESERVED_PIDS 300
|
|
+#define RESERVED_PIDS 500
|
|
|
|
int pid_max_min = RESERVED_PIDS + 1;
|
|
int pid_max_max = PID_MAX_LIMIT;
|
|
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
|
|
index acbb79c..0c87615 100644
|
|
--- a/kernel/posix-cpu-timers.c
|
|
+++ b/kernel/posix-cpu-timers.c
|
|
@@ -1597,14 +1597,14 @@ struct k_clock clock_posix_cpu = {
|
|
|
|
static __init int init_posix_cpu_timers(void)
|
|
{
|
|
- struct k_clock process = {
|
|
+ static struct k_clock process = {
|
|
.clock_getres = process_cpu_clock_getres,
|
|
.clock_get = process_cpu_clock_get,
|
|
.timer_create = process_cpu_timer_create,
|
|
.nsleep = process_cpu_nsleep,
|
|
.nsleep_restart = process_cpu_nsleep_restart,
|
|
};
|
|
- struct k_clock thread = {
|
|
+ static struct k_clock thread = {
|
|
.clock_getres = thread_cpu_clock_getres,
|
|
.clock_get = thread_cpu_clock_get,
|
|
.timer_create = thread_cpu_timer_create,
|
|
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
|
|
index 02824a5..508811d 100644
|
|
--- a/kernel/posix-timers.c
|
|
+++ b/kernel/posix-timers.c
|
|
@@ -129,7 +129,7 @@ static DEFINE_SPINLOCK(idr_lock);
|
|
* which we beg off on and pass to do_sys_settimeofday().
|
|
*/
|
|
|
|
-static struct k_clock posix_clocks[MAX_CLOCKS];
|
|
+static struct k_clock *posix_clocks[MAX_CLOCKS];
|
|
|
|
/*
|
|
* These ones are defined below.
|
|
@@ -227,7 +227,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
|
|
*/
|
|
static __init int init_posix_timers(void)
|
|
{
|
|
- struct k_clock clock_realtime = {
|
|
+ static struct k_clock clock_realtime = {
|
|
.clock_getres = hrtimer_get_res,
|
|
.clock_get = posix_clock_realtime_get,
|
|
.clock_set = posix_clock_realtime_set,
|
|
@@ -239,7 +239,7 @@ static __init int init_posix_timers(void)
|
|
.timer_get = common_timer_get,
|
|
.timer_del = common_timer_del,
|
|
};
|
|
- struct k_clock clock_monotonic = {
|
|
+ static struct k_clock clock_monotonic = {
|
|
.clock_getres = hrtimer_get_res,
|
|
.clock_get = posix_ktime_get_ts,
|
|
.nsleep = common_nsleep,
|
|
@@ -249,19 +249,19 @@ static __init int init_posix_timers(void)
|
|
.timer_get = common_timer_get,
|
|
.timer_del = common_timer_del,
|
|
};
|
|
- struct k_clock clock_monotonic_raw = {
|
|
+ static struct k_clock clock_monotonic_raw = {
|
|
.clock_getres = hrtimer_get_res,
|
|
.clock_get = posix_get_monotonic_raw,
|
|
};
|
|
- struct k_clock clock_realtime_coarse = {
|
|
+ static struct k_clock clock_realtime_coarse = {
|
|
.clock_getres = posix_get_coarse_res,
|
|
.clock_get = posix_get_realtime_coarse,
|
|
};
|
|
- struct k_clock clock_monotonic_coarse = {
|
|
+ static struct k_clock clock_monotonic_coarse = {
|
|
.clock_getres = posix_get_coarse_res,
|
|
.clock_get = posix_get_monotonic_coarse,
|
|
};
|
|
- struct k_clock clock_boottime = {
|
|
+ static struct k_clock clock_boottime = {
|
|
.clock_getres = hrtimer_get_res,
|
|
.clock_get = posix_get_boottime,
|
|
.nsleep = common_nsleep,
|
|
@@ -473,7 +473,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
|
|
return;
|
|
}
|
|
|
|
- posix_clocks[clock_id] = *new_clock;
|
|
+ posix_clocks[clock_id] = new_clock;
|
|
}
|
|
EXPORT_SYMBOL_GPL(posix_timers_register_clock);
|
|
|
|
@@ -519,9 +519,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
|
|
return (id & CLOCKFD_MASK) == CLOCKFD ?
|
|
&clock_posix_dynamic : &clock_posix_cpu;
|
|
|
|
- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
|
|
+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
|
|
return NULL;
|
|
- return &posix_clocks[id];
|
|
+ return posix_clocks[id];
|
|
}
|
|
|
|
static int common_timer_create(struct k_itimer *new_timer)
|
|
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
|
|
index d523593..68197a4 100644
|
|
--- a/kernel/power/poweroff.c
|
|
+++ b/kernel/power/poweroff.c
|
|
@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
|
|
.enable_mask = SYSRQ_ENABLE_BOOT,
|
|
};
|
|
|
|
-static int pm_sysrq_init(void)
|
|
+static int __init pm_sysrq_init(void)
|
|
{
|
|
register_sysrq_key('o', &sysrq_poweroff_op);
|
|
return 0;
|
|
diff --git a/kernel/power/process.c b/kernel/power/process.c
|
|
index bf89a05..8ec05f5 100644
|
|
--- a/kernel/power/process.c
|
|
+++ b/kernel/power/process.c
|
|
@@ -48,6 +48,8 @@ static int try_to_freeze_tasks(bool user_only)
|
|
|
|
while (true) {
|
|
todo = 0;
|
|
+ if (time_after(jiffies, end_time))
|
|
+ timedout = true;
|
|
read_lock(&tasklist_lock);
|
|
do_each_thread(g, p) {
|
|
if (p == current || !freeze_task(p))
|
|
@@ -63,9 +65,13 @@ static int try_to_freeze_tasks(bool user_only)
|
|
* guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
|
|
* transition can't race with task state testing here.
|
|
*/
|
|
- if (!task_is_stopped_or_traced(p) &&
|
|
- !freezer_should_skip(p))
|
|
+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
|
|
todo++;
|
|
+ if (timedout) {
|
|
+ printk(KERN_ERR "Task refusing to freeze:\n");
|
|
+ sched_show_task(p);
|
|
+ }
|
|
+ }
|
|
} while_each_thread(g, p);
|
|
read_unlock(&tasklist_lock);
|
|
|
|
diff --git a/kernel/power/process.c.rej b/kernel/power/process.c.rej
|
|
new file mode 100644
|
|
index 0000000..d110847
|
|
--- /dev/null
|
|
+++ b/kernel/power/process.c.rej
|
|
@@ -0,0 +1,19 @@
|
|
+--- kernel/power/process.c 2012-05-21 11:33:39.551929945 +0200
|
|
++++ kernel/power/process.c 2012-05-21 12:10:11.796049012 +0200
|
|
+@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user
|
|
+ u64 elapsed_csecs64;
|
|
+ unsigned int elapsed_csecs;
|
|
+ bool wakeup = false;
|
|
++ bool timedout = false;
|
|
+
|
|
+ do_gettimeofday(&start);
|
|
+
|
|
+@@ -75,7 +82,7 @@ static int try_to_freeze_tasks(bool user
|
|
+ todo += wq_busy;
|
|
+ }
|
|
+
|
|
+- if (!todo || time_after(jiffies, end_time))
|
|
++ if (!todo || timedout)
|
|
+ break;
|
|
+
|
|
+ if (pm_wakeup_pending()) {
|
|
diff --git a/kernel/profile.c b/kernel/profile.c
|
|
index 76b8e77..a2930e8 100644
|
|
--- a/kernel/profile.c
|
|
+++ b/kernel/profile.c
|
|
@@ -39,7 +39,7 @@ struct profile_hit {
|
|
/* Oprofile timer tick hook */
|
|
static int (*timer_hook)(struct pt_regs *) __read_mostly;
|
|
|
|
-static atomic_t *prof_buffer;
|
|
+static atomic_unchecked_t *prof_buffer;
|
|
static unsigned long prof_len, prof_shift;
|
|
|
|
int prof_on __read_mostly;
|
|
@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
|
|
hits[i].pc = 0;
|
|
continue;
|
|
}
|
|
- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
|
|
+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
|
|
hits[i].hits = hits[i].pc = 0;
|
|
}
|
|
}
|
|
@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
|
|
* Add the current hit(s) and flush the write-queue out
|
|
* to the global buffer:
|
|
*/
|
|
- atomic_add(nr_hits, &prof_buffer[pc]);
|
|
+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
|
|
for (i = 0; i < NR_PROFILE_HIT; ++i) {
|
|
- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
|
|
+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
|
|
hits[i].pc = hits[i].hits = 0;
|
|
}
|
|
out:
|
|
@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
|
|
{
|
|
unsigned long pc;
|
|
pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
|
|
- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
|
|
+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
|
|
}
|
|
#endif /* !CONFIG_SMP */
|
|
|
|
@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
|
|
return -EFAULT;
|
|
buf++; p++; count--; read++;
|
|
}
|
|
- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
|
|
+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
|
|
if (copy_to_user(buf, (void *)pnt, count))
|
|
return -EFAULT;
|
|
read += count;
|
|
@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
|
|
}
|
|
#endif
|
|
profile_discard_flip_buffers();
|
|
- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
|
|
+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
|
|
return count;
|
|
}
|
|
|
|
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
|
|
index d9e71e3..4b5cffa6 100644
|
|
--- a/kernel/ptrace.c
|
|
+++ b/kernel/ptrace.c
|
|
@@ -527,7 +527,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
|
|
break;
|
|
return -EIO;
|
|
}
|
|
- if (copy_to_user(dst, buf, retval))
|
|
+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
|
|
return -EFAULT;
|
|
copied += retval;
|
|
src += retval;
|
|
@@ -732,7 +732,7 @@ int ptrace_request(struct task_struct *child, long request,
|
|
bool seized = child->ptrace & PT_SEIZED;
|
|
int ret = -EIO;
|
|
siginfo_t siginfo, *si;
|
|
- void __user *datavp = (void __user *) data;
|
|
+ void __user *datavp = (__force void __user *) data;
|
|
unsigned long __user *datalp = datavp;
|
|
unsigned long flags;
|
|
|
|
@@ -969,7 +969,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
|
|
copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
|
|
if (copied != sizeof(tmp))
|
|
return -EIO;
|
|
- return put_user(tmp, (unsigned long __user *)data);
|
|
+ return put_user(tmp, (__force unsigned long __user *)data);
|
|
}
|
|
|
|
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
|
|
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
|
|
index 60a56f4..54bb54f 100644
|
|
--- a/kernel/rcutiny.c
|
|
+++ b/kernel/rcutiny.c
|
|
@@ -46,7 +46,7 @@
|
|
struct rcu_ctrlblk;
|
|
static void invoke_rcu_callbacks(void);
|
|
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
|
|
-static void rcu_process_callbacks(struct softirq_action *unused);
|
|
+static void rcu_process_callbacks(void);
|
|
static void __call_rcu(struct rcu_head *head,
|
|
void (*func)(struct rcu_head *rcu),
|
|
struct rcu_ctrlblk *rcp);
|
|
@@ -312,7 +312,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
|
|
rcu_is_callbacks_kthread()));
|
|
}
|
|
|
|
-static void rcu_process_callbacks(struct softirq_action *unused)
|
|
+static void rcu_process_callbacks(void)
|
|
{
|
|
__rcu_process_callbacks(&rcu_sched_ctrlblk);
|
|
__rcu_process_callbacks(&rcu_bh_ctrlblk);
|
|
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
|
|
index 977250f..17060324 100644
|
|
--- a/kernel/rcutiny_plugin.h
|
|
+++ b/kernel/rcutiny_plugin.h
|
|
@@ -942,7 +942,7 @@ static int rcu_kthread(void *arg)
|
|
have_rcu_kthread_work = morework;
|
|
local_irq_restore(flags);
|
|
if (work)
|
|
- rcu_process_callbacks(NULL);
|
|
+ rcu_process_callbacks();
|
|
schedule_timeout_interruptible(1); /* Leave CPU for others. */
|
|
}
|
|
|
|
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
|
|
index a89b381..efdcad8 100644
|
|
--- a/kernel/rcutorture.c
|
|
+++ b/kernel/rcutorture.c
|
|
@@ -158,12 +158,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
|
|
{ 0 };
|
|
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
|
|
{ 0 };
|
|
-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
|
|
-static atomic_t n_rcu_torture_alloc;
|
|
-static atomic_t n_rcu_torture_alloc_fail;
|
|
-static atomic_t n_rcu_torture_free;
|
|
-static atomic_t n_rcu_torture_mberror;
|
|
-static atomic_t n_rcu_torture_error;
|
|
+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
|
|
+static atomic_unchecked_t n_rcu_torture_alloc;
|
|
+static atomic_unchecked_t n_rcu_torture_alloc_fail;
|
|
+static atomic_unchecked_t n_rcu_torture_free;
|
|
+static atomic_unchecked_t n_rcu_torture_mberror;
|
|
+static atomic_unchecked_t n_rcu_torture_error;
|
|
static long n_rcu_torture_boost_ktrerror;
|
|
static long n_rcu_torture_boost_rterror;
|
|
static long n_rcu_torture_boost_failure;
|
|
@@ -253,11 +253,11 @@ rcu_torture_alloc(void)
|
|
|
|
spin_lock_bh(&rcu_torture_lock);
|
|
if (list_empty(&rcu_torture_freelist)) {
|
|
- atomic_inc(&n_rcu_torture_alloc_fail);
|
|
+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
|
|
spin_unlock_bh(&rcu_torture_lock);
|
|
return NULL;
|
|
}
|
|
- atomic_inc(&n_rcu_torture_alloc);
|
|
+ atomic_inc_unchecked(&n_rcu_torture_alloc);
|
|
p = rcu_torture_freelist.next;
|
|
list_del_init(p);
|
|
spin_unlock_bh(&rcu_torture_lock);
|
|
@@ -270,7 +270,7 @@ rcu_torture_alloc(void)
|
|
static void
|
|
rcu_torture_free(struct rcu_torture *p)
|
|
{
|
|
- atomic_inc(&n_rcu_torture_free);
|
|
+ atomic_inc_unchecked(&n_rcu_torture_free);
|
|
spin_lock_bh(&rcu_torture_lock);
|
|
list_add_tail(&p->rtort_free, &rcu_torture_freelist);
|
|
spin_unlock_bh(&rcu_torture_lock);
|
|
@@ -390,7 +390,7 @@ rcu_torture_cb(struct rcu_head *p)
|
|
i = rp->rtort_pipe_count;
|
|
if (i > RCU_TORTURE_PIPE_LEN)
|
|
i = RCU_TORTURE_PIPE_LEN;
|
|
- atomic_inc(&rcu_torture_wcount[i]);
|
|
+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
|
|
if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
|
|
rp->rtort_mbtest = 0;
|
|
rcu_torture_free(rp);
|
|
@@ -437,7 +437,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
|
|
i = rp->rtort_pipe_count;
|
|
if (i > RCU_TORTURE_PIPE_LEN)
|
|
i = RCU_TORTURE_PIPE_LEN;
|
|
- atomic_inc(&rcu_torture_wcount[i]);
|
|
+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
|
|
if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
|
|
rp->rtort_mbtest = 0;
|
|
list_del(&rp->rtort_free);
|
|
@@ -926,7 +926,7 @@ rcu_torture_writer(void *arg)
|
|
i = old_rp->rtort_pipe_count;
|
|
if (i > RCU_TORTURE_PIPE_LEN)
|
|
i = RCU_TORTURE_PIPE_LEN;
|
|
- atomic_inc(&rcu_torture_wcount[i]);
|
|
+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
|
|
old_rp->rtort_pipe_count++;
|
|
cur_ops->deferred_free(old_rp);
|
|
}
|
|
@@ -1007,7 +1007,7 @@ static void rcu_torture_timer(unsigned long unused)
|
|
}
|
|
do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
|
|
if (p->rtort_mbtest == 0)
|
|
- atomic_inc(&n_rcu_torture_mberror);
|
|
+ atomic_inc_unchecked(&n_rcu_torture_mberror);
|
|
spin_lock(&rand_lock);
|
|
cur_ops->read_delay(&rand);
|
|
n_rcu_torture_timers++;
|
|
@@ -1071,7 +1071,7 @@ rcu_torture_reader(void *arg)
|
|
}
|
|
do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
|
|
if (p->rtort_mbtest == 0)
|
|
- atomic_inc(&n_rcu_torture_mberror);
|
|
+ atomic_inc_unchecked(&n_rcu_torture_mberror);
|
|
cur_ops->read_delay(&rand);
|
|
preempt_disable();
|
|
pipe_count = p->rtort_pipe_count;
|
|
@@ -1133,10 +1133,10 @@ rcu_torture_printk(char *page)
|
|
rcu_torture_current,
|
|
rcu_torture_current_version,
|
|
list_empty(&rcu_torture_freelist),
|
|
- atomic_read(&n_rcu_torture_alloc),
|
|
- atomic_read(&n_rcu_torture_alloc_fail),
|
|
- atomic_read(&n_rcu_torture_free),
|
|
- atomic_read(&n_rcu_torture_mberror),
|
|
+ atomic_read_unchecked(&n_rcu_torture_alloc),
|
|
+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
|
|
+ atomic_read_unchecked(&n_rcu_torture_free),
|
|
+ atomic_read_unchecked(&n_rcu_torture_mberror),
|
|
n_rcu_torture_boost_ktrerror,
|
|
n_rcu_torture_boost_rterror,
|
|
n_rcu_torture_boost_failure,
|
|
@@ -1146,7 +1146,7 @@ rcu_torture_printk(char *page)
|
|
n_online_attempts,
|
|
n_offline_successes,
|
|
n_offline_attempts);
|
|
- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
|
|
+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
|
|
n_rcu_torture_boost_ktrerror != 0 ||
|
|
n_rcu_torture_boost_rterror != 0 ||
|
|
n_rcu_torture_boost_failure != 0)
|
|
@@ -1154,7 +1154,7 @@ rcu_torture_printk(char *page)
|
|
cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
|
|
if (i > 1) {
|
|
cnt += sprintf(&page[cnt], "!!! ");
|
|
- atomic_inc(&n_rcu_torture_error);
|
|
+ atomic_inc_unchecked(&n_rcu_torture_error);
|
|
WARN_ON_ONCE(1);
|
|
}
|
|
cnt += sprintf(&page[cnt], "Reader Pipe: ");
|
|
@@ -1168,7 +1168,7 @@ rcu_torture_printk(char *page)
|
|
cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
|
|
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
|
|
cnt += sprintf(&page[cnt], " %d",
|
|
- atomic_read(&rcu_torture_wcount[i]));
|
|
+ atomic_read_unchecked(&rcu_torture_wcount[i]));
|
|
}
|
|
cnt += sprintf(&page[cnt], "\n");
|
|
if (cur_ops->stats)
|
|
@@ -1676,7 +1676,7 @@ rcu_torture_cleanup(void)
|
|
|
|
if (cur_ops->cleanup)
|
|
cur_ops->cleanup();
|
|
- if (atomic_read(&n_rcu_torture_error))
|
|
+ if (atomic_read_unchecked(&n_rcu_torture_error))
|
|
rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
|
|
else if (n_online_successes != n_online_attempts ||
|
|
n_offline_successes != n_offline_attempts)
|
|
@@ -1744,17 +1744,17 @@ rcu_torture_init(void)
|
|
|
|
rcu_torture_current = NULL;
|
|
rcu_torture_current_version = 0;
|
|
- atomic_set(&n_rcu_torture_alloc, 0);
|
|
- atomic_set(&n_rcu_torture_alloc_fail, 0);
|
|
- atomic_set(&n_rcu_torture_free, 0);
|
|
- atomic_set(&n_rcu_torture_mberror, 0);
|
|
- atomic_set(&n_rcu_torture_error, 0);
|
|
+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
|
|
+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
|
|
+ atomic_set_unchecked(&n_rcu_torture_free, 0);
|
|
+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
|
|
+ atomic_set_unchecked(&n_rcu_torture_error, 0);
|
|
n_rcu_torture_boost_ktrerror = 0;
|
|
n_rcu_torture_boost_rterror = 0;
|
|
n_rcu_torture_boost_failure = 0;
|
|
n_rcu_torture_boosts = 0;
|
|
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
|
|
- atomic_set(&rcu_torture_wcount[i], 0);
|
|
+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
|
|
for_each_possible_cpu(cpu) {
|
|
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
|
|
per_cpu(rcu_torture_count, cpu)[i] = 0;
|
|
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
|
|
index 880179c..5300824 100644
|
|
--- a/kernel/rcutree.c
|
|
+++ b/kernel/rcutree.c
|
|
@@ -367,9 +367,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
|
|
rcu_prepare_for_idle(smp_processor_id());
|
|
/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
|
|
smp_mb__before_atomic_inc(); /* See above. */
|
|
- atomic_inc(&rdtp->dynticks);
|
|
+ atomic_inc_unchecked(&rdtp->dynticks);
|
|
smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
|
|
- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
|
|
+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
|
|
|
|
/*
|
|
* The idle task is not permitted to enter the idle loop while
|
|
@@ -458,10 +458,10 @@ void rcu_irq_exit(void)
|
|
static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
|
|
{
|
|
smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
|
|
- atomic_inc(&rdtp->dynticks);
|
|
+ atomic_inc_unchecked(&rdtp->dynticks);
|
|
/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
|
|
smp_mb__after_atomic_inc(); /* See above. */
|
|
- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
|
|
+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
|
|
rcu_cleanup_after_idle(smp_processor_id());
|
|
trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
|
|
if (!is_idle_task(current)) {
|
|
@@ -555,14 +555,14 @@ void rcu_nmi_enter(void)
|
|
struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
|
|
|
|
if (rdtp->dynticks_nmi_nesting == 0 &&
|
|
- (atomic_read(&rdtp->dynticks) & 0x1))
|
|
+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
|
|
return;
|
|
rdtp->dynticks_nmi_nesting++;
|
|
smp_mb__before_atomic_inc(); /* Force delay from prior write. */
|
|
- atomic_inc(&rdtp->dynticks);
|
|
+ atomic_inc_unchecked(&rdtp->dynticks);
|
|
/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
|
|
smp_mb__after_atomic_inc(); /* See above. */
|
|
- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
|
|
+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
|
|
}
|
|
|
|
/**
|
|
@@ -581,9 +581,9 @@ void rcu_nmi_exit(void)
|
|
return;
|
|
/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
|
|
smp_mb__before_atomic_inc(); /* See above. */
|
|
- atomic_inc(&rdtp->dynticks);
|
|
+ atomic_inc_unchecked(&rdtp->dynticks);
|
|
smp_mb__after_atomic_inc(); /* Force delay to next write. */
|
|
- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
|
|
+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
|
|
}
|
|
|
|
#ifdef CONFIG_PROVE_RCU
|
|
@@ -599,7 +599,7 @@ int rcu_is_cpu_idle(void)
|
|
int ret;
|
|
|
|
preempt_disable();
|
|
- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
|
|
+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
|
|
preempt_enable();
|
|
return ret;
|
|
}
|
|
@@ -669,7 +669,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
|
|
*/
|
|
static int dyntick_save_progress_counter(struct rcu_data *rdp)
|
|
{
|
|
- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
|
|
+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
|
|
return (rdp->dynticks_snap & 0x1) == 0;
|
|
}
|
|
|
|
@@ -684,7 +684,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
|
|
unsigned int curr;
|
|
unsigned int snap;
|
|
|
|
- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
|
|
+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
|
|
snap = (unsigned int)rdp->dynticks_snap;
|
|
|
|
/*
|
|
@@ -714,10 +714,10 @@ static int jiffies_till_stall_check(void)
|
|
* for CONFIG_RCU_CPU_STALL_TIMEOUT.
|
|
*/
|
|
if (till_stall_check < 3) {
|
|
- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
|
|
+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
|
|
till_stall_check = 3;
|
|
} else if (till_stall_check > 300) {
|
|
- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
|
|
+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
|
|
till_stall_check = 300;
|
|
}
|
|
return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
|
|
@@ -1823,7 +1823,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
|
|
/*
|
|
* Do RCU core processing for the current CPU.
|
|
*/
|
|
-static void rcu_process_callbacks(struct softirq_action *unused)
|
|
+static void rcu_process_callbacks(void)
|
|
{
|
|
trace_rcu_utilization("Start RCU core");
|
|
__rcu_process_callbacks(&rcu_sched_state,
|
|
@@ -2015,8 +2015,8 @@ void synchronize_rcu_bh(void)
|
|
}
|
|
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
|
|
|
|
-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
|
|
-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
|
|
+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
|
|
+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
|
|
|
|
static int synchronize_sched_expedited_cpu_stop(void *data)
|
|
{
|
|
@@ -2077,7 +2077,7 @@ void synchronize_sched_expedited(void)
|
|
int firstsnap, s, snap, trycount = 0;
|
|
|
|
/* Note that atomic_inc_return() implies full memory barrier. */
|
|
- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
|
|
+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
|
|
get_online_cpus();
|
|
WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
|
|
|
|
@@ -2099,7 +2099,7 @@ void synchronize_sched_expedited(void)
|
|
}
|
|
|
|
/* Check to see if someone else did our work for us. */
|
|
- s = atomic_read(&sync_sched_expedited_done);
|
|
+ s = atomic_read_unchecked(&sync_sched_expedited_done);
|
|
if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
|
|
smp_mb(); /* ensure test happens before caller kfree */
|
|
return;
|
|
@@ -2114,7 +2114,7 @@ void synchronize_sched_expedited(void)
|
|
* grace period works for us.
|
|
*/
|
|
get_online_cpus();
|
|
- snap = atomic_read(&sync_sched_expedited_started);
|
|
+ snap = atomic_read_unchecked(&sync_sched_expedited_started);
|
|
smp_mb(); /* ensure read is before try_stop_cpus(). */
|
|
}
|
|
|
|
@@ -2125,12 +2125,12 @@ void synchronize_sched_expedited(void)
|
|
* than we did beat us to the punch.
|
|
*/
|
|
do {
|
|
- s = atomic_read(&sync_sched_expedited_done);
|
|
+ s = atomic_read_unchecked(&sync_sched_expedited_done);
|
|
if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
|
|
smp_mb(); /* ensure test happens before caller kfree */
|
|
break;
|
|
}
|
|
- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
|
|
+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
|
|
|
|
put_online_cpus();
|
|
}
|
|
@@ -2394,7 +2394,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
|
|
rdp->qlen = 0;
|
|
rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
|
|
WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
|
|
- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
|
|
+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
|
|
rdp->cpu = cpu;
|
|
rdp->rsp = rsp;
|
|
raw_spin_unlock_irqrestore(&rnp->lock, flags);
|
|
@@ -2422,8 +2422,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
|
|
rdp->n_force_qs_snap = rsp->n_force_qs;
|
|
rdp->blimit = blimit;
|
|
rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
|
|
- atomic_set(&rdp->dynticks->dynticks,
|
|
- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
|
|
+ atomic_set_unchecked(&rdp->dynticks->dynticks,
|
|
+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
|
|
rcu_prepare_for_idle_init(cpu);
|
|
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
|
|
|
|
diff --git a/kernel/rcutree.h.rej b/kernel/rcutree.h.rej
|
|
new file mode 100644
|
|
index 0000000..a05a1d5
|
|
--- /dev/null
|
|
+++ b/kernel/rcutree.h.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- kernel/rcutree.h 2012-05-21 11:33:39.615929950 +0200
|
|
++++ kernel/rcutree.h 2012-05-21 12:10:11.820049014 +0200
|
|
+@@ -87,7 +87,7 @@ struct rcu_dynticks {
|
|
+ long long dynticks_nesting; /* Track irq/process nesting level. */
|
|
+ /* Process level is worth LLONG_MAX/2. */
|
|
+ int dynticks_nmi_nesting; /* Track NMI nesting level. */
|
|
+- atomic_t dynticks; /* Even value for idle, else odd. */
|
|
++ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
|
|
+ };
|
|
+
|
|
+ /* RCU's kthread states for tracing. */
|
|
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
|
|
index de85e4c..820170e 100644
|
|
--- a/kernel/rcutree_plugin.h
|
|
+++ b/kernel/rcutree_plugin.h
|
|
@@ -913,7 +913,7 @@ void synchronize_rcu_expedited(void)
|
|
|
|
/* Clean up and exit. */
|
|
smp_mb(); /* ensure expedited GP seen before counter increment. */
|
|
- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
|
|
+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
|
|
unlock_mb_ret:
|
|
mutex_unlock(&sync_rcu_preempt_exp_mutex);
|
|
mb_ret:
|
|
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
|
|
index 6b4c76b..e5fe3ee 100644
|
|
--- a/kernel/rcutree_trace.c
|
|
+++ b/kernel/rcutree_trace.c
|
|
@@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
|
|
rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
|
|
rdp->qs_pending);
|
|
seq_printf(m, " dt=%d/%llx/%d df=%lu",
|
|
- atomic_read(&rdp->dynticks->dynticks),
|
|
+ atomic_read_unchecked(&rdp->dynticks->dynticks),
|
|
rdp->dynticks->dynticks_nesting,
|
|
rdp->dynticks->dynticks_nmi_nesting,
|
|
rdp->dynticks_fqs);
|
|
@@ -139,7 +139,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
|
|
rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
|
|
rdp->qs_pending);
|
|
seq_printf(m, ",%d,%llx,%d,%lu",
|
|
- atomic_read(&rdp->dynticks->dynticks),
|
|
+ atomic_read_unchecked(&rdp->dynticks->dynticks),
|
|
rdp->dynticks->dynticks_nesting,
|
|
rdp->dynticks->dynticks_nmi_nesting,
|
|
rdp->dynticks_fqs);
|
|
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
|
|
index 98ec494..4241d6d 100644
|
|
--- a/kernel/rtmutex-tester.c
|
|
+++ b/kernel/rtmutex-tester.c
|
|
@@ -20,7 +20,7 @@
|
|
#define MAX_RT_TEST_MUTEXES 8
|
|
|
|
static spinlock_t rttest_lock;
|
|
-static atomic_t rttest_event;
|
|
+static atomic_unchecked_t rttest_event;
|
|
|
|
struct test_thread_data {
|
|
int opcode;
|
|
@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
|
|
|
|
case RTTEST_LOCKCONT:
|
|
td->mutexes[td->opdata] = 1;
|
|
- td->event = atomic_add_return(1, &rttest_event);
|
|
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
|
|
return 0;
|
|
|
|
case RTTEST_RESET:
|
|
@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
|
|
return 0;
|
|
|
|
case RTTEST_RESETEVENT:
|
|
- atomic_set(&rttest_event, 0);
|
|
+ atomic_set_unchecked(&rttest_event, 0);
|
|
return 0;
|
|
|
|
default:
|
|
@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
|
|
return ret;
|
|
|
|
td->mutexes[id] = 1;
|
|
- td->event = atomic_add_return(1, &rttest_event);
|
|
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
|
|
rt_mutex_lock(&mutexes[id]);
|
|
- td->event = atomic_add_return(1, &rttest_event);
|
|
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
|
|
td->mutexes[id] = 4;
|
|
return 0;
|
|
|
|
@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
|
|
return ret;
|
|
|
|
td->mutexes[id] = 1;
|
|
- td->event = atomic_add_return(1, &rttest_event);
|
|
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
|
|
ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
|
|
- td->event = atomic_add_return(1, &rttest_event);
|
|
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
|
|
td->mutexes[id] = ret ? 0 : 4;
|
|
return ret ? -EINTR : 0;
|
|
|
|
@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
|
|
if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
|
|
return ret;
|
|
|
|
- td->event = atomic_add_return(1, &rttest_event);
|
|
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
|
|
rt_mutex_unlock(&mutexes[id]);
|
|
- td->event = atomic_add_return(1, &rttest_event);
|
|
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
|
|
td->mutexes[id] = 0;
|
|
return 0;
|
|
|
|
@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
|
|
break;
|
|
|
|
td->mutexes[dat] = 2;
|
|
- td->event = atomic_add_return(1, &rttest_event);
|
|
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
|
|
break;
|
|
|
|
default:
|
|
@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
|
|
return;
|
|
|
|
td->mutexes[dat] = 3;
|
|
- td->event = atomic_add_return(1, &rttest_event);
|
|
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
|
|
break;
|
|
|
|
case RTTEST_LOCKNOWAIT:
|
|
@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
|
|
return;
|
|
|
|
td->mutexes[dat] = 1;
|
|
- td->event = atomic_add_return(1, &rttest_event);
|
|
+ td->event = atomic_add_return_unchecked(1, &rttest_event);
|
|
return;
|
|
|
|
default:
|
|
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
|
|
index 15f60d0..7e50319 100644
|
|
--- a/kernel/sched/auto_group.c
|
|
+++ b/kernel/sched/auto_group.c
|
|
@@ -11,7 +11,7 @@
|
|
|
|
unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
|
|
static struct autogroup autogroup_default;
|
|
-static atomic_t autogroup_seq_nr;
|
|
+static atomic_unchecked_t autogroup_seq_nr;
|
|
|
|
void __init autogroup_init(struct task_struct *init_task)
|
|
{
|
|
@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
|
|
|
|
kref_init(&ag->kref);
|
|
init_rwsem(&ag->lock);
|
|
- ag->id = atomic_inc_return(&autogroup_seq_nr);
|
|
+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
|
|
ag->tg = tg;
|
|
#ifdef CONFIG_RT_GROUP_SCHED
|
|
/*
|
|
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
|
|
index c9ad0d0..fe4813d 100644
|
|
--- a/kernel/sched/fair.c
|
|
+++ b/kernel/sched/fair.c
|
|
@@ -5164,7 +5164,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
|
|
* run_rebalance_domains is triggered when needed from the scheduler tick.
|
|
* Also triggered for nohz idle balancing (with nohz_balancing_kick set).
|
|
*/
|
|
-static void run_rebalance_domains(struct softirq_action *h)
|
|
+static void run_rebalance_domains(void)
|
|
{
|
|
int this_cpu = smp_processor_id();
|
|
struct rq *this_rq = cpu_rq(this_cpu);
|
|
diff --git a/kernel/signal.c b/kernel/signal.c
|
|
index 5d8a6a7..1f203bb 100644
|
|
--- a/kernel/signal.c
|
|
+++ b/kernel/signal.c
|
|
@@ -47,12 +47,12 @@ static struct kmem_cache *sigqueue_cachep;
|
|
|
|
int print_fatal_signals __read_mostly;
|
|
|
|
-static void __user *sig_handler(struct task_struct *t, int sig)
|
|
+static __sighandler_t sig_handler(struct task_struct *t, int sig)
|
|
{
|
|
return t->sighand->action[sig - 1].sa.sa_handler;
|
|
}
|
|
|
|
-static int sig_handler_ignored(void __user *handler, int sig)
|
|
+static int sig_handler_ignored(__sighandler_t handler, int sig)
|
|
{
|
|
/* Is it explicitly or implicitly ignored? */
|
|
return handler == SIG_IGN ||
|
|
@@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
|
|
|
|
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
|
|
{
|
|
- void __user *handler;
|
|
+ __sighandler_t handler;
|
|
|
|
handler = sig_handler(t, sig);
|
|
|
|
@@ -492,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
|
|
|
|
int unhandled_signal(struct task_struct *tsk, int sig)
|
|
{
|
|
- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
|
|
+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
|
|
if (is_global_init(tsk))
|
|
return 1;
|
|
if (handler != SIG_IGN && handler != SIG_DFL)
|
|
diff --git a/kernel/smp.c.rej b/kernel/smp.c.rej
|
|
new file mode 100644
|
|
index 0000000..0067c8c
|
|
--- /dev/null
|
|
+++ b/kernel/smp.c.rej
|
|
@@ -0,0 +1,28 @@
|
|
+diff a/kernel/smp.c b/kernel/smp.c (rejected hunks)
|
|
+@@ -594,22 +594,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
|
|
+ }
|
|
+ EXPORT_SYMBOL(smp_call_function);
|
|
+
|
|
+-void ipi_call_lock(void)
|
|
++void ipi_call_lock(void) __acquires(call_function.lock)
|
|
+ {
|
|
+ raw_spin_lock(&call_function.lock);
|
|
+ }
|
|
+
|
|
+-void ipi_call_unlock(void)
|
|
++void ipi_call_unlock(void) __releases(call_function.lock)
|
|
+ {
|
|
+ raw_spin_unlock(&call_function.lock);
|
|
+ }
|
|
+
|
|
+-void ipi_call_lock_irq(void)
|
|
++void ipi_call_lock_irq(void) __acquires(call_function.lock)
|
|
+ {
|
|
+ raw_spin_lock_irq(&call_function.lock);
|
|
+ }
|
|
+
|
|
+-void ipi_call_unlock_irq(void)
|
|
++void ipi_call_unlock_irq(void) __releases(call_function.lock)
|
|
+ {
|
|
+ raw_spin_unlock_irq(&call_function.lock);
|
|
+ }
|
|
diff --git a/kernel/softirq.c b/kernel/softirq.c
|
|
index b14e919..7a4fb16 100644
|
|
--- a/kernel/softirq.c
|
|
+++ b/kernel/softirq.c
|
|
@@ -57,7 +57,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
|
|
|
|
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
|
|
|
|
-char *softirq_to_name[NR_SOFTIRQS] = {
|
|
+const char * const softirq_to_name[NR_SOFTIRQS] = {
|
|
"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
|
|
"TASKLET", "SCHED", "HRTIMER", "RCU"
|
|
};
|
|
@@ -236,7 +236,7 @@ asmlinkage void __do_softirq(void)
|
|
kstat_incr_softirqs_this_cpu(vec_nr);
|
|
|
|
trace_softirq_entry(vec_nr);
|
|
- h->action(h);
|
|
+ h->action();
|
|
trace_softirq_exit(vec_nr);
|
|
if (unlikely(prev_count != preempt_count())) {
|
|
printk(KERN_ERR "huh, entered softirq %u %s %p"
|
|
@@ -383,9 +383,11 @@ void __raise_softirq_irqoff(unsigned int nr)
|
|
or_softirq_pending(1UL << nr);
|
|
}
|
|
|
|
-void open_softirq(int nr, void (*action)(struct softirq_action *))
|
|
+void open_softirq(int nr, void (*action)(void))
|
|
{
|
|
- softirq_vec[nr].action = action;
|
|
+ pax_open_kernel();
|
|
+ *(void **)&softirq_vec[nr].action = action;
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
/*
|
|
@@ -439,7 +441,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
|
|
|
|
EXPORT_SYMBOL(__tasklet_hi_schedule_first);
|
|
|
|
-static void tasklet_action(struct softirq_action *a)
|
|
+static void tasklet_action(void)
|
|
{
|
|
struct tasklet_struct *list;
|
|
|
|
@@ -474,7 +476,7 @@ static void tasklet_action(struct softirq_action *a)
|
|
}
|
|
}
|
|
|
|
-static void tasklet_hi_action(struct softirq_action *a)
|
|
+static void tasklet_hi_action(void)
|
|
{
|
|
struct tasklet_struct *list;
|
|
|
|
diff --git a/kernel/sys.c b/kernel/sys.c
|
|
index ef30e10..5f73dc7 100644
|
|
--- a/kernel/sys.c
|
|
+++ b/kernel/sys.c
|
|
@@ -1282,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
|
|
return -EFAULT;
|
|
|
|
down_read(&uts_sem);
|
|
- error = __copy_to_user(&name->sysname, &utsname()->sysname,
|
|
+ error = __copy_to_user(name->sysname, &utsname()->sysname,
|
|
__OLD_UTS_LEN);
|
|
error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
|
|
- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
|
|
+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
|
|
__OLD_UTS_LEN);
|
|
error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
|
|
- error |= __copy_to_user(&name->release, &utsname()->release,
|
|
+ error |= __copy_to_user(name->release, &utsname()->release,
|
|
__OLD_UTS_LEN);
|
|
error |= __put_user(0, name->release + __OLD_UTS_LEN);
|
|
- error |= __copy_to_user(&name->version, &utsname()->version,
|
|
+ error |= __copy_to_user(name->version, &utsname()->version,
|
|
__OLD_UTS_LEN);
|
|
error |= __put_user(0, name->version + __OLD_UTS_LEN);
|
|
- error |= __copy_to_user(&name->machine, &utsname()->machine,
|
|
+ error |= __copy_to_user(name->machine, &utsname()->machine,
|
|
__OLD_UTS_LEN);
|
|
error |= __put_user(0, name->machine + __OLD_UTS_LEN);
|
|
up_read(&uts_sem);
|
|
@@ -2018,7 +2018,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
|
|
error = get_dumpable(me->mm);
|
|
break;
|
|
case PR_SET_DUMPABLE:
|
|
- if (arg2 < 0 || arg2 > 1) {
|
|
+ if (arg2 > 1) {
|
|
error = -EINVAL;
|
|
break;
|
|
}
|
|
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
|
|
index 7364c50..affedd1 100644
|
|
--- a/kernel/sysctl.c
|
|
+++ b/kernel/sysctl.c
|
|
@@ -217,6 +217,20 @@ extern struct ctl_table epoll_table[];
|
|
int sysctl_legacy_va_layout;
|
|
#endif
|
|
|
|
+#ifdef CONFIG_PAX_SOFTMODE
|
|
+static ctl_table pax_table[] = {
|
|
+ {
|
|
+ .procname = "softmode",
|
|
+ .data = &pax_softmode,
|
|
+ .maxlen = sizeof(unsigned int),
|
|
+ .mode = 0600,
|
|
+ .proc_handler = &proc_dointvec,
|
|
+ },
|
|
+
|
|
+ { }
|
|
+};
|
|
+#endif
|
|
+
|
|
/* The default sysctl tables: */
|
|
|
|
static struct ctl_table sysctl_base_table[] = {
|
|
@@ -263,6 +277,15 @@ static int max_extfrag_threshold = 1000;
|
|
#endif
|
|
|
|
static struct ctl_table kern_table[] = {
|
|
+
|
|
+#ifdef CONFIG_PAX_SOFTMODE
|
|
+ {
|
|
+ .procname = "pax",
|
|
+ .mode = 0500,
|
|
+ .child = pax_table,
|
|
+ },
|
|
+#endif
|
|
+
|
|
{
|
|
.procname = "sched_child_runs_first",
|
|
.data = &sysctl_sched_child_runs_first,
|
|
@@ -1266,6 +1289,13 @@ static struct ctl_table vm_table[] = {
|
|
.proc_handler = proc_dointvec_minmax,
|
|
.extra1 = &zero,
|
|
},
|
|
+ {
|
|
+ .procname = "heap_stack_gap",
|
|
+ .data = &sysctl_heap_stack_gap,
|
|
+ .maxlen = sizeof(sysctl_heap_stack_gap),
|
|
+ .mode = 0644,
|
|
+ .proc_handler = proc_doulongvec_minmax,
|
|
+ },
|
|
#else
|
|
{
|
|
.procname = "nr_trim_pages",
|
|
@@ -1801,6 +1831,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
|
|
len = strlen(tmp);
|
|
if (len > *size)
|
|
len = *size;
|
|
+ if (len > sizeof(tmp))
|
|
+ len = sizeof(tmp);
|
|
if (copy_to_user(*buf, tmp, len))
|
|
return -EFAULT;
|
|
*size -= len;
|
|
@@ -2117,8 +2149,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
|
|
*i = val;
|
|
} else {
|
|
val = convdiv * (*i) / convmul;
|
|
- if (!first)
|
|
+ if (!first) {
|
|
err = proc_put_char(&buffer, &left, '\t');
|
|
+ if (err)
|
|
+ break;
|
|
+ }
|
|
err = proc_put_long(&buffer, &left, val, false);
|
|
if (err)
|
|
break;
|
|
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
|
|
index d7af6a3..a3937b4 100644
|
|
--- a/kernel/sysctl_binary.c
|
|
+++ b/kernel/sysctl_binary.c
|
|
@@ -992,7 +992,7 @@ static ssize_t bin_intvec(struct file *file,
|
|
int i;
|
|
|
|
set_fs(KERNEL_DS);
|
|
- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
|
|
+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
|
|
set_fs(old_fs);
|
|
if (result < 0)
|
|
goto out_kfree;
|
|
@@ -1037,7 +1037,7 @@ static ssize_t bin_intvec(struct file *file,
|
|
}
|
|
|
|
set_fs(KERNEL_DS);
|
|
- result = vfs_write(file, buffer, str - buffer, &pos);
|
|
+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
|
|
set_fs(old_fs);
|
|
if (result < 0)
|
|
goto out_kfree;
|
|
@@ -1070,7 +1070,7 @@ static ssize_t bin_ulongvec(struct file *file,
|
|
int i;
|
|
|
|
set_fs(KERNEL_DS);
|
|
- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
|
|
+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
|
|
set_fs(old_fs);
|
|
if (result < 0)
|
|
goto out_kfree;
|
|
@@ -1115,7 +1115,7 @@ static ssize_t bin_ulongvec(struct file *file,
|
|
}
|
|
|
|
set_fs(KERNEL_DS);
|
|
- result = vfs_write(file, buffer, str - buffer, &pos);
|
|
+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
|
|
set_fs(old_fs);
|
|
if (result < 0)
|
|
goto out_kfree;
|
|
@@ -1141,7 +1141,7 @@ static ssize_t bin_uuid(struct file *file,
|
|
int i;
|
|
|
|
set_fs(KERNEL_DS);
|
|
- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
|
|
+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
|
|
set_fs(old_fs);
|
|
if (result < 0)
|
|
goto out;
|
|
@@ -1188,7 +1188,7 @@ static ssize_t bin_dn_node_address(struct file *file,
|
|
__le16 dnaddr;
|
|
|
|
set_fs(KERNEL_DS);
|
|
- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
|
|
+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
|
|
set_fs(old_fs);
|
|
if (result < 0)
|
|
goto out;
|
|
@@ -1237,7 +1237,7 @@ static ssize_t bin_dn_node_address(struct file *file,
|
|
le16_to_cpu(dnaddr) & 0x3ff);
|
|
|
|
set_fs(KERNEL_DS);
|
|
- result = vfs_write(file, buf, len, &pos);
|
|
+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
|
|
set_fs(old_fs);
|
|
if (result < 0)
|
|
goto out;
|
|
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
|
|
index c4f3fdd..678706e 100644
|
|
--- a/kernel/time/alarmtimer.c
|
|
+++ b/kernel/time/alarmtimer.c
|
|
@@ -1016,7 +1016,7 @@ static int __init alarmtimer_init(void)
|
|
struct platform_device *pdev;
|
|
int error = 0;
|
|
int i;
|
|
- struct k_clock alarm_clock = {
|
|
+ static struct k_clock alarm_clock = {
|
|
.clock_getres = alarm_clock_getres,
|
|
.clock_get = alarm_clock_get,
|
|
.timer_create = alarm_timer_create,
|
|
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
|
|
index edb173f..f901f27 100644
|
|
--- a/kernel/time/tick-broadcast.c
|
|
+++ b/kernel/time/tick-broadcast.c
|
|
@@ -120,7 +120,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
|
|
* then clear the broadcast bit.
|
|
*/
|
|
if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
|
|
- int cpu = smp_processor_id();
|
|
+ cpu = smp_processor_id();
|
|
|
|
cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
|
|
tick_broadcast_clear_oneshot(cpu);
|
|
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
|
|
index 0b537f2..484445d 100644
|
|
--- a/kernel/time/timer_stats.c
|
|
+++ b/kernel/time/timer_stats.c
|
|
@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
|
|
static unsigned long nr_entries;
|
|
static struct entry entries[MAX_ENTRIES];
|
|
|
|
-static atomic_t overflow_count;
|
|
+static atomic_unchecked_t overflow_count;
|
|
|
|
/*
|
|
* The entries are in a hash-table, for fast lookup:
|
|
@@ -140,7 +140,7 @@ static void reset_entries(void)
|
|
nr_entries = 0;
|
|
memset(entries, 0, sizeof(entries));
|
|
memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
|
|
- atomic_set(&overflow_count, 0);
|
|
+ atomic_set_unchecked(&overflow_count, 0);
|
|
}
|
|
|
|
static struct entry *alloc_entry(void)
|
|
@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
|
|
if (likely(entry))
|
|
entry->count++;
|
|
else
|
|
- atomic_inc(&overflow_count);
|
|
+ atomic_inc_unchecked(&overflow_count);
|
|
|
|
out_unlock:
|
|
raw_spin_unlock_irqrestore(lock, flags);
|
|
@@ -300,9 +300,9 @@ static int tstats_show(struct seq_file *m, void *v)
|
|
|
|
seq_puts(m, "Timer Stats Version: v0.2\n");
|
|
seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
|
|
- if (atomic_read(&overflow_count))
|
|
+ if (atomic_read_unchecked(&overflow_count))
|
|
seq_printf(m, "Overflow: %d entries\n",
|
|
- atomic_read(&overflow_count));
|
|
+ atomic_read_unchecked(&overflow_count));
|
|
|
|
for (i = 0; i < nr_entries; i++) {
|
|
entry = entries + i;
|
|
diff --git a/kernel/timer.c b/kernel/timer.c
|
|
index 5a2b451..8b8ff14 100644
|
|
--- a/kernel/timer.c
|
|
+++ b/kernel/timer.c
|
|
@@ -1381,7 +1381,7 @@ void update_process_times(int user_tick)
|
|
/*
|
|
* This function runs timers and the timer-tq in bottom half context.
|
|
*/
|
|
-static void run_timer_softirq(struct softirq_action *h)
|
|
+static void run_timer_softirq(void)
|
|
{
|
|
struct tvec_base *base = __this_cpu_read(tvec_bases);
|
|
|
|
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
|
|
index 097ed06..6bda6b6 100644
|
|
--- a/kernel/trace/blktrace.c
|
|
+++ b/kernel/trace/blktrace.c
|
|
@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
|
|
struct blk_trace *bt = filp->private_data;
|
|
char buf[16];
|
|
|
|
- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
|
|
+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
|
|
|
|
return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
|
|
}
|
|
@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
|
|
return 1;
|
|
|
|
bt = buf->chan->private_data;
|
|
- atomic_inc(&bt->dropped);
|
|
+ atomic_inc_unchecked(&bt->dropped);
|
|
return 0;
|
|
}
|
|
|
|
@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
|
|
|
|
bt->dir = dir;
|
|
bt->dev = dev;
|
|
- atomic_set(&bt->dropped, 0);
|
|
+ atomic_set_unchecked(&bt->dropped, 0);
|
|
|
|
ret = -EIO;
|
|
bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
|
|
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
|
|
index 09a80ae..de2ff44 100644
|
|
--- a/kernel/trace/ftrace.c
|
|
+++ b/kernel/trace/ftrace.c
|
|
@@ -1814,12 +1814,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
|
|
if (unlikely(ftrace_disabled))
|
|
return 0;
|
|
|
|
+ ret = ftrace_arch_code_modify_prepare();
|
|
+ FTRACE_WARN_ON(ret);
|
|
+ if (ret)
|
|
+ return 0;
|
|
+
|
|
ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
|
|
+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
|
|
if (ret) {
|
|
ftrace_bug(ret, ip);
|
|
- return 0;
|
|
}
|
|
- return 1;
|
|
+ return ret ? 0 : 1;
|
|
}
|
|
|
|
/*
|
|
@@ -3020,7 +3025,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
|
|
|
|
int
|
|
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
|
|
- void *data)
|
|
+ void *data)
|
|
{
|
|
struct ftrace_func_probe *entry;
|
|
struct ftrace_page *pg;
|
|
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
|
|
index 8ab1bf8..803061f 100644
|
|
--- a/kernel/trace/trace.c
|
|
+++ b/kernel/trace/trace.c
|
|
@@ -4448,10 +4448,9 @@ static const struct file_operations tracing_dyn_info_fops = {
|
|
};
|
|
#endif
|
|
|
|
-static struct dentry *d_tracer;
|
|
-
|
|
struct dentry *tracing_init_dentry(void)
|
|
{
|
|
+ static struct dentry *d_tracer;
|
|
static int once;
|
|
|
|
if (d_tracer)
|
|
@@ -4471,10 +4470,9 @@ struct dentry *tracing_init_dentry(void)
|
|
return d_tracer;
|
|
}
|
|
|
|
-static struct dentry *d_percpu;
|
|
-
|
|
struct dentry *tracing_dentry_percpu(void)
|
|
{
|
|
+ static struct dentry *d_percpu;
|
|
static int once;
|
|
struct dentry *d_tracer;
|
|
|
|
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
|
|
index 2f737f5..02e110d 100644
|
|
--- a/kernel/trace/trace_events.c
|
|
+++ b/kernel/trace/trace_events.c
|
|
@@ -1308,10 +1308,6 @@ static LIST_HEAD(ftrace_module_file_list);
|
|
struct ftrace_module_file_ops {
|
|
struct list_head list;
|
|
struct module *mod;
|
|
- struct file_operations id;
|
|
- struct file_operations enable;
|
|
- struct file_operations format;
|
|
- struct file_operations filter;
|
|
};
|
|
|
|
static struct ftrace_module_file_ops *
|
|
@@ -1332,17 +1328,12 @@ trace_create_file_ops(struct module *mod)
|
|
|
|
file_ops->mod = mod;
|
|
|
|
- file_ops->id = ftrace_event_id_fops;
|
|
- file_ops->id.owner = mod;
|
|
-
|
|
- file_ops->enable = ftrace_enable_fops;
|
|
- file_ops->enable.owner = mod;
|
|
-
|
|
- file_ops->filter = ftrace_event_filter_fops;
|
|
- file_ops->filter.owner = mod;
|
|
-
|
|
- file_ops->format = ftrace_event_format_fops;
|
|
- file_ops->format.owner = mod;
|
|
+ pax_open_kernel();
|
|
+ *(void **)&mod->trace_id.owner = mod;
|
|
+ *(void **)&mod->trace_enable.owner = mod;
|
|
+ *(void **)&mod->trace_filter.owner = mod;
|
|
+ *(void **)&mod->trace_format.owner = mod;
|
|
+ pax_close_kernel();
|
|
|
|
list_add(&file_ops->list, &ftrace_module_file_list);
|
|
|
|
@@ -1376,8 +1367,8 @@ static void trace_module_add_events(struct module *mod)
|
|
|
|
for_each_event(call, start, end) {
|
|
__trace_add_event_call(*call, mod,
|
|
- &file_ops->id, &file_ops->enable,
|
|
- &file_ops->filter, &file_ops->format);
|
|
+ &mod->trace_id, &mod->trace_enable,
|
|
+ &mod->trace_filter, &mod->trace_format);
|
|
}
|
|
}
|
|
|
|
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
|
|
index 580a05e..9b31acb 100644
|
|
--- a/kernel/trace/trace_kprobe.c
|
|
+++ b/kernel/trace/trace_kprobe.c
|
|
@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
|
|
long ret;
|
|
int maxlen = get_rloc_len(*(u32 *)dest);
|
|
u8 *dst = get_rloc_data(dest);
|
|
- u8 *src = addr;
|
|
+ const u8 __user *src = (const u8 __force_user *)addr;
|
|
mm_segment_t old_fs = get_fs();
|
|
if (!maxlen)
|
|
return;
|
|
@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
|
|
pagefault_disable();
|
|
do
|
|
ret = __copy_from_user_inatomic(dst++, src++, 1);
|
|
- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
|
|
+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
|
|
dst[-1] = '\0';
|
|
pagefault_enable();
|
|
set_fs(old_fs);
|
|
@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
|
|
((u8 *)get_rloc_data(dest))[0] = '\0';
|
|
*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
|
|
} else
|
|
- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
|
|
+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
|
|
get_rloc_offs(*(u32 *)dest));
|
|
}
|
|
/* Return the length of string -- including null terminal byte */
|
|
@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
|
|
set_fs(KERNEL_DS);
|
|
pagefault_disable();
|
|
do {
|
|
- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
|
|
+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
|
|
len++;
|
|
} while (c && ret == 0 && len < MAX_STRING_SIZE);
|
|
pagefault_enable();
|
|
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
|
|
index fd3c8aa..5f324a6 100644
|
|
--- a/kernel/trace/trace_mmiotrace.c
|
|
+++ b/kernel/trace/trace_mmiotrace.c
|
|
@@ -24,7 +24,7 @@ struct header_iter {
|
|
static struct trace_array *mmio_trace_array;
|
|
static bool overrun_detected;
|
|
static unsigned long prev_overruns;
|
|
-static atomic_t dropped_count;
|
|
+static atomic_unchecked_t dropped_count;
|
|
|
|
static void mmio_reset_data(struct trace_array *tr)
|
|
{
|
|
@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
|
|
|
|
static unsigned long count_overruns(struct trace_iterator *iter)
|
|
{
|
|
- unsigned long cnt = atomic_xchg(&dropped_count, 0);
|
|
+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
|
|
unsigned long over = ring_buffer_overruns(iter->tr->buffer);
|
|
|
|
if (over > prev_overruns)
|
|
@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
|
|
event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
|
|
sizeof(*entry), 0, pc);
|
|
if (!event) {
|
|
- atomic_inc(&dropped_count);
|
|
+ atomic_inc_unchecked(&dropped_count);
|
|
return;
|
|
}
|
|
entry = ring_buffer_event_data(event);
|
|
@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
|
|
event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
|
|
sizeof(*entry), 0, pc);
|
|
if (!event) {
|
|
- atomic_inc(&dropped_count);
|
|
+ atomic_inc_unchecked(&dropped_count);
|
|
return;
|
|
}
|
|
entry = ring_buffer_event_data(event);
|
|
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
|
|
index cb29ce2..a736224 100644
|
|
--- a/kernel/trace/trace_output.c
|
|
+++ b/kernel/trace/trace_output.c
|
|
@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
|
|
|
|
p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
|
|
if (!IS_ERR(p)) {
|
|
- p = mangle_path(s->buffer + s->len, p, "\n");
|
|
+ p = mangle_path(s->buffer + s->len, p, "\n\\");
|
|
if (p) {
|
|
s->len = p - s->buffer;
|
|
return 1;
|
|
diff --git a/kernel/trace/trace_stack.c.rej b/kernel/trace/trace_stack.c.rej
|
|
new file mode 100644
|
|
index 0000000..a6ab037
|
|
--- /dev/null
|
|
+++ b/kernel/trace/trace_stack.c.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- kernel/trace/trace_stack.c 2012-03-19 10:39:11.988049175 +0100
|
|
++++ kernel/trace/trace_stack.c 2012-05-21 12:10:11.900049018 +0200
|
|
+@@ -53,7 +53,7 @@ static inline void check_stack(void)
|
|
+ return;
|
|
+
|
|
+ /* we do not handle interrupt stacks yet */
|
|
+- if (!object_is_on_stack(&this_size))
|
|
++ if (!object_starts_on_stack(&this_size))
|
|
+ return;
|
|
+
|
|
+ local_irq_save(flags);
|
|
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
|
|
index 209b379..7f76423 100644
|
|
--- a/kernel/trace/trace_workqueue.c
|
|
+++ b/kernel/trace/trace_workqueue.c
|
|
@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
|
|
int cpu;
|
|
pid_t pid;
|
|
/* Can be inserted from interrupt or user context, need to be atomic */
|
|
- atomic_t inserted;
|
|
+ atomic_unchecked_t inserted;
|
|
/*
|
|
* Don't need to be atomic, works are serialized in a single workqueue thread
|
|
* on a single CPU.
|
|
@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
|
|
spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
|
|
list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
|
|
if (node->pid == wq_thread->pid) {
|
|
- atomic_inc(&node->inserted);
|
|
+ atomic_inc_unchecked(&node->inserted);
|
|
goto found;
|
|
}
|
|
}
|
|
@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
|
|
tsk = get_pid_task(pid, PIDTYPE_PID);
|
|
if (tsk) {
|
|
seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
|
|
- atomic_read(&cws->inserted), cws->executed,
|
|
+ atomic_read_unchecked(&cws->inserted), cws->executed,
|
|
tsk->comm);
|
|
put_task_struct(tsk);
|
|
}
|
|
diff --git a/lib/bitmap.c b/lib/bitmap.c
|
|
index 3ed0f82..757b69e 100644
|
|
--- a/lib/bitmap.c
|
|
+++ b/lib/bitmap.c
|
|
@@ -427,7 +427,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
|
|
{
|
|
int c, old_c, totaldigits, ndigits, nchunks, nbits;
|
|
u32 chunk;
|
|
- const char __user __force *ubuf = (const char __user __force *)buf;
|
|
+ const char __user *ubuf = (const char __force_user *)buf;
|
|
|
|
bitmap_zero(maskp, nmaskbits);
|
|
|
|
@@ -512,7 +512,7 @@ int bitmap_parse_user(const char __user *ubuf,
|
|
{
|
|
if (!access_ok(VERIFY_READ, ubuf, ulen))
|
|
return -EFAULT;
|
|
- return __bitmap_parse((const char __force *)ubuf,
|
|
+ return __bitmap_parse((const char __force_kernel *)ubuf,
|
|
ulen, 1, maskp, nmaskbits);
|
|
|
|
}
|
|
@@ -705,7 +705,7 @@ int bitmap_parselist_user(const char __user *ubuf,
|
|
{
|
|
if (!access_ok(VERIFY_READ, ubuf, ulen))
|
|
return -EFAULT;
|
|
- return __bitmap_parselist((const char __force *)ubuf,
|
|
+ return __bitmap_parselist((const char __force_kernel *)ubuf,
|
|
ulen, 1, maskp, nmaskbits);
|
|
}
|
|
EXPORT_SYMBOL(bitmap_parselist_user);
|
|
diff --git a/lib/bitmap.c.rej b/lib/bitmap.c.rej
|
|
new file mode 100644
|
|
index 0000000..750189f
|
|
--- /dev/null
|
|
+++ b/lib/bitmap.c.rej
|
|
@@ -0,0 +1,10 @@
|
|
+diff a/lib/bitmap.c b/lib/bitmap.c (rejected hunks)
|
|
+@@ -604,7 +604,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
|
|
+ {
|
|
+ unsigned a, b;
|
|
+ int c, old_c, totaldigits;
|
|
+- const char __user __force *ubuf = (const char __user __force *)buf;
|
|
++ const char __user *ubuf = (const char __force_user *)buf;
|
|
+ int exp_digit, in_range;
|
|
+
|
|
+ totaldigits = c = 0;
|
|
diff --git a/lib/bug.c b/lib/bug.c
|
|
index a28c141..2bd3d95 100644
|
|
--- a/lib/bug.c
|
|
+++ b/lib/bug.c
|
|
@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
|
|
return BUG_TRAP_TYPE_NONE;
|
|
|
|
bug = find_bug(bugaddr);
|
|
+ if (!bug)
|
|
+ return BUG_TRAP_TYPE_NONE;
|
|
|
|
file = NULL;
|
|
line = 0;
|
|
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
|
|
index 0ab9ae8..f01ceca 100644
|
|
--- a/lib/debugobjects.c
|
|
+++ b/lib/debugobjects.c
|
|
@@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
|
|
if (limit > 4)
|
|
return;
|
|
|
|
- is_on_stack = object_is_on_stack(addr);
|
|
+ is_on_stack = object_starts_on_stack(addr);
|
|
if (is_on_stack == onstack)
|
|
return;
|
|
|
|
diff --git a/lib/devres.c b/lib/devres.c
|
|
index 9c76b3a..b92adbf 100644
|
|
--- a/lib/devres.c
|
|
+++ b/lib/devres.c
|
|
@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
|
|
void devm_iounmap(struct device *dev, void __iomem *addr)
|
|
{
|
|
WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
|
|
- (void *)addr));
|
|
+ (void __force *)addr));
|
|
iounmap(addr);
|
|
}
|
|
EXPORT_SYMBOL(devm_iounmap);
|
|
@@ -223,7 +223,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
|
|
{
|
|
ioport_unmap(addr);
|
|
WARN_ON(devres_destroy(dev, devm_ioport_map_release,
|
|
- devm_ioport_map_match, (void *)addr));
|
|
+ devm_ioport_map_match, (void __force *)addr));
|
|
}
|
|
EXPORT_SYMBOL(devm_ioport_unmap);
|
|
|
|
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
|
|
index 13ef233..5241683 100644
|
|
--- a/lib/dma-debug.c
|
|
+++ b/lib/dma-debug.c
|
|
@@ -924,7 +924,7 @@ static void check_unmap(struct dma_debug_entry *ref)
|
|
|
|
static void check_for_stack(struct device *dev, void *addr)
|
|
{
|
|
- if (object_is_on_stack(addr))
|
|
+ if (object_starts_on_stack(addr))
|
|
err_printk(dev, NULL, "DMA-API: device driver maps memory from"
|
|
"stack [addr=%p]\n", addr);
|
|
}
|
|
diff --git a/lib/extable.c b/lib/extable.c
|
|
index 4cac81e..63e9b8f 100644
|
|
--- a/lib/extable.c
|
|
+++ b/lib/extable.c
|
|
@@ -13,6 +13,7 @@
|
|
#include <linux/init.h>
|
|
#include <linux/sort.h>
|
|
#include <asm/uaccess.h>
|
|
+#include <asm/pgtable.h>
|
|
|
|
#ifndef ARCH_HAS_SORT_EXTABLE
|
|
/*
|
|
@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
|
|
void sort_extable(struct exception_table_entry *start,
|
|
struct exception_table_entry *finish)
|
|
{
|
|
+ pax_open_kernel();
|
|
sort(start, finish - start, sizeof(struct exception_table_entry),
|
|
cmp_ex, NULL);
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
#ifdef CONFIG_MODULES
|
|
diff --git a/lib/inflate.c b/lib/inflate.c
|
|
index 013a761..c28f3fc 100644
|
|
--- a/lib/inflate.c
|
|
+++ b/lib/inflate.c
|
|
@@ -269,7 +269,7 @@ static void free(void *where)
|
|
malloc_ptr = free_mem_ptr;
|
|
}
|
|
#else
|
|
-#define malloc(a) kmalloc(a, GFP_KERNEL)
|
|
+#define malloc(a) kmalloc((a), GFP_KERNEL)
|
|
#define free(a) kfree(a)
|
|
#endif
|
|
|
|
diff --git a/lib/ioremap.c b/lib/ioremap.c
|
|
index 0c9216c..863bd89 100644
|
|
--- a/lib/ioremap.c
|
|
+++ b/lib/ioremap.c
|
|
@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
|
|
unsigned long next;
|
|
|
|
phys_addr -= addr;
|
|
- pmd = pmd_alloc(&init_mm, pud, addr);
|
|
+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
|
|
if (!pmd)
|
|
return -ENOMEM;
|
|
do {
|
|
@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
|
|
unsigned long next;
|
|
|
|
phys_addr -= addr;
|
|
- pud = pud_alloc(&init_mm, pgd, addr);
|
|
+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
|
|
if (!pud)
|
|
return -ENOMEM;
|
|
do {
|
|
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
|
|
index 3ac50dc..240bb7e 100644
|
|
--- a/lib/radix-tree.c
|
|
+++ b/lib/radix-tree.c
|
|
@@ -79,7 +79,7 @@ struct radix_tree_preload {
|
|
int nr;
|
|
struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
|
|
};
|
|
-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
|
|
+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
|
|
|
|
static inline void *ptr_to_indirect(void *ptr)
|
|
{
|
|
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
|
|
index ddd07c6..7d443d1 100644
|
|
--- a/lib/vsprintf.c
|
|
+++ b/lib/vsprintf.c
|
|
@@ -870,12 +870,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
|
|
{
|
|
if (!ptr && *fmt != 'K') {
|
|
/*
|
|
- * Print (null) with the same width as a pointer so it makes
|
|
+ * Print (nil) with the same width as a pointer so it makes
|
|
* tabular output look nice.
|
|
*/
|
|
if (spec.field_width == -1)
|
|
spec.field_width = 2 * sizeof(void *);
|
|
- return string(buf, end, "(null)", spec);
|
|
+ return string(buf, end, "(nil)", spec);
|
|
}
|
|
|
|
switch (*fmt) {
|
|
@@ -1686,11 +1686,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
|
|
typeof(type) value; \
|
|
if (sizeof(type) == 8) { \
|
|
args = PTR_ALIGN(args, sizeof(u32)); \
|
|
- *(u32 *)&value = *(u32 *)args; \
|
|
- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
|
|
+ *(u32 *)&value = *(const u32 *)args; \
|
|
+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
|
|
} else { \
|
|
args = PTR_ALIGN(args, sizeof(type)); \
|
|
- value = *(typeof(type) *)args; \
|
|
+ value = *(const typeof(type) *)args; \
|
|
} \
|
|
args += sizeof(type); \
|
|
value; \
|
|
@@ -1753,7 +1753,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
|
|
case FORMAT_TYPE_STR: {
|
|
const char *str_arg = args;
|
|
args += strlen(str_arg) + 1;
|
|
- str = string(str, end, (char *)str_arg, spec);
|
|
+ str = string(str, end, str_arg, spec);
|
|
break;
|
|
}
|
|
|
|
diff --git a/mm/Kconfig b/mm/Kconfig
|
|
index bbab5a6..ac87a0b 100644
|
|
--- a/mm/Kconfig
|
|
+++ b/mm/Kconfig
|
|
@@ -247,10 +247,10 @@ config KSM
|
|
root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
|
|
|
|
config DEFAULT_MMAP_MIN_ADDR
|
|
- int "Low address space to protect from user allocation"
|
|
+ int "Low address space to protect from user allocation"
|
|
depends on MMU
|
|
- default 4096
|
|
- help
|
|
+ default 32768
|
|
+ help
|
|
This is the portion of low virtual memory which should be protected
|
|
from userspace allocation. Keeping a user from writing to low pages
|
|
can help reduce the impact of kernel NULL pointer bugs.
|
|
diff --git a/mm/filemap.c b/mm/filemap.c
|
|
index 8ed5c5c..6159395 100644
|
|
--- a/mm/filemap.c
|
|
+++ b/mm/filemap.c
|
|
@@ -1762,7 +1762,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
|
|
struct address_space *mapping = file->f_mapping;
|
|
|
|
if (!mapping->a_ops->readpage)
|
|
- return -ENOEXEC;
|
|
+ return -ENODEV;
|
|
file_accessed(file);
|
|
vma->vm_ops = &generic_file_vm_ops;
|
|
vma->vm_flags |= VM_CAN_NONLINEAR;
|
|
diff --git a/mm/fremap.c b/mm/fremap.c
|
|
index 9ed4fd4..c42648d 100644
|
|
--- a/mm/fremap.c
|
|
+++ b/mm/fremap.c
|
|
@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
|
|
retry:
|
|
vma = find_vma(mm, start);
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
|
|
+ goto out;
|
|
+#endif
|
|
+
|
|
/*
|
|
* Make sure the vma is shared, that it supports prefaulting,
|
|
* and that the remapped range is valid and fully within
|
|
diff --git a/mm/highmem.c b/mm/highmem.c
|
|
index 57d82c6..e9e0552 100644
|
|
--- a/mm/highmem.c
|
|
+++ b/mm/highmem.c
|
|
@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
|
|
* So no dangers, even with speculative execution.
|
|
*/
|
|
page = pte_page(pkmap_page_table[i]);
|
|
+ pax_open_kernel();
|
|
pte_clear(&init_mm, (unsigned long)page_address(page),
|
|
&pkmap_page_table[i]);
|
|
-
|
|
+ pax_close_kernel();
|
|
set_page_address(page, NULL);
|
|
need_flush = 1;
|
|
}
|
|
@@ -186,9 +187,11 @@ static inline unsigned long map_new_virtual(struct page *page)
|
|
}
|
|
}
|
|
vaddr = PKMAP_ADDR(last_pkmap_nr);
|
|
+
|
|
+ pax_open_kernel();
|
|
set_pte_at(&init_mm, vaddr,
|
|
&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
|
|
-
|
|
+ pax_close_kernel();
|
|
pkmap_count[last_pkmap_nr] = 1;
|
|
set_page_address(page, (void *)vaddr);
|
|
|
|
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
|
|
index 8978c1b..f456bba 100644
|
|
--- a/mm/huge_memory.c
|
|
+++ b/mm/huge_memory.c
|
|
@@ -733,7 +733,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
* run pte_offset_map on the pmd, if an huge pmd could
|
|
* materialize from under us from a different thread.
|
|
*/
|
|
- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
|
|
+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
|
|
return VM_FAULT_OOM;
|
|
/* if an huge pmd materialized from under us just retry later */
|
|
if (unlikely(pmd_trans_huge(*pmd)))
|
|
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
|
|
index 9fa0186..6ad964d 100644
|
|
--- a/mm/hugetlb.c
|
|
+++ b/mm/hugetlb.c
|
|
@@ -2503,6 +2503,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
return 1;
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
|
|
+{
|
|
+ struct mm_struct *mm = vma->vm_mm;
|
|
+ struct vm_area_struct *vma_m;
|
|
+ unsigned long address_m;
|
|
+ pte_t *ptep_m;
|
|
+
|
|
+ vma_m = pax_find_mirror_vma(vma);
|
|
+ if (!vma_m)
|
|
+ return;
|
|
+
|
|
+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
|
|
+ address_m = address + SEGMEXEC_TASK_SIZE;
|
|
+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
|
|
+ get_page(page_m);
|
|
+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
|
|
+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
|
|
+}
|
|
+#endif
|
|
+
|
|
/*
|
|
* Hugetlb_cow() should be called with page lock of the original hugepage held.
|
|
* Called with hugetlb_instantiation_mutex held and pte_page locked so we
|
|
@@ -2615,6 +2636,11 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
make_huge_pte(vma, new_page, 1));
|
|
page_remove_rmap(old_page);
|
|
hugepage_add_new_anon_rmap(new_page, vma, address);
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ pax_mirror_huge_pte(vma, address, new_page);
|
|
+#endif
|
|
+
|
|
/* Make the old page be freed below */
|
|
new_page = old_page;
|
|
mmu_notifier_invalidate_range_end(mm,
|
|
@@ -2769,6 +2795,10 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
&& (vma->vm_flags & VM_SHARED)));
|
|
set_huge_pte_at(mm, address, ptep, new_pte);
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ pax_mirror_huge_pte(vma, address, page);
|
|
+#endif
|
|
+
|
|
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
|
|
/* Optimization, do the COW without a second fault */
|
|
ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
|
|
@@ -2812,6 +2842,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
VM_FAULT_SET_HINDEX(h - hstates);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ vma_m = pax_find_mirror_vma(vma);
|
|
+ if (vma_m) {
|
|
+ unsigned long address_m;
|
|
+
|
|
+ if (vma->vm_start > vma_m->vm_start) {
|
|
+ address_m = address;
|
|
+ address -= SEGMEXEC_TASK_SIZE;
|
|
+ vma = vma_m;
|
|
+ h = hstate_vma(vma);
|
|
+ } else
|
|
+ address_m = address + SEGMEXEC_TASK_SIZE;
|
|
+
|
|
+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
|
|
+ return VM_FAULT_OOM;
|
|
+ address_m &= HPAGE_MASK;
|
|
+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
|
|
+ }
|
|
+#endif
|
|
+
|
|
ptep = huge_pte_alloc(mm, address, huge_page_size(h));
|
|
if (!ptep)
|
|
return VM_FAULT_OOM;
|
|
diff --git a/mm/hugetlb.c.rej b/mm/hugetlb.c.rej
|
|
new file mode 100644
|
|
index 0000000..fb998d0
|
|
--- /dev/null
|
|
+++ b/mm/hugetlb.c.rej
|
|
@@ -0,0 +1,12 @@
|
|
+diff a/mm/hugetlb.c b/mm/hugetlb.c (rejected hunks)
|
|
+@@ -2814,6 +2844,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
+ static DEFINE_MUTEX(hugetlb_instantiation_mutex);
|
|
+ struct hstate *h = hstate_vma(vma);
|
|
+
|
|
++#ifdef CONFIG_PAX_SEGMEXEC
|
|
++ struct vm_area_struct *vma_m;
|
|
++#endif
|
|
++
|
|
+ address &= huge_page_mask(h);
|
|
+
|
|
+ ptep = huge_pte_offset(mm, address);
|
|
diff --git a/mm/internal.h b/mm/internal.h
|
|
index f5369cc..daf5904 100644
|
|
--- a/mm/internal.h
|
|
+++ b/mm/internal.h
|
|
@@ -97,6 +97,7 @@ extern bool zone_reclaimable(struct zone *zone);
|
|
* in mm/page_alloc.c
|
|
*/
|
|
extern void __free_pages_bootmem(struct page *page, unsigned int order);
|
|
+extern void free_compound_page(struct page *page);
|
|
extern void prep_compound_page(struct page *page, unsigned long order);
|
|
#ifdef CONFIG_MEMORY_FAILURE
|
|
extern bool is_free_buddy_page(struct page *page);
|
|
diff --git a/mm/maccess.c b/mm/maccess.c
|
|
index d53adf9..03a24bf 100644
|
|
--- a/mm/maccess.c
|
|
+++ b/mm/maccess.c
|
|
@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
|
|
set_fs(KERNEL_DS);
|
|
pagefault_disable();
|
|
ret = __copy_from_user_inatomic(dst,
|
|
- (__force const void __user *)src, size);
|
|
+ (const void __force_user *)src, size);
|
|
pagefault_enable();
|
|
set_fs(old_fs);
|
|
|
|
@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
|
|
|
|
set_fs(KERNEL_DS);
|
|
pagefault_disable();
|
|
- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
|
|
+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
|
|
pagefault_enable();
|
|
set_fs(old_fs);
|
|
|
|
diff --git a/mm/madvise.c b/mm/madvise.c
|
|
index b075d1d..a3c1d11 100644
|
|
--- a/mm/madvise.c
|
|
+++ b/mm/madvise.c
|
|
@@ -46,6 +46,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
|
|
pgoff_t pgoff;
|
|
unsigned long new_flags = vma->vm_flags;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ struct vm_area_struct *vma_m;
|
|
+#endif
|
|
+
|
|
switch (behavior) {
|
|
case MADV_NORMAL:
|
|
new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
|
|
@@ -118,6 +122,13 @@ static long madvise_behavior(struct vm_area_struct * vma,
|
|
/*
|
|
* vm_flags is protected by the mmap_sem held in write mode.
|
|
*/
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ vma_m = pax_find_mirror_vma(vma);
|
|
+ if (vma_m)
|
|
+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
|
|
+#endif
|
|
+
|
|
vma->vm_flags = new_flags;
|
|
|
|
out:
|
|
@@ -176,6 +187,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
|
|
struct vm_area_struct ** prev,
|
|
unsigned long start, unsigned long end)
|
|
{
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ struct vm_area_struct *vma_m;
|
|
+#endif
|
|
+
|
|
*prev = vma;
|
|
if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
|
|
return -EINVAL;
|
|
@@ -188,6 +204,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
|
|
zap_page_range(vma, start, end - start, &details);
|
|
} else
|
|
zap_page_range(vma, start, end - start, NULL);
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ vma_m = pax_find_mirror_vma(vma);
|
|
+ if (vma_m) {
|
|
+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
|
|
+ struct zap_details details = {
|
|
+ .nonlinear_vma = vma_m,
|
|
+ .last_index = ULONG_MAX,
|
|
+ };
|
|
+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
|
|
+ } else
|
|
+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
|
|
+ }
|
|
+#endif
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -395,6 +426,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
|
|
if (end < start)
|
|
goto out;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
|
|
+ if (end > SEGMEXEC_TASK_SIZE)
|
|
+ goto out;
|
|
+ } else
|
|
+#endif
|
|
+
|
|
+ if (end > TASK_SIZE)
|
|
+ goto out;
|
|
+
|
|
error = 0;
|
|
if (end == start)
|
|
goto out;
|
|
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
|
|
index 52afb5b..5727a5b 100644
|
|
--- a/mm/memory-failure.c
|
|
+++ b/mm/memory-failure.c
|
|
@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
|
|
|
|
int sysctl_memory_failure_recovery __read_mostly = 1;
|
|
|
|
-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
|
|
+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
|
|
|
|
#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
|
|
|
|
@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
|
|
pfn, t->comm, t->pid);
|
|
si.si_signo = SIGBUS;
|
|
si.si_errno = 0;
|
|
- si.si_addr = (void *)addr;
|
|
+ si.si_addr = (void __user *)addr;
|
|
#ifdef __ARCH_SI_TRAPNO
|
|
si.si_trapno = trapno;
|
|
#endif
|
|
@@ -1041,7 +1041,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
|
|
}
|
|
|
|
nr_pages = 1 << compound_trans_order(hpage);
|
|
- atomic_long_add(nr_pages, &mce_bad_pages);
|
|
+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
|
|
|
|
/*
|
|
* We need/can do nothing about count=0 pages.
|
|
@@ -1132,7 +1132,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
|
|
}
|
|
if (hwpoison_filter(p)) {
|
|
if (TestClearPageHWPoison(p))
|
|
- atomic_long_sub(nr_pages, &mce_bad_pages);
|
|
+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
|
|
unlock_page(hpage);
|
|
put_page(hpage);
|
|
return 0;
|
|
@@ -1327,7 +1327,7 @@ int unpoison_memory(unsigned long pfn)
|
|
return 0;
|
|
}
|
|
if (TestClearPageHWPoison(p))
|
|
- atomic_long_sub(nr_pages, &mce_bad_pages);
|
|
+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
|
|
pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
|
|
return 0;
|
|
}
|
|
@@ -1341,7 +1341,7 @@ int unpoison_memory(unsigned long pfn)
|
|
*/
|
|
if (TestClearPageHWPoison(page)) {
|
|
pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
|
|
- atomic_long_sub(nr_pages, &mce_bad_pages);
|
|
+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
|
|
freeit = 1;
|
|
if (PageHuge(page))
|
|
clear_page_hwpoison_huge_page(page);
|
|
@@ -1601,7 +1601,7 @@ int soft_offline_page(struct page *page, int flags)
|
|
return ret;
|
|
|
|
done:
|
|
- atomic_long_add(1, &mce_bad_pages);
|
|
+ atomic_long_add_unchecked(1, &mce_bad_pages);
|
|
SetPageHWPoison(page);
|
|
/* keep elevated page count for bad page */
|
|
return ret;
|
|
diff --git a/mm/memory-failure.c.rej b/mm/memory-failure.c.rej
|
|
new file mode 100644
|
|
index 0000000..10a09a0
|
|
--- /dev/null
|
|
+++ b/mm/memory-failure.c.rej
|
|
@@ -0,0 +1,20 @@
|
|
+--- mm/memory-failure.c 2012-08-09 20:18:52.153847491 +0200
|
|
++++ mm/memory-failure.c 2012-08-09 20:19:06.153846744 +0200
|
|
+@@ -1068,7 +1068,7 @@ int memory_failure(unsigned long pfn, in
|
|
+ if (!PageHWPoison(hpage)
|
|
+ || (hwpoison_filter(p) && TestClearPageHWPoison(p))
|
|
+ || (p != hpage && TestSetPageHWPoison(hpage))) {
|
|
+- atomic_long_sub(nr_pages, &mce_bad_pages);
|
|
++ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
|
|
+ return 0;
|
|
+ }
|
|
+ set_page_hwpoison_huge_page(hpage);
|
|
+@@ -1448,7 +1448,7 @@ static int soft_offline_huge_page(struct
|
|
+ }
|
|
+ done:
|
|
+ if (!PageHWPoison(hpage))
|
|
+- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
|
|
++ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
|
|
+ set_page_hwpoison_huge_page(hpage);
|
|
+ dequeue_hwpoisoned_huge_page(hpage);
|
|
+ /* keep elevated page count for bad page */
|
|
diff --git a/mm/memory.c b/mm/memory.c
|
|
index 7c35fa7..50a6c25 100644
|
|
--- a/mm/memory.c
|
|
+++ b/mm/memory.c
|
|
@@ -441,8 +441,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
|
|
return;
|
|
|
|
pmd = pmd_offset(pud, start);
|
|
+
|
|
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
|
|
pud_clear(pud);
|
|
pmd_free_tlb(tlb, pmd, start);
|
|
+#endif
|
|
+
|
|
}
|
|
|
|
static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
|
|
@@ -473,9 +477,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
|
|
if (end - 1 > ceiling - 1)
|
|
return;
|
|
|
|
+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
|
|
pud = pud_offset(pgd, start);
|
|
pgd_clear(pgd);
|
|
pud_free_tlb(tlb, pud, start);
|
|
+#endif
|
|
+
|
|
}
|
|
|
|
/*
|
|
@@ -1609,12 +1616,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
|
|
return page;
|
|
}
|
|
|
|
-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
|
|
-{
|
|
- return stack_guard_page_start(vma, addr) ||
|
|
- stack_guard_page_end(vma, addr+PAGE_SIZE);
|
|
-}
|
|
-
|
|
/**
|
|
* __get_user_pages() - pin user pages in memory
|
|
* @tsk: task_struct of target task
|
|
@@ -1687,10 +1688,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
|
(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
|
|
i = 0;
|
|
|
|
- do {
|
|
+ while (nr_pages) {
|
|
struct vm_area_struct *vma;
|
|
|
|
- vma = find_extend_vma(mm, start);
|
|
+ vma = find_vma(mm, start);
|
|
if (!vma && in_gate_area(mm, start)) {
|
|
unsigned long pg = start & PAGE_MASK;
|
|
pgd_t *pgd;
|
|
@@ -1751,7 +1752,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
|
}
|
|
}
|
|
|
|
- if (!vma ||
|
|
+ if (!vma || start < vma->vm_start ||
|
|
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
|
|
!(vm_flags & vma->vm_flags))
|
|
return i ? : -EFAULT;
|
|
@@ -1778,11 +1779,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
|
int ret;
|
|
unsigned int fault_flags = 0;
|
|
|
|
- /* For mlock, just skip the stack guard page. */
|
|
- if (foll_flags & FOLL_MLOCK) {
|
|
- if (stack_guard_page(vma, start))
|
|
- goto next_page;
|
|
- }
|
|
if (foll_flags & FOLL_WRITE)
|
|
fault_flags |= FAULT_FLAG_WRITE;
|
|
if (nonblocking)
|
|
@@ -1856,7 +1852,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
|
start += PAGE_SIZE;
|
|
nr_pages--;
|
|
} while (nr_pages && start < vma->vm_end);
|
|
- } while (nr_pages);
|
|
+ }
|
|
return i;
|
|
}
|
|
EXPORT_SYMBOL(__get_user_pages);
|
|
@@ -2068,6 +2064,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
|
|
page_add_file_rmap(page);
|
|
set_pte_at(mm, addr, pte, mk_pte(page, prot));
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ pax_mirror_file_pte(vma, addr, page, ptl);
|
|
+#endif
|
|
+
|
|
retval = 0;
|
|
pte_unmap_unlock(pte, ptl);
|
|
return retval;
|
|
@@ -2102,10 +2102,22 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
|
|
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
|
|
struct page *page)
|
|
{
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ struct vm_area_struct *vma_m;
|
|
+#endif
|
|
+
|
|
if (addr < vma->vm_start || addr >= vma->vm_end)
|
|
return -EFAULT;
|
|
if (!page_count(page))
|
|
return -EINVAL;
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ vma_m = pax_find_mirror_vma(vma);
|
|
+ if (vma_m)
|
|
+ vma_m->vm_flags |= VM_INSERTPAGE;
|
|
+#endif
|
|
+
|
|
vma->vm_flags |= VM_INSERTPAGE;
|
|
return insert_page(vma, addr, page, vma->vm_page_prot);
|
|
}
|
|
@@ -2191,6 +2203,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
|
|
unsigned long pfn)
|
|
{
|
|
BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
|
|
+ BUG_ON(vma->vm_mirror);
|
|
|
|
if (addr < vma->vm_start || addr >= vma->vm_end)
|
|
return -EFAULT;
|
|
@@ -2445,7 +2458,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
|
|
|
|
BUG_ON(pud_huge(*pud));
|
|
|
|
- pmd = pmd_alloc(mm, pud, addr);
|
|
+ pmd = (mm == &init_mm) ?
|
|
+ pmd_alloc_kernel(mm, pud, addr) :
|
|
+ pmd_alloc(mm, pud, addr);
|
|
if (!pmd)
|
|
return -ENOMEM;
|
|
do {
|
|
@@ -2465,7 +2480,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
|
|
unsigned long next;
|
|
int err;
|
|
|
|
- pud = pud_alloc(mm, pgd, addr);
|
|
+ pud = (mm == &init_mm) ?
|
|
+ pud_alloc_kernel(mm, pgd, addr) :
|
|
+ pud_alloc(mm, pgd, addr);
|
|
if (!pud)
|
|
return -ENOMEM;
|
|
do {
|
|
@@ -2553,6 +2570,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
|
|
copy_user_highpage(dst, src, va, vma);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
|
|
+{
|
|
+ struct mm_struct *mm = vma->vm_mm;
|
|
+ spinlock_t *ptl;
|
|
+ pte_t *pte, entry;
|
|
+
|
|
+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
|
|
+ entry = *pte;
|
|
+ if (!pte_present(entry)) {
|
|
+ if (!pte_none(entry)) {
|
|
+ BUG_ON(pte_file(entry));
|
|
+ free_swap_and_cache(pte_to_swp_entry(entry));
|
|
+ pte_clear_not_present_full(mm, address, pte, 0);
|
|
+ }
|
|
+ } else {
|
|
+ struct page *page;
|
|
+
|
|
+ flush_cache_page(vma, address, pte_pfn(entry));
|
|
+ entry = ptep_clear_flush(vma, address, pte);
|
|
+ BUG_ON(pte_dirty(entry));
|
|
+ page = vm_normal_page(vma, address, entry);
|
|
+ if (page) {
|
|
+ update_hiwater_rss(mm);
|
|
+ if (PageAnon(page))
|
|
+ dec_mm_counter_fast(mm, MM_ANONPAGES);
|
|
+ else
|
|
+ dec_mm_counter_fast(mm, MM_FILEPAGES);
|
|
+ page_remove_rmap(page);
|
|
+ page_cache_release(page);
|
|
+ }
|
|
+ }
|
|
+ pte_unmap_unlock(pte, ptl);
|
|
+}
|
|
+
|
|
+/* PaX: if vma is mirrored, synchronize the mirror's PTE
|
|
+ *
|
|
+ * the ptl of the lower mapped page is held on entry and is not released on exit
|
|
+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
|
|
+ */
|
|
+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
|
|
+{
|
|
+ struct mm_struct *mm = vma->vm_mm;
|
|
+ unsigned long address_m;
|
|
+ spinlock_t *ptl_m;
|
|
+ struct vm_area_struct *vma_m;
|
|
+ pmd_t *pmd_m;
|
|
+ pte_t *pte_m, entry_m;
|
|
+
|
|
+ BUG_ON(!page_m || !PageAnon(page_m));
|
|
+
|
|
+ vma_m = pax_find_mirror_vma(vma);
|
|
+ if (!vma_m)
|
|
+ return;
|
|
+
|
|
+ BUG_ON(!PageLocked(page_m));
|
|
+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
|
|
+ address_m = address + SEGMEXEC_TASK_SIZE;
|
|
+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
|
|
+ pte_m = pte_offset_map(pmd_m, address_m);
|
|
+ ptl_m = pte_lockptr(mm, pmd_m);
|
|
+ if (ptl != ptl_m) {
|
|
+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
|
|
+ if (!pte_none(*pte_m))
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
|
|
+ page_cache_get(page_m);
|
|
+ page_add_anon_rmap(page_m, vma_m, address_m);
|
|
+ inc_mm_counter_fast(mm, MM_ANONPAGES);
|
|
+ set_pte_at(mm, address_m, pte_m, entry_m);
|
|
+ update_mmu_cache(vma_m, address_m, entry_m);
|
|
+out:
|
|
+ if (ptl != ptl_m)
|
|
+ spin_unlock(ptl_m);
|
|
+ pte_unmap(pte_m);
|
|
+ unlock_page(page_m);
|
|
+}
|
|
+
|
|
+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
|
|
+{
|
|
+ struct mm_struct *mm = vma->vm_mm;
|
|
+ unsigned long address_m;
|
|
+ spinlock_t *ptl_m;
|
|
+ struct vm_area_struct *vma_m;
|
|
+ pmd_t *pmd_m;
|
|
+ pte_t *pte_m, entry_m;
|
|
+
|
|
+ BUG_ON(!page_m || PageAnon(page_m));
|
|
+
|
|
+ vma_m = pax_find_mirror_vma(vma);
|
|
+ if (!vma_m)
|
|
+ return;
|
|
+
|
|
+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
|
|
+ address_m = address + SEGMEXEC_TASK_SIZE;
|
|
+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
|
|
+ pte_m = pte_offset_map(pmd_m, address_m);
|
|
+ ptl_m = pte_lockptr(mm, pmd_m);
|
|
+ if (ptl != ptl_m) {
|
|
+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
|
|
+ if (!pte_none(*pte_m))
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
|
|
+ page_cache_get(page_m);
|
|
+ page_add_file_rmap(page_m);
|
|
+ inc_mm_counter_fast(mm, MM_FILEPAGES);
|
|
+ set_pte_at(mm, address_m, pte_m, entry_m);
|
|
+ update_mmu_cache(vma_m, address_m, entry_m);
|
|
+out:
|
|
+ if (ptl != ptl_m)
|
|
+ spin_unlock(ptl_m);
|
|
+ pte_unmap(pte_m);
|
|
+}
|
|
+
|
|
+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
|
|
+{
|
|
+ struct mm_struct *mm = vma->vm_mm;
|
|
+ unsigned long address_m;
|
|
+ spinlock_t *ptl_m;
|
|
+ struct vm_area_struct *vma_m;
|
|
+ pmd_t *pmd_m;
|
|
+ pte_t *pte_m, entry_m;
|
|
+
|
|
+ vma_m = pax_find_mirror_vma(vma);
|
|
+ if (!vma_m)
|
|
+ return;
|
|
+
|
|
+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
|
|
+ address_m = address + SEGMEXEC_TASK_SIZE;
|
|
+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
|
|
+ pte_m = pte_offset_map(pmd_m, address_m);
|
|
+ ptl_m = pte_lockptr(mm, pmd_m);
|
|
+ if (ptl != ptl_m) {
|
|
+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
|
|
+ if (!pte_none(*pte_m))
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
|
|
+ set_pte_at(mm, address_m, pte_m, entry_m);
|
|
+out:
|
|
+ if (ptl != ptl_m)
|
|
+ spin_unlock(ptl_m);
|
|
+ pte_unmap(pte_m);
|
|
+}
|
|
+
|
|
+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
|
|
+{
|
|
+ struct page *page_m;
|
|
+ pte_t entry;
|
|
+
|
|
+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
|
|
+ goto out;
|
|
+
|
|
+ entry = *pte;
|
|
+ page_m = vm_normal_page(vma, address, entry);
|
|
+ if (!page_m)
|
|
+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
|
|
+ else if (PageAnon(page_m)) {
|
|
+ if (pax_find_mirror_vma(vma)) {
|
|
+ pte_unmap_unlock(pte, ptl);
|
|
+ lock_page(page_m);
|
|
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
|
|
+ if (pte_same(entry, *pte))
|
|
+ pax_mirror_anon_pte(vma, address, page_m, ptl);
|
|
+ else
|
|
+ unlock_page(page_m);
|
|
+ }
|
|
+ } else
|
|
+ pax_mirror_file_pte(vma, address, page_m, ptl);
|
|
+
|
|
+out:
|
|
+ pte_unmap_unlock(pte, ptl);
|
|
+}
|
|
+#endif
|
|
+
|
|
/*
|
|
* This routine handles present pages, when users try to write
|
|
* to a shared page. It is done by copying the page to a new address
|
|
@@ -2771,6 +2968,12 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
*/
|
|
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
|
|
if (likely(pte_same(*page_table, orig_pte))) {
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (pax_find_mirror_vma(vma))
|
|
+ BUG_ON(!trylock_page(new_page));
|
|
+#endif
|
|
+
|
|
if (old_page) {
|
|
if (!PageAnon(old_page)) {
|
|
dec_mm_counter_fast(mm, MM_FILEPAGES);
|
|
@@ -2822,6 +3025,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
page_remove_rmap(old_page);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ pax_mirror_anon_pte(vma, address, new_page, ptl);
|
|
+#endif
|
|
+
|
|
/* Free the old page.. */
|
|
new_page = old_page;
|
|
ret |= VM_FAULT_WRITE;
|
|
@@ -3111,6 +3318,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
swap_free(entry);
|
|
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
|
|
try_to_free_swap(page);
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
|
|
+#endif
|
|
+
|
|
unlock_page(page);
|
|
if (swapcache) {
|
|
/*
|
|
@@ -3134,6 +3346,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
|
|
/* No need to invalidate - it was non-present before */
|
|
update_mmu_cache(vma, address, page_table);
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ pax_mirror_anon_pte(vma, address, page, ptl);
|
|
+#endif
|
|
+
|
|
unlock:
|
|
pte_unmap_unlock(page_table, ptl);
|
|
out:
|
|
@@ -3234,6 +3451,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
if (!pte_none(*page_table))
|
|
goto release;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (pax_find_mirror_vma(vma))
|
|
+ BUG_ON(!trylock_page(page));
|
|
+#endif
|
|
+
|
|
inc_mm_counter_fast(mm, MM_ANONPAGES);
|
|
page_add_new_anon_rmap(page, vma, address);
|
|
setpte:
|
|
@@ -3241,6 +3463,12 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
|
|
/* No need to invalidate - it was non-present before */
|
|
update_mmu_cache(vma, address, page_table);
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (page)
|
|
+ pax_mirror_anon_pte(vma, address, page, ptl);
|
|
+#endif
|
|
+
|
|
unlock:
|
|
pte_unmap_unlock(page_table, ptl);
|
|
return 0;
|
|
@@ -3384,6 +3612,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
*/
|
|
/* Only go through if we didn't race with anybody else... */
|
|
if (likely(pte_same(*page_table, orig_pte))) {
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (anon && pax_find_mirror_vma(vma))
|
|
+ BUG_ON(!trylock_page(page));
|
|
+#endif
|
|
+
|
|
flush_icache_page(vma, page);
|
|
entry = mk_pte(page, vma->vm_page_prot);
|
|
if (flags & FAULT_FLAG_WRITE)
|
|
@@ -3403,6 +3637,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
|
|
/* no need to invalidate: a not-present page won't be cached */
|
|
update_mmu_cache(vma, address, page_table);
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (anon)
|
|
+ pax_mirror_anon_pte(vma, address, page, ptl);
|
|
+ else
|
|
+ pax_mirror_file_pte(vma, address, page, ptl);
|
|
+#endif
|
|
+
|
|
} else {
|
|
if (cow_page)
|
|
mem_cgroup_uncharge_page(cow_page);
|
|
@@ -3556,6 +3798,12 @@ int handle_pte_fault(struct mm_struct *mm,
|
|
if (flags & FAULT_FLAG_WRITE)
|
|
flush_tlb_fix_spurious_fault(vma, address);
|
|
}
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ pax_mirror_pte(vma, address, pte, pmd, ptl);
|
|
+ return 0;
|
|
+#endif
|
|
+
|
|
unlock:
|
|
pte_unmap_unlock(pte, ptl);
|
|
return 0;
|
|
@@ -3572,6 +3820,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
pmd_t *pmd;
|
|
pte_t *pte;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ struct vm_area_struct *vma_m;
|
|
+#endif
|
|
+
|
|
__set_current_state(TASK_RUNNING);
|
|
|
|
count_vm_event(PGFAULT);
|
|
@@ -3763,6 +4015,34 @@ static int __follow_pte(struct mm_struct *mm, unsigned long address,
|
|
pmd_t *pmd;
|
|
pte_t *ptep;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ vma_m = pax_find_mirror_vma(vma);
|
|
+ if (vma_m) {
|
|
+ unsigned long address_m;
|
|
+ pgd_t *pgd_m;
|
|
+ pud_t *pud_m;
|
|
+ pmd_t *pmd_m;
|
|
+
|
|
+ if (vma->vm_start > vma_m->vm_start) {
|
|
+ address_m = address;
|
|
+ address -= SEGMEXEC_TASK_SIZE;
|
|
+ vma = vma_m;
|
|
+ } else
|
|
+ address_m = address + SEGMEXEC_TASK_SIZE;
|
|
+
|
|
+ pgd_m = pgd_offset(mm, address_m);
|
|
+ pud_m = pud_alloc(mm, pgd_m, address_m);
|
|
+ if (!pud_m)
|
|
+ return VM_FAULT_OOM;
|
|
+ pmd_m = pmd_alloc(mm, pud_m, address_m);
|
|
+ if (!pmd_m)
|
|
+ return VM_FAULT_OOM;
|
|
+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
|
|
+ return VM_FAULT_OOM;
|
|
+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
|
|
+ }
|
|
+#endif
|
|
+
|
|
pgd = pgd_offset(mm, address);
|
|
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
|
|
goto out;
|
|
diff --git a/mm/memory.c.rej b/mm/memory.c.rej
|
|
new file mode 100644
|
|
index 0000000..b18c29f
|
|
--- /dev/null
|
|
+++ b/mm/memory.c.rej
|
|
@@ -0,0 +1,149 @@
|
|
+--- mm/memory.c 2012-05-21 11:33:40.095929975 +0200
|
|
++++ mm/memory.c 2012-05-21 12:10:11.964049021 +0200
|
|
+@@ -3276,40 +3493,6 @@ out_release:
|
|
+ }
|
|
+
|
|
+ /*
|
|
+- * This is like a special single-page "expand_{down|up}wards()",
|
|
+- * except we must first make sure that 'address{-|+}PAGE_SIZE'
|
|
+- * doesn't hit another vma.
|
|
+- */
|
|
+-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
|
|
+-{
|
|
+- address &= PAGE_MASK;
|
|
+- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
|
|
+- struct vm_area_struct *prev = vma->vm_prev;
|
|
+-
|
|
+- /*
|
|
+- * Is there a mapping abutting this one below?
|
|
+- *
|
|
+- * That's only ok if it's the same stack mapping
|
|
+- * that has gotten split..
|
|
+- */
|
|
+- if (prev && prev->vm_end == address)
|
|
+- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
|
|
+-
|
|
+- expand_downwards(vma, address - PAGE_SIZE);
|
|
+- }
|
|
+- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
|
|
+- struct vm_area_struct *next = vma->vm_next;
|
|
+-
|
|
+- /* As VM_GROWSDOWN but s/below/above/ */
|
|
+- if (next && next->vm_start == address + PAGE_SIZE)
|
|
+- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
|
|
+-
|
|
+- expand_upwards(vma, address + PAGE_SIZE);
|
|
+- }
|
|
+- return 0;
|
|
+-}
|
|
+-
|
|
+-/*
|
|
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
|
|
+ * but allow concurrent faults), and pte mapped but not yet locked.
|
|
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
|
|
+@@ -3318,27 +3501,23 @@ static int do_anonymous_page(struct mm_s
|
|
+ unsigned long address, pte_t *page_table, pmd_t *pmd,
|
|
+ unsigned int flags)
|
|
+ {
|
|
+- struct page *page;
|
|
++ struct page *page = NULL;
|
|
+ spinlock_t *ptl;
|
|
+ pte_t entry;
|
|
+
|
|
+- pte_unmap(page_table);
|
|
+-
|
|
+- /* Check if we need to add a guard page to the stack */
|
|
+- if (check_stack_guard_page(vma, address) < 0)
|
|
+- return VM_FAULT_SIGBUS;
|
|
+-
|
|
+- /* Use the zero-page for reads */
|
|
+ if (!(flags & FAULT_FLAG_WRITE)) {
|
|
+ entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
|
|
+ vma->vm_page_prot));
|
|
+- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
|
|
++ ptl = pte_lockptr(mm, pmd);
|
|
++ spin_lock(ptl);
|
|
+ if (!pte_none(*page_table))
|
|
+ goto unlock;
|
|
+ goto setpte;
|
|
+ }
|
|
+
|
|
+ /* Allocate our own private page. */
|
|
++ pte_unmap(page_table);
|
|
++
|
|
+ if (unlikely(anon_vma_prepare(vma)))
|
|
+ goto oom;
|
|
+ page = alloc_zeroed_user_highpage_movable(vma, address);
|
|
+@@ -3798,7 +4040,7 @@ int handle_mm_fault(struct mm_struct *mm
|
|
+ * run pte_offset_map on the pmd, if an huge pmd could
|
|
+ * materialize from under us from a different thread.
|
|
+ */
|
|
+- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
|
|
++ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
|
|
+ return VM_FAULT_OOM;
|
|
+ /* if an huge pmd materialized from under us just retry later */
|
|
+ if (unlikely(pmd_trans_huge(*pmd)))
|
|
+@@ -3835,6 +4077,23 @@ int __pud_alloc(struct mm_struct *mm, pg
|
|
+ spin_unlock(&mm->page_table_lock);
|
|
+ return 0;
|
|
+ }
|
|
++
|
|
++int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
|
|
++{
|
|
++ pud_t *new = pud_alloc_one(mm, address);
|
|
++ if (!new)
|
|
++ return -ENOMEM;
|
|
++
|
|
++ smp_wmb(); /* See comment in __pte_alloc */
|
|
++
|
|
++ spin_lock(&mm->page_table_lock);
|
|
++ if (pgd_present(*pgd)) /* Another has populated it */
|
|
++ pud_free(mm, new);
|
|
++ else
|
|
++ pgd_populate_kernel(mm, pgd, new);
|
|
++ spin_unlock(&mm->page_table_lock);
|
|
++ return 0;
|
|
++}
|
|
+ #endif /* __PAGETABLE_PUD_FOLDED */
|
|
+
|
|
+ #ifndef __PAGETABLE_PMD_FOLDED
|
|
+@@ -3865,6 +4124,30 @@ int __pmd_alloc(struct mm_struct *mm, pu
|
|
+ spin_unlock(&mm->page_table_lock);
|
|
+ return 0;
|
|
+ }
|
|
++
|
|
++int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
|
|
++{
|
|
++ pmd_t *new = pmd_alloc_one(mm, address);
|
|
++ if (!new)
|
|
++ return -ENOMEM;
|
|
++
|
|
++ smp_wmb(); /* See comment in __pte_alloc */
|
|
++
|
|
++ spin_lock(&mm->page_table_lock);
|
|
++#ifndef __ARCH_HAS_4LEVEL_HACK
|
|
++ if (pud_present(*pud)) /* Another has populated it */
|
|
++ pmd_free(mm, new);
|
|
++ else
|
|
++ pud_populate_kernel(mm, pud, new);
|
|
++#else
|
|
++ if (pgd_present(*pud)) /* Another has populated it */
|
|
++ pmd_free(mm, new);
|
|
++ else
|
|
++ pgd_populate_kernel(mm, pud, new);
|
|
++#endif /* __ARCH_HAS_4LEVEL_HACK */
|
|
++ spin_unlock(&mm->page_table_lock);
|
|
++ return 0;
|
|
++}
|
|
+ #endif /* __PAGETABLE_PMD_FOLDED */
|
|
+
|
|
+ int make_pages_present(unsigned long addr, unsigned long end)
|
|
+@@ -3902,7 +4185,7 @@ static int __init gate_vma_init(void)
|
|
+ gate_vma.vm_start = FIXADDR_USER_START;
|
|
+ gate_vma.vm_end = FIXADDR_USER_END;
|
|
+ gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
|
|
+- gate_vma.vm_page_prot = __P101;
|
|
++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
|
|
+
|
|
+ return 0;
|
|
+ }
|
|
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
|
|
index 82c6242..285f7cf 100644
|
|
--- a/mm/mempolicy.c
|
|
+++ b/mm/mempolicy.c
|
|
@@ -653,6 +653,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
|
|
unsigned long vmstart;
|
|
unsigned long vmend;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ struct vm_area_struct *vma_m;
|
|
+#endif
|
|
+
|
|
vma = find_vma(mm, start);
|
|
if (!vma || vma->vm_start > start)
|
|
return -EFAULT;
|
|
@@ -901,6 +905,16 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
|
|
get_policy_nodemask(pol, nmask);
|
|
task_unlock(current);
|
|
}
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ vma_m = pax_find_mirror_vma(vma);
|
|
+ if (vma_m && vma_m->vm_ops && vma_m->vm_ops->set_policy) {
|
|
+ err = vma_m->vm_ops->set_policy(vma_m, new_pol);
|
|
+ if (err)
|
|
+ goto out;
|
|
+ }
|
|
+#endif
|
|
+
|
|
}
|
|
|
|
out:
|
|
@@ -1124,6 +1138,17 @@ static long do_mbind(unsigned long start, unsigned long len,
|
|
|
|
if (end < start)
|
|
return -EINVAL;
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
|
|
+ if (end > SEGMEXEC_TASK_SIZE)
|
|
+ return -EINVAL;
|
|
+ } else
|
|
+#endif
|
|
+
|
|
+ if (end > TASK_SIZE)
|
|
+ return -EINVAL;
|
|
+
|
|
if (end == start)
|
|
return 0;
|
|
|
|
diff --git a/mm/mlock.c b/mm/mlock.c
|
|
index 39a36db..66283f0 100644
|
|
--- a/mm/mlock.c
|
|
+++ b/mm/mlock.c
|
|
@@ -383,7 +383,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
|
|
{
|
|
unsigned long nstart, end, tmp;
|
|
struct vm_area_struct * vma, * prev;
|
|
- int error;
|
|
+ int error = 0;
|
|
|
|
VM_BUG_ON(start & ~PAGE_MASK);
|
|
VM_BUG_ON(len != PAGE_ALIGN(len));
|
|
@@ -392,6 +392,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
|
|
return -EINVAL;
|
|
if (end == start)
|
|
return 0;
|
|
+ if (end > TASK_SIZE)
|
|
+ return -EINVAL;
|
|
+
|
|
vma = find_vma(current->mm, start);
|
|
if (!vma || vma->vm_start > start)
|
|
return -ENOMEM;
|
|
@@ -403,6 +406,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
|
|
for (nstart = start ; ; ) {
|
|
vm_flags_t newflags;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
|
|
+ break;
|
|
+#endif
|
|
+
|
|
/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
|
|
|
|
newflags = vma->vm_flags | VM_LOCKED;
|
|
@@ -531,17 +539,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
|
|
static int do_mlockall(int flags)
|
|
{
|
|
struct vm_area_struct * vma, * prev = NULL;
|
|
- unsigned int def_flags = 0;
|
|
|
|
if (flags & MCL_FUTURE)
|
|
- def_flags = VM_LOCKED;
|
|
- current->mm->def_flags = def_flags;
|
|
+ current->mm->def_flags |= VM_LOCKED;
|
|
+ else
|
|
+ current->mm->def_flags &= ~VM_LOCKED;
|
|
if (flags == MCL_FUTURE)
|
|
goto out;
|
|
|
|
for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
|
|
vm_flags_t newflags;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
|
|
+ break;
|
|
+#endif
|
|
+
|
|
+ BUG_ON(vma->vm_end > TASK_SIZE);
|
|
newflags = vma->vm_flags | VM_LOCKED;
|
|
if (!(flags & MCL_CURRENT))
|
|
newflags &= ~VM_LOCKED;
|
|
diff --git a/mm/mmap.c b/mm/mmap.c
|
|
index 126dfe5..c2eb66c 100644
|
|
--- a/mm/mmap.c
|
|
+++ b/mm/mmap.c
|
|
@@ -47,6 +47,16 @@
|
|
#define arch_rebalance_pgtables(addr, len) (addr)
|
|
#endif
|
|
|
|
+static inline void verify_mm_writelocked(struct mm_struct *mm)
|
|
+{
|
|
+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
|
|
+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
|
|
+ up_read(&mm->mmap_sem);
|
|
+ BUG();
|
|
+ }
|
|
+#endif
|
|
+}
|
|
+
|
|
static void unmap_region(struct mm_struct *mm,
|
|
struct vm_area_struct *vma, struct vm_area_struct *prev,
|
|
unsigned long start, unsigned long end);
|
|
@@ -72,22 +82,32 @@ static void unmap_region(struct mm_struct *mm,
|
|
* x: (no) no x: (no) yes x: (no) yes x: (yes) yes
|
|
*
|
|
*/
|
|
-pgprot_t protection_map[16] = {
|
|
+pgprot_t protection_map[16] __read_only = {
|
|
__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
|
|
__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
|
|
};
|
|
|
|
-pgprot_t vm_get_page_prot(unsigned long vm_flags)
|
|
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
|
|
{
|
|
- return __pgprot(pgprot_val(protection_map[vm_flags &
|
|
+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
|
|
(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
|
|
pgprot_val(arch_vm_get_page_prot(vm_flags)));
|
|
+
|
|
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
|
|
+ if (!(__supported_pte_mask & _PAGE_NX) &&
|
|
+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
|
|
+ (vm_flags & (VM_READ | VM_WRITE)))
|
|
+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
|
|
+#endif
|
|
+
|
|
+ return prot;
|
|
}
|
|
EXPORT_SYMBOL(vm_get_page_prot);
|
|
|
|
int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
|
|
int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
|
|
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
|
|
+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
|
|
/*
|
|
* Make sure vm_committed_as in one cacheline and not cacheline shared with
|
|
* other variables. It can be updated by several CPUs frequently.
|
|
@@ -229,6 +249,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
|
|
struct vm_area_struct *next = vma->vm_next;
|
|
|
|
might_sleep();
|
|
+ BUG_ON(vma->vm_mirror);
|
|
if (vma->vm_ops && vma->vm_ops->close)
|
|
vma->vm_ops->close(vma);
|
|
if (vma->vm_file) {
|
|
@@ -811,6 +832,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
|
|
if (next && next->vm_end == end) /* cases 6, 7, 8 */
|
|
next = next->vm_next;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (prev)
|
|
+ prev_m = pax_find_mirror_vma(prev);
|
|
+ if (area)
|
|
+ area_m = pax_find_mirror_vma(area);
|
|
+ if (next)
|
|
+ next_m = pax_find_mirror_vma(next);
|
|
+#endif
|
|
+
|
|
/*
|
|
* Can it merge with the predecessor?
|
|
*/
|
|
@@ -830,9 +860,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
|
|
/* cases 1, 6 */
|
|
err = vma_adjust(prev, prev->vm_start,
|
|
next->vm_end, prev->vm_pgoff, NULL);
|
|
- } else /* cases 2, 5, 7 */
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (!err && prev_m)
|
|
+ err = vma_adjust(prev_m, prev_m->vm_start,
|
|
+ next_m->vm_end, prev_m->vm_pgoff, NULL);
|
|
+#endif
|
|
+
|
|
+ } else { /* cases 2, 5, 7 */
|
|
err = vma_adjust(prev, prev->vm_start,
|
|
end, prev->vm_pgoff, NULL);
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (!err && prev_m)
|
|
+ err = vma_adjust(prev_m, prev_m->vm_start,
|
|
+ end_m, prev_m->vm_pgoff, NULL);
|
|
+#endif
|
|
+
|
|
+ }
|
|
if (err)
|
|
return NULL;
|
|
khugepaged_enter_vma_merge(prev, vm_flags);
|
|
@@ -960,14 +1005,11 @@ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
|
|
void vm_stat_account(struct mm_struct *mm, unsigned long flags,
|
|
struct file *file, long pages)
|
|
{
|
|
- const unsigned long stack_flags
|
|
- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
|
|
-
|
|
if (file) {
|
|
mm->shared_vm += pages;
|
|
if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
|
|
mm->exec_vm += pages;
|
|
- } else if (flags & stack_flags)
|
|
+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
|
|
mm->stack_vm += pages;
|
|
if (flags & (VM_RESERVED|VM_IO))
|
|
mm->reserved_vm += pages;
|
|
@@ -1007,7 +1049,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
|
|
* (the exception is when the underlying filesystem is noexec
|
|
* mounted, in which case we dont add PROT_EXEC.)
|
|
*/
|
|
- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
|
|
+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
|
|
if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
|
|
prot |= PROT_EXEC;
|
|
|
|
@@ -1033,7 +1075,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
|
|
/* Obtain the address to map to. we verify (or select) it and ensure
|
|
* that it represents a valid section of the address space.
|
|
*/
|
|
- addr = get_unmapped_area(file, addr, len, pgoff, flags);
|
|
+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
|
|
if (addr & ~PAGE_MASK)
|
|
return addr;
|
|
|
|
@@ -1044,6 +1086,28 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
|
|
vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
|
|
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
|
|
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+ if (mm->pax_flags & MF_PAX_MPROTECT) {
|
|
+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
|
|
+
|
|
+#ifdef CONFIG_PAX_EMUPLT
|
|
+ vm_flags &= ~VM_EXEC;
|
|
+#else
|
|
+ return -EPERM;
|
|
+#endif
|
|
+
|
|
+ if (!(vm_flags & VM_EXEC))
|
|
+ vm_flags &= ~VM_MAYEXEC;
|
|
+ else
|
|
+ vm_flags &= ~VM_MAYWRITE;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
|
|
+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
|
|
+ vm_flags &= ~VM_PAGEEXEC;
|
|
+#endif
|
|
+
|
|
if (flags & MAP_LOCKED)
|
|
if (!can_do_mlock())
|
|
return -EPERM;
|
|
@@ -1234,7 +1298,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
|
|
vm_flags_t vm_flags = vma->vm_flags;
|
|
|
|
/* If it was private or non-writable, the write bit is already clear */
|
|
- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
|
|
+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
|
|
return 0;
|
|
|
|
/* The backer wishes to know when pages are first written to? */
|
|
@@ -1300,14 +1364,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
|
|
return -ENOMEM;
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ struct vm_area_struct *vma_m = NULL;
|
|
+#endif
|
|
+
|
|
+ /*
|
|
+ * mm->mmap_sem is required to protect against another thread
|
|
+ * changing the mappings in case we sleep.
|
|
+ */
|
|
+ verify_mm_writelocked(mm);
|
|
+
|
|
/* Clear old maps */
|
|
error = -ENOMEM;
|
|
-munmap_back:
|
|
vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
|
|
if (vma && vma->vm_start < addr + len) {
|
|
if (do_munmap(mm, addr, len))
|
|
return -ENOMEM;
|
|
- goto munmap_back;
|
|
+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
|
|
+ BUG_ON(vma && vma->vm_start < addr + len);
|
|
}
|
|
|
|
/*
|
|
@@ -1353,6 +1427,16 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
|
|
goto unacct_error;
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
|
|
+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
|
|
+ if (!vma_m) {
|
|
+ error = -ENOMEM;
|
|
+ goto free_vma;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
vma->vm_mm = mm;
|
|
vma->vm_start = addr;
|
|
vma->vm_end = addr + len;
|
|
@@ -1377,6 +1461,19 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
|
|
error = file->f_op->mmap(file, vma);
|
|
if (error)
|
|
goto unmap_and_free_vma;
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (vma_m && (vm_flags & VM_EXECUTABLE))
|
|
+ added_exe_file_vma(mm);
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
|
|
+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
|
|
+ vma->vm_flags |= VM_PAGEEXEC;
|
|
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
|
|
+ }
|
|
+#endif
|
|
+
|
|
if (vm_flags & VM_EXECUTABLE)
|
|
added_exe_file_vma(mm);
|
|
|
|
@@ -1414,6 +1511,11 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
|
|
vma_link(mm, vma, prev, rb_link, rb_parent);
|
|
file = vma->vm_file;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (vma_m)
|
|
+ BUG_ON(pax_mirror_vma(vma_m, vma));
|
|
+#endif
|
|
+
|
|
/* Once vma denies write, undo our temporary denial count */
|
|
if (correct_wcount)
|
|
atomic_inc(&inode->i_writecount);
|
|
@@ -1422,6 +1524,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
|
|
|
|
mm->total_vm += len >> PAGE_SHIFT;
|
|
vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
|
|
+ track_exec_limit(mm, addr, addr + len, vm_flags);
|
|
if (vm_flags & VM_LOCKED) {
|
|
if (!mlock_vma_pages_range(vma, addr, addr + len))
|
|
mm->locked_vm += (len >> PAGE_SHIFT);
|
|
@@ -1439,6 +1542,12 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
|
|
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
|
|
charged = 0;
|
|
free_vma:
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (vma_m)
|
|
+ kmem_cache_free(vm_area_cachep, vma_m);
|
|
+#endif
|
|
+
|
|
kmem_cache_free(vm_area_cachep, vma);
|
|
unacct_error:
|
|
if (charged)
|
|
@@ -1446,6 +1555,44 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
|
|
return error;
|
|
}
|
|
|
|
+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
|
|
+{
|
|
+ if (!vma) {
|
|
+#ifdef CONFIG_STACK_GROWSUP
|
|
+ if (addr > sysctl_heap_stack_gap)
|
|
+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
|
|
+ else
|
|
+ vma = find_vma(current->mm, 0);
|
|
+ if (vma && (vma->vm_flags & VM_GROWSUP))
|
|
+ return false;
|
|
+#endif
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ if (addr + len > vma->vm_start)
|
|
+ return false;
|
|
+
|
|
+ if (vma->vm_flags & VM_GROWSDOWN)
|
|
+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
|
|
+#ifdef CONFIG_STACK_GROWSUP
|
|
+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
|
|
+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
|
|
+#endif
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
|
|
+{
|
|
+ if (vma->vm_start < len)
|
|
+ return -ENOMEM;
|
|
+ if (!(vma->vm_flags & VM_GROWSDOWN))
|
|
+ return vma->vm_start - len;
|
|
+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
|
|
+ return vma->vm_start - len - sysctl_heap_stack_gap;
|
|
+ return -ENOMEM;
|
|
+}
|
|
+
|
|
/* Get an address range which is currently unmapped.
|
|
* For shmat() with addr=0.
|
|
*
|
|
@@ -1472,18 +1619,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|
if (flags & MAP_FIXED)
|
|
return addr;
|
|
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
|
|
+#endif
|
|
+
|
|
if (addr) {
|
|
addr = PAGE_ALIGN(addr);
|
|
- vma = find_vma(mm, addr);
|
|
- if (TASK_SIZE - len >= addr &&
|
|
- (!vma || addr + len <= vma->vm_start))
|
|
- return addr;
|
|
+ if (TASK_SIZE - len >= addr) {
|
|
+ vma = find_vma(mm, addr);
|
|
+ if (check_heap_stack_gap(vma, addr, len))
|
|
+ return addr;
|
|
+ }
|
|
}
|
|
if (len > mm->cached_hole_size) {
|
|
- start_addr = addr = mm->free_area_cache;
|
|
+ start_addr = addr = mm->free_area_cache;
|
|
} else {
|
|
- start_addr = addr = TASK_UNMAPPED_BASE;
|
|
- mm->cached_hole_size = 0;
|
|
+ start_addr = addr = mm->mmap_base;
|
|
+ mm->cached_hole_size = 0;
|
|
}
|
|
|
|
full_search:
|
|
@@ -1494,34 +1646,40 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
|
|
* Start a new search - just in case we missed
|
|
* some holes.
|
|
*/
|
|
- if (start_addr != TASK_UNMAPPED_BASE) {
|
|
- addr = TASK_UNMAPPED_BASE;
|
|
- start_addr = addr;
|
|
+ if (start_addr != mm->mmap_base) {
|
|
+ start_addr = addr = mm->mmap_base;
|
|
mm->cached_hole_size = 0;
|
|
goto full_search;
|
|
}
|
|
return -ENOMEM;
|
|
}
|
|
- if (!vma || addr + len <= vma->vm_start) {
|
|
- /*
|
|
- * Remember the place where we stopped the search:
|
|
- */
|
|
- mm->free_area_cache = addr + len;
|
|
- return addr;
|
|
- }
|
|
+ if (check_heap_stack_gap(vma, addr, len))
|
|
+ break;
|
|
if (addr + mm->cached_hole_size < vma->vm_start)
|
|
mm->cached_hole_size = vma->vm_start - addr;
|
|
addr = vma->vm_end;
|
|
}
|
|
+
|
|
+ /*
|
|
+ * Remember the place where we stopped the search:
|
|
+ */
|
|
+ mm->free_area_cache = addr + len;
|
|
+ return addr;
|
|
}
|
|
#endif
|
|
|
|
void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
|
|
{
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
|
|
+ return;
|
|
+#endif
|
|
+
|
|
/*
|
|
* Is this a new hole at the lowest possible address?
|
|
*/
|
|
- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
|
|
+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
|
|
mm->free_area_cache = addr;
|
|
}
|
|
|
|
@@ -1537,7 +1695,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
{
|
|
struct vm_area_struct *vma;
|
|
struct mm_struct *mm = current->mm;
|
|
- unsigned long addr = addr0, start_addr;
|
|
+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
|
|
|
|
/* requested length too big for entire address space */
|
|
if (len > TASK_SIZE)
|
|
@@ -1546,13 +1704,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
if (flags & MAP_FIXED)
|
|
return addr;
|
|
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
|
|
+#endif
|
|
+
|
|
/* requesting a specific address */
|
|
if (addr) {
|
|
addr = PAGE_ALIGN(addr);
|
|
- vma = find_vma(mm, addr);
|
|
- if (TASK_SIZE - len >= addr &&
|
|
- (!vma || addr + len <= vma->vm_start))
|
|
- return addr;
|
|
+ if (TASK_SIZE - len >= addr) {
|
|
+ vma = find_vma(mm, addr);
|
|
+ if (check_heap_stack_gap(vma, addr, len))
|
|
+ return addr;
|
|
+ }
|
|
}
|
|
|
|
/* check if free_area_cache is useful for us */
|
|
@@ -1576,7 +1739,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
* return with success:
|
|
*/
|
|
vma = find_vma(mm, addr);
|
|
- if (!vma || addr+len <= vma->vm_start)
|
|
+ if (check_heap_stack_gap(vma, addr, len))
|
|
/* remember the address as a hint for next time */
|
|
return (mm->free_area_cache = addr);
|
|
|
|
@@ -1585,8 +1748,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
mm->cached_hole_size = vma->vm_start - addr;
|
|
|
|
/* try just below the current vma->vm_start */
|
|
- addr = vma->vm_start-len;
|
|
- } while (len < vma->vm_start);
|
|
+ addr = skip_heap_stack_gap(vma, len);
|
|
+ } while (!IS_ERR_VALUE(addr));
|
|
|
|
fail:
|
|
/*
|
|
@@ -1609,13 +1772,21 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
* can happen with large stack limits and large mmap()
|
|
* allocations.
|
|
*/
|
|
+ mm->mmap_base = TASK_UNMAPPED_BASE;
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
+ mm->mmap_base += mm->delta_mmap;
|
|
+#endif
|
|
+
|
|
+ mm->free_area_cache = mm->mmap_base;
|
|
mm->cached_hole_size = ~0UL;
|
|
- mm->free_area_cache = TASK_UNMAPPED_BASE;
|
|
addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
|
|
/*
|
|
* Restore the topdown base:
|
|
*/
|
|
- mm->free_area_cache = mm->mmap_base;
|
|
+ mm->mmap_base = base;
|
|
+ mm->free_area_cache = base;
|
|
mm->cached_hole_size = ~0UL;
|
|
|
|
return addr;
|
|
@@ -1624,6 +1795,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
|
|
|
|
void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
|
|
{
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
|
|
+ return;
|
|
+#endif
|
|
+
|
|
/*
|
|
* Is this a new hole at the highest possible address?
|
|
*/
|
|
@@ -1631,8 +1808,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
|
|
mm->free_area_cache = addr;
|
|
|
|
/* dont allow allocations above current base */
|
|
- if (mm->free_area_cache > mm->mmap_base)
|
|
+ if (mm->free_area_cache > mm->mmap_base) {
|
|
mm->free_area_cache = mm->mmap_base;
|
|
+ mm->cached_hole_size = ~0UL;
|
|
+ }
|
|
}
|
|
|
|
unsigned long
|
|
@@ -1728,6 +1907,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
|
|
return vma;
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
|
|
+{
|
|
+ struct vm_area_struct *vma_m;
|
|
+
|
|
+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
|
|
+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
|
|
+ BUG_ON(vma->vm_mirror);
|
|
+ return NULL;
|
|
+ }
|
|
+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
|
|
+ vma_m = vma->vm_mirror;
|
|
+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
|
|
+ BUG_ON(vma->vm_file != vma_m->vm_file);
|
|
+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
|
|
+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
|
|
+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
|
|
+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
|
|
+ return vma_m;
|
|
+}
|
|
+#endif
|
|
+
|
|
/*
|
|
* Verify that the stack growth is acceptable and
|
|
* update accounting. This is shared with both the
|
|
@@ -1790,34 +1991,42 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
|
|
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
|
|
{
|
|
int error;
|
|
+ bool locknext;
|
|
|
|
if (!(vma->vm_flags & VM_GROWSUP))
|
|
return -EFAULT;
|
|
|
|
+ /* Also guard against wrapping around to address 0. */
|
|
+ if (address < PAGE_ALIGN(address+1))
|
|
+ address = PAGE_ALIGN(address+1);
|
|
+ else
|
|
+ return -ENOMEM;
|
|
+
|
|
/*
|
|
* We must make sure the anon_vma is allocated
|
|
* so that the anon_vma locking is not a noop.
|
|
*/
|
|
if (unlikely(anon_vma_prepare(vma)))
|
|
return -ENOMEM;
|
|
+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
|
|
+ if (locknext && anon_vma_prepare(vma->vm_next))
|
|
+ return -ENOMEM;
|
|
vma_lock_anon_vma(vma);
|
|
+ if (locknext)
|
|
+ vma_lock_anon_vma(vma->vm_next);
|
|
|
|
/*
|
|
* vma->vm_start/vm_end cannot change under us because the caller
|
|
* is required to hold the mmap_sem in read mode. We need the
|
|
- * anon_vma lock to serialize against concurrent expand_stacks.
|
|
- * Also guard against wrapping around to address 0.
|
|
+ * anon_vma locks to serialize against concurrent expand_stacks
|
|
+ * and expand_upwards.
|
|
*/
|
|
- if (address < PAGE_ALIGN(address+4))
|
|
- address = PAGE_ALIGN(address+4);
|
|
- else {
|
|
- vma_unlock_anon_vma(vma);
|
|
- return -ENOMEM;
|
|
- }
|
|
error = 0;
|
|
|
|
/* Somebody else might have raced and expanded it already */
|
|
- if (address > vma->vm_end) {
|
|
+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
|
|
+ error = -ENOMEM;
|
|
+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
|
|
unsigned long size, grow;
|
|
|
|
size = address - vma->vm_start;
|
|
@@ -1832,6 +2041,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
|
|
}
|
|
}
|
|
}
|
|
+ if (locknext)
|
|
+ vma_unlock_anon_vma(vma->vm_next);
|
|
vma_unlock_anon_vma(vma);
|
|
khugepaged_enter_vma_merge(vma, vma->vm_flags);
|
|
return error;
|
|
@@ -1845,6 +2056,8 @@ int expand_downwards(struct vm_area_struct *vma,
|
|
unsigned long address)
|
|
{
|
|
int error;
|
|
+ bool lockprev = false;
|
|
+ struct vm_area_struct *prev;
|
|
|
|
/*
|
|
* We must make sure the anon_vma is allocated
|
|
@@ -1858,6 +2071,15 @@ int expand_downwards(struct vm_area_struct *vma,
|
|
if (error)
|
|
return error;
|
|
|
|
+ prev = vma->vm_prev;
|
|
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
|
|
+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
|
|
+#endif
|
|
+ if (lockprev && anon_vma_prepare(prev))
|
|
+ return -ENOMEM;
|
|
+ if (lockprev)
|
|
+ vma_lock_anon_vma(prev);
|
|
+
|
|
vma_lock_anon_vma(vma);
|
|
|
|
/*
|
|
@@ -1867,9 +2089,17 @@ int expand_downwards(struct vm_area_struct *vma,
|
|
*/
|
|
|
|
/* Somebody else might have raced and expanded it already */
|
|
- if (address < vma->vm_start) {
|
|
+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
|
|
+ error = -ENOMEM;
|
|
+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
|
|
unsigned long size, grow;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ struct vm_area_struct *vma_m;
|
|
+
|
|
+ vma_m = pax_find_mirror_vma(vma);
|
|
+#endif
|
|
+
|
|
size = vma->vm_end - address;
|
|
grow = (vma->vm_start - address) >> PAGE_SHIFT;
|
|
|
|
@@ -1980,6 +2210,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
|
|
do {
|
|
long nrpages = vma_pages(vma);
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
|
|
+ vma = remove_vma(vma);
|
|
+ continue;
|
|
+ }
|
|
+#endif
|
|
+
|
|
mm->total_vm -= nrpages;
|
|
vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
|
|
vma = remove_vma(vma);
|
|
@@ -2025,6 +2262,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
|
|
vma->vm_prev = NULL;
|
|
do {
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (vma->vm_mirror) {
|
|
+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
|
|
+ vma->vm_mirror->vm_mirror = NULL;
|
|
+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
|
|
+ vma->vm_mirror = NULL;
|
|
+ }
|
|
+#endif
|
|
+
|
|
rb_erase(&vma->vm_rb, &mm->mm_rb);
|
|
mm->map_count--;
|
|
tail_vma = vma;
|
|
@@ -2053,14 +2300,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
|
|
struct vm_area_struct *new;
|
|
int err = -ENOMEM;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ struct vm_area_struct *vma_m, *new_m = NULL;
|
|
+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
|
|
+#endif
|
|
+
|
|
if (is_vm_hugetlb_page(vma) && (addr &
|
|
~(huge_page_mask(hstate_vma(vma)))))
|
|
return -EINVAL;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ vma_m = pax_find_mirror_vma(vma);
|
|
+#endif
|
|
+
|
|
new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
|
|
if (!new)
|
|
goto out_err;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (vma_m) {
|
|
+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
|
|
+ if (!new_m) {
|
|
+ kmem_cache_free(vm_area_cachep, new);
|
|
+ goto out_err;
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
/* most fields are the same, copy all, and then fixup */
|
|
*new = *vma;
|
|
|
|
@@ -2073,6 +2339,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
|
|
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (vma_m) {
|
|
+ *new_m = *vma_m;
|
|
+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
|
|
+ new_m->vm_mirror = new;
|
|
+ new->vm_mirror = new_m;
|
|
+
|
|
+ if (new_below)
|
|
+ new_m->vm_end = addr_m;
|
|
+ else {
|
|
+ new_m->vm_start = addr_m;
|
|
+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
pol = mpol_dup(vma_policy(vma));
|
|
if (IS_ERR(pol)) {
|
|
err = PTR_ERR(pol);
|
|
@@ -2098,6 +2380,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
|
|
else
|
|
err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (!err && vma_m) {
|
|
+ if (anon_vma_clone(new_m, vma_m))
|
|
+ goto out_free_mpol;
|
|
+
|
|
+ mpol_get(pol);
|
|
+ vma_set_policy(new_m, pol);
|
|
+
|
|
+ if (new_m->vm_file) {
|
|
+ get_file(new_m->vm_file);
|
|
+ if (vma_m->vm_flags & VM_EXECUTABLE)
|
|
+ added_exe_file_vma(mm);
|
|
+ }
|
|
+
|
|
+ if (new_m->vm_ops && new_m->vm_ops->open)
|
|
+ new_m->vm_ops->open(new_m);
|
|
+
|
|
+ if (new_below)
|
|
+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
|
|
+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
|
|
+ else
|
|
+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
|
|
+
|
|
+ if (err) {
|
|
+ if (new_m->vm_ops && new_m->vm_ops->close)
|
|
+ new_m->vm_ops->close(new_m);
|
|
+ if (new_m->vm_file) {
|
|
+ if (vma_m->vm_flags & VM_EXECUTABLE)
|
|
+ removed_exe_file_vma(mm);
|
|
+ fput(new_m->vm_file);
|
|
+ }
|
|
+ mpol_put(pol);
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
/* Success. */
|
|
if (!err)
|
|
return 0;
|
|
@@ -2110,10 +2428,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
|
|
removed_exe_file_vma(mm);
|
|
fput(new->vm_file);
|
|
}
|
|
- unlink_anon_vmas(new);
|
|
out_free_mpol:
|
|
mpol_put(pol);
|
|
out_free_vma:
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (new_m) {
|
|
+ unlink_anon_vmas(new_m);
|
|
+ kmem_cache_free(vm_area_cachep, new_m);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ unlink_anon_vmas(new);
|
|
kmem_cache_free(vm_area_cachep, new);
|
|
out_err:
|
|
return err;
|
|
@@ -2126,6 +2452,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
|
|
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
unsigned long addr, int new_below)
|
|
{
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
|
|
+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
|
|
+ if (mm->map_count >= sysctl_max_map_count-1)
|
|
+ return -ENOMEM;
|
|
+ } else
|
|
+#endif
|
|
+
|
|
if (mm->map_count >= sysctl_max_map_count)
|
|
return -ENOMEM;
|
|
|
|
@@ -2137,11 +2472,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
* work. This now handles partial unmappings.
|
|
* Jeremy Fitzhardinge <jeremy@goop.org>
|
|
*/
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
|
|
{
|
|
+ int ret = __do_munmap(mm, start, len);
|
|
+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
|
|
+ return ret;
|
|
+
|
|
+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
|
|
+}
|
|
+
|
|
+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
|
|
+#else
|
|
+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
|
|
+#endif
|
|
+{
|
|
unsigned long end;
|
|
struct vm_area_struct *vma, *prev, *last;
|
|
|
|
+ /*
|
|
+ * mm->mmap_sem is required to protect against another thread
|
|
+ * changing the mappings in case we sleep.
|
|
+ */
|
|
+ verify_mm_writelocked(mm);
|
|
+
|
|
if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
|
|
return -EINVAL;
|
|
|
|
@@ -2216,6 +2570,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
|
|
/* Fix up all other VM information */
|
|
remove_vma_list(mm, vma);
|
|
|
|
+ track_exec_limit(mm, start, end, 0UL);
|
|
+
|
|
return 0;
|
|
}
|
|
EXPORT_SYMBOL(do_munmap);
|
|
@@ -2225,6 +2581,13 @@ int vm_munmap(unsigned long start, size_t len)
|
|
int ret;
|
|
struct mm_struct *mm = current->mm;
|
|
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
|
|
+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
|
|
+ return -EINVAL;
|
|
+#endif
|
|
+
|
|
down_write(&mm->mmap_sem);
|
|
ret = do_munmap(mm, start, len);
|
|
up_write(&mm->mmap_sem);
|
|
@@ -2238,16 +2601,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
|
|
return vm_munmap(addr, len);
|
|
}
|
|
|
|
-static inline void verify_mm_writelocked(struct mm_struct *mm)
|
|
-{
|
|
-#ifdef CONFIG_DEBUG_VM
|
|
- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
|
|
- WARN_ON(1);
|
|
- up_read(&mm->mmap_sem);
|
|
- }
|
|
-#endif
|
|
-}
|
|
-
|
|
/*
|
|
* this is really a simplified "do_mmap". it only handles
|
|
* anonymous maps. eventually we may be able to do some
|
|
@@ -2261,6 +2614,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
|
|
struct rb_node ** rb_link, * rb_parent;
|
|
pgoff_t pgoff = addr >> PAGE_SHIFT;
|
|
int error;
|
|
+ unsigned long charged;
|
|
|
|
len = PAGE_ALIGN(len);
|
|
if (!len)
|
|
@@ -2272,16 +2626,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
|
|
|
|
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
|
|
|
|
+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
|
|
+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
|
|
+ flags &= ~VM_EXEC;
|
|
+
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+ if (mm->pax_flags & MF_PAX_MPROTECT)
|
|
+ flags &= ~VM_MAYEXEC;
|
|
+#endif
|
|
+
|
|
+ }
|
|
+#endif
|
|
+
|
|
error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
|
|
if (error & ~PAGE_MASK)
|
|
return error;
|
|
|
|
+ charged = len >> PAGE_SHIFT;
|
|
+
|
|
/*
|
|
* mlock MCL_FUTURE?
|
|
*/
|
|
if (mm->def_flags & VM_LOCKED) {
|
|
unsigned long locked, lock_limit;
|
|
- locked = len >> PAGE_SHIFT;
|
|
+ locked = charged;
|
|
locked += mm->locked_vm;
|
|
lock_limit = rlimit(RLIMIT_MEMLOCK);
|
|
lock_limit >>= PAGE_SHIFT;
|
|
@@ -2298,22 +2666,22 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
|
|
/*
|
|
* Clear old maps. this also does some error checking for us
|
|
*/
|
|
- munmap_back:
|
|
vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
|
|
if (vma && vma->vm_start < addr + len) {
|
|
if (do_munmap(mm, addr, len))
|
|
return -ENOMEM;
|
|
- goto munmap_back;
|
|
+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
|
|
+ BUG_ON(vma && vma->vm_start < addr + len);
|
|
}
|
|
|
|
/* Check against address space limits *after* clearing old maps... */
|
|
- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
|
|
+ if (!may_expand_vm(mm, charged))
|
|
return -ENOMEM;
|
|
|
|
if (mm->map_count > sysctl_max_map_count)
|
|
return -ENOMEM;
|
|
|
|
- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
|
|
+ if (security_vm_enough_memory_mm(mm, charged))
|
|
return -ENOMEM;
|
|
|
|
/* Can we just expand an old private anonymous mapping? */
|
|
@@ -2327,7 +2695,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
|
|
*/
|
|
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
|
|
if (!vma) {
|
|
- vm_unacct_memory(len >> PAGE_SHIFT);
|
|
+ vm_unacct_memory(charged);
|
|
return -ENOMEM;
|
|
}
|
|
|
|
@@ -2341,11 +2709,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
|
|
vma_link(mm, vma, prev, rb_link, rb_parent);
|
|
out:
|
|
perf_event_mmap(vma);
|
|
- mm->total_vm += len >> PAGE_SHIFT;
|
|
+ mm->total_vm += charged;
|
|
if (flags & VM_LOCKED) {
|
|
if (!mlock_vma_pages_range(vma, addr, addr + len))
|
|
- mm->locked_vm += (len >> PAGE_SHIFT);
|
|
+ mm->locked_vm += charged;
|
|
}
|
|
+ track_exec_limit(mm, addr, addr + len, flags);
|
|
return addr;
|
|
}
|
|
|
|
@@ -2401,8 +2770,10 @@ void exit_mmap(struct mm_struct *mm)
|
|
* Walk the list again, actually closing and freeing it,
|
|
* with preemption enabled, without holding any MM locks.
|
|
*/
|
|
- while (vma)
|
|
+ while (vma) {
|
|
+ vma->vm_mirror = NULL;
|
|
vma = remove_vma(vma);
|
|
+ }
|
|
|
|
BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
|
|
}
|
|
@@ -2416,6 +2787,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
|
|
struct vm_area_struct * __vma, * prev;
|
|
struct rb_node ** rb_link, * rb_parent;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ struct vm_area_struct *vma_m = NULL;
|
|
+#endif
|
|
+
|
|
/*
|
|
* The vm_pgoff of a purely anonymous vma should be irrelevant
|
|
* until its first write fault, when page's anon_vma and index
|
|
@@ -2438,7 +2813,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
|
|
if ((vma->vm_flags & VM_ACCOUNT) &&
|
|
security_vm_enough_memory_mm(mm, vma_pages(vma)))
|
|
return -ENOMEM;
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
|
|
+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
|
|
+ if (!vma_m)
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+#endif
|
|
+
|
|
vma_link(mm, vma, prev, rb_link, rb_parent);
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (vma_m)
|
|
+ BUG_ON(pax_mirror_vma(vma_m, vma));
|
|
+#endif
|
|
+
|
|
return 0;
|
|
}
|
|
|
|
@@ -2457,6 +2847,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
|
|
struct mempolicy *pol;
|
|
bool faulted_in_anon_vma = true;
|
|
|
|
+ BUG_ON(vma->vm_mirror);
|
|
+
|
|
/*
|
|
* If anonymous vma has not yet been faulted, update new pgoff
|
|
* to match new location, to increase its chance of merging.
|
|
@@ -2525,6 +2917,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
|
|
return NULL;
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
|
|
+{
|
|
+ struct vm_area_struct *prev_m;
|
|
+ struct rb_node **rb_link_m, *rb_parent_m;
|
|
+ struct mempolicy *pol_m;
|
|
+
|
|
+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
|
|
+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
|
|
+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
|
|
+ *vma_m = *vma;
|
|
+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
|
|
+ if (anon_vma_clone(vma_m, vma))
|
|
+ return -ENOMEM;
|
|
+ pol_m = vma_policy(vma_m);
|
|
+ mpol_get(pol_m);
|
|
+ vma_set_policy(vma_m, pol_m);
|
|
+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
|
|
+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
|
|
+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
|
|
+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
|
|
+ if (vma_m->vm_file)
|
|
+ get_file(vma_m->vm_file);
|
|
+ if (vma_m->vm_ops && vma_m->vm_ops->open)
|
|
+ vma_m->vm_ops->open(vma_m);
|
|
+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
|
|
+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
|
|
+ vma_m->vm_mirror = vma;
|
|
+ vma->vm_mirror = vma_m;
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
/*
|
|
* Return true if the calling process may expand its vm space by the passed
|
|
* number of pages
|
|
@@ -2536,6 +2961,11 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
|
|
|
|
lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
|
|
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
+ cur -= mm->brk_gap;
|
|
+#endif
|
|
+
|
|
if (cur + npages > lim)
|
|
return 0;
|
|
return 1;
|
|
@@ -2606,6 +3036,17 @@ int install_special_mapping(struct mm_struct *mm,
|
|
vma->vm_start = addr;
|
|
vma->vm_end = addr + len;
|
|
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+ if (mm->pax_flags & MF_PAX_MPROTECT) {
|
|
+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
|
|
+ return -EPERM;
|
|
+ if (!(vm_flags & VM_EXEC))
|
|
+ vm_flags &= ~VM_MAYEXEC;
|
|
+ else
|
|
+ vm_flags &= ~VM_MAYWRITE;
|
|
+ }
|
|
+#endif
|
|
+
|
|
vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
|
|
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
|
|
|
|
diff --git a/mm/mmap.c.rej b/mm/mmap.c.rej
|
|
new file mode 100644
|
|
index 0000000..3979d2d
|
|
--- /dev/null
|
|
+++ b/mm/mmap.c.rej
|
|
@@ -0,0 +1,103 @@
|
|
+--- mm/mmap.c 2012-05-21 11:33:40.107929975 +0200
|
|
++++ mm/mmap.c 2012-05-22 16:53:56.195111150 +0200
|
|
+@@ -711,6 +732,12 @@ static int
|
|
+ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
|
|
+ struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
|
|
+ {
|
|
++
|
|
++#ifdef CONFIG_PAX_SEGMEXEC
|
|
++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
|
|
++ return 0;
|
|
++#endif
|
|
++
|
|
+ if (is_mergeable_vma(vma, file, vm_flags) &&
|
|
+ is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
|
|
+ if (vma->vm_pgoff == vm_pgoff)
|
|
+@@ -730,6 +757,12 @@ static int
|
|
+ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
|
|
+ struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
|
|
+ {
|
|
++
|
|
++#ifdef CONFIG_PAX_SEGMEXEC
|
|
++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
|
|
++ return 0;
|
|
++#endif
|
|
++
|
|
+ if (is_mergeable_vma(vma, file, vm_flags) &&
|
|
+ is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
|
|
+ pgoff_t vm_pglen;
|
|
+@@ -772,13 +805,20 @@ can_vma_merge_after(struct vm_area_struc
|
|
+ struct vm_area_struct *vma_merge(struct mm_struct *mm,
|
|
+ struct vm_area_struct *prev, unsigned long addr,
|
|
+ unsigned long end, unsigned long vm_flags,
|
|
+- struct anon_vma *anon_vma, struct file *file,
|
|
++ struct anon_vma *anon_vma, struct file *file,
|
|
+ pgoff_t pgoff, struct mempolicy *policy)
|
|
+ {
|
|
+ pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
|
|
+ struct vm_area_struct *area, *next;
|
|
+ int err;
|
|
+
|
|
++#ifdef CONFIG_PAX_SEGMEXEC
|
|
++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
|
|
++ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
|
|
++
|
|
++ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
|
|
++#endif
|
|
++
|
|
+ /*
|
|
+ * We later require that vma->vm_flags == vm_flags,
|
|
+ * so this tests vma->vm_flags & VM_SPECIAL, too.
|
|
+@@ -853,12 +917,27 @@ struct vm_area_struct *vma_merge(struct
|
|
+ mpol_equal(policy, vma_policy(next)) &&
|
|
+ can_vma_merge_before(next, vm_flags,
|
|
+ anon_vma, file, pgoff+pglen)) {
|
|
+- if (prev && addr < prev->vm_end) /* case 4 */
|
|
++ if (prev && addr < prev->vm_end) { /* case 4 */
|
|
+ err = vma_adjust(prev, prev->vm_start,
|
|
+ addr, prev->vm_pgoff, NULL);
|
|
+- else /* cases 3, 8 */
|
|
++
|
|
++#ifdef CONFIG_PAX_SEGMEXEC
|
|
++ if (!err && prev_m)
|
|
++ err = vma_adjust(prev_m, prev_m->vm_start,
|
|
++ addr_m, prev_m->vm_pgoff, NULL);
|
|
++#endif
|
|
++
|
|
++ } else { /* cases 3, 8 */
|
|
+ err = vma_adjust(area, addr, next->vm_end,
|
|
+ next->vm_pgoff - pglen, NULL);
|
|
++
|
|
++#ifdef CONFIG_PAX_SEGMEXEC
|
|
++ if (!err && area_m)
|
|
++ err = vma_adjust(area_m, addr_m, next_m->vm_end,
|
|
++ next_m->vm_pgoff - pglen, NULL);
|
|
++#endif
|
|
++
|
|
++ }
|
|
+ if (err)
|
|
+ return NULL;
|
|
+ khugepaged_enter_vma_merge(area);
|
|
+@@ -2050,11 +2314,22 @@ int expand_downwards(struct vm_area_stru
|
|
+ if (!error) {
|
|
+ vma->vm_start = address;
|
|
+ vma->vm_pgoff -= grow;
|
|
++ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
|
|
++
|
|
++#ifdef CONFIG_PAX_SEGMEXEC
|
|
++ if (vma_m) {
|
|
++ vma_m->vm_start -= grow << PAGE_SHIFT;
|
|
++ vma_m->vm_pgoff -= grow;
|
|
++ }
|
|
++#endif
|
|
++
|
|
+ perf_event_mmap(vma);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ vma_unlock_anon_vma(vma);
|
|
++ if (lockprev)
|
|
++ vma_unlock_anon_vma(prev);
|
|
+ khugepaged_enter_vma_merge(vma);
|
|
+ return error;
|
|
+ }
|
|
diff --git a/mm/mprotect.c b/mm/mprotect.c
|
|
index 10add6b..ef5f2cc 100644
|
|
--- a/mm/mprotect.c
|
|
+++ b/mm/mprotect.c
|
|
@@ -23,10 +23,17 @@
|
|
#include <linux/mmu_notifier.h>
|
|
#include <linux/migrate.h>
|
|
#include <linux/perf_event.h>
|
|
+
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+#include <linux/elf.h>
|
|
+#include <linux/binfmts.h>
|
|
+#endif
|
|
+
|
|
#include <asm/uaccess.h>
|
|
#include <asm/pgtable.h>
|
|
#include <asm/cacheflush.h>
|
|
#include <asm/tlbflush.h>
|
|
+#include <asm/mmu_context.h>
|
|
|
|
#ifndef pgprot_modify
|
|
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
|
|
@@ -141,6 +148,48 @@ static void change_protection(struct vm_area_struct *vma,
|
|
flush_tlb_range(vma, start, end);
|
|
}
|
|
|
|
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
|
|
+/* called while holding the mmap semaphor for writing except stack expansion */
|
|
+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
|
|
+{
|
|
+ unsigned long oldlimit, newlimit = 0UL;
|
|
+
|
|
+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
|
|
+ return;
|
|
+
|
|
+ spin_lock(&mm->page_table_lock);
|
|
+ oldlimit = mm->context.user_cs_limit;
|
|
+ if ((prot & VM_EXEC) && oldlimit < end)
|
|
+ /* USER_CS limit moved up */
|
|
+ newlimit = end;
|
|
+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
|
|
+ /* USER_CS limit moved down */
|
|
+ newlimit = start;
|
|
+
|
|
+ if (newlimit) {
|
|
+ mm->context.user_cs_limit = newlimit;
|
|
+
|
|
+#ifdef CONFIG_SMP
|
|
+ wmb();
|
|
+ cpus_clear(mm->context.cpu_user_cs_mask);
|
|
+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
|
|
+#endif
|
|
+
|
|
+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
|
|
+ }
|
|
+ spin_unlock(&mm->page_table_lock);
|
|
+ if (newlimit == end) {
|
|
+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
|
|
+
|
|
+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
|
|
+ if (is_vm_hugetlb_page(vma))
|
|
+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
|
|
+ else
|
|
+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
|
|
+ }
|
|
+}
|
|
+#endif
|
|
+
|
|
int
|
|
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
|
|
unsigned long start, unsigned long end, unsigned long newflags)
|
|
@@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
|
|
int error;
|
|
int dirty_accountable = 0;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ struct vm_area_struct *vma_m = NULL;
|
|
+ unsigned long start_m, end_m;
|
|
+
|
|
+ start_m = start + SEGMEXEC_TASK_SIZE;
|
|
+ end_m = end + SEGMEXEC_TASK_SIZE;
|
|
+#endif
|
|
+
|
|
if (newflags == oldflags) {
|
|
*pprev = vma;
|
|
return 0;
|
|
}
|
|
|
|
+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
|
|
+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
|
|
+
|
|
+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
/*
|
|
* If we make a private mapping writable we increase our commit;
|
|
* but (without finer accounting) cannot reduce our commit if we
|
|
@@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
|
|
}
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
|
|
+ if (start != vma->vm_start) {
|
|
+ error = split_vma(mm, vma, start, 1);
|
|
+ if (error)
|
|
+ goto fail;
|
|
+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
|
|
+ *pprev = (*pprev)->vm_next;
|
|
+ }
|
|
+
|
|
+ if (end != vma->vm_end) {
|
|
+ error = split_vma(mm, vma, end, 0);
|
|
+ if (error)
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ if (pax_find_mirror_vma(vma)) {
|
|
+ error = __do_munmap(mm, start_m, end_m - start_m);
|
|
+ if (error)
|
|
+ goto fail;
|
|
+ } else {
|
|
+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
|
|
+ if (!vma_m) {
|
|
+ error = -ENOMEM;
|
|
+ goto fail;
|
|
+ }
|
|
+ vma->vm_flags = newflags;
|
|
+ error = pax_mirror_vma(vma_m, vma);
|
|
+ if (error) {
|
|
+ vma->vm_flags = oldflags;
|
|
+ goto fail;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
/*
|
|
* First try to merge with previous and/or next vma.
|
|
*/
|
|
@@ -205,9 +308,21 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
|
|
* vm_flags and vm_page_prot are protected by the mmap_sem
|
|
* held in write mode.
|
|
*/
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
|
|
+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
|
|
+#endif
|
|
+
|
|
vma->vm_flags = newflags;
|
|
+
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+ if (mm->binfmt && mm->binfmt->handle_mprotect)
|
|
+ mm->binfmt->handle_mprotect(vma, newflags);
|
|
+#endif
|
|
+
|
|
vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
|
|
- vm_get_page_prot(newflags));
|
|
+ vm_get_page_prot(vma->vm_flags));
|
|
|
|
if (vma_wants_writenotify(vma)) {
|
|
vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
|
|
@@ -249,6 +364,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
|
|
end = start + len;
|
|
if (end <= start)
|
|
return -ENOMEM;
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
|
|
+ if (end > SEGMEXEC_TASK_SIZE)
|
|
+ return -EINVAL;
|
|
+ } else
|
|
+#endif
|
|
+
|
|
+ if (end > TASK_SIZE)
|
|
+ return -EINVAL;
|
|
+
|
|
if (!arch_validate_prot(prot))
|
|
return -EINVAL;
|
|
|
|
@@ -256,7 +382,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
|
|
/*
|
|
* Does the application expect PROT_READ to imply PROT_EXEC:
|
|
*/
|
|
- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
|
|
+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
|
|
prot |= PROT_EXEC;
|
|
|
|
vm_flags = calc_vm_prot_bits(prot);
|
|
@@ -289,6 +415,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
|
|
if (start > vma->vm_start)
|
|
prev = vma;
|
|
|
|
+#ifdef CONFIG_PAX_MPROTECT
|
|
+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
|
|
+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
|
|
+#endif
|
|
+
|
|
for (nstart = start ; ; ) {
|
|
unsigned long newflags;
|
|
|
|
@@ -312,6 +443,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
|
|
error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
|
|
if (error)
|
|
goto out;
|
|
+
|
|
+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
|
|
+
|
|
nstart = tmp;
|
|
|
|
if (nstart < prev->vm_end)
|
|
diff --git a/mm/mremap.c b/mm/mremap.c
|
|
index db8d983..76506cb 100644
|
|
--- a/mm/mremap.c
|
|
+++ b/mm/mremap.c
|
|
@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
|
|
continue;
|
|
pte = ptep_get_and_clear(mm, old_addr, old_pte);
|
|
pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
|
|
+
|
|
+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
|
|
+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
|
|
+ pte = pte_exprotect(pte);
|
|
+#endif
|
|
+
|
|
set_pte_at(mm, new_addr, new_pte, pte);
|
|
}
|
|
|
|
@@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
|
|
if (is_vm_hugetlb_page(vma))
|
|
goto Einval;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (pax_find_mirror_vma(vma))
|
|
+ goto Einval;
|
|
+#endif
|
|
+
|
|
/* We can't remap across vm area boundaries */
|
|
if (old_len > vma->vm_end - addr)
|
|
goto Efault;
|
|
@@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
|
|
unsigned long ret = -EINVAL;
|
|
unsigned long charged = 0;
|
|
unsigned long map_flags;
|
|
+ unsigned long pax_task_size = TASK_SIZE;
|
|
|
|
if (new_addr & ~PAGE_MASK)
|
|
goto out;
|
|
|
|
- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
|
|
+ pax_task_size = SEGMEXEC_TASK_SIZE;
|
|
+#endif
|
|
+
|
|
+ pax_task_size -= PAGE_SIZE;
|
|
+
|
|
+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
|
|
goto out;
|
|
|
|
/* Check if the location we're moving into overlaps the
|
|
* old location at all, and fail if it does.
|
|
*/
|
|
- if ((new_addr <= addr) && (new_addr+new_len) > addr)
|
|
- goto out;
|
|
-
|
|
- if ((addr <= new_addr) && (addr+old_len) > new_addr)
|
|
+ if (addr + old_len > new_addr && new_addr + new_len > addr)
|
|
goto out;
|
|
|
|
ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
|
|
@@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
|
|
struct vm_area_struct *vma;
|
|
unsigned long ret = -EINVAL;
|
|
unsigned long charged = 0;
|
|
+ unsigned long pax_task_size = TASK_SIZE;
|
|
|
|
if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
|
|
goto out;
|
|
@@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
|
|
if (!new_len)
|
|
goto out;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
|
|
+ pax_task_size = SEGMEXEC_TASK_SIZE;
|
|
+#endif
|
|
+
|
|
+ pax_task_size -= PAGE_SIZE;
|
|
+
|
|
+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
|
|
+ old_len > pax_task_size || addr > pax_task_size-old_len)
|
|
+ goto out;
|
|
+
|
|
if (flags & MREMAP_FIXED) {
|
|
if (flags & MREMAP_MAYMOVE)
|
|
ret = mremap_to(addr, old_len, new_addr, new_len);
|
|
@@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
|
|
addr + new_len);
|
|
}
|
|
ret = addr;
|
|
+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
|
|
goto out;
|
|
}
|
|
}
|
|
@@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
|
|
ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
|
|
if (ret)
|
|
goto out;
|
|
+
|
|
+ map_flags = vma->vm_flags;
|
|
ret = move_vma(vma, addr, old_len, new_len, new_addr);
|
|
+ if (!(ret & ~PAGE_MASK)) {
|
|
+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
|
|
+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
|
|
+ }
|
|
}
|
|
out:
|
|
if (ret & ~PAGE_MASK)
|
|
diff --git a/mm/nommu.c b/mm/nommu.c
|
|
index 75ff3c1..3b4d4ad 100644
|
|
--- a/mm/nommu.c
|
|
+++ b/mm/nommu.c
|
|
@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
|
|
int sysctl_overcommit_ratio = 50; /* default is 50% */
|
|
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
|
|
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
|
|
-int heap_stack_gap = 0;
|
|
|
|
atomic_long_t mmap_pages_allocated;
|
|
|
|
@@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
|
|
EXPORT_SYMBOL(find_vma);
|
|
|
|
/*
|
|
- * find a VMA
|
|
- * - we don't extend stack VMAs under NOMMU conditions
|
|
- */
|
|
-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
|
|
-{
|
|
- return find_vma(mm, addr);
|
|
-}
|
|
-
|
|
-/*
|
|
* expand a stack to a given address
|
|
* - not supported under NOMMU conditions
|
|
*/
|
|
@@ -1580,6 +1570,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
|
|
/* most fields are the same, copy all, and then fixup */
|
|
*new = *vma;
|
|
+ INIT_LIST_HEAD(&new->anon_vma_chain);
|
|
*region = *vma->vm_region;
|
|
new->vm_region = region;
|
|
|
|
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
|
|
index 1e3e4c0..7a1c9cb 100644
|
|
--- a/mm/page_alloc.c
|
|
+++ b/mm/page_alloc.c
|
|
@@ -356,7 +356,7 @@ static void bad_page(struct page *page)
|
|
* This usage means that zero-order pages may not be compound.
|
|
*/
|
|
|
|
-static void free_compound_page(struct page *page)
|
|
+void free_compound_page(struct page *page)
|
|
{
|
|
__free_pages_ok(page, compound_order(page));
|
|
}
|
|
@@ -726,6 +726,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
|
|
int i;
|
|
int bad = 0;
|
|
|
|
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
|
|
+ unsigned long index = 1UL << order;
|
|
+#endif
|
|
+
|
|
trace_mm_page_free(page, order);
|
|
kmemcheck_free_shadow(page, order);
|
|
|
|
@@ -741,6 +745,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
|
|
debug_check_no_obj_freed(page_address(page),
|
|
PAGE_SIZE << order);
|
|
}
|
|
+
|
|
+#ifdef CONFIG_PAX_MEMORY_SANITIZE
|
|
+ for (; index; --index)
|
|
+ sanitize_highpage(page + index - 1);
|
|
+#endif
|
|
+
|
|
arch_free_page(page, order);
|
|
kernel_map_pages(page, 1 << order, 0);
|
|
|
|
@@ -892,8 +902,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
|
|
arch_alloc_page(page, order);
|
|
kernel_map_pages(page, 1 << order, 1);
|
|
|
|
+#ifndef CONFIG_PAX_MEMORY_SANITIZE
|
|
if (gfp_flags & __GFP_ZERO)
|
|
prep_zero_page(page, order, gfp_flags);
|
|
+#endif
|
|
|
|
if (order && (gfp_flags & __GFP_COMP))
|
|
prep_compound_page(page, order);
|
|
diff --git a/mm/percpu.c b/mm/percpu.c
|
|
index 13b2eef..36e02b4 100644
|
|
--- a/mm/percpu.c
|
|
+++ b/mm/percpu.c
|
|
@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
|
|
static unsigned int pcpu_high_unit_cpu __read_mostly;
|
|
|
|
/* the address of the first chunk which starts with the kernel static area */
|
|
-void *pcpu_base_addr __read_mostly;
|
|
+void *pcpu_base_addr __read_only;
|
|
EXPORT_SYMBOL_GPL(pcpu_base_addr);
|
|
|
|
static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
|
|
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
|
|
index b63e84a..86ca800 100644
|
|
--- a/mm/process_vm_access.c
|
|
+++ b/mm/process_vm_access.c
|
|
@@ -258,19 +258,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
|
|
size_t iov_l_curr_offset = 0;
|
|
ssize_t iov_len;
|
|
|
|
+ return -ENOSYS; // PaX: until properly audited
|
|
+
|
|
/*
|
|
* Work out how many pages of struct pages we're going to need
|
|
* when eventually calling get_user_pages
|
|
*/
|
|
for (i = 0; i < riovcnt; i++) {
|
|
iov_len = rvec[i].iov_len;
|
|
- if (iov_len > 0) {
|
|
- nr_pages_iov = ((unsigned long)rvec[i].iov_base
|
|
- + iov_len)
|
|
- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
|
|
- / PAGE_SIZE + 1;
|
|
- nr_pages = max(nr_pages, nr_pages_iov);
|
|
- }
|
|
+ if (iov_len <= 0)
|
|
+ continue;
|
|
+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
|
|
+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
|
|
+ nr_pages = max(nr_pages, nr_pages_iov);
|
|
}
|
|
|
|
if (nr_pages == 0)
|
|
diff --git a/mm/rmap.c b/mm/rmap.c
|
|
index 46dfc4e..396e273 100644
|
|
--- a/mm/rmap.c
|
|
+++ b/mm/rmap.c
|
|
@@ -171,6 +171,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
|
|
struct anon_vma *anon_vma = vma->anon_vma;
|
|
struct anon_vma_chain *avc;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ struct anon_vma_chain *avc_m = NULL;
|
|
+#endif
|
|
+
|
|
might_sleep();
|
|
if (unlikely(!anon_vma)) {
|
|
struct mm_struct *mm = vma->vm_mm;
|
|
@@ -180,6 +184,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
|
|
if (!avc)
|
|
goto out_enomem;
|
|
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
|
|
+ if (!avc_m)
|
|
+ goto out_enomem_free_avc;
|
|
+#endif
|
|
+
|
|
anon_vma = find_mergeable_anon_vma(vma);
|
|
allocated = NULL;
|
|
if (!anon_vma) {
|
|
@@ -193,6 +203,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
|
|
/* page_table_lock to protect against threads */
|
|
spin_lock(&mm->page_table_lock);
|
|
if (likely(!vma->anon_vma)) {
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
|
|
+
|
|
+ if (vma_m) {
|
|
+ BUG_ON(vma_m->anon_vma);
|
|
+ vma_m->anon_vma = anon_vma;
|
|
+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
|
|
+ avc_m = NULL;
|
|
+ }
|
|
+#endif
|
|
+
|
|
vma->anon_vma = anon_vma;
|
|
anon_vma_chain_link(vma, avc, anon_vma);
|
|
/* vma reference or self-parent link for new root */
|
|
@@ -205,12 +227,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
|
|
|
|
if (unlikely(allocated))
|
|
put_anon_vma(allocated);
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (unlikely(avc_m))
|
|
+ anon_vma_chain_free(avc_m);
|
|
+#endif
|
|
+
|
|
if (unlikely(avc))
|
|
anon_vma_chain_free(avc);
|
|
}
|
|
return 0;
|
|
|
|
out_enomem_free_avc:
|
|
+
|
|
+#ifdef CONFIG_PAX_SEGMEXEC
|
|
+ if (avc_m)
|
|
+ anon_vma_chain_free(avc_m);
|
|
+#endif
|
|
+
|
|
anon_vma_chain_free(avc);
|
|
out_enomem:
|
|
return -ENOMEM;
|
|
@@ -254,7 +288,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
|
|
* good chance of avoiding scanning the whole hierarchy when it searches where
|
|
* page is mapped.
|
|
*/
|
|
-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
|
|
+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
|
|
{
|
|
struct anon_vma_chain *avc, *pavc;
|
|
struct anon_vma *root = NULL;
|
|
@@ -353,7 +387,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
|
|
* the corresponding VMA in the parent process is attached to.
|
|
* Returns 0 on success, non-zero on failure.
|
|
*/
|
|
-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
|
|
+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
|
|
{
|
|
struct anon_vma_chain *avc;
|
|
struct anon_vma *anon_vma;
|
|
diff --git a/mm/shmem.c b/mm/shmem.c
|
|
index 2e4d10d..8e92d3b 100644
|
|
--- a/mm/shmem.c
|
|
+++ b/mm/shmem.c
|
|
@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
|
|
#define BOGO_DIRENT_SIZE 20
|
|
|
|
/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
|
|
-#define SHORT_SYMLINK_LEN 128
|
|
+#define SHORT_SYMLINK_LEN 64
|
|
|
|
/*
|
|
* vmtruncate_range() communicates with shmem_fault via
|
|
@@ -2367,8 +2367,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
|
|
int err = -ENOMEM;
|
|
|
|
/* Round up to L1_CACHE_BYTES to resist false sharing */
|
|
- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
|
|
- L1_CACHE_BYTES), GFP_KERNEL);
|
|
+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
|
|
if (!sbinfo)
|
|
return -ENOMEM;
|
|
|
|
diff --git a/mm/slab.c b/mm/slab.c
|
|
index 3714dd9..de7f15e 100644
|
|
--- a/mm/slab.c
|
|
+++ b/mm/slab.c
|
|
@@ -153,7 +153,7 @@
|
|
|
|
/* Legal flag mask for kmem_cache_create(). */
|
|
#if DEBUG
|
|
-# define CREATE_MASK (SLAB_RED_ZONE | \
|
|
+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
|
|
SLAB_POISON | SLAB_HWCACHE_ALIGN | \
|
|
SLAB_CACHE_DMA | \
|
|
SLAB_STORE_USER | \
|
|
@@ -161,7 +161,7 @@
|
|
SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
|
|
SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
|
|
#else
|
|
-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
|
|
+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
|
|
SLAB_CACHE_DMA | \
|
|
SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
|
|
SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
|
|
@@ -290,7 +290,7 @@ struct kmem_list3 {
|
|
* Need this for bootstrapping a per node allocator.
|
|
*/
|
|
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
|
|
-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
|
|
+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
|
|
#define CACHE_CACHE 0
|
|
#define SIZE_AC MAX_NUMNODES
|
|
#define SIZE_L3 (2 * MAX_NUMNODES)
|
|
@@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
|
|
if ((x)->max_freeable < i) \
|
|
(x)->max_freeable = i; \
|
|
} while (0)
|
|
-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
|
|
-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
|
|
-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
|
|
-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
|
|
+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
|
|
+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
|
|
+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
|
|
+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
|
|
#else
|
|
#define STATS_INC_ACTIVE(x) do { } while (0)
|
|
#define STATS_DEC_ACTIVE(x) do { } while (0)
|
|
@@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
|
|
* reciprocal_divide(offset, cache->reciprocal_buffer_size)
|
|
*/
|
|
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
|
|
- const struct slab *slab, void *obj)
|
|
+ const struct slab *slab, const void *obj)
|
|
{
|
|
u32 offset = (obj - slab->s_mem);
|
|
return reciprocal_divide(offset, cache->reciprocal_buffer_size);
|
|
@@ -563,12 +563,13 @@ EXPORT_SYMBOL(malloc_sizes);
|
|
struct cache_names {
|
|
char *name;
|
|
char *name_dma;
|
|
+ char *name_usercopy;
|
|
};
|
|
|
|
static struct cache_names __initdata cache_names[] = {
|
|
-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
|
|
+#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
|
|
#include <linux/kmalloc_sizes.h>
|
|
- {NULL,}
|
|
+ {NULL}
|
|
#undef CACHE
|
|
};
|
|
|
|
@@ -756,6 +757,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
|
|
if (unlikely(gfpflags & GFP_DMA))
|
|
return csizep->cs_dmacachep;
|
|
#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_USERCOPY_SLABS
|
|
+ if (unlikely(gfpflags & GFP_USERCOPY))
|
|
+ return csizep->cs_usercopycachep;
|
|
+#endif
|
|
+
|
|
return csizep->cs_cachep;
|
|
}
|
|
|
|
@@ -1588,7 +1595,7 @@ void __init kmem_cache_init(void)
|
|
sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
|
|
sizes[INDEX_AC].cs_size,
|
|
ARCH_KMALLOC_MINALIGN,
|
|
- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
|
|
+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
|
|
NULL);
|
|
|
|
if (INDEX_AC != INDEX_L3) {
|
|
@@ -1596,7 +1603,7 @@ void __init kmem_cache_init(void)
|
|
kmem_cache_create(names[INDEX_L3].name,
|
|
sizes[INDEX_L3].cs_size,
|
|
ARCH_KMALLOC_MINALIGN,
|
|
- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
|
|
+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
|
|
NULL);
|
|
}
|
|
|
|
@@ -1614,7 +1621,7 @@ void __init kmem_cache_init(void)
|
|
sizes->cs_cachep = kmem_cache_create(names->name,
|
|
sizes->cs_size,
|
|
ARCH_KMALLOC_MINALIGN,
|
|
- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
|
|
+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
|
|
NULL);
|
|
}
|
|
#ifdef CONFIG_ZONE_DMA
|
|
@@ -1626,6 +1633,16 @@ void __init kmem_cache_init(void)
|
|
SLAB_PANIC,
|
|
NULL);
|
|
#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_USERCOPY_SLABS
|
|
+ sizes->cs_usercopycachep = kmem_cache_create(
|
|
+ names->name_usercopy,
|
|
+ sizes->cs_size,
|
|
+ ARCH_KMALLOC_MINALIGN,
|
|
+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
|
|
+ NULL);
|
|
+#endif
|
|
+
|
|
sizes++;
|
|
names++;
|
|
}
|
|
@@ -4390,10 +4407,10 @@ static int s_show(struct seq_file *m, void *p)
|
|
}
|
|
/* cpu stats */
|
|
{
|
|
- unsigned long allochit = atomic_read(&cachep->allochit);
|
|
- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
|
|
- unsigned long freehit = atomic_read(&cachep->freehit);
|
|
- unsigned long freemiss = atomic_read(&cachep->freemiss);
|
|
+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
|
|
+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
|
|
+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
|
|
+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
|
|
|
|
seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
|
|
allochit, allocmiss, freehit, freemiss);
|
|
@@ -4659,6 +4676,61 @@ static int __init slab_proc_init(void)
|
|
module_init(slab_proc_init);
|
|
#endif
|
|
|
|
+bool is_usercopy_object(const void *ptr)
|
|
+{
|
|
+ struct page *page;
|
|
+ struct kmem_cache *cachep;
|
|
+
|
|
+ if (ZERO_OR_NULL_PTR(ptr))
|
|
+ return false;
|
|
+
|
|
+ if (!virt_addr_valid(ptr))
|
|
+ return false;
|
|
+
|
|
+ page = virt_to_head_page(ptr);
|
|
+
|
|
+ if (!PageSlab(page))
|
|
+ return false;
|
|
+
|
|
+ cachep = page_get_cache(page);
|
|
+ return cachep->flags & SLAB_USERCOPY;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_PAX_USERCOPY
|
|
+const char *check_heap_object(const void *ptr, unsigned long n, bool to)
|
|
+{
|
|
+ struct page *page;
|
|
+ struct kmem_cache *cachep;
|
|
+ struct slab *slabp;
|
|
+ unsigned int objnr;
|
|
+ unsigned long offset;
|
|
+
|
|
+ if (ZERO_OR_NULL_PTR(ptr))
|
|
+ return "<null>";
|
|
+
|
|
+ if (!virt_addr_valid(ptr))
|
|
+ return NULL;
|
|
+
|
|
+ page = virt_to_head_page(ptr);
|
|
+
|
|
+ if (!PageSlab(page))
|
|
+ return NULL;
|
|
+
|
|
+ cachep = page_get_cache(page);
|
|
+ if (!(cachep->flags & SLAB_USERCOPY))
|
|
+ return cachep->name;
|
|
+
|
|
+ slabp = page_get_slab(page);
|
|
+ objnr = obj_to_index(cachep, slabp, ptr);
|
|
+ BUG_ON(objnr >= cachep->num);
|
|
+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
|
|
+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
|
|
+ return NULL;
|
|
+
|
|
+ return cachep->name;
|
|
+}
|
|
+#endif
|
|
+
|
|
/**
|
|
* ksize - get the actual amount of memory allocated for a given object
|
|
* @objp: Pointer to the object
|
|
diff --git a/mm/slob.c b/mm/slob.c
|
|
index 8105be4..3c15e57 100644
|
|
--- a/mm/slob.c
|
|
+++ b/mm/slob.c
|
|
@@ -29,7 +29,7 @@
|
|
* If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
|
|
* alloc_pages() directly, allocating compound pages so the page order
|
|
* does not have to be separately tracked, and also stores the exact
|
|
- * allocation size in page->private so that it can be used to accurately
|
|
+ * allocation size in slob_page->size so that it can be used to accurately
|
|
* provide ksize(). These objects are detected in kfree() because slob_page()
|
|
* is false for them.
|
|
*
|
|
@@ -58,6 +58,7 @@
|
|
*/
|
|
|
|
#include <linux/kernel.h>
|
|
+#include <linux/sched.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/swap.h> /* struct reclaim_state */
|
|
@@ -102,7 +103,8 @@ struct slob_page {
|
|
unsigned long flags; /* mandatory */
|
|
atomic_t _count; /* mandatory */
|
|
slobidx_t units; /* free units left in page */
|
|
- unsigned long pad[2];
|
|
+ unsigned long pad[1];
|
|
+ unsigned long size; /* size when >=PAGE_SIZE */
|
|
slob_t *free; /* first free slob_t in page */
|
|
struct list_head list; /* linked list of free pages */
|
|
};
|
|
@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
|
|
*/
|
|
static inline int is_slob_page(struct slob_page *sp)
|
|
{
|
|
- return PageSlab((struct page *)sp);
|
|
+ return PageSlab((struct page *)sp) && !sp->size;
|
|
}
|
|
|
|
static inline void set_slob_page(struct slob_page *sp)
|
|
@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
|
|
|
|
static inline struct slob_page *slob_page(const void *addr)
|
|
{
|
|
- return (struct slob_page *)virt_to_page(addr);
|
|
+ return (struct slob_page *)virt_to_head_page(addr);
|
|
}
|
|
|
|
/*
|
|
@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
|
|
/*
|
|
* Return the size of a slob block.
|
|
*/
|
|
-static slobidx_t slob_units(slob_t *s)
|
|
+static slobidx_t slob_units(const slob_t *s)
|
|
{
|
|
if (s->units > 0)
|
|
return s->units;
|
|
@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
|
|
/*
|
|
* Return the next free slob block pointer after this one.
|
|
*/
|
|
-static slob_t *slob_next(slob_t *s)
|
|
+static slob_t *slob_next(const slob_t *s)
|
|
{
|
|
slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
|
|
slobidx_t next;
|
|
@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
|
|
/*
|
|
* Returns true if s is the last free block in its page.
|
|
*/
|
|
-static int slob_last(slob_t *s)
|
|
+static int slob_last(const slob_t *s)
|
|
{
|
|
return !((unsigned long)slob_next(s) & ~PAGE_MASK);
|
|
}
|
|
@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
|
|
if (!page)
|
|
return NULL;
|
|
|
|
+ set_slob_page(page);
|
|
return page_address(page);
|
|
}
|
|
|
|
@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
|
|
if (!b)
|
|
return NULL;
|
|
sp = slob_page(b);
|
|
- set_slob_page(sp);
|
|
|
|
spin_lock_irqsave(&slob_lock, flags);
|
|
sp->units = SLOB_UNITS(PAGE_SIZE);
|
|
sp->free = b;
|
|
+ sp->size = 0;
|
|
INIT_LIST_HEAD(&sp->list);
|
|
set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
|
|
set_slob_page_free(sp, slob_list);
|
|
@@ -476,10 +479,9 @@ static void slob_free(void *block, int size)
|
|
* End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
|
|
*/
|
|
|
|
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
|
|
+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
|
|
{
|
|
- unsigned int *m;
|
|
- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
|
|
+ slob_t *m;
|
|
void *ret;
|
|
|
|
gfp &= gfp_allowed_mask;
|
|
@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
|
|
|
|
if (!m)
|
|
return NULL;
|
|
- *m = size;
|
|
+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
|
|
+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
|
|
+ m[0].units = size;
|
|
+ m[1].units = align;
|
|
ret = (void *)m + align;
|
|
|
|
trace_kmalloc_node(_RET_IP_, ret,
|
|
@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
|
|
gfp |= __GFP_COMP;
|
|
ret = slob_new_pages(gfp, order, node);
|
|
if (ret) {
|
|
- struct page *page;
|
|
- page = virt_to_page(ret);
|
|
- page->private = size;
|
|
+ struct slob_page *sp;
|
|
+ sp = slob_page(ret);
|
|
+ sp->size = size;
|
|
}
|
|
|
|
trace_kmalloc_node(_RET_IP_, ret,
|
|
size, PAGE_SIZE << order, gfp, node);
|
|
}
|
|
|
|
- kmemleak_alloc(ret, size, 1, gfp);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
|
|
+{
|
|
+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
|
|
+ void *ret = __kmalloc_node_align(size, gfp, node, align);
|
|
+
|
|
+ if (!ZERO_OR_NULL_PTR(ret))
|
|
+ kmemleak_alloc(ret, size, 1, gfp);
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL(__kmalloc_node);
|
|
@@ -533,13 +547,83 @@ void kfree(const void *block)
|
|
sp = slob_page(block);
|
|
if (is_slob_page(sp)) {
|
|
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
|
|
- unsigned int *m = (unsigned int *)(block - align);
|
|
- slob_free(m, *m + align);
|
|
- } else
|
|
+ slob_t *m = (slob_t *)(block - align);
|
|
+ slob_free(m, m[0].units + align);
|
|
+ } else {
|
|
+ clear_slob_page(sp);
|
|
+ free_slob_page(sp);
|
|
+ sp->size = 0;
|
|
put_page(&sp->page);
|
|
+ }
|
|
}
|
|
EXPORT_SYMBOL(kfree);
|
|
|
|
+bool is_usercopy_object(const void *ptr)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_PAX_USERCOPY
|
|
+const char *check_heap_object(const void *ptr, unsigned long n, bool to)
|
|
+{
|
|
+ struct slob_page *sp;
|
|
+ const slob_t *free;
|
|
+ const void *base;
|
|
+ unsigned long flags;
|
|
+
|
|
+ if (ZERO_OR_NULL_PTR(ptr))
|
|
+ return "<null>";
|
|
+
|
|
+ if (!virt_addr_valid(ptr))
|
|
+ return NULL;
|
|
+
|
|
+ sp = slob_page(ptr);
|
|
+ if (!PageSlab((struct page *)sp))
|
|
+ return NULL;
|
|
+
|
|
+ if (sp->size) {
|
|
+ base = page_address(&sp->page);
|
|
+ if (base <= ptr && n <= sp->size - (ptr - base))
|
|
+ return NULL;
|
|
+ return "<slob>";
|
|
+ }
|
|
+
|
|
+ /* some tricky double walking to find the chunk */
|
|
+ spin_lock_irqsave(&slob_lock, flags);
|
|
+ base = (void *)((unsigned long)ptr & PAGE_MASK);
|
|
+ free = sp->free;
|
|
+
|
|
+ while (!slob_last(free) && (void *)free <= ptr) {
|
|
+ base = free + slob_units(free);
|
|
+ free = slob_next(free);
|
|
+ }
|
|
+
|
|
+ while (base < (void *)free) {
|
|
+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
|
|
+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
|
|
+ int offset;
|
|
+
|
|
+ if (ptr < base + align)
|
|
+ break;
|
|
+
|
|
+ offset = ptr - base - align;
|
|
+ if (offset >= m) {
|
|
+ base += size;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (n > m - offset)
|
|
+ break;
|
|
+
|
|
+ spin_unlock_irqrestore(&slob_lock, flags);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ spin_unlock_irqrestore(&slob_lock, flags);
|
|
+ return "<slob>";
|
|
+}
|
|
+#endif
|
|
+
|
|
/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
|
|
size_t ksize(const void *block)
|
|
{
|
|
@@ -552,10 +636,10 @@ size_t ksize(const void *block)
|
|
sp = slob_page(block);
|
|
if (is_slob_page(sp)) {
|
|
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
|
|
- unsigned int *m = (unsigned int *)(block - align);
|
|
- return SLOB_UNITS(*m) * SLOB_UNIT;
|
|
+ slob_t *m = (slob_t *)(block - align);
|
|
+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
|
|
} else
|
|
- return sp->page.private;
|
|
+ return sp->size;
|
|
}
|
|
EXPORT_SYMBOL(ksize);
|
|
|
|
@@ -571,8 +655,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
|
|
{
|
|
struct kmem_cache *c;
|
|
|
|
+#ifdef CONFIG_PAX_USERCOPY_SLABS
|
|
+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
|
|
+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
|
|
+#else
|
|
c = slob_alloc(sizeof(struct kmem_cache),
|
|
GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
|
|
+#endif
|
|
|
|
if (c) {
|
|
c->name = name;
|
|
@@ -614,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
|
|
|
|
lockdep_trace_alloc(flags);
|
|
|
|
+#ifdef CONFIG_PAX_USERCOPY_SLABS
|
|
+ b = __kmalloc_node_align(c->size, flags, node, c->align);
|
|
+#else
|
|
if (c->size < PAGE_SIZE) {
|
|
b = slob_alloc(c->size, flags, c->align, node);
|
|
trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
|
|
SLOB_UNITS(c->size) * SLOB_UNIT,
|
|
flags, node);
|
|
} else {
|
|
+ struct slob_page *sp;
|
|
+
|
|
b = slob_new_pages(flags, get_order(c->size), node);
|
|
+ sp = slob_page(b);
|
|
+ sp->size = c->size;
|
|
trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
|
|
PAGE_SIZE << get_order(c->size),
|
|
flags, node);
|
|
}
|
|
+#endif
|
|
|
|
if (c->ctor)
|
|
c->ctor(b);
|
|
@@ -636,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
|
|
|
|
static void __kmem_cache_free(void *b, int size)
|
|
{
|
|
- if (size < PAGE_SIZE)
|
|
+ struct slob_page *sp = slob_page(b);
|
|
+
|
|
+ if (is_slob_page(sp))
|
|
slob_free(b, size);
|
|
- else
|
|
+ else {
|
|
+ clear_slob_page(sp);
|
|
+ free_slob_page(sp);
|
|
+ sp->size = 0;
|
|
slob_free_pages(b, get_order(size));
|
|
+ }
|
|
}
|
|
|
|
static void kmem_rcu_free(struct rcu_head *head)
|
|
@@ -652,17 +755,31 @@ static void kmem_rcu_free(struct rcu_head *head)
|
|
|
|
void kmem_cache_free(struct kmem_cache *c, void *b)
|
|
{
|
|
+ int size = c->size;
|
|
+
|
|
+#ifdef CONFIG_PAX_USERCOPY_SLABS
|
|
+ if (size + c->align < PAGE_SIZE) {
|
|
+ size += c->align;
|
|
+ b -= c->align;
|
|
+ }
|
|
+#endif
|
|
+
|
|
kmemleak_free_recursive(b, c->flags);
|
|
if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
|
|
struct slob_rcu *slob_rcu;
|
|
- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
|
|
- slob_rcu->size = c->size;
|
|
+ slob_rcu = b + (size - sizeof(struct slob_rcu));
|
|
+ slob_rcu->size = size;
|
|
call_rcu(&slob_rcu->head, kmem_rcu_free);
|
|
} else {
|
|
- __kmem_cache_free(b, c->size);
|
|
+ __kmem_cache_free(b, size);
|
|
}
|
|
|
|
+#ifdef CONFIG_PAX_USERCOPY_SLABS
|
|
+ trace_kfree(_RET_IP_, b);
|
|
+#else
|
|
trace_kmem_cache_free(_RET_IP_, b);
|
|
+#endif
|
|
+
|
|
}
|
|
EXPORT_SYMBOL(kmem_cache_free);
|
|
|
|
diff --git a/mm/slub.c b/mm/slub.c
|
|
index b62bb16..8bb7580 100644
|
|
--- a/mm/slub.c
|
|
+++ b/mm/slub.c
|
|
@@ -2589,6 +2589,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
|
|
|
|
page = virt_to_head_page(x);
|
|
|
|
+ BUG_ON(!PageSlab(page));
|
|
+
|
|
slab_free(s, page, x, _RET_IP_);
|
|
|
|
trace_kmem_cache_free(_RET_IP_, x);
|
|
@@ -2622,7 +2624,7 @@ static int slub_min_objects;
|
|
* Merge control. If this is set then no merging of slab caches will occur.
|
|
* (Could be removed. This was introduced to pacify the merge skeptics.)
|
|
*/
|
|
-static int slub_nomerge;
|
|
+static int slub_nomerge = 1;
|
|
|
|
/*
|
|
* Calculate the order of allocation given an slab object size.
|
|
@@ -3075,7 +3077,7 @@ static int kmem_cache_open(struct kmem_cache *s,
|
|
else
|
|
s->cpu_partial = 30;
|
|
|
|
- s->refcount = 1;
|
|
+ atomic_set(&s->refcount, 1);
|
|
#ifdef CONFIG_NUMA
|
|
s->remote_node_defrag_ratio = 1000;
|
|
#endif
|
|
@@ -3179,8 +3181,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
|
|
void kmem_cache_destroy(struct kmem_cache *s)
|
|
{
|
|
down_write(&slub_lock);
|
|
- s->refcount--;
|
|
- if (!s->refcount) {
|
|
+ if (atomic_dec_and_test(&s->refcount)) {
|
|
list_del(&s->list);
|
|
up_write(&slub_lock);
|
|
if (kmem_cache_close(s)) {
|
|
@@ -3209,6 +3210,10 @@ static struct kmem_cache *kmem_cache;
|
|
static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
|
|
#endif
|
|
|
|
+#ifdef CONFIG_PAX_USERCOPY_SLABS
|
|
+static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
|
|
+#endif
|
|
+
|
|
static int __init setup_slub_min_order(char *str)
|
|
{
|
|
get_option(&str, &slub_min_order);
|
|
@@ -3323,6 +3328,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
|
|
return kmalloc_dma_caches[index];
|
|
|
|
#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_USERCOPY_SLABS
|
|
+ if (flags & SLAB_USERCOPY)
|
|
+ return kmalloc_usercopy_caches[index];
|
|
+
|
|
+#endif
|
|
+
|
|
return kmalloc_caches[index];
|
|
}
|
|
|
|
@@ -3391,6 +3403,56 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
|
|
EXPORT_SYMBOL(__kmalloc_node);
|
|
#endif
|
|
|
|
+bool is_usercopy_object(const void *ptr)
|
|
+{
|
|
+ struct page *page;
|
|
+ struct kmem_cache *s;
|
|
+
|
|
+ if (ZERO_OR_NULL_PTR(ptr))
|
|
+ return false;
|
|
+
|
|
+ if (!virt_addr_valid(ptr))
|
|
+ return false;
|
|
+
|
|
+ page = virt_to_head_page(ptr);
|
|
+
|
|
+ if (!PageSlab(page))
|
|
+ return false;
|
|
+
|
|
+ s = page->slab;
|
|
+ return s->flags & SLAB_USERCOPY;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_PAX_USERCOPY
|
|
+const char *check_heap_object(const void *ptr, unsigned long n, bool to)
|
|
+{
|
|
+ struct page *page;
|
|
+ struct kmem_cache *s;
|
|
+ unsigned long offset;
|
|
+
|
|
+ if (ZERO_OR_NULL_PTR(ptr))
|
|
+ return "<null>";
|
|
+
|
|
+ if (!virt_addr_valid(ptr))
|
|
+ return NULL;
|
|
+
|
|
+ page = virt_to_head_page(ptr);
|
|
+
|
|
+ if (!PageSlab(page))
|
|
+ return NULL;
|
|
+
|
|
+ s = page->slab;
|
|
+ if (!(s->flags & SLAB_USERCOPY))
|
|
+ return s->name;
|
|
+
|
|
+ offset = (ptr - page_address(page)) % s->size;
|
|
+ if (offset <= s->objsize && n <= s->objsize - offset)
|
|
+ return NULL;
|
|
+
|
|
+ return s->name;
|
|
+}
|
|
+#endif
|
|
+
|
|
size_t ksize(const void *object)
|
|
{
|
|
struct page *page;
|
|
@@ -3665,7 +3727,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
|
|
int node;
|
|
|
|
list_add(&s->list, &slab_caches);
|
|
- s->refcount = -1;
|
|
+ atomic_set(&s->refcount, -1);
|
|
|
|
for_each_node_state(node, N_NORMAL_MEMORY) {
|
|
struct kmem_cache_node *n = get_node(s, node);
|
|
@@ -3785,17 +3847,17 @@ void __init kmem_cache_init(void)
|
|
|
|
/* Caches that are not of the two-to-the-power-of size */
|
|
if (KMALLOC_MIN_SIZE <= 32) {
|
|
- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
|
|
+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
|
|
caches++;
|
|
}
|
|
|
|
if (KMALLOC_MIN_SIZE <= 64) {
|
|
- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
|
|
+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
|
|
caches++;
|
|
}
|
|
|
|
for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
|
|
- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
|
|
+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
|
|
caches++;
|
|
}
|
|
|
|
@@ -3837,6 +3899,22 @@ void __init kmem_cache_init(void)
|
|
}
|
|
}
|
|
#endif
|
|
+
|
|
+#ifdef CONFIG_PAX_USERCOPY_SLABS
|
|
+ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
|
|
+ struct kmem_cache *s = kmalloc_caches[i];
|
|
+
|
|
+ if (s && s->size) {
|
|
+ char *name = kasprintf(GFP_NOWAIT,
|
|
+ "usercopy-kmalloc-%d", s->objsize);
|
|
+
|
|
+ BUG_ON(!name);
|
|
+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
|
|
+ s->objsize, SLAB_USERCOPY);
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
printk(KERN_INFO
|
|
"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
|
|
" CPUs=%d, Nodes=%d\n",
|
|
@@ -3863,7 +3941,7 @@ static int slab_unmergeable(struct kmem_cache *s)
|
|
/*
|
|
* We may have set a slab to be unmergeable during bootstrap.
|
|
*/
|
|
- if (s->refcount < 0)
|
|
+ if (atomic_read(&s->refcount) < 0)
|
|
return 1;
|
|
|
|
return 0;
|
|
@@ -3922,7 +4000,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
|
|
down_write(&slub_lock);
|
|
s = find_mergeable(size, align, flags, name, ctor);
|
|
if (s) {
|
|
- s->refcount++;
|
|
+ atomic_inc(&s->refcount);
|
|
/*
|
|
* Adjust the object sizes so that we clear
|
|
* the complete object on kzalloc.
|
|
@@ -3931,7 +4009,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
|
|
s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
|
|
|
|
if (sysfs_slab_alias(s, name)) {
|
|
- s->refcount--;
|
|
+ atomic_dec(&s->refcount);
|
|
goto err;
|
|
}
|
|
up_write(&slub_lock);
|
|
@@ -4695,7 +4773,7 @@ SLAB_ATTR_RO(ctor);
|
|
|
|
static ssize_t aliases_show(struct kmem_cache *s, char *buf)
|
|
{
|
|
- return sprintf(buf, "%d\n", s->refcount - 1);
|
|
+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
|
|
}
|
|
SLAB_ATTR_RO(aliases);
|
|
|
|
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
|
|
index 1b7e22a..3fcd4f3 100644
|
|
--- a/mm/sparse-vmemmap.c
|
|
+++ b/mm/sparse-vmemmap.c
|
|
@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
|
|
void *p = vmemmap_alloc_block(PAGE_SIZE, node);
|
|
if (!p)
|
|
return NULL;
|
|
- pud_populate(&init_mm, pud, p);
|
|
+ pud_populate_kernel(&init_mm, pud, p);
|
|
}
|
|
return pud;
|
|
}
|
|
@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
|
|
void *p = vmemmap_alloc_block(PAGE_SIZE, node);
|
|
if (!p)
|
|
return NULL;
|
|
- pgd_populate(&init_mm, pgd, p);
|
|
+ pgd_populate_kernel(&init_mm, pgd, p);
|
|
}
|
|
return pgd;
|
|
}
|
|
diff --git a/mm/swap.c.rej b/mm/swap.c.rej
|
|
new file mode 100644
|
|
index 0000000..f011efb
|
|
--- /dev/null
|
|
+++ b/mm/swap.c.rej
|
|
@@ -0,0 +1,18 @@
|
|
+--- mm/swap.c 2012-05-21 11:33:40.171929979 +0200
|
|
++++ mm/swap.c 2012-05-21 12:10:12.044049025 +0200
|
|
+@@ -30,6 +30,7 @@
|
|
+ #include <linux/backing-dev.h>
|
|
+ #include <linux/memcontrol.h>
|
|
+ #include <linux/gfp.h>
|
|
++#include <linux/hugetlb.h>
|
|
+
|
|
+ #include "internal.h"
|
|
+
|
|
+@@ -70,6 +71,8 @@ static void __put_compound_page(struct p
|
|
+
|
|
+ __page_cache_release(page);
|
|
+ dtor = get_compound_page_dtor(page);
|
|
++ if (!PageHuge(page))
|
|
++ BUG_ON(dtor != free_compound_page);
|
|
+ (*dtor)(page);
|
|
+ }
|
|
diff --git a/mm/swapfile.c b/mm/swapfile.c
|
|
index 36ad6e8..64086df 100644
|
|
--- a/mm/swapfile.c
|
|
+++ b/mm/swapfile.c
|
|
@@ -64,7 +64,7 @@ static DEFINE_MUTEX(swapon_mutex);
|
|
|
|
static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
|
|
/* Activity counter to indicate that a swapon or swapoff has occurred */
|
|
-static atomic_t proc_poll_event = ATOMIC_INIT(0);
|
|
+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
|
|
|
|
static inline unsigned char swap_count(unsigned char ent)
|
|
{
|
|
@@ -1729,7 +1729,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
|
|
}
|
|
filp_close(swap_file, NULL);
|
|
err = 0;
|
|
- atomic_inc(&proc_poll_event);
|
|
+ atomic_inc_unchecked(&proc_poll_event);
|
|
wake_up_interruptible(&proc_poll_wait);
|
|
|
|
out_dput:
|
|
@@ -1745,8 +1745,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
|
|
|
|
poll_wait(file, &proc_poll_wait, wait);
|
|
|
|
- if (seq->poll_event != atomic_read(&proc_poll_event)) {
|
|
- seq->poll_event = atomic_read(&proc_poll_event);
|
|
+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
|
|
+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
|
|
return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
|
|
}
|
|
|
|
@@ -1844,7 +1844,7 @@ static int swaps_open(struct inode *inode, struct file *file)
|
|
return ret;
|
|
|
|
seq = file->private_data;
|
|
- seq->poll_event = atomic_read(&proc_poll_event);
|
|
+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
|
|
return 0;
|
|
}
|
|
|
|
@@ -2182,7 +2182,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
|
|
(p->flags & SWP_DISCARDABLE) ? "D" : "");
|
|
|
|
mutex_unlock(&swapon_mutex);
|
|
- atomic_inc(&proc_poll_event);
|
|
+ atomic_inc_unchecked(&proc_poll_event);
|
|
wake_up_interruptible(&proc_poll_wait);
|
|
|
|
if (S_ISREG(inode->i_mode))
|
|
diff --git a/mm/util.c b/mm/util.c
|
|
index 6d4bc4e..8ee202f 100644
|
|
--- a/mm/util.c
|
|
+++ b/mm/util.c
|
|
@@ -288,6 +288,12 @@ pid_t vm_is_stack(struct task_struct *task,
|
|
void arch_pick_mmap_layout(struct mm_struct *mm)
|
|
{
|
|
mm->mmap_base = TASK_UNMAPPED_BASE;
|
|
+
|
|
+#ifdef CONFIG_PAX_RANDMMAP
|
|
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
|
|
+ mm->mmap_base += mm->delta_mmap;
|
|
+#endif
|
|
+
|
|
mm->get_unmapped_area = arch_get_unmapped_area;
|
|
mm->unmap_area = arch_unmap_area;
|
|
}
|
|
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
|
|
index c260b5e..03b7690 100644
|
|
--- a/mm/vmalloc.c
|
|
+++ b/mm/vmalloc.c
|
|
@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
|
|
|
|
pte = pte_offset_kernel(pmd, addr);
|
|
do {
|
|
- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
|
|
- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
|
|
+
|
|
+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
|
|
+ BUG_ON(!pte_exec(*pte));
|
|
+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
|
|
+ continue;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ {
|
|
+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
|
|
+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
|
|
+ }
|
|
} while (pte++, addr += PAGE_SIZE, addr != end);
|
|
}
|
|
|
|
@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
|
|
unsigned long end, pgprot_t prot, struct page **pages, int *nr)
|
|
{
|
|
pte_t *pte;
|
|
+ int ret = -ENOMEM;
|
|
|
|
/*
|
|
* nr is a running index into the array which helps higher level
|
|
@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
|
|
pte = pte_alloc_kernel(pmd, addr);
|
|
if (!pte)
|
|
return -ENOMEM;
|
|
+
|
|
+ pax_open_kernel();
|
|
do {
|
|
struct page *page = pages[*nr];
|
|
|
|
- if (WARN_ON(!pte_none(*pte)))
|
|
- return -EBUSY;
|
|
- if (WARN_ON(!page))
|
|
- return -ENOMEM;
|
|
+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ if (pgprot_val(prot) & _PAGE_NX)
|
|
+#endif
|
|
+
|
|
+ if (WARN_ON(!pte_none(*pte))) {
|
|
+ ret = -EBUSY;
|
|
+ goto out;
|
|
+ }
|
|
+ if (WARN_ON(!page)) {
|
|
+ ret = -ENOMEM;
|
|
+ goto out;
|
|
+ }
|
|
set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
|
|
(*nr)++;
|
|
} while (pte++, addr += PAGE_SIZE, addr != end);
|
|
- return 0;
|
|
+ ret = 0;
|
|
+out:
|
|
+ pax_close_kernel();
|
|
+ return ret;
|
|
}
|
|
|
|
static int vmap_pmd_range(pud_t *pud, unsigned long addr,
|
|
@@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
|
|
pmd_t *pmd;
|
|
unsigned long next;
|
|
|
|
- pmd = pmd_alloc(&init_mm, pud, addr);
|
|
+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
|
|
if (!pmd)
|
|
return -ENOMEM;
|
|
do {
|
|
@@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
|
|
pud_t *pud;
|
|
unsigned long next;
|
|
|
|
- pud = pud_alloc(&init_mm, pgd, addr);
|
|
+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
|
|
if (!pud)
|
|
return -ENOMEM;
|
|
do {
|
|
@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
|
|
* and fall back on vmalloc() if that fails. Others
|
|
* just put it in the vmalloc space.
|
|
*/
|
|
-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
|
|
+#ifdef CONFIG_MODULES
|
|
+#ifdef MODULES_VADDR
|
|
unsigned long addr = (unsigned long)x;
|
|
if (addr >= MODULES_VADDR && addr < MODULES_END)
|
|
return 1;
|
|
#endif
|
|
+
|
|
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
|
|
+ return 1;
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+
|
|
return is_vmalloc_addr(x);
|
|
}
|
|
|
|
@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
|
|
|
|
if (!pgd_none(*pgd)) {
|
|
pud_t *pud = pud_offset(pgd, addr);
|
|
+#ifdef CONFIG_X86
|
|
+ if (!pud_large(*pud))
|
|
+#endif
|
|
if (!pud_none(*pud)) {
|
|
pmd_t *pmd = pmd_offset(pud, addr);
|
|
+#ifdef CONFIG_X86
|
|
+ if (!pmd_large(*pmd))
|
|
+#endif
|
|
if (!pmd_none(*pmd)) {
|
|
pte_t *ptep, pte;
|
|
|
|
@@ -370,7 +410,7 @@ static void purge_vmap_area_lazy(void);
|
|
* Allocate a region of KVA of the specified size and alignment, within the
|
|
* vstart and vend.
|
|
*/
|
|
-static struct vmap_area *alloc_vmap_area(unsigned long size,
|
|
+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
|
|
unsigned long align,
|
|
unsigned long vstart, unsigned long vend,
|
|
int node, gfp_t gfp_mask)
|
|
@@ -1391,6 +1431,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
|
|
struct vm_struct *area;
|
|
|
|
BUG_ON(in_interrupt());
|
|
+
|
|
+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ if (flags & VM_KERNEXEC) {
|
|
+ if (start != VMALLOC_START || end != VMALLOC_END)
|
|
+ return NULL;
|
|
+ start = (unsigned long)MODULES_EXEC_VADDR;
|
|
+ end = (unsigned long)MODULES_EXEC_END;
|
|
+ }
|
|
+#endif
|
|
+
|
|
if (flags & VM_IOREMAP) {
|
|
int bit = fls(size);
|
|
|
|
@@ -1642,6 +1692,11 @@ void *vmap(struct page **pages, unsigned int count,
|
|
if (count > totalram_pages)
|
|
return NULL;
|
|
|
|
+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ if (!(pgprot_val(prot) & _PAGE_NX))
|
|
+ flags |= VM_KERNEXEC;
|
|
+#endif
|
|
+
|
|
area = get_vm_area_caller((count << PAGE_SHIFT), flags,
|
|
__builtin_return_address(0));
|
|
if (!area)
|
|
@@ -1748,6 +1803,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
|
|
if (!size || (size >> PAGE_SHIFT) > total_pages)
|
|
goto fail;
|
|
|
|
+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
|
|
+ if (!(pgprot_val(prot) & _PAGE_NX))
|
|
+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
|
|
+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
|
|
+ else
|
|
+#endif
|
|
+
|
|
area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
|
|
start, end, node, gfp_mask, caller);
|
|
if (!area)
|
|
@@ -1921,10 +1983,9 @@ EXPORT_SYMBOL(vzalloc_node);
|
|
* For tight control over page level allocator and protection flags
|
|
* use __vmalloc() instead.
|
|
*/
|
|
-
|
|
void *vmalloc_exec(unsigned long size)
|
|
{
|
|
- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
|
|
+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
|
|
-1, __builtin_return_address(0));
|
|
}
|
|
|
|
@@ -2219,6 +2280,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
|
|
unsigned long uaddr = vma->vm_start;
|
|
unsigned long usize = vma->vm_end - vma->vm_start;
|
|
|
|
+ BUG_ON(vma->vm_mirror);
|
|
+
|
|
if ((PAGE_SIZE-1) & (unsigned long)addr)
|
|
return -EINVAL;
|
|
|
|
diff --git a/mm/vmstat.c b/mm/vmstat.c
|
|
index a38b0cb..7f94b4c 100644
|
|
--- a/mm/vmstat.c
|
|
+++ b/mm/vmstat.c
|
|
@@ -81,7 +81,7 @@ void vm_events_fold_cpu(int cpu)
|
|
*
|
|
* vm_stat contains the global counters
|
|
*/
|
|
-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
|
|
+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
|
|
EXPORT_SYMBOL(vm_stat);
|
|
|
|
#ifdef CONFIG_SMP
|
|
@@ -457,7 +457,7 @@ void refresh_cpu_vm_stats(int cpu)
|
|
v = p->vm_stat_diff[i];
|
|
p->vm_stat_diff[i] = 0;
|
|
local_irq_restore(flags);
|
|
- atomic_long_add(v, &zone->vm_stat[i]);
|
|
+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
|
|
global_diff[i] += v;
|
|
#ifdef CONFIG_NUMA
|
|
/* 3 seconds idle till flush */
|
|
@@ -495,7 +495,7 @@ void refresh_cpu_vm_stats(int cpu)
|
|
|
|
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
|
|
if (global_diff[i])
|
|
- atomic_long_add(global_diff[i], &vm_stat[i]);
|
|
+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
|
|
}
|
|
|
|
#endif
|
|
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
|
|
index e22b8ad..e3ca38d 100644
|
|
--- a/net/8021q/vlan.c
|
|
+++ b/net/8021q/vlan.c
|
|
@@ -557,8 +557,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
|
|
err = -EPERM;
|
|
if (!capable(CAP_NET_ADMIN))
|
|
break;
|
|
- if ((args.u.name_type >= 0) &&
|
|
- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
|
|
+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
|
|
struct vlan_net *vn;
|
|
|
|
vn = net_generic(net, vlan_net_id);
|
|
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
|
|
index fccae26..e7ece2f 100644
|
|
--- a/net/9p/trans_fd.c
|
|
+++ b/net/9p/trans_fd.c
|
|
@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
|
|
oldfs = get_fs();
|
|
set_fs(get_ds());
|
|
/* The cast to a user pointer is valid due to the set_fs() */
|
|
- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
|
|
+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
|
|
set_fs(oldfs);
|
|
|
|
if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
|
|
diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
|
|
index 876fbe8..8bbea9f 100644
|
|
--- a/net/atm/atm_misc.c
|
|
+++ b/net/atm/atm_misc.c
|
|
@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
|
|
if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
|
|
return 1;
|
|
atm_return(vcc, truesize);
|
|
- atomic_inc(&vcc->stats->rx_drop);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
|
|
return 0;
|
|
}
|
|
EXPORT_SYMBOL(atm_charge);
|
|
@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
|
|
}
|
|
}
|
|
atm_return(vcc, guess);
|
|
- atomic_inc(&vcc->stats->rx_drop);
|
|
+ atomic_inc_unchecked(&vcc->stats->rx_drop);
|
|
return NULL;
|
|
}
|
|
EXPORT_SYMBOL(atm_alloc_charge);
|
|
@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
|
|
|
|
void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
|
|
{
|
|
-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
|
|
+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
|
|
__SONET_ITEMS
|
|
#undef __HANDLE_ITEM
|
|
}
|
|
@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
|
|
|
|
void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
|
|
{
|
|
-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
|
|
+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
|
|
__SONET_ITEMS
|
|
#undef __HANDLE_ITEM
|
|
}
|
|
diff --git a/net/atm/lec.h b/net/atm/lec.h
|
|
index dfc0719..47c5322 100644
|
|
--- a/net/atm/lec.h
|
|
+++ b/net/atm/lec.h
|
|
@@ -48,7 +48,7 @@ struct lane2_ops {
|
|
const u8 *tlvs, u32 sizeoftlvs);
|
|
void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
|
|
const u8 *tlvs, u32 sizeoftlvs);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/*
|
|
* ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
|
|
diff --git a/net/atm/mpc.h b/net/atm/mpc.h
|
|
index 0919a88..a23d54e 100644
|
|
--- a/net/atm/mpc.h
|
|
+++ b/net/atm/mpc.h
|
|
@@ -33,7 +33,7 @@ struct mpoa_client {
|
|
struct mpc_parameters parameters; /* parameters for this client */
|
|
|
|
const struct net_device_ops *old_ops;
|
|
- struct net_device_ops new_ops;
|
|
+ net_device_ops_no_const new_ops;
|
|
};
|
|
|
|
|
|
diff --git a/net/atm/proc.c b/net/atm/proc.c
|
|
index 0d020de..011c7bb 100644
|
|
--- a/net/atm/proc.c
|
|
+++ b/net/atm/proc.c
|
|
@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
|
|
const struct k_atm_aal_stats *stats)
|
|
{
|
|
seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
|
|
- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
|
|
- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
|
|
- atomic_read(&stats->rx_drop));
|
|
+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
|
|
+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
|
|
+ atomic_read_unchecked(&stats->rx_drop));
|
|
}
|
|
|
|
static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
|
|
diff --git a/net/atm/resources.c b/net/atm/resources.c
|
|
index 23f45ce..c748f1a 100644
|
|
--- a/net/atm/resources.c
|
|
+++ b/net/atm/resources.c
|
|
@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
|
|
static void copy_aal_stats(struct k_atm_aal_stats *from,
|
|
struct atm_aal_stats *to)
|
|
{
|
|
-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
|
|
+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
|
|
__AAL_STAT_ITEMS
|
|
#undef __HANDLE_ITEM
|
|
}
|
|
@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
|
|
static void subtract_aal_stats(struct k_atm_aal_stats *from,
|
|
struct atm_aal_stats *to)
|
|
{
|
|
-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
|
|
+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
|
|
__AAL_STAT_ITEMS
|
|
#undef __HANDLE_ITEM
|
|
}
|
|
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
|
|
index fa701b6..8b9762a 100644
|
|
--- a/net/batman-adv/bat_iv_ogm.c
|
|
+++ b/net/batman-adv/bat_iv_ogm.c
|
|
@@ -539,7 +539,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
|
|
|
|
/* change sequence number to network order */
|
|
batman_ogm_packet->seqno =
|
|
- htonl((uint32_t)atomic_read(&hard_iface->seqno));
|
|
+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
|
|
|
|
batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
|
|
batman_ogm_packet->tt_crc = htons((uint16_t)
|
|
@@ -559,7 +559,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
|
|
else
|
|
batman_ogm_packet->gw_flags = NO_FLAGS;
|
|
|
|
- atomic_inc(&hard_iface->seqno);
|
|
+ atomic_inc_unchecked(&hard_iface->seqno);
|
|
|
|
slide_own_bcast_window(hard_iface);
|
|
bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
|
|
@@ -917,7 +917,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
|
|
return;
|
|
|
|
/* could be changed by schedule_own_packet() */
|
|
- if_incoming_seqno = atomic_read(&if_incoming->seqno);
|
|
+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
|
|
|
|
has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
|
|
|
|
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
|
|
index 3778977..f6a9450 100644
|
|
--- a/net/batman-adv/hard-interface.c
|
|
+++ b/net/batman-adv/hard-interface.c
|
|
@@ -328,8 +328,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
|
|
hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
|
|
dev_add_pack(&hard_iface->batman_adv_ptype);
|
|
|
|
- atomic_set(&hard_iface->seqno, 1);
|
|
- atomic_set(&hard_iface->frag_seqno, 1);
|
|
+ atomic_set_unchecked(&hard_iface->seqno, 1);
|
|
+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
|
|
bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
|
|
hard_iface->net_dev->name);
|
|
|
|
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
|
|
index a5590f4..8d31969 100644
|
|
--- a/net/batman-adv/soft-interface.c
|
|
+++ b/net/batman-adv/soft-interface.c
|
|
@@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
|
|
|
|
/* set broadcast sequence number */
|
|
bcast_packet->seqno =
|
|
- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
|
|
+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
|
|
|
|
add_bcast_packet_to_list(bat_priv, skb, 1);
|
|
|
|
@@ -841,7 +841,7 @@ struct net_device *softif_create(const char *name)
|
|
atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
|
|
|
|
atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
|
|
- atomic_set(&bat_priv->bcast_seqno, 1);
|
|
+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
|
|
atomic_set(&bat_priv->ttvn, 0);
|
|
atomic_set(&bat_priv->tt_local_changes, 0);
|
|
atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
|
|
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
|
|
index 302efb5..1590365 100644
|
|
--- a/net/batman-adv/types.h
|
|
+++ b/net/batman-adv/types.h
|
|
@@ -38,8 +38,8 @@ struct hard_iface {
|
|
int16_t if_num;
|
|
char if_status;
|
|
struct net_device *net_dev;
|
|
- atomic_t seqno;
|
|
- atomic_t frag_seqno;
|
|
+ atomic_unchecked_t seqno;
|
|
+ atomic_unchecked_t frag_seqno;
|
|
unsigned char *packet_buff;
|
|
int packet_len;
|
|
struct kobject *hardif_obj;
|
|
@@ -155,7 +155,7 @@ struct bat_priv {
|
|
atomic_t orig_interval; /* uint */
|
|
atomic_t hop_penalty; /* uint */
|
|
atomic_t log_level; /* uint */
|
|
- atomic_t bcast_seqno;
|
|
+ atomic_unchecked_t bcast_seqno;
|
|
atomic_t bcast_queue_left;
|
|
atomic_t batman_queue_left;
|
|
atomic_t ttvn; /* translation table version number */
|
|
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
|
|
index 676f6a6..3b4e668 100644
|
|
--- a/net/batman-adv/unicast.c
|
|
+++ b/net/batman-adv/unicast.c
|
|
@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
|
|
frag1->flags = UNI_FRAG_HEAD | large_tail;
|
|
frag2->flags = large_tail;
|
|
|
|
- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
|
|
+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
|
|
frag1->seqno = htons(seqno - 1);
|
|
frag2->seqno = htons(seqno);
|
|
|
|
diff --git a/net/bluetooth/hci_conn.c.rej b/net/bluetooth/hci_conn.c.rej
|
|
new file mode 100644
|
|
index 0000000..bd8d5ef
|
|
--- /dev/null
|
|
+++ b/net/bluetooth/hci_conn.c.rej
|
|
@@ -0,0 +1,10 @@
|
|
+diff a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c (rejected hunks)
|
|
+@@ -404,7 +404,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
|
|
+ memset(&cp, 0, sizeof(cp));
|
|
+
|
|
+ cp.handle = cpu_to_le16(conn->handle);
|
|
+- memcpy(cp.ltk, ltk, sizeof(ltk));
|
|
++ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
|
|
+
|
|
+ hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
|
|
+ }
|
|
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
|
|
index fd9088a..d688d26 100644
|
|
--- a/net/bluetooth/l2cap_core.c
|
|
+++ b/net/bluetooth/l2cap_core.c
|
|
@@ -3854,8 +3854,10 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
|
|
break;
|
|
|
|
case L2CAP_CONF_RFC:
|
|
- if (olen == sizeof(rfc))
|
|
- memcpy(&rfc, (void *)val, olen);
|
|
+ if (olen != sizeof(rfc))
|
|
+ break;
|
|
+
|
|
+ memcpy(&rfc, (void *)val, olen);
|
|
|
|
if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
|
|
rfc.mode != pi->mode)
|
|
diff --git a/net/bluetooth/l2cap_core.c.rej b/net/bluetooth/l2cap_core.c.rej
|
|
new file mode 100644
|
|
index 0000000..2ab9548
|
|
--- /dev/null
|
|
+++ b/net/bluetooth/l2cap_core.c.rej
|
|
@@ -0,0 +1,15 @@
|
|
+--- net/bluetooth/l2cap_core.c 2012-05-21 11:33:40.879930017 +0200
|
|
++++ net/bluetooth/l2cap_core.c 2012-05-21 12:10:12.092049029 +0200
|
|
+@@ -2587,8 +2589,10 @@ static void l2cap_conf_rfc_get(struct l2
|
|
+
|
|
+ switch (type) {
|
|
+ case L2CAP_CONF_RFC:
|
|
+- if (olen == sizeof(rfc))
|
|
+- memcpy(&rfc, (void *)val, olen);
|
|
++ if (olen != sizeof(rfc))
|
|
++ break;
|
|
++
|
|
++ memcpy(&rfc, (void *)val, olen);
|
|
+ goto done;
|
|
+ }
|
|
+ }
|
|
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
|
|
index f381fa1..234728a 100644
|
|
--- a/net/bridge/netfilter/ebtables.c
|
|
+++ b/net/bridge/netfilter/ebtables.c
|
|
@@ -1522,7 +1522,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
|
|
tmp.valid_hooks = t->table->valid_hooks;
|
|
}
|
|
mutex_unlock(&ebt_mutex);
|
|
- if (copy_to_user(user, &tmp, *len) != 0){
|
|
+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
|
|
BUGPRINT("c2u Didn't work\n");
|
|
ret = -EFAULT;
|
|
break;
|
|
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
|
|
index 84efbe4..51d47bc 100644
|
|
--- a/net/caif/cfctrl.c
|
|
+++ b/net/caif/cfctrl.c
|
|
@@ -9,6 +9,7 @@
|
|
#include <linux/stddef.h>
|
|
#include <linux/spinlock.h>
|
|
#include <linux/slab.h>
|
|
+#include <linux/sched.h>
|
|
#include <net/caif/caif_layer.h>
|
|
#include <net/caif/cfpkt.h>
|
|
#include <net/caif/cfctrl.h>
|
|
@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
|
|
memset(&dev_info, 0, sizeof(dev_info));
|
|
dev_info.id = 0xff;
|
|
cfsrvl_init(&this->serv, 0, &dev_info, false);
|
|
- atomic_set(&this->req_seq_no, 1);
|
|
- atomic_set(&this->rsp_seq_no, 1);
|
|
+ atomic_set_unchecked(&this->req_seq_no, 1);
|
|
+ atomic_set_unchecked(&this->rsp_seq_no, 1);
|
|
this->serv.layer.receive = cfctrl_recv;
|
|
sprintf(this->serv.layer.name, "ctrl");
|
|
this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
|
|
@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
|
|
struct cfctrl_request_info *req)
|
|
{
|
|
spin_lock_bh(&ctrl->info_list_lock);
|
|
- atomic_inc(&ctrl->req_seq_no);
|
|
- req->sequence_no = atomic_read(&ctrl->req_seq_no);
|
|
+ atomic_inc_unchecked(&ctrl->req_seq_no);
|
|
+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
|
|
list_add_tail(&req->list, &ctrl->list);
|
|
spin_unlock_bh(&ctrl->info_list_lock);
|
|
}
|
|
@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
|
|
if (p != first)
|
|
pr_warn("Requests are not received in order\n");
|
|
|
|
- atomic_set(&ctrl->rsp_seq_no,
|
|
+ atomic_set_unchecked(&ctrl->rsp_seq_no,
|
|
p->sequence_no);
|
|
list_del(&p->list);
|
|
goto out;
|
|
diff --git a/net/can/gw.c b/net/can/gw.c
|
|
index f78f898..00f4a02 100644
|
|
--- a/net/can/gw.c
|
|
+++ b/net/can/gw.c
|
|
@@ -96,7 +96,7 @@ struct cf_mod {
|
|
struct {
|
|
void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
|
|
void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
|
|
- } csumfunc;
|
|
+ } __no_const csumfunc;
|
|
};
|
|
|
|
|
|
diff --git a/net/compat.c b/net/compat.c
|
|
index 618ad24..e7353fc 100644
|
|
--- a/net/compat.c
|
|
+++ b/net/compat.c
|
|
@@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
|
|
|
|
if (kern_msg->msg_name && kern_msg->msg_namelen) {
|
|
if (mode == VERIFY_READ) {
|
|
- int err = move_addr_to_kernel(kern_msg->msg_name,
|
|
+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
|
|
kern_msg->msg_namelen,
|
|
kern_address);
|
|
if (err < 0)
|
|
@@ -107,7 +107,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
|
|
}
|
|
|
|
tot_len = iov_from_user_compat_to_kern(kern_iov,
|
|
- (struct compat_iovec __user *)kern_msg->msg_iov,
|
|
+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
|
|
kern_msg->msg_iovlen);
|
|
if (tot_len >= 0)
|
|
kern_msg->msg_iov = kern_iov;
|
|
@@ -127,20 +127,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
|
|
|
|
#define CMSG_COMPAT_FIRSTHDR(msg) \
|
|
(((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
|
|
- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
|
|
+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
|
|
(struct compat_cmsghdr __user *)NULL)
|
|
|
|
#define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
|
|
((ucmlen) >= sizeof(struct compat_cmsghdr) && \
|
|
(ucmlen) <= (unsigned long) \
|
|
((mhdr)->msg_controllen - \
|
|
- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
|
|
+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
|
|
|
|
static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
|
|
struct compat_cmsghdr __user *cmsg, int cmsg_len)
|
|
{
|
|
char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
|
|
- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
|
|
+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
|
|
msg->msg_controllen)
|
|
return NULL;
|
|
return (struct compat_cmsghdr __user *)ptr;
|
|
@@ -230,7 +230,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
|
|
|
|
int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
|
|
{
|
|
- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
|
|
+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
|
|
struct compat_cmsghdr cmhdr;
|
|
struct compat_timeval ctv;
|
|
struct compat_timespec cts[3];
|
|
@@ -286,7 +286,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
|
|
|
|
void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
|
|
{
|
|
- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
|
|
+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
|
|
int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
|
|
int fdnum = scm->fp->count;
|
|
struct file **fp = scm->fp->fp;
|
|
@@ -375,7 +375,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
|
|
return -EFAULT;
|
|
old_fs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
|
|
+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
|
|
set_fs(old_fs);
|
|
|
|
return err;
|
|
@@ -436,7 +436,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
|
|
len = sizeof(ktime);
|
|
old_fs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
|
|
+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
|
|
set_fs(old_fs);
|
|
|
|
if (!err) {
|
|
@@ -579,7 +579,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
|
|
case MCAST_JOIN_GROUP:
|
|
case MCAST_LEAVE_GROUP:
|
|
{
|
|
- struct compat_group_req __user *gr32 = (void *)optval;
|
|
+ struct compat_group_req __user *gr32 = (void __user *)optval;
|
|
struct group_req __user *kgr =
|
|
compat_alloc_user_space(sizeof(struct group_req));
|
|
u32 interface;
|
|
@@ -600,7 +600,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
|
|
case MCAST_BLOCK_SOURCE:
|
|
case MCAST_UNBLOCK_SOURCE:
|
|
{
|
|
- struct compat_group_source_req __user *gsr32 = (void *)optval;
|
|
+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
|
|
struct group_source_req __user *kgsr = compat_alloc_user_space(
|
|
sizeof(struct group_source_req));
|
|
u32 interface;
|
|
@@ -621,7 +621,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
|
|
}
|
|
case MCAST_MSFILTER:
|
|
{
|
|
- struct compat_group_filter __user *gf32 = (void *)optval;
|
|
+ struct compat_group_filter __user *gf32 = (void __user *)optval;
|
|
struct group_filter __user *kgf;
|
|
u32 interface, fmode, numsrc;
|
|
|
|
@@ -659,7 +659,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
|
|
char __user *optval, int __user *optlen,
|
|
int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
|
|
{
|
|
- struct compat_group_filter __user *gf32 = (void *)optval;
|
|
+ struct compat_group_filter __user *gf32 = (void __user *)optval;
|
|
struct group_filter __user *kgf;
|
|
int __user *koptlen;
|
|
u32 interface, fmode, numsrc;
|
|
diff --git a/net/compat.c.rej b/net/compat.c.rej
|
|
new file mode 100644
|
|
index 0000000..79fee55
|
|
--- /dev/null
|
|
+++ b/net/compat.c.rej
|
|
@@ -0,0 +1,14 @@
|
|
+--- net/compat.c 2012-08-09 20:18:52.173847490 +0200
|
|
++++ net/compat.c 2012-08-09 20:19:41.993844830 +0200
|
|
+@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kms
|
|
+ __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
|
|
+ __get_user(kmsg->msg_flags, &umsg->msg_flags))
|
|
+ return -EFAULT;
|
|
+- kmsg->msg_name = compat_ptr(tmp1);
|
|
+- kmsg->msg_iov = compat_ptr(tmp2);
|
|
+- kmsg->msg_control = compat_ptr(tmp3);
|
|
++ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
|
|
++ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
|
|
++ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
|
|
+ return 0;
|
|
+ }
|
|
diff --git a/net/core/datagram.c b/net/core/datagram.c
|
|
index da7e0c8..07ccc3e 100644
|
|
--- a/net/core/datagram.c
|
|
+++ b/net/core/datagram.c
|
|
@@ -290,7 +290,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
|
|
}
|
|
|
|
kfree_skb(skb);
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
sk_mem_reclaim_partial(sk);
|
|
|
|
return err;
|
|
diff --git a/net/core/dev.c b/net/core/dev.c
|
|
index a1efb0c..85ddebd 100644
|
|
--- a/net/core/dev.c
|
|
+++ b/net/core/dev.c
|
|
@@ -1606,7 +1606,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
|
|
{
|
|
if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
|
|
if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
|
|
- atomic_long_inc(&dev->rx_dropped);
|
|
+ atomic_long_inc_unchecked(&dev->rx_dropped);
|
|
kfree_skb(skb);
|
|
return NET_RX_DROP;
|
|
}
|
|
@@ -1616,7 +1616,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
|
|
nf_reset(skb);
|
|
|
|
if (unlikely(!is_skb_forwardable(dev, skb))) {
|
|
- atomic_long_inc(&dev->rx_dropped);
|
|
+ atomic_long_inc_unchecked(&dev->rx_dropped);
|
|
kfree_skb(skb);
|
|
return NET_RX_DROP;
|
|
}
|
|
@@ -2062,7 +2062,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
|
|
|
|
struct dev_gso_cb {
|
|
void (*destructor)(struct sk_buff *skb);
|
|
-};
|
|
+} __no_const;
|
|
|
|
#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
|
|
|
|
@@ -2906,7 +2906,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
|
|
|
|
local_irq_restore(flags);
|
|
|
|
- atomic_long_inc(&skb->dev->rx_dropped);
|
|
+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
|
|
kfree_skb(skb);
|
|
return NET_RX_DROP;
|
|
}
|
|
@@ -2978,7 +2978,7 @@ int netif_rx_ni(struct sk_buff *skb)
|
|
}
|
|
EXPORT_SYMBOL(netif_rx_ni);
|
|
|
|
-static void net_tx_action(struct softirq_action *h)
|
|
+static void net_tx_action(void)
|
|
{
|
|
struct softnet_data *sd = &__get_cpu_var(softnet_data);
|
|
|
|
@@ -3276,7 +3276,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
|
|
if (pt_prev) {
|
|
ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
|
|
} else {
|
|
- atomic_long_inc(&skb->dev->rx_dropped);
|
|
+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
|
|
kfree_skb(skb);
|
|
/* Jamal, now you will not able to escape explaining
|
|
* me how you were going to use this. :-)
|
|
@@ -3837,7 +3837,7 @@ void netif_napi_del(struct napi_struct *napi)
|
|
}
|
|
EXPORT_SYMBOL(netif_napi_del);
|
|
|
|
-static void net_rx_action(struct softirq_action *h)
|
|
+static void net_rx_action(void)
|
|
{
|
|
struct softnet_data *sd = &__get_cpu_var(softnet_data);
|
|
unsigned long time_limit = jiffies + 2;
|
|
@@ -5873,7 +5873,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
|
|
} else {
|
|
netdev_stats_to_stats64(storage, &dev->stats);
|
|
}
|
|
- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
|
|
+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
|
|
return storage;
|
|
}
|
|
EXPORT_SYMBOL(dev_get_stats);
|
|
diff --git a/net/core/flow.c b/net/core/flow.c
|
|
index 9a517c6..e645cc5 100644
|
|
--- a/net/core/flow.c
|
|
+++ b/net/core/flow.c
|
|
@@ -61,7 +61,7 @@ struct flow_cache {
|
|
struct timer_list rnd_timer;
|
|
};
|
|
|
|
-atomic_t flow_cache_genid = ATOMIC_INIT(0);
|
|
+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
|
|
EXPORT_SYMBOL(flow_cache_genid);
|
|
static struct flow_cache flow_cache_global;
|
|
static struct kmem_cache *flow_cachep __read_mostly;
|
|
@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
|
|
|
|
static int flow_entry_valid(struct flow_cache_entry *fle)
|
|
{
|
|
- if (atomic_read(&flow_cache_genid) != fle->genid)
|
|
+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
|
|
return 0;
|
|
if (fle->object && !fle->object->ops->check(fle->object))
|
|
return 0;
|
|
@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
|
|
hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
|
|
fcp->hash_count++;
|
|
}
|
|
- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
|
|
+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
|
|
flo = fle->object;
|
|
if (!flo)
|
|
goto ret_object;
|
|
@@ -280,7 +280,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
|
|
}
|
|
flo = resolver(net, key, family, dir, flo, ctx);
|
|
if (fle) {
|
|
- fle->genid = atomic_read(&flow_cache_genid);
|
|
+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
|
|
if (!IS_ERR(flo))
|
|
fle->object = flo;
|
|
else
|
|
diff --git a/net/core/iovec.c b/net/core/iovec.c
|
|
index d1628b7..6a295e0 100644
|
|
--- a/net/core/iovec.c
|
|
+++ b/net/core/iovec.c
|
|
@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
|
|
if (m->msg_name && m->msg_namelen) {
|
|
if (mode == VERIFY_READ) {
|
|
void __user *namep;
|
|
- namep = (void __user __force *) m->msg_name;
|
|
+ namep = (void __force_user *) m->msg_name;
|
|
err = move_addr_to_kernel(namep, m->msg_namelen,
|
|
address);
|
|
if (err < 0)
|
|
@@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
|
|
}
|
|
|
|
size = m->msg_iovlen * sizeof(struct iovec);
|
|
- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
|
|
+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
|
|
return -EFAULT;
|
|
|
|
m->msg_iov = iov;
|
|
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
|
|
index 0a33133..e7aae7c 100644
|
|
--- a/net/core/rtnetlink.c
|
|
+++ b/net/core/rtnetlink.c
|
|
@@ -56,7 +56,7 @@ struct rtnl_link {
|
|
rtnl_doit_func doit;
|
|
rtnl_dumpit_func dumpit;
|
|
rtnl_calcit_func calcit;
|
|
-};
|
|
+} __no_const;
|
|
|
|
static DEFINE_MUTEX(rtnl_mutex);
|
|
|
|
diff --git a/net/core/scm.c b/net/core/scm.c
|
|
index 611c5ef..88f6d6d 100644
|
|
--- a/net/core/scm.c
|
|
+++ b/net/core/scm.c
|
|
@@ -219,7 +219,7 @@ EXPORT_SYMBOL(__scm_send);
|
|
int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
|
|
{
|
|
struct cmsghdr __user *cm
|
|
- = (__force struct cmsghdr __user *)msg->msg_control;
|
|
+ = (struct cmsghdr __force_user *)msg->msg_control;
|
|
struct cmsghdr cmhdr;
|
|
int cmlen = CMSG_LEN(len);
|
|
int err;
|
|
@@ -242,7 +242,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
|
|
err = -EFAULT;
|
|
if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
|
|
goto out;
|
|
- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
|
|
+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
|
|
goto out;
|
|
cmlen = CMSG_SPACE(len);
|
|
if (msg->msg_controllen < cmlen)
|
|
@@ -258,7 +258,7 @@ EXPORT_SYMBOL(put_cmsg);
|
|
void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
|
|
{
|
|
struct cmsghdr __user *cm
|
|
- = (__force struct cmsghdr __user*)msg->msg_control;
|
|
+ = (struct cmsghdr __force_user *)msg->msg_control;
|
|
|
|
int fdmax = 0;
|
|
int fdnum = scm->fp->count;
|
|
@@ -278,7 +278,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
|
|
if (fdnum < fdmax)
|
|
fdmax = fdnum;
|
|
|
|
- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
|
|
+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
|
|
i++, cmfptr++)
|
|
{
|
|
int new_fd;
|
|
diff --git a/net/core/sock.c b/net/core/sock.c
|
|
index 832cf04..b4f757d 100644
|
|
--- a/net/core/sock.c
|
|
+++ b/net/core/sock.c
|
|
@@ -340,7 +340,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
|
|
struct sk_buff_head *list = &sk->sk_receive_queue;
|
|
|
|
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
trace_sock_rcvqueue_full(sk, skb);
|
|
return -ENOMEM;
|
|
}
|
|
@@ -350,7 +350,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
|
|
return err;
|
|
|
|
if (!sk_rmem_schedule(sk, skb->truesize)) {
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
return -ENOBUFS;
|
|
}
|
|
|
|
@@ -370,7 +370,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
|
|
skb_dst_force(skb);
|
|
|
|
spin_lock_irqsave(&list->lock, flags);
|
|
- skb->dropcount = atomic_read(&sk->sk_drops);
|
|
+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
|
|
__skb_queue_tail(list, skb);
|
|
spin_unlock_irqrestore(&list->lock, flags);
|
|
|
|
@@ -390,7 +390,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
|
|
skb->dev = NULL;
|
|
|
|
if (sk_rcvqueues_full(sk, skb)) {
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
goto discard_and_relse;
|
|
}
|
|
if (nested)
|
|
@@ -408,7 +408,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
|
|
mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
|
|
} else if (sk_add_backlog(sk, skb)) {
|
|
bh_unlock_sock(sk);
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
goto discard_and_relse;
|
|
}
|
|
|
|
@@ -1004,7 +1004,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
|
|
return -ENOTCONN;
|
|
if (lv < len)
|
|
return -EINVAL;
|
|
- if (copy_to_user(optval, address, len))
|
|
+ if (len > sizeof(address) || copy_to_user(optval, address, len))
|
|
return -EFAULT;
|
|
goto lenout;
|
|
}
|
|
@@ -1050,7 +1050,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
|
|
|
|
if (len > lv)
|
|
len = lv;
|
|
- if (copy_to_user(optval, &v, len))
|
|
+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
|
|
return -EFAULT;
|
|
lenout:
|
|
if (put_user(len, optlen))
|
|
@@ -2127,7 +2127,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
|
|
*/
|
|
smp_wmb();
|
|
atomic_set(&sk->sk_refcnt, 1);
|
|
- atomic_set(&sk->sk_drops, 0);
|
|
+ atomic_set_unchecked(&sk->sk_drops, 0);
|
|
}
|
|
EXPORT_SYMBOL(sock_init_data);
|
|
|
|
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
|
|
index 02e75d1..9a57a7c 100644
|
|
--- a/net/decnet/sysctl_net_decnet.c
|
|
+++ b/net/decnet/sysctl_net_decnet.c
|
|
@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
|
|
|
|
if (len > *lenp) len = *lenp;
|
|
|
|
- if (copy_to_user(buffer, addr, len))
|
|
+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
|
|
return -EFAULT;
|
|
|
|
*lenp = len;
|
|
@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
|
|
|
|
if (len > *lenp) len = *lenp;
|
|
|
|
- if (copy_to_user(buffer, devname, len))
|
|
+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
|
|
return -EFAULT;
|
|
|
|
*lenp = len;
|
|
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
|
|
index 0a24199..311113f 100644
|
|
--- a/net/ipv4/fib_frontend.c
|
|
+++ b/net/ipv4/fib_frontend.c
|
|
@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
|
|
#ifdef CONFIG_IP_ROUTE_MULTIPATH
|
|
fib_sync_up(dev);
|
|
#endif
|
|
- atomic_inc(&net->ipv4.dev_addr_genid);
|
|
+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
|
|
rt_cache_flush(dev_net(dev), -1);
|
|
break;
|
|
case NETDEV_DOWN:
|
|
fib_del_ifaddr(ifa, NULL);
|
|
- atomic_inc(&net->ipv4.dev_addr_genid);
|
|
+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
|
|
if (ifa->ifa_dev->ifa_list == NULL) {
|
|
/* Last address was deleted from this interface.
|
|
* Disable IP.
|
|
@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
|
|
#ifdef CONFIG_IP_ROUTE_MULTIPATH
|
|
fib_sync_up(dev);
|
|
#endif
|
|
- atomic_inc(&net->ipv4.dev_addr_genid);
|
|
+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
|
|
rt_cache_flush(dev_net(dev), -1);
|
|
break;
|
|
case NETDEV_DOWN:
|
|
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
|
|
index 8d244ea..4a36694 100644
|
|
--- a/net/ipv4/fib_semantics.c
|
|
+++ b/net/ipv4/fib_semantics.c
|
|
@@ -698,7 +698,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
|
|
nh->nh_saddr = inet_select_addr(nh->nh_dev,
|
|
nh->nh_gw,
|
|
nh->nh_parent->fib_scope);
|
|
- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
|
|
+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
|
|
|
|
return nh->nh_saddr;
|
|
}
|
|
diff --git a/net/ipv4/inetpeer.c.rej b/net/ipv4/inetpeer.c.rej
|
|
new file mode 100644
|
|
index 0000000..d0bed373
|
|
--- /dev/null
|
|
+++ b/net/ipv4/inetpeer.c.rej
|
|
@@ -0,0 +1,13 @@
|
|
+--- net/ipv4/inetpeer.c 2012-07-21 01:28:45.442708108 +0200
|
|
++++ net/ipv4/inetpeer.c 2012-07-21 01:28:57.122708454 +0200
|
|
+@@ -487,8 +487,8 @@ relookup:
|
|
+ if (p) {
|
|
+ p->daddr = *daddr;
|
|
+ atomic_set(&p->refcnt, 1);
|
|
+- atomic_set(&p->rid, 0);
|
|
+- atomic_set(&p->ip_id_count,
|
|
++ atomic_set_unchecked(&p->rid, 0);
|
|
++ atomic_set_unchecked(&p->ip_id_count,
|
|
+ (daddr->family == AF_INET) ?
|
|
+ secure_ip_id(daddr->addr.a4) :
|
|
+ secure_ipv6_id(daddr->addr.a6));
|
|
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
|
|
index 4a40457..d3f263c 100644
|
|
--- a/net/ipv4/ip_fragment.c
|
|
+++ b/net/ipv4/ip_fragment.c
|
|
@@ -314,7 +314,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
|
|
return 0;
|
|
|
|
start = qp->rid;
|
|
- end = atomic_inc_return(&peer->rid);
|
|
+ end = atomic_inc_return_unchecked(&peer->rid);
|
|
qp->rid = end;
|
|
|
|
rc = qp->q.fragments && (end - start) > max;
|
|
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
|
|
index 3748284..21e2ed1 100644
|
|
--- a/net/ipv4/ip_sockglue.c
|
|
+++ b/net/ipv4/ip_sockglue.c
|
|
@@ -1277,7 +1277,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
|
|
if (sk->sk_type != SOCK_STREAM)
|
|
return -ENOPROTOOPT;
|
|
|
|
- msg.msg_control = optval;
|
|
+ msg.msg_control = (void __force_kernel *)optval;
|
|
msg.msg_controllen = len;
|
|
msg.msg_flags = flags;
|
|
|
|
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
|
|
index 92ac7e7..13f93d9 100644
|
|
--- a/net/ipv4/ipconfig.c
|
|
+++ b/net/ipv4/ipconfig.c
|
|
@@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
|
|
|
|
mm_segment_t oldfs = get_fs();
|
|
set_fs(get_ds());
|
|
- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
|
|
+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
|
|
set_fs(oldfs);
|
|
return res;
|
|
}
|
|
@@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
|
|
|
|
mm_segment_t oldfs = get_fs();
|
|
set_fs(get_ds());
|
|
- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
|
|
+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
|
|
set_fs(oldfs);
|
|
return res;
|
|
}
|
|
@@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
|
|
|
|
mm_segment_t oldfs = get_fs();
|
|
set_fs(get_ds());
|
|
- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
|
|
+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
|
|
set_fs(oldfs);
|
|
return res;
|
|
}
|
|
diff --git a/net/ipv4/ping.c.rej b/net/ipv4/ping.c.rej
|
|
new file mode 100644
|
|
index 0000000..0f3a592
|
|
--- /dev/null
|
|
+++ b/net/ipv4/ping.c.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- net/ipv4/ping.c 2012-05-21 11:33:41.183930034 +0200
|
|
++++ net/ipv4/ping.c 2012-05-21 12:10:12.164049032 +0200
|
|
+@@ -838,7 +838,7 @@ static void ping_format_sock(struct sock
|
|
+ sk_rmem_alloc_get(sp),
|
|
+ 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
|
|
+ atomic_read(&sp->sk_refcnt), sp,
|
|
+- atomic_read(&sp->sk_drops), len);
|
|
++ atomic_read_unchecked(&sp->sk_drops), len);
|
|
+ }
|
|
+
|
|
+ static int ping_seq_show(struct seq_file *seq, void *v)
|
|
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
|
|
index 6e21393..a722416 100644
|
|
--- a/net/ipv4/raw.c
|
|
+++ b/net/ipv4/raw.c
|
|
@@ -306,7 +306,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
|
|
int raw_rcv(struct sock *sk, struct sk_buff *skb)
|
|
{
|
|
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
kfree_skb(skb);
|
|
return NET_RX_DROP;
|
|
}
|
|
@@ -743,16 +743,20 @@ static int raw_init(struct sock *sk)
|
|
|
|
static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
|
|
{
|
|
+ struct icmp_filter filter;
|
|
+
|
|
if (optlen > sizeof(struct icmp_filter))
|
|
optlen = sizeof(struct icmp_filter);
|
|
- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
|
|
+ if (copy_from_user(&filter, optval, optlen))
|
|
return -EFAULT;
|
|
+ raw_sk(sk)->filter = filter;
|
|
return 0;
|
|
}
|
|
|
|
static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
|
|
{
|
|
int len, ret = -EFAULT;
|
|
+ struct icmp_filter filter;
|
|
|
|
if (get_user(len, optlen))
|
|
goto out;
|
|
@@ -762,8 +766,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
|
|
if (len > sizeof(struct icmp_filter))
|
|
len = sizeof(struct icmp_filter);
|
|
ret = -EFAULT;
|
|
- if (put_user(len, optlen) ||
|
|
- copy_to_user(optval, &raw_sk(sk)->filter, len))
|
|
+ filter = raw_sk(sk)->filter;
|
|
+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
|
|
goto out;
|
|
ret = 0;
|
|
out: return ret;
|
|
@@ -991,7 +995,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
|
|
sk_wmem_alloc_get(sp),
|
|
sk_rmem_alloc_get(sp),
|
|
0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
|
|
- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
|
|
+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
|
|
}
|
|
|
|
static int raw_seq_show(struct seq_file *seq, void *v)
|
|
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
|
|
index d7a9925..b65d0c6 100644
|
|
--- a/net/ipv4/route.c
|
|
+++ b/net/ipv4/route.c
|
|
@@ -316,7 +316,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
|
|
|
|
static inline int rt_genid(struct net *net)
|
|
{
|
|
- return atomic_read(&net->ipv4.rt_genid);
|
|
+ return atomic_read_unchecked(&net->ipv4.rt_genid);
|
|
}
|
|
|
|
#ifdef CONFIG_PROC_FS
|
|
@@ -941,7 +941,7 @@ static void rt_cache_invalidate(struct net *net)
|
|
unsigned char shuffle;
|
|
|
|
get_random_bytes(&shuffle, sizeof(shuffle));
|
|
- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
|
|
+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
|
|
inetpeer_invalidate_tree(AF_INET);
|
|
}
|
|
|
|
diff --git a/net/ipv4/route.c.rej b/net/ipv4/route.c.rej
|
|
new file mode 100644
|
|
index 0000000..fe8448d
|
|
--- /dev/null
|
|
+++ b/net/ipv4/route.c.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- net/ipv4/route.c 2012-05-21 11:33:41.191930034 +0200
|
|
++++ net/ipv4/route.c 2012-05-21 12:10:12.176049032 +0200
|
|
+@@ -3009,7 +3009,7 @@ static int rt_fill_info(struct net *net,
|
|
+ error = rt->dst.error;
|
|
+ if (peer) {
|
|
+ inet_peer_refcheck(rt->peer);
|
|
+- id = atomic_read(&peer->ip_id_count) & 0xffff;
|
|
++ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
|
|
+ if (peer->tcp_ts_stamp) {
|
|
+ ts = peer->tcp_ts;
|
|
+ tsage = get_seconds() - peer->tcp_ts_stamp;
|
|
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
|
|
index a981cdc..48f4c3a 100644
|
|
--- a/net/ipv4/tcp_probe.c
|
|
+++ b/net/ipv4/tcp_probe.c
|
|
@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
|
|
if (cnt + width >= len)
|
|
break;
|
|
|
|
- if (copy_to_user(buf + cnt, tbuf, width))
|
|
+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
|
|
return -EFAULT;
|
|
cnt += width;
|
|
}
|
|
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
|
|
index e284f39..2c19509 100644
|
|
--- a/net/ipv4/udp.c
|
|
+++ b/net/ipv4/udp.c
|
|
@@ -1107,7 +1107,7 @@ static unsigned int first_packet_length(struct sock *sk)
|
|
udp_lib_checksum_complete(skb)) {
|
|
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
|
|
IS_UDPLITE(sk));
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
__skb_unlink(skb, rcvq);
|
|
__skb_queue_tail(&list_kill, skb);
|
|
}
|
|
@@ -1492,7 +1492,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
|
|
|
|
drop:
|
|
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
kfree_skb(skb);
|
|
return -1;
|
|
}
|
|
@@ -1511,7 +1511,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
|
|
skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
|
|
|
|
if (!skb1) {
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
|
|
IS_UDPLITE(sk));
|
|
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
|
|
diff --git a/net/ipv4/udp.c.rej b/net/ipv4/udp.c.rej
|
|
new file mode 100644
|
|
index 0000000..7f49fc5
|
|
--- /dev/null
|
|
+++ b/net/ipv4/udp.c.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- net/ipv4/udp.c 2012-05-21 11:33:41.259930038 +0200
|
|
++++ net/ipv4/udp.c 2012-05-21 12:10:12.180049033 +0200
|
|
+@@ -2095,7 +2095,7 @@ static void udp4_format_sock(struct sock
|
|
+ sk_rmem_alloc_get(sp),
|
|
+ 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
|
|
+ atomic_read(&sp->sk_refcnt), sp,
|
|
+- atomic_read(&sp->sk_drops), len);
|
|
++ atomic_read_unchecked(&sp->sk_drops), len);
|
|
+ }
|
|
+
|
|
+ int udp4_seq_show(struct seq_file *seq, void *v)
|
|
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
|
|
index 019f5da..df19c51 100644
|
|
--- a/net/ipv6/addrconf.c
|
|
+++ b/net/ipv6/addrconf.c
|
|
@@ -2227,7 +2227,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
|
|
p.iph.ihl = 5;
|
|
p.iph.protocol = IPPROTO_IPV6;
|
|
p.iph.ttl = 64;
|
|
- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
|
|
+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
|
|
|
|
if (ops->ndo_do_ioctl) {
|
|
mm_segment_t oldfs = get_fs();
|
|
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
|
|
index aefc8b7..3ad60b8 100644
|
|
--- a/net/ipv6/inet6_connection_sock.c
|
|
+++ b/net/ipv6/inet6_connection_sock.c
|
|
@@ -179,7 +179,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
|
|
#ifdef CONFIG_XFRM
|
|
{
|
|
struct rt6_info *rt = (struct rt6_info *)dst;
|
|
- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
|
|
+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
|
|
}
|
|
#endif
|
|
}
|
|
@@ -194,7 +194,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
|
|
#ifdef CONFIG_XFRM
|
|
if (dst) {
|
|
struct rt6_info *rt = (struct rt6_info *)dst;
|
|
- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
|
|
+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
|
|
__sk_dst_reset(sk);
|
|
dst = NULL;
|
|
}
|
|
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
|
|
index 34c1109..5751747 100644
|
|
--- a/net/ipv6/ipv6_sockglue.c
|
|
+++ b/net/ipv6/ipv6_sockglue.c
|
|
@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
|
|
if (sk->sk_type != SOCK_STREAM)
|
|
return -ENOPROTOOPT;
|
|
|
|
- msg.msg_control = optval;
|
|
+ msg.msg_control = (void __force_kernel *)optval;
|
|
msg.msg_controllen = len;
|
|
msg.msg_flags = flags;
|
|
|
|
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
|
|
index f21a068..4406174 100644
|
|
--- a/net/ipv6/raw.c
|
|
+++ b/net/ipv6/raw.c
|
|
@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
|
|
{
|
|
if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
|
|
skb_checksum_complete(skb)) {
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
kfree_skb(skb);
|
|
return NET_RX_DROP;
|
|
}
|
|
@@ -404,7 +404,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
|
|
struct raw6_sock *rp = raw6_sk(sk);
|
|
|
|
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
kfree_skb(skb);
|
|
return NET_RX_DROP;
|
|
}
|
|
@@ -428,7 +428,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
|
|
|
|
if (inet->hdrincl) {
|
|
if (skb_checksum_complete(skb)) {
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
kfree_skb(skb);
|
|
return NET_RX_DROP;
|
|
}
|
|
@@ -601,7 +601,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
|
|
return err;
|
|
}
|
|
|
|
-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
|
|
+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
|
|
struct flowi6 *fl6, struct dst_entry **dstp,
|
|
unsigned int flags)
|
|
{
|
|
@@ -914,12 +914,15 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
|
|
static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
|
|
char __user *optval, int optlen)
|
|
{
|
|
+ struct icmp6_filter filter;
|
|
+
|
|
switch (optname) {
|
|
case ICMPV6_FILTER:
|
|
if (optlen > sizeof(struct icmp6_filter))
|
|
optlen = sizeof(struct icmp6_filter);
|
|
- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
|
|
+ if (copy_from_user(&filter, optval, optlen))
|
|
return -EFAULT;
|
|
+ raw6_sk(sk)->filter = filter;
|
|
return 0;
|
|
default:
|
|
return -ENOPROTOOPT;
|
|
@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
|
|
char __user *optval, int __user *optlen)
|
|
{
|
|
int len;
|
|
+ struct icmp6_filter filter;
|
|
|
|
switch (optname) {
|
|
case ICMPV6_FILTER:
|
|
@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
|
|
len = sizeof(struct icmp6_filter);
|
|
if (put_user(len, optlen))
|
|
return -EFAULT;
|
|
- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
|
|
+ filter = raw6_sk(sk)->filter;
|
|
+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
|
|
return -EFAULT;
|
|
return 0;
|
|
default:
|
|
@@ -1250,7 +1255,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
|
|
0, 0L, 0,
|
|
sock_i_uid(sp), 0,
|
|
sock_i_ino(sp),
|
|
- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
|
|
+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
|
|
}
|
|
|
|
static int raw6_seq_show(struct seq_file *seq, void *v)
|
|
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
|
|
index 875d25b..4c16aa7 100644
|
|
--- a/net/ipv6/udp.c
|
|
+++ b/net/ipv6/udp.c
|
|
@@ -549,7 +549,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
|
|
|
|
return 0;
|
|
drop:
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
drop_no_sk_drops_inc:
|
|
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
|
|
kfree_skb(skb);
|
|
@@ -625,7 +625,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
|
|
continue;
|
|
}
|
|
drop:
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
UDP6_INC_STATS_BH(sock_net(sk),
|
|
UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
|
|
UDP6_INC_STATS_BH(sock_net(sk),
|
|
@@ -796,7 +796,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
|
|
if (!sock_owned_by_user(sk))
|
|
udpv6_queue_rcv_skb(sk, skb);
|
|
else if (sk_add_backlog(sk, skb)) {
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
bh_unlock_sock(sk);
|
|
sock_put(sk);
|
|
goto discard;
|
|
@@ -1416,7 +1416,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
|
|
sock_i_uid(sp), 0,
|
|
sock_i_ino(sp),
|
|
atomic_read(&sp->sk_refcnt), sp,
|
|
- atomic_read(&sp->sk_drops));
|
|
+ atomic_read_unchecked(&sp->sk_drops));
|
|
}
|
|
|
|
int udp6_seq_show(struct seq_file *seq, void *v)
|
|
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
|
|
index 20fec0d..51fe6a1 100644
|
|
--- a/net/irda/ircomm/ircomm_tty.c
|
|
+++ b/net/irda/ircomm/ircomm_tty.c
|
|
@@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
|
|
add_wait_queue(&self->open_wait, &wait);
|
|
|
|
IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
|
|
- __FILE__,__LINE__, tty->driver->name, self->open_count );
|
|
+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
|
|
|
|
/* As far as I can see, we protect open_count - Jean II */
|
|
spin_lock_irqsave(&self->spinlock, flags);
|
|
if (!tty_hung_up_p(filp)) {
|
|
extra_count = 1;
|
|
- self->open_count--;
|
|
+ local_dec(&self->open_count);
|
|
}
|
|
spin_unlock_irqrestore(&self->spinlock, flags);
|
|
- self->blocked_open++;
|
|
+ local_inc(&self->blocked_open);
|
|
|
|
while (1) {
|
|
if (tty->termios->c_cflag & CBAUD) {
|
|
@@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
|
|
}
|
|
|
|
IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
|
|
- __FILE__,__LINE__, tty->driver->name, self->open_count );
|
|
+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
|
|
|
|
schedule();
|
|
}
|
|
@@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
|
|
if (extra_count) {
|
|
/* ++ is not atomic, so this should be protected - Jean II */
|
|
spin_lock_irqsave(&self->spinlock, flags);
|
|
- self->open_count++;
|
|
+ local_inc(&self->open_count);
|
|
spin_unlock_irqrestore(&self->spinlock, flags);
|
|
}
|
|
- self->blocked_open--;
|
|
+ local_dec(&self->blocked_open);
|
|
|
|
IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
|
|
- __FILE__,__LINE__, tty->driver->name, self->open_count);
|
|
+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
|
|
|
|
if (!retval)
|
|
self->flags |= ASYNC_NORMAL_ACTIVE;
|
|
@@ -412,14 +412,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
|
|
}
|
|
/* ++ is not atomic, so this should be protected - Jean II */
|
|
spin_lock_irqsave(&self->spinlock, flags);
|
|
- self->open_count++;
|
|
+ local_inc(&self->open_count);
|
|
|
|
tty->driver_data = self;
|
|
self->tty = tty;
|
|
spin_unlock_irqrestore(&self->spinlock, flags);
|
|
|
|
IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
|
|
- self->line, self->open_count);
|
|
+ self->line, local_read(&self->open_count));
|
|
|
|
/* Not really used by us, but lets do it anyway */
|
|
self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
|
|
@@ -505,7 +505,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
|
|
return;
|
|
}
|
|
|
|
- if ((tty->count == 1) && (self->open_count != 1)) {
|
|
+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
|
|
/*
|
|
* Uh, oh. tty->count is 1, which means that the tty
|
|
* structure will be freed. state->count should always
|
|
@@ -515,16 +515,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
|
|
*/
|
|
IRDA_DEBUG(0, "%s(), bad serial port count; "
|
|
"tty->count is 1, state->count is %d\n", __func__ ,
|
|
- self->open_count);
|
|
- self->open_count = 1;
|
|
+ local_read(&self->open_count));
|
|
+ local_set(&self->open_count, 1);
|
|
}
|
|
|
|
- if (--self->open_count < 0) {
|
|
+ if (local_dec_return(&self->open_count) < 0) {
|
|
IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
|
|
- __func__, self->line, self->open_count);
|
|
- self->open_count = 0;
|
|
+ __func__, self->line, local_read(&self->open_count));
|
|
+ local_set(&self->open_count, 0);
|
|
}
|
|
- if (self->open_count) {
|
|
+ if (local_read(&self->open_count)) {
|
|
spin_unlock_irqrestore(&self->spinlock, flags);
|
|
|
|
IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
|
|
@@ -556,7 +556,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
|
|
tty->closing = 0;
|
|
self->tty = NULL;
|
|
|
|
- if (self->blocked_open) {
|
|
+ if (local_read(&self->blocked_open)) {
|
|
if (self->close_delay)
|
|
schedule_timeout_interruptible(self->close_delay);
|
|
wake_up_interruptible(&self->open_wait);
|
|
@@ -1010,7 +1010,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
|
|
spin_lock_irqsave(&self->spinlock, flags);
|
|
self->flags &= ~ASYNC_NORMAL_ACTIVE;
|
|
self->tty = NULL;
|
|
- self->open_count = 0;
|
|
+ local_set(&self->open_count, 0);
|
|
spin_unlock_irqrestore(&self->spinlock, flags);
|
|
|
|
wake_up_interruptible(&self->open_wait);
|
|
@@ -1357,7 +1357,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
|
|
seq_putc(m, '\n');
|
|
|
|
seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
|
|
- seq_printf(m, "Open count: %d\n", self->open_count);
|
|
+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
|
|
seq_printf(m, "Max data size: %d\n", self->max_data_size);
|
|
seq_printf(m, "Max header size: %d\n", self->max_header_size);
|
|
|
|
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
|
|
index 1fe397a..beda699 100644
|
|
--- a/net/iucv/af_iucv.c
|
|
+++ b/net/iucv/af_iucv.c
|
|
@@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
|
|
|
|
write_lock_bh(&iucv_sk_list.lock);
|
|
|
|
- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
|
|
+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
|
|
while (__iucv_get_sock_by_name(name)) {
|
|
sprintf(name, "%08x",
|
|
- atomic_inc_return(&iucv_sk_list.autobind_name));
|
|
+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
|
|
}
|
|
|
|
write_unlock_bh(&iucv_sk_list.lock);
|
|
diff --git a/net/key/af_key.c b/net/key/af_key.c
|
|
index 1afbbd8..7bcb070 100644
|
|
--- a/net/key/af_key.c
|
|
+++ b/net/key/af_key.c
|
|
@@ -3020,10 +3020,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
|
|
static u32 get_acqseq(void)
|
|
{
|
|
u32 res;
|
|
- static atomic_t acqseq;
|
|
+ static atomic_unchecked_t acqseq;
|
|
|
|
do {
|
|
- res = atomic_inc_return(&acqseq);
|
|
+ res = atomic_inc_return_unchecked(&acqseq);
|
|
} while (!res);
|
|
return res;
|
|
}
|
|
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
|
|
index 3744d81..d242de9 100644
|
|
--- a/net/mac80211/ieee80211_i.h
|
|
+++ b/net/mac80211/ieee80211_i.h
|
|
@@ -28,6 +28,7 @@
|
|
#include <net/ieee80211_radiotap.h>
|
|
#include <net/cfg80211.h>
|
|
#include <net/mac80211.h>
|
|
+#include <asm/local.h>
|
|
#include "key.h"
|
|
#include "sta_info.h"
|
|
|
|
@@ -860,7 +861,7 @@ struct ieee80211_local {
|
|
/* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
|
|
spinlock_t queue_stop_reason_lock;
|
|
|
|
- int open_count;
|
|
+ local_t open_count;
|
|
int monitors, cooked_mntrs;
|
|
/* number of interfaces with corresponding FIF_ flags */
|
|
int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
|
|
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
|
|
index 9f32756..7822f6f 100644
|
|
--- a/net/mac80211/iface.c
|
|
+++ b/net/mac80211/iface.c
|
|
@@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
|
|
break;
|
|
}
|
|
|
|
- if (local->open_count == 0) {
|
|
+ if (local_read(&local->open_count) == 0) {
|
|
res = drv_start(local);
|
|
if (res)
|
|
goto err_del_bss;
|
|
@@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
|
|
memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
|
|
|
|
if (!is_valid_ether_addr(dev->dev_addr)) {
|
|
- if (!local->open_count)
|
|
+ if (!local_read(&local->open_count))
|
|
drv_stop(local);
|
|
return -EADDRNOTAVAIL;
|
|
}
|
|
@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
|
|
mutex_unlock(&local->mtx);
|
|
|
|
if (coming_up)
|
|
- local->open_count++;
|
|
+ local_inc(&local->open_count);
|
|
|
|
if (hw_reconf_flags)
|
|
ieee80211_hw_config(local, hw_reconf_flags);
|
|
@@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
|
|
err_del_interface:
|
|
drv_remove_interface(local, sdata);
|
|
err_stop:
|
|
- if (!local->open_count)
|
|
+ if (!local_read(&local->open_count))
|
|
drv_stop(local);
|
|
err_del_bss:
|
|
sdata->bss = NULL;
|
|
@@ -493,7 +493,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
|
|
}
|
|
|
|
if (going_down)
|
|
- local->open_count--;
|
|
+ local_dec(&local->open_count);
|
|
|
|
switch (sdata->vif.type) {
|
|
case NL80211_IFTYPE_AP_VLAN:
|
|
@@ -567,7 +567,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
|
|
if (cancel_scan)
|
|
flush_delayed_work(&local->scan_work);
|
|
|
|
- if (local->open_count == 0) {
|
|
+ if (local_read(&local->open_count) == 0) {
|
|
if (local->ops->napi_poll)
|
|
napi_disable(&local->napi);
|
|
ieee80211_clear_tx_pending(local);
|
|
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
|
|
index 018e3fb..5cd77b5 100644
|
|
--- a/net/mac80211/main.c
|
|
+++ b/net/mac80211/main.c
|
|
@@ -164,7 +164,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
|
|
local->hw.conf.power_level = power;
|
|
}
|
|
|
|
- if (changed && local->open_count) {
|
|
+ if (changed && local_read(&local->open_count)) {
|
|
ret = drv_config(local, changed);
|
|
/*
|
|
* Goal:
|
|
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
|
|
index ef8eba1..5c63952 100644
|
|
--- a/net/mac80211/pm.c
|
|
+++ b/net/mac80211/pm.c
|
|
@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
|
|
struct ieee80211_sub_if_data *sdata;
|
|
struct sta_info *sta;
|
|
|
|
- if (!local->open_count)
|
|
+ if (!local_read(&local->open_count))
|
|
goto suspend;
|
|
|
|
ieee80211_scan_cancel(local);
|
|
@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
|
|
cancel_work_sync(&local->dynamic_ps_enable_work);
|
|
del_timer_sync(&local->dynamic_ps_timer);
|
|
|
|
- local->wowlan = wowlan && local->open_count;
|
|
+ local->wowlan = wowlan && local_read(&local->open_count);
|
|
if (local->wowlan) {
|
|
int err = drv_suspend(local, wowlan);
|
|
if (err < 0) {
|
|
@@ -128,7 +128,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
|
|
}
|
|
|
|
/* stop hardware - this must stop RX */
|
|
- if (local->open_count)
|
|
+ if (local_read(&local->open_count))
|
|
ieee80211_stop_device(local);
|
|
|
|
suspend:
|
|
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
|
|
index 3313c11..bec9f17 100644
|
|
--- a/net/mac80211/rate.c
|
|
+++ b/net/mac80211/rate.c
|
|
@@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
|
|
|
|
ASSERT_RTNL();
|
|
|
|
- if (local->open_count)
|
|
+ if (local_read(&local->open_count))
|
|
return -EBUSY;
|
|
|
|
if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
|
|
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
|
|
index c97a065..ff61928 100644
|
|
--- a/net/mac80211/rc80211_pid_debugfs.c
|
|
+++ b/net/mac80211/rc80211_pid_debugfs.c
|
|
@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
|
|
|
|
spin_unlock_irqrestore(&events->lock, status);
|
|
|
|
- if (copy_to_user(buf, pb, p))
|
|
+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
|
|
return -EFAULT;
|
|
|
|
return p;
|
|
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
|
|
index 8a998ad..5efe867 100644
|
|
--- a/net/mac80211/util.c
|
|
+++ b/net/mac80211/util.c
|
|
@@ -1208,7 +1208,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
|
|
}
|
|
#endif
|
|
/* everything else happens only if HW was up & running */
|
|
- if (!local->open_count)
|
|
+ if (!local_read(&local->open_count))
|
|
goto wake_up;
|
|
|
|
/*
|
|
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
|
|
index 29fa5ba..8debc79 100644
|
|
--- a/net/netfilter/ipvs/ip_vs_conn.c
|
|
+++ b/net/netfilter/ipvs/ip_vs_conn.c
|
|
@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
|
|
/* Increase the refcnt counter of the dest */
|
|
atomic_inc(&dest->refcnt);
|
|
|
|
- conn_flags = atomic_read(&dest->conn_flags);
|
|
+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
|
|
if (cp->protocol != IPPROTO_UDP)
|
|
conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
|
|
/* Bind with the destination and its corresponding transmitter */
|
|
@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
|
|
atomic_set(&cp->refcnt, 1);
|
|
|
|
atomic_set(&cp->n_control, 0);
|
|
- atomic_set(&cp->in_pkts, 0);
|
|
+ atomic_set_unchecked(&cp->in_pkts, 0);
|
|
|
|
atomic_inc(&ipvs->conn_count);
|
|
if (flags & IP_VS_CONN_F_NO_CPORT)
|
|
@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
|
|
|
|
/* Don't drop the entry if its number of incoming packets is not
|
|
located in [0, 8] */
|
|
- i = atomic_read(&cp->in_pkts);
|
|
+ i = atomic_read_unchecked(&cp->in_pkts);
|
|
if (i > 8 || i < 0) return 0;
|
|
|
|
if (!todrop_rate[i]) return 0;
|
|
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
|
|
index abdb475..15dcf9c 100644
|
|
--- a/net/netfilter/ipvs/ip_vs_core.c
|
|
+++ b/net/netfilter/ipvs/ip_vs_core.c
|
|
@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
|
|
ret = cp->packet_xmit(skb, cp, pd->pp);
|
|
/* do not touch skb anymore */
|
|
|
|
- atomic_inc(&cp->in_pkts);
|
|
+ atomic_inc_unchecked(&cp->in_pkts);
|
|
ip_vs_conn_put(cp);
|
|
return ret;
|
|
}
|
|
@@ -1621,7 +1621,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
|
|
if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
|
|
pkts = sysctl_sync_threshold(ipvs);
|
|
else
|
|
- pkts = atomic_add_return(1, &cp->in_pkts);
|
|
+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
|
|
|
|
if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
|
|
cp->protocol == IPPROTO_SCTP) {
|
|
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
|
|
index f2ed4a9..1e9683e 100644
|
|
--- a/net/netfilter/ipvs/ip_vs_ctl.c
|
|
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
|
|
@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
|
|
ip_vs_rs_hash(ipvs, dest);
|
|
write_unlock_bh(&ipvs->rs_lock);
|
|
}
|
|
- atomic_set(&dest->conn_flags, conn_flags);
|
|
+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
|
|
|
|
/* bind the service */
|
|
if (!dest->svc) {
|
|
@@ -2029,7 +2029,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
|
|
" %-7s %-6d %-10d %-10d\n",
|
|
&dest->addr.in6,
|
|
ntohs(dest->port),
|
|
- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
|
|
+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
|
|
atomic_read(&dest->weight),
|
|
atomic_read(&dest->activeconns),
|
|
atomic_read(&dest->inactconns));
|
|
@@ -2040,7 +2040,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
|
|
"%-7s %-6d %-10d %-10d\n",
|
|
ntohl(dest->addr.ip),
|
|
ntohs(dest->port),
|
|
- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
|
|
+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
|
|
atomic_read(&dest->weight),
|
|
atomic_read(&dest->activeconns),
|
|
atomic_read(&dest->inactconns));
|
|
@@ -2510,7 +2510,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
|
|
|
|
entry.addr = dest->addr.ip;
|
|
entry.port = dest->port;
|
|
- entry.conn_flags = atomic_read(&dest->conn_flags);
|
|
+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
|
|
entry.weight = atomic_read(&dest->weight);
|
|
entry.u_threshold = dest->u_threshold;
|
|
entry.l_threshold = dest->l_threshold;
|
|
@@ -3044,7 +3044,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
|
|
NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
|
|
|
|
NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
|
|
- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
|
|
+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
|
|
NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
|
|
NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
|
|
NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
|
|
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
|
|
index 6a5555c..d01224e 100644
|
|
--- a/net/netfilter/ipvs/ip_vs_sync.c
|
|
+++ b/net/netfilter/ipvs/ip_vs_sync.c
|
|
@@ -649,7 +649,7 @@ void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp)
|
|
* i.e only increment in_pkts for Templates.
|
|
*/
|
|
if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
|
|
- int pkts = atomic_add_return(1, &cp->in_pkts);
|
|
+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
|
|
|
|
if (pkts % sysctl_sync_period(ipvs) != 1)
|
|
return;
|
|
@@ -797,7 +797,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
|
|
|
|
if (opt)
|
|
memcpy(&cp->in_seq, opt, sizeof(*opt));
|
|
- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
|
|
+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
|
|
cp->state = state;
|
|
cp->old_state = cp->state;
|
|
/*
|
|
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
|
|
index ec78ab6..18d07e1 100644
|
|
--- a/net/netfilter/ipvs/ip_vs_xmit.c
|
|
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
|
|
@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
|
|
else
|
|
rc = NF_ACCEPT;
|
|
/* do not touch skb anymore */
|
|
- atomic_inc(&cp->in_pkts);
|
|
+ atomic_inc_unchecked(&cp->in_pkts);
|
|
goto out;
|
|
}
|
|
|
|
@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
|
|
else
|
|
rc = NF_ACCEPT;
|
|
/* do not touch skb anymore */
|
|
- atomic_inc(&cp->in_pkts);
|
|
+ atomic_inc_unchecked(&cp->in_pkts);
|
|
goto out;
|
|
}
|
|
|
|
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
|
|
index 66b2c54..c7884e3 100644
|
|
--- a/net/netfilter/nfnetlink_log.c
|
|
+++ b/net/netfilter/nfnetlink_log.c
|
|
@@ -70,7 +70,7 @@ struct nfulnl_instance {
|
|
};
|
|
|
|
static DEFINE_SPINLOCK(instances_lock);
|
|
-static atomic_t global_seq;
|
|
+static atomic_unchecked_t global_seq;
|
|
|
|
#define INSTANCE_BUCKETS 16
|
|
static struct hlist_head instance_table[INSTANCE_BUCKETS];
|
|
@@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
|
|
/* global sequence number */
|
|
if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
|
|
NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
|
|
- htonl(atomic_inc_return(&global_seq)));
|
|
+ htonl(atomic_inc_return_unchecked(&global_seq)));
|
|
|
|
if (data_len) {
|
|
struct nlattr *nla;
|
|
diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
|
|
index 4fe4fb4..87a89e5 100644
|
|
--- a/net/netfilter/xt_statistic.c
|
|
+++ b/net/netfilter/xt_statistic.c
|
|
@@ -19,7 +19,7 @@
|
|
#include <linux/module.h>
|
|
|
|
struct xt_statistic_priv {
|
|
- atomic_t count;
|
|
+ atomic_unchecked_t count;
|
|
} ____cacheline_aligned_in_smp;
|
|
|
|
MODULE_LICENSE("GPL");
|
|
@@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
|
|
break;
|
|
case XT_STATISTIC_MODE_NTH:
|
|
do {
|
|
- oval = atomic_read(&info->master->count);
|
|
+ oval = atomic_read_unchecked(&info->master->count);
|
|
nval = (oval == info->u.nth.every) ? 0 : oval + 1;
|
|
- } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
|
|
+ } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
|
|
if (nval == 0)
|
|
ret = !ret;
|
|
break;
|
|
@@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
|
|
info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
|
|
if (info->master == NULL)
|
|
return -ENOMEM;
|
|
- atomic_set(&info->master->count, info->u.nth.count);
|
|
+ atomic_set_unchecked(&info->master->count, info->u.nth.count);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
|
|
index fc40cb3..9dd81b5 100644
|
|
--- a/net/netlink/af_netlink.c
|
|
+++ b/net/netlink/af_netlink.c
|
|
@@ -757,7 +757,7 @@ static void netlink_overrun(struct sock *sk)
|
|
sk->sk_error_report(sk);
|
|
}
|
|
}
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
}
|
|
|
|
static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
|
|
@@ -2039,7 +2039,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
|
|
sk_wmem_alloc_get(s),
|
|
nlk->cb,
|
|
atomic_read(&s->sk_refcnt),
|
|
- atomic_read(&s->sk_drops),
|
|
+ atomic_read_unchecked(&s->sk_drops),
|
|
sock_i_ino(s)
|
|
);
|
|
|
|
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
|
|
index d678fda..01c7cc1 100644
|
|
--- a/net/packet/af_packet.c
|
|
+++ b/net/packet/af_packet.c
|
|
@@ -1676,7 +1676,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
|
|
|
|
spin_lock(&sk->sk_receive_queue.lock);
|
|
po->stats.tp_packets++;
|
|
- skb->dropcount = atomic_read(&sk->sk_drops);
|
|
+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
|
|
__skb_queue_tail(&sk->sk_receive_queue, skb);
|
|
spin_unlock(&sk->sk_receive_queue.lock);
|
|
sk->sk_data_ready(sk, skb->len);
|
|
@@ -1685,7 +1685,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
|
|
drop_n_acct:
|
|
spin_lock(&sk->sk_receive_queue.lock);
|
|
po->stats.tp_drops++;
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
spin_unlock(&sk->sk_receive_queue.lock);
|
|
|
|
drop_n_restore:
|
|
@@ -2637,6 +2637,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
|
|
|
|
static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
|
|
{
|
|
+ struct sock_extended_err ee;
|
|
struct sock_exterr_skb *serr;
|
|
struct sk_buff *skb, *skb2;
|
|
int copied, err;
|
|
@@ -2658,8 +2659,9 @@ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
|
|
sock_recv_timestamp(msg, sk, skb);
|
|
|
|
serr = SKB_EXT_ERR(skb);
|
|
+ ee = serr->ee;
|
|
put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
|
|
- sizeof(serr->ee), &serr->ee);
|
|
+ sizeof ee, &ee);
|
|
|
|
msg->msg_flags |= MSG_ERRQUEUE;
|
|
err = copied;
|
|
@@ -3287,7 +3289,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
|
|
case PACKET_HDRLEN:
|
|
if (len > sizeof(int))
|
|
len = sizeof(int);
|
|
- if (copy_from_user(&val, optval, len))
|
|
+ if (len > sizeof(val) || copy_from_user(&val, optval, len))
|
|
return -EFAULT;
|
|
switch (val) {
|
|
case TPACKET_V1:
|
|
@@ -3337,7 +3339,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
|
|
|
|
if (put_user(len, optlen))
|
|
return -EFAULT;
|
|
- if (copy_to_user(optval, data, len))
|
|
+ if (len > sizeof(st) || copy_to_user(optval, data, len))
|
|
return -EFAULT;
|
|
return 0;
|
|
}
|
|
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
|
|
index 9726fe6..fc4e3a4 100644
|
|
--- a/net/phonet/pep.c
|
|
+++ b/net/phonet/pep.c
|
|
@@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
|
|
|
|
case PNS_PEP_CTRL_REQ:
|
|
if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
break;
|
|
}
|
|
__skb_pull(skb, 4);
|
|
@@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
|
|
}
|
|
|
|
if (pn->rx_credits == 0) {
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
err = -ENOBUFS;
|
|
break;
|
|
}
|
|
@@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
|
|
}
|
|
|
|
if (pn->rx_credits == 0) {
|
|
- atomic_inc(&sk->sk_drops);
|
|
+ atomic_inc_unchecked(&sk->sk_drops);
|
|
err = NET_RX_DROP;
|
|
break;
|
|
}
|
|
diff --git a/net/phonet/socket.c.rej b/net/phonet/socket.c.rej
|
|
new file mode 100644
|
|
index 0000000..36ff5fd
|
|
--- /dev/null
|
|
+++ b/net/phonet/socket.c.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- net/phonet/socket.c 2012-03-19 10:39:14.136049059 +0100
|
|
++++ net/phonet/socket.c 2012-05-21 12:10:12.268049037 +0200
|
|
+@@ -614,7 +614,7 @@ static int pn_sock_seq_show(struct seq_f
|
|
+ sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
|
|
+ sock_i_uid(sk), sock_i_ino(sk),
|
|
+ atomic_read(&sk->sk_refcnt), sk,
|
|
+- atomic_read(&sk->sk_drops), &len);
|
|
++ atomic_read_unchecked(&sk->sk_drops), &len);
|
|
+ }
|
|
+ seq_printf(seq, "%*s\n", 127 - len, "");
|
|
+ return 0;
|
|
diff --git a/net/rds/cong.c b/net/rds/cong.c
|
|
index e5b65ac..f3b6fb7 100644
|
|
--- a/net/rds/cong.c
|
|
+++ b/net/rds/cong.c
|
|
@@ -78,7 +78,7 @@
|
|
* finds that the saved generation number is smaller than the global generation
|
|
* number, it wakes up the process.
|
|
*/
|
|
-static atomic_t rds_cong_generation = ATOMIC_INIT(0);
|
|
+static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
|
|
|
|
/*
|
|
* Congestion monitoring
|
|
@@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
|
|
rdsdebug("waking map %p for %pI4\n",
|
|
map, &map->m_addr);
|
|
rds_stats_inc(s_cong_update_received);
|
|
- atomic_inc(&rds_cong_generation);
|
|
+ atomic_inc_unchecked(&rds_cong_generation);
|
|
if (waitqueue_active(&map->m_waitq))
|
|
wake_up(&map->m_waitq);
|
|
if (waitqueue_active(&rds_poll_waitq))
|
|
@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
|
|
|
|
int rds_cong_updated_since(unsigned long *recent)
|
|
{
|
|
- unsigned long gen = atomic_read(&rds_cong_generation);
|
|
+ unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
|
|
|
|
if (likely(*recent == gen))
|
|
return 0;
|
|
diff --git a/net/rds/ib.h b/net/rds/ib.h
|
|
index edfaaaf..8c89879 100644
|
|
--- a/net/rds/ib.h
|
|
+++ b/net/rds/ib.h
|
|
@@ -128,7 +128,7 @@ struct rds_ib_connection {
|
|
/* sending acks */
|
|
unsigned long i_ack_flags;
|
|
#ifdef KERNEL_HAS_ATOMIC64
|
|
- atomic64_t i_ack_next; /* next ACK to send */
|
|
+ atomic64_unchecked_t i_ack_next; /* next ACK to send */
|
|
#else
|
|
spinlock_t i_ack_lock; /* protect i_ack_next */
|
|
u64 i_ack_next; /* next ACK to send */
|
|
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
|
|
index a1e1162..265e129 100644
|
|
--- a/net/rds/ib_cm.c
|
|
+++ b/net/rds/ib_cm.c
|
|
@@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
|
|
/* Clear the ACK state */
|
|
clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
|
|
#ifdef KERNEL_HAS_ATOMIC64
|
|
- atomic64_set(&ic->i_ack_next, 0);
|
|
+ atomic64_set_unchecked(&ic->i_ack_next, 0);
|
|
#else
|
|
ic->i_ack_next = 0;
|
|
#endif
|
|
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
|
|
index 8d19491..05a3e65 100644
|
|
--- a/net/rds/ib_recv.c
|
|
+++ b/net/rds/ib_recv.c
|
|
@@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
|
|
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
|
|
int ack_required)
|
|
{
|
|
- atomic64_set(&ic->i_ack_next, seq);
|
|
+ atomic64_set_unchecked(&ic->i_ack_next, seq);
|
|
if (ack_required) {
|
|
smp_mb__before_clear_bit();
|
|
set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
|
|
@@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
|
|
clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
|
|
smp_mb__after_clear_bit();
|
|
|
|
- return atomic64_read(&ic->i_ack_next);
|
|
+ return atomic64_read_unchecked(&ic->i_ack_next);
|
|
}
|
|
#endif
|
|
|
|
diff --git a/net/rds/iw.h b/net/rds/iw.h
|
|
index 04ce3b1..48119a6 100644
|
|
--- a/net/rds/iw.h
|
|
+++ b/net/rds/iw.h
|
|
@@ -134,7 +134,7 @@ struct rds_iw_connection {
|
|
/* sending acks */
|
|
unsigned long i_ack_flags;
|
|
#ifdef KERNEL_HAS_ATOMIC64
|
|
- atomic64_t i_ack_next; /* next ACK to send */
|
|
+ atomic64_unchecked_t i_ack_next; /* next ACK to send */
|
|
#else
|
|
spinlock_t i_ack_lock; /* protect i_ack_next */
|
|
u64 i_ack_next; /* next ACK to send */
|
|
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
|
|
index a91e1db..cf3053f 100644
|
|
--- a/net/rds/iw_cm.c
|
|
+++ b/net/rds/iw_cm.c
|
|
@@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
|
|
/* Clear the ACK state */
|
|
clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
|
|
#ifdef KERNEL_HAS_ATOMIC64
|
|
- atomic64_set(&ic->i_ack_next, 0);
|
|
+ atomic64_set_unchecked(&ic->i_ack_next, 0);
|
|
#else
|
|
ic->i_ack_next = 0;
|
|
#endif
|
|
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
|
|
index 4503335..db566b4 100644
|
|
--- a/net/rds/iw_recv.c
|
|
+++ b/net/rds/iw_recv.c
|
|
@@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
|
|
static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
|
|
int ack_required)
|
|
{
|
|
- atomic64_set(&ic->i_ack_next, seq);
|
|
+ atomic64_set_unchecked(&ic->i_ack_next, seq);
|
|
if (ack_required) {
|
|
smp_mb__before_clear_bit();
|
|
set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
|
|
@@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
|
|
clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
|
|
smp_mb__after_clear_bit();
|
|
|
|
- return atomic64_read(&ic->i_ack_next);
|
|
+ return atomic64_read_unchecked(&ic->i_ack_next);
|
|
}
|
|
#endif
|
|
|
|
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
|
|
index edac9ef..16bcb98 100644
|
|
--- a/net/rds/tcp.c
|
|
+++ b/net/rds/tcp.c
|
|
@@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
|
|
int val = 1;
|
|
|
|
set_fs(KERNEL_DS);
|
|
- sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
|
|
+ sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
|
|
sizeof(val));
|
|
set_fs(oldfs);
|
|
}
|
|
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
|
|
index 1b4fd68..2234175 100644
|
|
--- a/net/rds/tcp_send.c
|
|
+++ b/net/rds/tcp_send.c
|
|
@@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
|
|
|
|
oldfs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
- sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
|
|
+ sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
|
|
sizeof(val));
|
|
set_fs(oldfs);
|
|
}
|
|
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
|
|
index 798ea39..d433d02 100644
|
|
--- a/net/rxrpc/af_rxrpc.c
|
|
+++ b/net/rxrpc/af_rxrpc.c
|
|
@@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
|
|
__be32 rxrpc_epoch;
|
|
|
|
/* current debugging ID */
|
|
-atomic_t rxrpc_debug_id;
|
|
+atomic_unchecked_t rxrpc_debug_id;
|
|
|
|
/* count of skbs currently in use */
|
|
atomic_t rxrpc_n_skbs;
|
|
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
|
|
index c3126e8..21facc7 100644
|
|
--- a/net/rxrpc/ar-ack.c
|
|
+++ b/net/rxrpc/ar-ack.c
|
|
@@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
|
|
|
|
_enter("{%d,%d,%d,%d},",
|
|
call->acks_hard, call->acks_unacked,
|
|
- atomic_read(&call->sequence),
|
|
+ atomic_read_unchecked(&call->sequence),
|
|
CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
|
|
|
|
stop = 0;
|
|
@@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
|
|
|
|
/* each Tx packet has a new serial number */
|
|
sp->hdr.serial =
|
|
- htonl(atomic_inc_return(&call->conn->serial));
|
|
+ htonl(atomic_inc_return_unchecked(&call->conn->serial));
|
|
|
|
hdr = (struct rxrpc_header *) txb->head;
|
|
hdr->serial = sp->hdr.serial;
|
|
@@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
|
|
*/
|
|
static void rxrpc_clear_tx_window(struct rxrpc_call *call)
|
|
{
|
|
- rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
|
|
+ rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
|
|
}
|
|
|
|
/*
|
|
@@ -629,7 +629,7 @@ static int rxrpc_process_rx_queue(struct rxrpc_call *call,
|
|
|
|
latest = ntohl(sp->hdr.serial);
|
|
hard = ntohl(ack.firstPacket);
|
|
- tx = atomic_read(&call->sequence);
|
|
+ tx = atomic_read_unchecked(&call->sequence);
|
|
|
|
_proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
|
|
latest,
|
|
@@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
|
|
goto maybe_reschedule;
|
|
|
|
send_ACK_with_skew:
|
|
- ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
|
|
+ ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
|
|
ntohl(ack.serial));
|
|
send_ACK:
|
|
mtu = call->conn->trans->peer->if_mtu;
|
|
@@ -1173,7 +1173,7 @@ void rxrpc_process_call(struct work_struct *work)
|
|
ackinfo.rxMTU = htonl(5692);
|
|
ackinfo.jumbo_max = htonl(4);
|
|
|
|
- hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
|
|
+ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
|
|
_proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
|
|
ntohl(hdr.serial),
|
|
ntohs(ack.maxSkew),
|
|
@@ -1191,7 +1191,7 @@ void rxrpc_process_call(struct work_struct *work)
|
|
send_message:
|
|
_debug("send message");
|
|
|
|
- hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
|
|
+ hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
|
|
_proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
|
|
send_message_2:
|
|
|
|
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
|
|
index bf656c2..48f9d27 100644
|
|
--- a/net/rxrpc/ar-call.c
|
|
+++ b/net/rxrpc/ar-call.c
|
|
@@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
|
|
spin_lock_init(&call->lock);
|
|
rwlock_init(&call->state_lock);
|
|
atomic_set(&call->usage, 1);
|
|
- call->debug_id = atomic_inc_return(&rxrpc_debug_id);
|
|
+ call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
|
|
call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
|
|
|
|
memset(&call->sock_node, 0xed, sizeof(call->sock_node));
|
|
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
|
|
index 4106ca9..a338d7a 100644
|
|
--- a/net/rxrpc/ar-connection.c
|
|
+++ b/net/rxrpc/ar-connection.c
|
|
@@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
|
|
rwlock_init(&conn->lock);
|
|
spin_lock_init(&conn->state_lock);
|
|
atomic_set(&conn->usage, 1);
|
|
- conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
|
|
+ conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
|
|
conn->avail_calls = RXRPC_MAXCALLS;
|
|
conn->size_align = 4;
|
|
conn->header_size = sizeof(struct rxrpc_header);
|
|
diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
|
|
index e7ed43a..6afa140 100644
|
|
--- a/net/rxrpc/ar-connevent.c
|
|
+++ b/net/rxrpc/ar-connevent.c
|
|
@@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
|
|
|
|
len = iov[0].iov_len + iov[1].iov_len;
|
|
|
|
- hdr.serial = htonl(atomic_inc_return(&conn->serial));
|
|
+ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
|
|
_proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
|
|
|
|
ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
|
|
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
|
|
index 1a2b0633..e8d1382e 100644
|
|
--- a/net/rxrpc/ar-input.c
|
|
+++ b/net/rxrpc/ar-input.c
|
|
@@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
|
|
/* track the latest serial number on this connection for ACK packet
|
|
* information */
|
|
serial = ntohl(sp->hdr.serial);
|
|
- hi_serial = atomic_read(&call->conn->hi_serial);
|
|
+ hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
|
|
while (serial > hi_serial)
|
|
- hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
|
|
+ hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
|
|
serial);
|
|
|
|
/* request ACK generation for any ACK or DATA packet that requests
|
|
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
|
|
index 8e22bd3..f66d1c0 100644
|
|
--- a/net/rxrpc/ar-internal.h
|
|
+++ b/net/rxrpc/ar-internal.h
|
|
@@ -272,8 +272,8 @@ struct rxrpc_connection {
|
|
int error; /* error code for local abort */
|
|
int debug_id; /* debug ID for printks */
|
|
unsigned call_counter; /* call ID counter */
|
|
- atomic_t serial; /* packet serial number counter */
|
|
- atomic_t hi_serial; /* highest serial number received */
|
|
+ atomic_unchecked_t serial; /* packet serial number counter */
|
|
+ atomic_unchecked_t hi_serial; /* highest serial number received */
|
|
u8 avail_calls; /* number of calls available */
|
|
u8 size_align; /* data size alignment (for security) */
|
|
u8 header_size; /* rxrpc + security header size */
|
|
@@ -346,7 +346,7 @@ struct rxrpc_call {
|
|
spinlock_t lock;
|
|
rwlock_t state_lock; /* lock for state transition */
|
|
atomic_t usage;
|
|
- atomic_t sequence; /* Tx data packet sequence counter */
|
|
+ atomic_unchecked_t sequence; /* Tx data packet sequence counter */
|
|
u32 abort_code; /* local/remote abort code */
|
|
enum { /* current state of call */
|
|
RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
|
|
@@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
|
|
*/
|
|
extern atomic_t rxrpc_n_skbs;
|
|
extern __be32 rxrpc_epoch;
|
|
-extern atomic_t rxrpc_debug_id;
|
|
+extern atomic_unchecked_t rxrpc_debug_id;
|
|
extern struct workqueue_struct *rxrpc_workqueue;
|
|
|
|
/*
|
|
diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
|
|
index 87f7135..74d3703 100644
|
|
--- a/net/rxrpc/ar-local.c
|
|
+++ b/net/rxrpc/ar-local.c
|
|
@@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
|
|
spin_lock_init(&local->lock);
|
|
rwlock_init(&local->services_lock);
|
|
atomic_set(&local->usage, 1);
|
|
- local->debug_id = atomic_inc_return(&rxrpc_debug_id);
|
|
+ local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
|
|
memcpy(&local->srx, srx, sizeof(*srx));
|
|
}
|
|
|
|
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
|
|
index 16ae887..d24f12b 100644
|
|
--- a/net/rxrpc/ar-output.c
|
|
+++ b/net/rxrpc/ar-output.c
|
|
@@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
|
|
sp->hdr.cid = call->cid;
|
|
sp->hdr.callNumber = call->call_id;
|
|
sp->hdr.seq =
|
|
- htonl(atomic_inc_return(&call->sequence));
|
|
+ htonl(atomic_inc_return_unchecked(&call->sequence));
|
|
sp->hdr.serial =
|
|
- htonl(atomic_inc_return(&conn->serial));
|
|
+ htonl(atomic_inc_return_unchecked(&conn->serial));
|
|
sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
|
|
sp->hdr.userStatus = 0;
|
|
sp->hdr.securityIndex = conn->security_ix;
|
|
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
|
|
index 2754f09..b20e38f 100644
|
|
--- a/net/rxrpc/ar-peer.c
|
|
+++ b/net/rxrpc/ar-peer.c
|
|
@@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
|
|
INIT_LIST_HEAD(&peer->error_targets);
|
|
spin_lock_init(&peer->lock);
|
|
atomic_set(&peer->usage, 1);
|
|
- peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
|
|
+ peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
|
|
memcpy(&peer->srx, srx, sizeof(*srx));
|
|
|
|
rxrpc_assess_MTU_size(peer);
|
|
diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
|
|
index 38047f7..9f48511 100644
|
|
--- a/net/rxrpc/ar-proc.c
|
|
+++ b/net/rxrpc/ar-proc.c
|
|
@@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
|
|
atomic_read(&conn->usage),
|
|
rxrpc_conn_states[conn->state],
|
|
key_serial(conn->key),
|
|
- atomic_read(&conn->serial),
|
|
- atomic_read(&conn->hi_serial));
|
|
+ atomic_read_unchecked(&conn->serial),
|
|
+ atomic_read_unchecked(&conn->hi_serial));
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
|
|
index 92df566..87ec1bf 100644
|
|
--- a/net/rxrpc/ar-transport.c
|
|
+++ b/net/rxrpc/ar-transport.c
|
|
@@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
|
|
spin_lock_init(&trans->client_lock);
|
|
rwlock_init(&trans->conn_lock);
|
|
atomic_set(&trans->usage, 1);
|
|
- trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
|
|
+ trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
|
|
|
|
if (peer->srx.transport.family == AF_INET) {
|
|
switch (peer->srx.transport_type) {
|
|
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
|
|
index 7635107..4670276 100644
|
|
--- a/net/rxrpc/rxkad.c
|
|
+++ b/net/rxrpc/rxkad.c
|
|
@@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
|
|
|
|
len = iov[0].iov_len + iov[1].iov_len;
|
|
|
|
- hdr.serial = htonl(atomic_inc_return(&conn->serial));
|
|
+ hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
|
|
_proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
|
|
|
|
ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
|
|
@@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
|
|
|
|
len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
|
|
|
|
- hdr->serial = htonl(atomic_inc_return(&conn->serial));
|
|
+ hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
|
|
_proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
|
|
|
|
ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
|
|
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
|
|
index bc7b5de..a6820b8 100644
|
|
--- a/net/sctp/socket.c
|
|
+++ b/net/sctp/socket.c
|
|
@@ -4623,7 +4623,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
|
|
addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
|
|
if (space_left < addrlen)
|
|
return -ENOMEM;
|
|
- if (copy_to_user(to, &temp, addrlen))
|
|
+ if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
|
|
return -EFAULT;
|
|
to += addrlen;
|
|
cnt++;
|
|
diff --git a/net/socket.c b/net/socket.c
|
|
index 88ab330..aff24ac 100644
|
|
--- a/net/socket.c
|
|
+++ b/net/socket.c
|
|
@@ -2011,7 +2011,7 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
|
|
* checking falls down on this.
|
|
*/
|
|
if (copy_from_user(ctl_buf,
|
|
- (void __user __force *)msg_sys->msg_control,
|
|
+ (void __force_user *)msg_sys->msg_control,
|
|
ctl_len))
|
|
goto out_freectl;
|
|
msg_sys->msg_control = ctl_buf;
|
|
@@ -2829,7 +2829,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
|
|
}
|
|
|
|
ifr = compat_alloc_user_space(buf_size);
|
|
- rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
|
|
+ rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
|
|
|
|
if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
|
|
return -EFAULT;
|
|
@@ -2853,12 +2853,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
|
|
offsetof(struct ethtool_rxnfc, fs.ring_cookie));
|
|
|
|
if (copy_in_user(rxnfc, compat_rxnfc,
|
|
- (void *)(&rxnfc->fs.m_ext + 1) -
|
|
- (void *)rxnfc) ||
|
|
+ (void __user *)(&rxnfc->fs.m_ext + 1) -
|
|
+ (void __user *)rxnfc) ||
|
|
copy_in_user(&rxnfc->fs.ring_cookie,
|
|
&compat_rxnfc->fs.ring_cookie,
|
|
- (void *)(&rxnfc->fs.location + 1) -
|
|
- (void *)&rxnfc->fs.ring_cookie) ||
|
|
+ (void __user *)(&rxnfc->fs.location + 1) -
|
|
+ (void __user *)&rxnfc->fs.ring_cookie) ||
|
|
copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
|
|
sizeof(rxnfc->rule_cnt)))
|
|
return -EFAULT;
|
|
@@ -2870,12 +2870,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
|
|
|
|
if (convert_out) {
|
|
if (copy_in_user(compat_rxnfc, rxnfc,
|
|
- (const void *)(&rxnfc->fs.m_ext + 1) -
|
|
- (const void *)rxnfc) ||
|
|
+ (const void __user *)(&rxnfc->fs.m_ext + 1) -
|
|
+ (const void __user *)rxnfc) ||
|
|
copy_in_user(&compat_rxnfc->fs.ring_cookie,
|
|
&rxnfc->fs.ring_cookie,
|
|
- (const void *)(&rxnfc->fs.location + 1) -
|
|
- (const void *)&rxnfc->fs.ring_cookie) ||
|
|
+ (const void __user *)(&rxnfc->fs.location + 1) -
|
|
+ (const void __user *)&rxnfc->fs.ring_cookie) ||
|
|
copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
|
|
sizeof(rxnfc->rule_cnt)))
|
|
return -EFAULT;
|
|
@@ -2945,7 +2945,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
|
|
old_fs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
err = dev_ioctl(net, cmd,
|
|
- (struct ifreq __user __force *) &kifr);
|
|
+ (struct ifreq __force_user *) &kifr);
|
|
set_fs(old_fs);
|
|
|
|
return err;
|
|
@@ -3054,7 +3054,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
|
|
|
|
old_fs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
- err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
|
|
+ err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
|
|
set_fs(old_fs);
|
|
|
|
if (cmd == SIOCGIFMAP && !err) {
|
|
@@ -3159,7 +3159,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
|
|
ret |= __get_user(rtdev, &(ur4->rt_dev));
|
|
if (rtdev) {
|
|
ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
|
|
- r4.rt_dev = (char __user __force *)devname;
|
|
+ r4.rt_dev = (char __force_user *)devname;
|
|
devname[15] = 0;
|
|
} else
|
|
r4.rt_dev = NULL;
|
|
@@ -3385,8 +3385,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
|
|
int __user *uoptlen;
|
|
int err;
|
|
|
|
- uoptval = (char __user __force *) optval;
|
|
- uoptlen = (int __user __force *) optlen;
|
|
+ uoptval = (char __force_user *) optval;
|
|
+ uoptlen = (int __force_user *) optlen;
|
|
|
|
set_fs(KERNEL_DS);
|
|
if (level == SOL_SOCKET)
|
|
@@ -3406,7 +3406,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
|
|
char __user *uoptval;
|
|
int err;
|
|
|
|
- uoptval = (char __user __force *) optval;
|
|
+ uoptval = (char __force_user *) optval;
|
|
|
|
set_fs(KERNEL_DS);
|
|
if (level == SOL_SOCKET)
|
|
diff --git a/net/socket.c.rej b/net/socket.c.rej
|
|
new file mode 100644
|
|
index 0000000..a137006
|
|
--- /dev/null
|
|
+++ b/net/socket.c.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- net/socket.c 2012-08-09 20:18:52.313847482 +0200
|
|
++++ net/socket.c 2012-08-09 20:19:06.321846735 +0200
|
|
+@@ -2139,7 +2139,7 @@ static int __sys_recvmsg(struct socket *
|
|
+ * kernel msghdr to use the kernel address space)
|
|
+ */
|
|
+
|
|
+- uaddr = (__force void __user *)msg_sys->msg_name;
|
|
++ uaddr = (void __force_user *)msg_sys->msg_name;
|
|
+ uaddr_len = COMPAT_NAMELEN(msg);
|
|
+ if (MSG_CMSG_COMPAT & flags) {
|
|
+ err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
|
|
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
|
|
index b50336b..14890d6 100644
|
|
--- a/net/sunrpc/sched.c
|
|
+++ b/net/sunrpc/sched.c
|
|
@@ -242,9 +242,9 @@ static int rpc_wait_bit_killable(void *word)
|
|
#ifdef RPC_DEBUG
|
|
static void rpc_task_set_debuginfo(struct rpc_task *task)
|
|
{
|
|
- static atomic_t rpc_pid;
|
|
+ static atomic_unchecked_t rpc_pid;
|
|
|
|
- task->tk_pid = atomic_inc_return(&rpc_pid);
|
|
+ task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
|
|
}
|
|
#else
|
|
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
|
|
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
|
|
index 8343737..677025e 100644
|
|
--- a/net/sunrpc/xprtrdma/svc_rdma.c
|
|
+++ b/net/sunrpc/xprtrdma/svc_rdma.c
|
|
@@ -62,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
|
|
static unsigned int min_max_inline = 4096;
|
|
static unsigned int max_max_inline = 65536;
|
|
|
|
-atomic_t rdma_stat_recv;
|
|
-atomic_t rdma_stat_read;
|
|
-atomic_t rdma_stat_write;
|
|
-atomic_t rdma_stat_sq_starve;
|
|
-atomic_t rdma_stat_rq_starve;
|
|
-atomic_t rdma_stat_rq_poll;
|
|
-atomic_t rdma_stat_rq_prod;
|
|
-atomic_t rdma_stat_sq_poll;
|
|
-atomic_t rdma_stat_sq_prod;
|
|
+atomic_unchecked_t rdma_stat_recv;
|
|
+atomic_unchecked_t rdma_stat_read;
|
|
+atomic_unchecked_t rdma_stat_write;
|
|
+atomic_unchecked_t rdma_stat_sq_starve;
|
|
+atomic_unchecked_t rdma_stat_rq_starve;
|
|
+atomic_unchecked_t rdma_stat_rq_poll;
|
|
+atomic_unchecked_t rdma_stat_rq_prod;
|
|
+atomic_unchecked_t rdma_stat_sq_poll;
|
|
+atomic_unchecked_t rdma_stat_sq_prod;
|
|
|
|
/* Temporary NFS request map and context caches */
|
|
struct kmem_cache *svc_rdma_map_cachep;
|
|
@@ -110,7 +110,7 @@ static int read_reset_stat(ctl_table *table, int write,
|
|
len -= *ppos;
|
|
if (len > *lenp)
|
|
len = *lenp;
|
|
- if (len && copy_to_user(buffer, str_buf, len))
|
|
+ if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
|
|
return -EFAULT;
|
|
*lenp = len;
|
|
*ppos += len;
|
|
@@ -151,63 +151,63 @@ static ctl_table svcrdma_parm_table[] = {
|
|
{
|
|
.procname = "rdma_stat_read",
|
|
.data = &rdma_stat_read,
|
|
- .maxlen = sizeof(atomic_t),
|
|
+ .maxlen = sizeof(atomic_unchecked_t),
|
|
.mode = 0644,
|
|
.proc_handler = read_reset_stat,
|
|
},
|
|
{
|
|
.procname = "rdma_stat_recv",
|
|
.data = &rdma_stat_recv,
|
|
- .maxlen = sizeof(atomic_t),
|
|
+ .maxlen = sizeof(atomic_unchecked_t),
|
|
.mode = 0644,
|
|
.proc_handler = read_reset_stat,
|
|
},
|
|
{
|
|
.procname = "rdma_stat_write",
|
|
.data = &rdma_stat_write,
|
|
- .maxlen = sizeof(atomic_t),
|
|
+ .maxlen = sizeof(atomic_unchecked_t),
|
|
.mode = 0644,
|
|
.proc_handler = read_reset_stat,
|
|
},
|
|
{
|
|
.procname = "rdma_stat_sq_starve",
|
|
.data = &rdma_stat_sq_starve,
|
|
- .maxlen = sizeof(atomic_t),
|
|
+ .maxlen = sizeof(atomic_unchecked_t),
|
|
.mode = 0644,
|
|
.proc_handler = read_reset_stat,
|
|
},
|
|
{
|
|
.procname = "rdma_stat_rq_starve",
|
|
.data = &rdma_stat_rq_starve,
|
|
- .maxlen = sizeof(atomic_t),
|
|
+ .maxlen = sizeof(atomic_unchecked_t),
|
|
.mode = 0644,
|
|
.proc_handler = read_reset_stat,
|
|
},
|
|
{
|
|
.procname = "rdma_stat_rq_poll",
|
|
.data = &rdma_stat_rq_poll,
|
|
- .maxlen = sizeof(atomic_t),
|
|
+ .maxlen = sizeof(atomic_unchecked_t),
|
|
.mode = 0644,
|
|
.proc_handler = read_reset_stat,
|
|
},
|
|
{
|
|
.procname = "rdma_stat_rq_prod",
|
|
.data = &rdma_stat_rq_prod,
|
|
- .maxlen = sizeof(atomic_t),
|
|
+ .maxlen = sizeof(atomic_unchecked_t),
|
|
.mode = 0644,
|
|
.proc_handler = read_reset_stat,
|
|
},
|
|
{
|
|
.procname = "rdma_stat_sq_poll",
|
|
.data = &rdma_stat_sq_poll,
|
|
- .maxlen = sizeof(atomic_t),
|
|
+ .maxlen = sizeof(atomic_unchecked_t),
|
|
.mode = 0644,
|
|
.proc_handler = read_reset_stat,
|
|
},
|
|
{
|
|
.procname = "rdma_stat_sq_prod",
|
|
.data = &rdma_stat_sq_prod,
|
|
- .maxlen = sizeof(atomic_t),
|
|
+ .maxlen = sizeof(atomic_unchecked_t),
|
|
.mode = 0644,
|
|
.proc_handler = read_reset_stat,
|
|
},
|
|
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
|
|
index 41cb63b..c4a1489 100644
|
|
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
|
|
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
|
|
@@ -501,7 +501,7 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
|
|
svc_rdma_put_context(ctxt, 0);
|
|
goto out;
|
|
}
|
|
- atomic_inc(&rdma_stat_read);
|
|
+ atomic_inc_unchecked(&rdma_stat_read);
|
|
|
|
if (read_wr.num_sge < chl_map->ch[ch_no].count) {
|
|
chl_map->ch[ch_no].count -= read_wr.num_sge;
|
|
@@ -611,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
|
|
dto_q);
|
|
list_del_init(&ctxt->dto_q);
|
|
} else {
|
|
- atomic_inc(&rdma_stat_rq_starve);
|
|
+ atomic_inc_unchecked(&rdma_stat_rq_starve);
|
|
clear_bit(XPT_DATA, &xprt->xpt_flags);
|
|
ctxt = NULL;
|
|
}
|
|
@@ -631,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
|
|
dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
|
|
ctxt, rdma_xprt, rqstp, ctxt->wc_status);
|
|
BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
|
|
- atomic_inc(&rdma_stat_recv);
|
|
+ atomic_inc_unchecked(&rdma_stat_recv);
|
|
|
|
/* Build up the XDR from the receive buffers. */
|
|
rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
|
|
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
|
|
index 42eb7ba0..c887c45 100644
|
|
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
|
|
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
|
|
@@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
|
|
write_wr.wr.rdma.remote_addr = to;
|
|
|
|
/* Post It */
|
|
- atomic_inc(&rdma_stat_write);
|
|
+ atomic_inc_unchecked(&rdma_stat_write);
|
|
if (svc_rdma_send(xprt, &write_wr))
|
|
goto err;
|
|
return 0;
|
|
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
|
|
index 73b428b..5f3f8f3 100644
|
|
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
|
|
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
|
|
@@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
|
|
return;
|
|
|
|
ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
|
|
- atomic_inc(&rdma_stat_rq_poll);
|
|
+ atomic_inc_unchecked(&rdma_stat_rq_poll);
|
|
|
|
while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
|
|
ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
|
|
@@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
|
|
}
|
|
|
|
if (ctxt)
|
|
- atomic_inc(&rdma_stat_rq_prod);
|
|
+ atomic_inc_unchecked(&rdma_stat_rq_prod);
|
|
|
|
set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
|
|
/*
|
|
@@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
|
|
return;
|
|
|
|
ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
|
|
- atomic_inc(&rdma_stat_sq_poll);
|
|
+ atomic_inc_unchecked(&rdma_stat_sq_poll);
|
|
while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
|
|
if (wc.status != IB_WC_SUCCESS)
|
|
/* Close the transport */
|
|
@@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
|
|
}
|
|
|
|
if (ctxt)
|
|
- atomic_inc(&rdma_stat_sq_prod);
|
|
+ atomic_inc_unchecked(&rdma_stat_sq_prod);
|
|
}
|
|
|
|
static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
|
|
@@ -1266,7 +1266,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
|
|
spin_lock_bh(&xprt->sc_lock);
|
|
if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
|
|
spin_unlock_bh(&xprt->sc_lock);
|
|
- atomic_inc(&rdma_stat_sq_starve);
|
|
+ atomic_inc_unchecked(&rdma_stat_sq_starve);
|
|
|
|
/* See if we can opportunistically reap SQ WR to make room */
|
|
sq_cq_reap(xprt);
|
|
diff --git a/net/tipc/link.c b/net/tipc/link.c
|
|
index b4b9b30..5b62131 100644
|
|
--- a/net/tipc/link.c
|
|
+++ b/net/tipc/link.c
|
|
@@ -1203,7 +1203,7 @@ static int link_send_sections_long(struct tipc_port *sender,
|
|
struct tipc_msg fragm_hdr;
|
|
struct sk_buff *buf, *buf_chain, *prev;
|
|
u32 fragm_crs, fragm_rest, hsz, sect_rest;
|
|
- const unchar *sect_crs;
|
|
+ const unchar __user *sect_crs;
|
|
int curr_sect;
|
|
u32 fragm_no;
|
|
|
|
@@ -1247,7 +1247,7 @@ static int link_send_sections_long(struct tipc_port *sender,
|
|
|
|
if (!sect_rest) {
|
|
sect_rest = msg_sect[++curr_sect].iov_len;
|
|
- sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
|
|
+ sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
|
|
}
|
|
|
|
if (sect_rest < fragm_rest)
|
|
@@ -1266,7 +1266,7 @@ static int link_send_sections_long(struct tipc_port *sender,
|
|
}
|
|
} else
|
|
skb_copy_to_linear_data_offset(buf, fragm_crs,
|
|
- sect_crs, sz);
|
|
+ (const void __force_kernel *)sect_crs, sz);
|
|
sect_crs += sz;
|
|
sect_rest -= sz;
|
|
fragm_crs += sz;
|
|
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
|
|
index e3afe16..333ea83 100644
|
|
--- a/net/tipc/msg.c
|
|
+++ b/net/tipc/msg.c
|
|
@@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
|
|
msg_sect[cnt].iov_len);
|
|
else
|
|
skb_copy_to_linear_data_offset(*buf, pos,
|
|
- msg_sect[cnt].iov_base,
|
|
+ (const void __force_kernel *)msg_sect[cnt].iov_base,
|
|
msg_sect[cnt].iov_len);
|
|
pos += msg_sect[cnt].iov_len;
|
|
}
|
|
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
|
|
index b2964e9..fdf2e27 100644
|
|
--- a/net/tipc/subscr.c
|
|
+++ b/net/tipc/subscr.c
|
|
@@ -101,7 +101,7 @@ static void subscr_send_event(struct tipc_subscription *sub,
|
|
{
|
|
struct iovec msg_sect;
|
|
|
|
- msg_sect.iov_base = (void *)&sub->evt;
|
|
+ msg_sect.iov_base = (void __force_user *)&sub->evt;
|
|
msg_sect.iov_len = sizeof(struct tipc_event);
|
|
|
|
sub->evt.event = htohl(event, sub->swap);
|
|
diff --git a/net/wireless/core.h b/net/wireless/core.h
|
|
index 9febb17..6cb2f67 100644
|
|
--- a/net/wireless/core.h
|
|
+++ b/net/wireless/core.h
|
|
@@ -27,7 +27,7 @@ struct cfg80211_registered_device {
|
|
struct mutex mtx;
|
|
|
|
/* rfkill support */
|
|
- struct rfkill_ops rfkill_ops;
|
|
+ rfkill_ops_no_const rfkill_ops;
|
|
struct rfkill *rfkill;
|
|
struct work_struct rfkill_sync;
|
|
|
|
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
|
|
index af648e0..6185d3a 100644
|
|
--- a/net/wireless/wext-core.c
|
|
+++ b/net/wireless/wext-core.c
|
|
@@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
|
|
*/
|
|
|
|
/* Support for very large requests */
|
|
- if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
|
|
- (user_length > descr->max_tokens)) {
|
|
+ if (user_length > descr->max_tokens) {
|
|
/* Allow userspace to GET more than max so
|
|
* we can support any size GET requests.
|
|
* There is still a limit : -ENOMEM.
|
|
@@ -787,22 +786,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
|
|
}
|
|
}
|
|
|
|
- if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
|
|
- /*
|
|
- * If this is a GET, but not NOMAX, it means that the extra
|
|
- * data is not bounded by userspace, but by max_tokens. Thus
|
|
- * set the length to max_tokens. This matches the extra data
|
|
- * allocation.
|
|
- * The driver should fill it with the number of tokens it
|
|
- * provided, and it may check iwp->length rather than having
|
|
- * knowledge of max_tokens. If the driver doesn't change the
|
|
- * iwp->length, this ioctl just copies back max_token tokens
|
|
- * filled with zeroes. Hopefully the driver isn't claiming
|
|
- * them to be valid data.
|
|
- */
|
|
- iwp->length = descr->max_tokens;
|
|
- }
|
|
-
|
|
err = handler(dev, info, (union iwreq_data *) iwp, extra);
|
|
|
|
iwp->length += essid_compat;
|
|
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
|
|
index 71c80c7..e43384a 100644
|
|
--- a/net/xfrm/xfrm_policy.c
|
|
+++ b/net/xfrm/xfrm_policy.c
|
|
@@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
|
|
{
|
|
policy->walk.dead = 1;
|
|
|
|
- atomic_inc(&policy->genid);
|
|
+ atomic_inc_unchecked(&policy->genid);
|
|
|
|
if (del_timer(&policy->timer))
|
|
xfrm_pol_put(policy);
|
|
@@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
|
|
hlist_add_head(&policy->bydst, chain);
|
|
xfrm_pol_hold(policy);
|
|
net->xfrm.policy_count[dir]++;
|
|
- atomic_inc(&flow_cache_genid);
|
|
+ atomic_inc_unchecked(&flow_cache_genid);
|
|
if (delpol)
|
|
__xfrm_policy_unlink(delpol, dir);
|
|
policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
|
|
@@ -1530,7 +1530,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
|
|
goto out;
|
|
}
|
|
|
|
-static int inline
|
|
+static inline int
|
|
xfrm_dst_alloc_copy(void **target, const void *src, int size)
|
|
{
|
|
if (!*target) {
|
|
@@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
|
|
return 0;
|
|
}
|
|
|
|
-static int inline
|
|
+static inline int
|
|
xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
|
|
{
|
|
#ifdef CONFIG_XFRM_SUB_POLICY
|
|
@@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
|
|
#endif
|
|
}
|
|
|
|
-static int inline
|
|
+static inline int
|
|
xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
|
|
{
|
|
#ifdef CONFIG_XFRM_SUB_POLICY
|
|
@@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
|
|
|
|
xdst->num_pols = num_pols;
|
|
memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
|
|
- xdst->policy_genid = atomic_read(&pols[0]->genid);
|
|
+ xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
|
|
|
|
return xdst;
|
|
}
|
|
@@ -2348,7 +2348,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
|
|
if (xdst->xfrm_genid != dst->xfrm->genid)
|
|
return 0;
|
|
if (xdst->num_pols > 0 &&
|
|
- xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
|
|
+ xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
|
|
return 0;
|
|
|
|
mtu = dst_mtu(dst->child);
|
|
@@ -2885,7 +2885,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
|
|
sizeof(pol->xfrm_vec[i].saddr));
|
|
pol->xfrm_vec[i].encap_family = mp->new_family;
|
|
/* flush bundles */
|
|
- atomic_inc(&pol->genid);
|
|
+ atomic_inc_unchecked(&pol->genid);
|
|
}
|
|
}
|
|
|
|
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
|
|
index ff1720d..fd819e7 100644
|
|
--- a/scripts/Makefile.build
|
|
+++ b/scripts/Makefile.build
|
|
@@ -62,7 +62,7 @@ endif
|
|
ifdef KBUILD_ENABLE_EXTRA_GCC_CHECKS
|
|
warning- := $(empty)
|
|
|
|
-warning-1 := -Wextra -Wunused -Wno-unused-parameter
|
|
+warning-1 := -Wextra -Wunused -Wno-unused-parameter -Wno-missing-field-initializers
|
|
warning-1 += -Wmissing-declarations
|
|
warning-1 += -Wmissing-format-attribute
|
|
warning-1 += -Wmissing-prototypes
|
|
@@ -111,7 +111,7 @@ endif
|
|
endif
|
|
|
|
# Do not include host rules unless needed
|
|
-ifneq ($(hostprogs-y)$(hostprogs-m),)
|
|
+ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(hostcxxlibs-m),)
|
|
include scripts/Makefile.host
|
|
endif
|
|
|
|
diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
|
|
index 686cb0d..9d653bf 100644
|
|
--- a/scripts/Makefile.clean
|
|
+++ b/scripts/Makefile.clean
|
|
@@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
|
|
__clean-files := $(extra-y) $(always) \
|
|
$(targets) $(clean-files) \
|
|
$(host-progs) \
|
|
- $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
|
|
+ $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
|
|
+ $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
|
|
|
|
__clean-files := $(filter-out $(no-clean-files), $(__clean-files))
|
|
|
|
diff --git a/scripts/Makefile.host b/scripts/Makefile.host
|
|
index 1ac414f..38575f7 100644
|
|
--- a/scripts/Makefile.host
|
|
+++ b/scripts/Makefile.host
|
|
@@ -31,6 +31,8 @@
|
|
# Note: Shared libraries consisting of C++ files are not supported
|
|
|
|
__hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
|
|
+__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
|
|
+__hostcxxlibs := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
|
|
|
|
# C code
|
|
# Executables compiled from a single .c file
|
|
@@ -54,11 +56,15 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
|
|
# Shared libaries (only .c supported)
|
|
# Shared libraries (.so) - all .so files referenced in "xxx-objs"
|
|
host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
|
|
+host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
|
|
+host-cxxshlib := $(sort $(filter %.so, $(__hostcxxlibs)))
|
|
# Remove .so files from "xxx-objs"
|
|
host-cobjs := $(filter-out %.so,$(host-cobjs))
|
|
+host-cxxobjs := $(filter-out %.so,$(host-cxxobjs))
|
|
|
|
-#Object (.o) files used by the shared libaries
|
|
+# Object (.o) files used by the shared libaries
|
|
host-cshobjs := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
|
|
+host-cxxshobjs := $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs))))
|
|
|
|
# output directory for programs/.o files
|
|
# hostprogs-y := tools/build may have been specified. Retrieve directory
|
|
@@ -82,7 +88,9 @@ host-cobjs := $(addprefix $(obj)/,$(host-cobjs))
|
|
host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
|
|
host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs))
|
|
host-cshlib := $(addprefix $(obj)/,$(host-cshlib))
|
|
+host-cxxshlib := $(addprefix $(obj)/,$(host-cxxshlib))
|
|
host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs))
|
|
+host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs))
|
|
host-objdirs := $(addprefix $(obj)/,$(host-objdirs))
|
|
|
|
obj-dirs += $(host-objdirs)
|
|
@@ -156,6 +164,13 @@ quiet_cmd_host-cshobjs = HOSTCC -fPIC $@
|
|
$(host-cshobjs): $(obj)/%.o: $(src)/%.c FORCE
|
|
$(call if_changed_dep,host-cshobjs)
|
|
|
|
+# Compile .c file, create position independent .o file
|
|
+# host-cxxshobjs -> .o
|
|
+quiet_cmd_host-cxxshobjs = HOSTCXX -fPIC $@
|
|
+ cmd_host-cxxshobjs = $(HOSTCXX) $(hostcxx_flags) -fPIC -c -o $@ $<
|
|
+$(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE
|
|
+ $(call if_changed_dep,host-cxxshobjs)
|
|
+
|
|
# Link a shared library, based on position independent .o files
|
|
# *.o -> .so shared library (host-cshlib)
|
|
quiet_cmd_host-cshlib = HOSTLLD -shared $@
|
|
@@ -165,6 +180,15 @@ quiet_cmd_host-cshlib = HOSTLLD -shared $@
|
|
$(host-cshlib): $(obj)/%: $(host-cshobjs) FORCE
|
|
$(call if_changed,host-cshlib)
|
|
|
|
+# Link a shared library, based on position independent .o files
|
|
+# *.o -> .so shared library (host-cxxshlib)
|
|
+quiet_cmd_host-cxxshlib = HOSTLLD -shared $@
|
|
+ cmd_host-cxxshlib = $(HOSTCXX) $(HOSTLDFLAGS) -shared -o $@ \
|
|
+ $(addprefix $(obj)/,$($(@F:.so=-objs))) \
|
|
+ $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
|
|
+$(host-cxxshlib): $(obj)/%: $(host-cxxshobjs) FORCE
|
|
+ $(call if_changed,host-cxxshlib)
|
|
+
|
|
targets += $(host-csingle) $(host-cmulti) $(host-cobjs)\
|
|
- $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs)
|
|
+ $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) $(host-cxxshlib) $(host-cxxshobjs)
|
|
|
|
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
|
|
index cb1f50c..cef2a7c 100644
|
|
--- a/scripts/basic/fixdep.c
|
|
+++ b/scripts/basic/fixdep.c
|
|
@@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
|
|
/*
|
|
* Lookup a value in the configuration string.
|
|
*/
|
|
-static int is_defined_config(const char *name, int len, unsigned int hash)
|
|
+static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
|
|
{
|
|
struct item *aux;
|
|
|
|
@@ -211,10 +211,10 @@ static void clear_config(void)
|
|
/*
|
|
* Record the use of a CONFIG_* word.
|
|
*/
|
|
-static void use_config(const char *m, int slen)
|
|
+static void use_config(const char *m, unsigned int slen)
|
|
{
|
|
unsigned int hash = strhash(m, slen);
|
|
- int c, i;
|
|
+ unsigned int c, i;
|
|
|
|
if (is_defined_config(m, slen, hash))
|
|
return;
|
|
@@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
|
|
|
|
static void parse_config_file(const char *map, size_t len)
|
|
{
|
|
- const int *end = (const int *) (map + len);
|
|
+ const unsigned int *end = (const unsigned int *) (map + len);
|
|
/* start at +1, so that p can never be < map */
|
|
- const int *m = (const int *) map + 1;
|
|
+ const unsigned int *m = (const unsigned int *) map + 1;
|
|
const char *p, *q;
|
|
|
|
for (; m < end; m++) {
|
|
@@ -406,7 +406,7 @@ static void print_deps(void)
|
|
static void traps(void)
|
|
{
|
|
static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
|
|
- int *p = (int *)test;
|
|
+ unsigned int *p = (unsigned int *)test;
|
|
|
|
if (*p != INT_CONF) {
|
|
fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
|
|
diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
|
|
new file mode 100644
|
|
index 0000000..008ac1a
|
|
--- /dev/null
|
|
+++ b/scripts/gcc-plugin.sh
|
|
@@ -0,0 +1,17 @@
|
|
+#!/bin/bash
|
|
+plugincc=`$1 -x c -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 <<EOF
|
|
+#include "gcc-plugin.h"
|
|
+#include "tree.h"
|
|
+#include "tm.h"
|
|
+#include "rtl.h"
|
|
+#ifdef ENABLE_BUILD_WITH_CXX
|
|
+#warning $2
|
|
+#else
|
|
+#warning $1
|
|
+#endif
|
|
+EOF`
|
|
+if [ $? -eq 0 ]
|
|
+then
|
|
+ [[ "$plugincc" =~ "$1" ]] && echo "$1"
|
|
+ [[ "$plugincc" =~ "$2" ]] && echo "$2"
|
|
+fi
|
|
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
|
|
index dcaaaa90..0992ab2 100644
|
|
--- a/scripts/mod/file2alias.c
|
|
+++ b/scripts/mod/file2alias.c
|
|
@@ -128,7 +128,7 @@ static void device_id_check(const char *modname, const char *device_id,
|
|
unsigned long size, unsigned long id_size,
|
|
void *symval)
|
|
{
|
|
- int i;
|
|
+ unsigned int i;
|
|
|
|
if (size % id_size || size < id_size) {
|
|
if (cross_build != 0)
|
|
@@ -158,7 +158,7 @@ static void device_id_check(const char *modname, const char *device_id,
|
|
/* USB is special because the bcdDevice can be matched against a numeric range */
|
|
/* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipNinN" */
|
|
static void do_usb_entry(struct usb_device_id *id,
|
|
- unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
|
|
+ unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
|
|
unsigned char range_lo, unsigned char range_hi,
|
|
unsigned char max, struct module *mod)
|
|
{
|
|
@@ -262,7 +262,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
|
|
{
|
|
unsigned int devlo, devhi;
|
|
unsigned char chi, clo, max;
|
|
- int ndigits;
|
|
+ unsigned int ndigits;
|
|
|
|
id->match_flags = TO_NATIVE(id->match_flags);
|
|
id->idVendor = TO_NATIVE(id->idVendor);
|
|
@@ -504,7 +504,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
|
|
for (i = 0; i < count; i++) {
|
|
const char *id = (char *)devs[i].id;
|
|
char acpi_id[sizeof(devs[0].id)];
|
|
- int j;
|
|
+ unsigned int j;
|
|
|
|
buf_printf(&mod->dev_table_buf,
|
|
"MODULE_ALIAS(\"pnp:d%s*\");\n", id);
|
|
@@ -534,7 +534,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
|
|
|
|
for (j = 0; j < PNP_MAX_DEVICES; j++) {
|
|
const char *id = (char *)card->devs[j].id;
|
|
- int i2, j2;
|
|
+ unsigned int i2, j2;
|
|
int dup = 0;
|
|
|
|
if (!id[0])
|
|
@@ -560,7 +560,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
|
|
/* add an individual alias for every device entry */
|
|
if (!dup) {
|
|
char acpi_id[sizeof(card->devs[0].id)];
|
|
- int k;
|
|
+ unsigned int k;
|
|
|
|
buf_printf(&mod->dev_table_buf,
|
|
"MODULE_ALIAS(\"pnp:d%s*\");\n", id);
|
|
@@ -885,7 +885,7 @@ static void dmi_ascii_filter(char *d, const char *s)
|
|
static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
|
|
char *alias)
|
|
{
|
|
- int i, j;
|
|
+ unsigned int i, j;
|
|
|
|
sprintf(alias, "dmi*");
|
|
|
|
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
|
|
index 3187684..cc11c0e 100644
|
|
--- a/scripts/mod/modpost.c
|
|
+++ b/scripts/mod/modpost.c
|
|
@@ -928,6 +928,7 @@ enum mismatch {
|
|
ANY_INIT_TO_ANY_EXIT,
|
|
ANY_EXIT_TO_ANY_INIT,
|
|
EXPORT_TO_INIT_EXIT,
|
|
+ DATA_TO_TEXT
|
|
};
|
|
|
|
struct sectioncheck {
|
|
@@ -1036,6 +1037,12 @@ const struct sectioncheck sectioncheck[] = {
|
|
.tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
|
|
.mismatch = EXPORT_TO_INIT_EXIT,
|
|
.symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
|
|
+},
|
|
+/* Do not reference code from writable data */
|
|
+{
|
|
+ .fromsec = { DATA_SECTIONS, NULL },
|
|
+ .tosec = { TEXT_SECTIONS, NULL },
|
|
+ .mismatch = DATA_TO_TEXT
|
|
}
|
|
};
|
|
|
|
@@ -1158,10 +1165,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
|
|
continue;
|
|
if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
|
|
continue;
|
|
- if (sym->st_value == addr)
|
|
- return sym;
|
|
/* Find a symbol nearby - addr are maybe negative */
|
|
d = sym->st_value - addr;
|
|
+ if (d == 0)
|
|
+ return sym;
|
|
if (d < 0)
|
|
d = addr - sym->st_value;
|
|
if (d < distance) {
|
|
@@ -1440,6 +1447,14 @@ static void report_sec_mismatch(const char *modname,
|
|
tosym, prl_to, prl_to, tosym);
|
|
free(prl_to);
|
|
break;
|
|
+ case DATA_TO_TEXT:
|
|
+#if 0
|
|
+ fprintf(stderr,
|
|
+ "The %s %s:%s references\n"
|
|
+ "the %s %s:%s%s\n",
|
|
+ from, fromsec, fromsym, to, tosec, tosym, to_p);
|
|
+#endif
|
|
+ break;
|
|
}
|
|
fprintf(stderr, "\n");
|
|
}
|
|
@@ -1674,7 +1689,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
|
|
static void check_sec_ref(struct module *mod, const char *modname,
|
|
struct elf_info *elf)
|
|
{
|
|
- int i;
|
|
+ unsigned int i;
|
|
Elf_Shdr *sechdrs = elf->sechdrs;
|
|
|
|
/* Walk through all sections */
|
|
@@ -1772,7 +1787,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
|
|
va_end(ap);
|
|
}
|
|
|
|
-void buf_write(struct buffer *buf, const char *s, int len)
|
|
+void buf_write(struct buffer *buf, const char *s, unsigned int len)
|
|
{
|
|
if (buf->size - buf->pos < len) {
|
|
buf->size += len + SZ;
|
|
@@ -1990,7 +2005,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
|
|
if (fstat(fileno(file), &st) < 0)
|
|
goto close_write;
|
|
|
|
- if (st.st_size != b->pos)
|
|
+ if (st.st_size != (off_t)b->pos)
|
|
goto close_write;
|
|
|
|
tmp = NOFAIL(malloc(b->pos));
|
|
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
|
|
index 51207e4..f7d603d 100644
|
|
--- a/scripts/mod/modpost.h
|
|
+++ b/scripts/mod/modpost.h
|
|
@@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
|
|
|
|
struct buffer {
|
|
char *p;
|
|
- int pos;
|
|
- int size;
|
|
+ unsigned int pos;
|
|
+ unsigned int size;
|
|
};
|
|
|
|
void __attribute__((format(printf, 2, 3)))
|
|
buf_printf(struct buffer *buf, const char *fmt, ...);
|
|
|
|
void
|
|
-buf_write(struct buffer *buf, const char *s, int len);
|
|
+buf_write(struct buffer *buf, const char *s, unsigned int len);
|
|
|
|
struct module {
|
|
struct module *next;
|
|
diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
|
|
index 9dfcd6d..099068e 100644
|
|
--- a/scripts/mod/sumversion.c
|
|
+++ b/scripts/mod/sumversion.c
|
|
@@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
|
|
goto out;
|
|
}
|
|
|
|
- if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
|
|
+ if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
|
|
warn("writing sum in %s failed: %s\n",
|
|
filename, strerror(errno));
|
|
goto out;
|
|
diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
|
|
index 5c11312..72742b5 100644
|
|
--- a/scripts/pnmtologo.c
|
|
+++ b/scripts/pnmtologo.c
|
|
@@ -237,14 +237,14 @@ static void write_header(void)
|
|
fprintf(out, " * Linux logo %s\n", logoname);
|
|
fputs(" */\n\n", out);
|
|
fputs("#include <linux/linux_logo.h>\n\n", out);
|
|
- fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
|
|
+ fprintf(out, "static unsigned char %s_data[] = {\n",
|
|
logoname);
|
|
}
|
|
|
|
static void write_footer(void)
|
|
{
|
|
fputs("\n};\n\n", out);
|
|
- fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
|
|
+ fprintf(out, "const struct linux_logo %s = {\n", logoname);
|
|
fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
|
|
fprintf(out, "\t.width\t\t= %d,\n", logo_width);
|
|
fprintf(out, "\t.height\t\t= %d,\n", logo_height);
|
|
@@ -374,7 +374,7 @@ static void write_logo_clut224(void)
|
|
fputs("\n};\n\n", out);
|
|
|
|
/* write logo clut */
|
|
- fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
|
|
+ fprintf(out, "static unsigned char %s_clut[] = {\n",
|
|
logoname);
|
|
write_hex_cnt = 0;
|
|
for (i = 0; i < logo_clutsize; i++) {
|
|
diff --git a/security/Kconfig b/security/Kconfig
|
|
index ccc61f8..c140672 100644
|
|
--- a/security/Kconfig
|
|
+++ b/security/Kconfig
|
|
@@ -4,6 +4,649 @@
|
|
|
|
menu "Security options"
|
|
|
|
+menu "PaX"
|
|
+
|
|
+ config ARCH_TRACK_EXEC_LIMIT
|
|
+ bool
|
|
+
|
|
+ config PAX_KERNEXEC_PLUGIN
|
|
+ bool
|
|
+
|
|
+ config PAX_PER_CPU_PGD
|
|
+ bool
|
|
+
|
|
+ config TASK_SIZE_MAX_SHIFT
|
|
+ int
|
|
+ depends on X86_64
|
|
+ default 47 if !PAX_PER_CPU_PGD
|
|
+ default 42 if PAX_PER_CPU_PGD
|
|
+
|
|
+ config PAX_USERCOPY_SLABS
|
|
+ bool
|
|
+
|
|
+config PAX
|
|
+ bool "Enable various PaX features"
|
|
+ depends on ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86
|
|
+ help
|
|
+ This allows you to enable various PaX features. PaX adds
|
|
+ intrusion prevention mechanisms to the kernel that reduce
|
|
+ the risks posed by exploitable memory corruption bugs.
|
|
+
|
|
+menu "PaX Control"
|
|
+ depends on PAX
|
|
+
|
|
+config PAX_SOFTMODE
|
|
+ bool 'Support soft mode'
|
|
+ help
|
|
+ Enabling this option will allow you to run PaX in soft mode, that
|
|
+ is, PaX features will not be enforced by default, only on executables
|
|
+ marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
|
|
+ support as they are the only way to mark executables for soft mode use.
|
|
+
|
|
+ Soft mode can be activated by using the "pax_softmode=1" kernel command
|
|
+ line option on boot. Furthermore you can control various PaX features
|
|
+ at runtime via the entries in /proc/sys/kernel/pax.
|
|
+
|
|
+config PAX_EI_PAX
|
|
+ bool 'Use legacy ELF header marking'
|
|
+ help
|
|
+ Enabling this option will allow you to control PaX features on
|
|
+ a per executable basis via the 'chpax' utility available at
|
|
+ http://pax.grsecurity.net/. The control flags will be read from
|
|
+ an otherwise reserved part of the ELF header. This marking has
|
|
+ numerous drawbacks (no support for soft-mode, toolchain does not
|
|
+ know about the non-standard use of the ELF header) therefore it
|
|
+ has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
|
|
+ support.
|
|
+
|
|
+ Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
|
|
+ support as well, they will override the legacy EI_PAX marks.
|
|
+
|
|
+ If you enable none of the marking options then all applications
|
|
+ will run with PaX enabled on them by default.
|
|
+
|
|
+config PAX_PT_PAX_FLAGS
|
|
+ bool 'Use ELF program header marking'
|
|
+ help
|
|
+ Enabling this option will allow you to control PaX features on
|
|
+ a per executable basis via the 'paxctl' utility available at
|
|
+ http://pax.grsecurity.net/. The control flags will be read from
|
|
+ a PaX specific ELF program header (PT_PAX_FLAGS). This marking
|
|
+ has the benefits of supporting both soft mode and being fully
|
|
+ integrated into the toolchain (the binutils patch is available
|
|
+ from http://pax.grsecurity.net).
|
|
+
|
|
+ Note that if you enable the legacy EI_PAX marking support as well,
|
|
+ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
|
|
+
|
|
+ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
|
|
+ must make sure that the marks are the same if a binary has both marks.
|
|
+
|
|
+ If you enable none of the marking options then all applications
|
|
+ will run with PaX enabled on them by default.
|
|
+
|
|
+config PAX_XATTR_PAX_FLAGS
|
|
+ bool 'Use filesystem extended attributes marking'
|
|
+ select CIFS_XATTR if CIFS
|
|
+ select EXT2_FS_XATTR if EXT2_FS
|
|
+ select EXT3_FS_XATTR if EXT3_FS
|
|
+ select EXT4_FS_XATTR if EXT4_FS
|
|
+ select JFFS2_FS_XATTR if JFFS2_FS
|
|
+ select REISERFS_FS_XATTR if REISERFS_FS
|
|
+ select SQUASHFS_XATTR if SQUASHFS
|
|
+ select TMPFS_XATTR if TMPFS
|
|
+ select UBIFS_FS_XATTR if UBIFS_FS
|
|
+ help
|
|
+ Enabling this option will allow you to control PaX features on
|
|
+ a per executable basis via the 'setfattr' utility. The control
|
|
+ flags will be read from the user.pax.flags extended attribute of
|
|
+ the file. This marking has the benefit of supporting binary-only
|
|
+ applications that self-check themselves (e.g., skype) and would
|
|
+ not tolerate chpax/paxctl changes. The main drawback is that
|
|
+ extended attributes are not supported by some filesystems (e.g.,
|
|
+ isofs, udf, vfat) so copying files through such filesystems will
|
|
+ lose the extended attributes and these PaX markings.
|
|
+
|
|
+ Note that if you enable the legacy EI_PAX marking support as well,
|
|
+ the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
|
|
+
|
|
+ If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
|
|
+ must make sure that the marks are the same if a binary has both marks.
|
|
+
|
|
+ If you enable none of the marking options then all applications
|
|
+ will run with PaX enabled on them by default.
|
|
+
|
|
+choice
|
|
+ prompt 'MAC system integration'
|
|
+ default PAX_NO_ACL_FLAGS
|
|
+ help
|
|
+ Mandatory Access Control systems have the option of controlling
|
|
+ PaX flags on a per executable basis, choose the method supported
|
|
+ by your particular system.
|
|
+
|
|
+ - "none": if your MAC system does not interact with PaX,
|
|
+ - "direct": if your MAC system defines pax_set_initial_flags() itself,
|
|
+ - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
|
|
+
|
|
+ NOTE: this option is for developers/integrators only.
|
|
+
|
|
+ config PAX_NO_ACL_FLAGS
|
|
+ bool 'none'
|
|
+
|
|
+ config PAX_HAVE_ACL_FLAGS
|
|
+ bool 'direct'
|
|
+
|
|
+ config PAX_HOOK_ACL_FLAGS
|
|
+ bool 'hook'
|
|
+endchoice
|
|
+
|
|
+endmenu
|
|
+
|
|
+menu "Non-executable pages"
|
|
+ depends on PAX
|
|
+
|
|
+config PAX_NOEXEC
|
|
+ bool "Enforce non-executable pages"
|
|
+ depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
|
|
+ help
|
|
+ By design some architectures do not allow for protecting memory
|
|
+ pages against execution or even if they do, Linux does not make
|
|
+ use of this feature. In practice this means that if a page is
|
|
+ readable (such as the stack or heap) it is also executable.
|
|
+
|
|
+ There is a well known exploit technique that makes use of this
|
|
+ fact and a common programming mistake where an attacker can
|
|
+ introduce code of his choice somewhere in the attacked program's
|
|
+ memory (typically the stack or the heap) and then execute it.
|
|
+
|
|
+ If the attacked program was running with different (typically
|
|
+ higher) privileges than that of the attacker, then he can elevate
|
|
+ his own privilege level (e.g. get a root shell, write to files for
|
|
+ which he does not have write access to, etc).
|
|
+
|
|
+ Enabling this option will let you choose from various features
|
|
+ that prevent the injection and execution of 'foreign' code in
|
|
+ a program.
|
|
+
|
|
+ This will also break programs that rely on the old behaviour and
|
|
+ expect that dynamically allocated memory via the malloc() family
|
|
+ of functions is executable (which it is not). Notable examples
|
|
+ are the XFree86 4.x server, the java runtime and wine.
|
|
+
|
|
+config PAX_PAGEEXEC
|
|
+ bool "Paging based non-executable pages"
|
|
+ depends on !COMPAT_VDSO && PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4 || MPSC || MATOM || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
|
|
+ select X86_PAE if X86_32 && !HIGHMEM4G && (MCORE2 || MPSC || MATOM || MK8)
|
|
+ select ARCH_TRACK_EXEC_LIMIT if X86_32
|
|
+ select S390_SWITCH_AMODE if S390
|
|
+ select S390_EXEC_PROTECT if S390
|
|
+ help
|
|
+ This implementation is based on the paging feature of the CPU.
|
|
+ On i386 without hardware non-executable bit support there is a
|
|
+ variable but usually low performance impact, however on Intel's
|
|
+ P4 core based CPUs it is very high so you should not enable this
|
|
+ for kernels meant to be used on such CPUs.
|
|
+
|
|
+ On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
|
|
+ with hardware non-executable bit support there is no performance
|
|
+ impact, on ppc the impact is negligible.
|
|
+
|
|
+ Note that several architectures require various emulations due to
|
|
+ badly designed userland ABIs, this will cause a performance impact
|
|
+ but will disappear as soon as userland is fixed. For example, ppc
|
|
+ userland MUST have been built with secure-plt by a recent toolchain.
|
|
+
|
|
+config PAX_SEGMEXEC
|
|
+ bool "Segmentation based non-executable pages"
|
|
+ depends on !COMPAT_VDSO && PAX_NOEXEC && X86_32
|
|
+ help
|
|
+ This implementation is based on the segmentation feature of the
|
|
+ CPU and has a very small performance impact, however applications
|
|
+ will be limited to a 1.5 GB address space instead of the normal
|
|
+ 3 GB.
|
|
+
|
|
+config PAX_EMUTRAMP
|
|
+ bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
|
|
+ default y if PARISC
|
|
+ help
|
|
+ There are some programs and libraries that for one reason or
|
|
+ another attempt to execute special small code snippets from
|
|
+ non-executable memory pages. Most notable examples are the
|
|
+ signal handler return code generated by the kernel itself and
|
|
+ the GCC trampolines.
|
|
+
|
|
+ If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
|
|
+ such programs will no longer work under your kernel.
|
|
+
|
|
+ As a remedy you can say Y here and use the 'chpax' or 'paxctl'
|
|
+ utilities to enable trampoline emulation for the affected programs
|
|
+ yet still have the protection provided by the non-executable pages.
|
|
+
|
|
+ On parisc you MUST enable this option and EMUSIGRT as well, otherwise
|
|
+ your system will not even boot.
|
|
+
|
|
+ Alternatively you can say N here and use the 'chpax' or 'paxctl'
|
|
+ utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
|
|
+ for the affected files.
|
|
+
|
|
+ NOTE: enabling this feature *may* open up a loophole in the
|
|
+ protection provided by non-executable pages that an attacker
|
|
+ could abuse. Therefore the best solution is to not have any
|
|
+ files on your system that would require this option. This can
|
|
+ be achieved by not using libc5 (which relies on the kernel
|
|
+ signal handler return code) and not using or rewriting programs
|
|
+ that make use of the nested function implementation of GCC.
|
|
+ Skilled users can just fix GCC itself so that it implements
|
|
+ nested function calls in a way that does not interfere with PaX.
|
|
+
|
|
+config PAX_EMUSIGRT
|
|
+ bool "Automatically emulate sigreturn trampolines"
|
|
+ depends on PAX_EMUTRAMP && PARISC
|
|
+ default y
|
|
+ help
|
|
+ Enabling this option will have the kernel automatically detect
|
|
+ and emulate signal return trampolines executing on the stack
|
|
+ that would otherwise lead to task termination.
|
|
+
|
|
+ This solution is intended as a temporary one for users with
|
|
+ legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
|
|
+ Modula-3 runtime, etc) or executables linked to such, basically
|
|
+ everything that does not specify its own SA_RESTORER function in
|
|
+ normal executable memory like glibc 2.1+ does.
|
|
+
|
|
+ On parisc you MUST enable this option, otherwise your system will
|
|
+ not even boot.
|
|
+
|
|
+ NOTE: this feature cannot be disabled on a per executable basis
|
|
+ and since it *does* open up a loophole in the protection provided
|
|
+ by non-executable pages, the best solution is to not have any
|
|
+ files on your system that would require this option.
|
|
+
|
|
+config PAX_MPROTECT
|
|
+ bool "Restrict mprotect()"
|
|
+ depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
|
|
+ help
|
|
+ Enabling this option will prevent programs from
|
|
+ - changing the executable status of memory pages that were
|
|
+ not originally created as executable,
|
|
+ - making read-only executable pages writable again,
|
|
+ - creating executable pages from anonymous memory,
|
|
+ - making read-only-after-relocations (RELRO) data pages writable again.
|
|
+
|
|
+ You should say Y here to complete the protection provided by
|
|
+ the enforcement of non-executable pages.
|
|
+
|
|
+ NOTE: you can use the 'chpax' or 'paxctl' utilities to control
|
|
+ this feature on a per file basis.
|
|
+
|
|
+config PAX_ELFRELOCS
|
|
+ bool "Allow ELF text relocations"
|
|
+ depends on PAX_MPROTECT
|
|
+ default n
|
|
+ help
|
|
+ Non-executable pages and mprotect() restrictions are effective
|
|
+ in preventing the introduction of new executable code into an
|
|
+ attacked task's address space. There remain only two venues
|
|
+ for this kind of attack: if the attacker can execute already
|
|
+ existing code in the attacked task then he can either have it
|
|
+ create and mmap() a file containing his code or have it mmap()
|
|
+ an already existing ELF library that does not have position
|
|
+ independent code in it and use mprotect() on it to make it
|
|
+ writable and copy his code there. While protecting against
|
|
+ the former approach is beyond PaX, the latter can be prevented
|
|
+ by having only PIC ELF libraries on one's system (which do not
|
|
+ need to relocate their code). If you are sure this is your case,
|
|
+ then disable this option otherwise be careful as you may not even
|
|
+ be able to boot or log on your system (for example, some PAM
|
|
+ modules are erroneously compiled as non-PIC by default).
|
|
+
|
|
+ NOTE: if you are using dynamic ELF executables (as suggested
|
|
+ when using ASLR) then you must have made sure that you linked
|
|
+ your files using the PIC version of crt1 (the et_dyn.tar.gz package
|
|
+ referenced there has already been updated to support this).
|
|
+
|
|
+config PAX_ETEXECRELOCS
|
|
+ bool "Allow ELF ET_EXEC text relocations"
|
|
+ depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
|
|
+ select PAX_ELFRELOCS
|
|
+ default y
|
|
+ help
|
|
+ On some architectures there are incorrectly created applications
|
|
+ that require text relocations and would not work without enabling
|
|
+ this option. If you are an alpha, ia64 or parisc user, you should
|
|
+ enable this option and disable it once you have made sure that
|
|
+ none of your applications need it.
|
|
+
|
|
+config PAX_EMUPLT
|
|
+ bool "Automatically emulate ELF PLT"
|
|
+ depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
|
|
+ default y
|
|
+ help
|
|
+ Enabling this option will have the kernel automatically detect
|
|
+ and emulate the Procedure Linkage Table entries in ELF files.
|
|
+ On some architectures such entries are in writable memory, and
|
|
+ become non-executable leading to task termination. Therefore
|
|
+ it is mandatory that you enable this option on alpha, parisc,
|
|
+ sparc and sparc64, otherwise your system would not even boot.
|
|
+
|
|
+ NOTE: this feature *does* open up a loophole in the protection
|
|
+ provided by the non-executable pages, therefore the proper
|
|
+ solution is to modify the toolchain to produce a PLT that does
|
|
+ not need to be writable.
|
|
+
|
|
+config PAX_DLRESOLVE
|
|
+ bool 'Emulate old glibc resolver stub'
|
|
+ depends on PAX_EMUPLT && SPARC
|
|
+ default n
|
|
+ help
|
|
+ This option is needed if userland has an old glibc (before 2.4)
|
|
+ that puts a 'save' instruction into the runtime generated resolver
|
|
+ stub that needs special emulation.
|
|
+
|
|
+config PAX_KERNEXEC
|
|
+ bool "Enforce non-executable kernel pages"
|
|
+ depends on (PPC || X86) && !COMPAT_VDSO && !XEN && (!X86_32 || X86_WP_WORKS_OK)
|
|
+ select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
|
|
+ select PAX_KERNEXEC_PLUGIN if X86_64
|
|
+ help
|
|
+ This is the kernel land equivalent of PAGEEXEC and MPROTECT,
|
|
+ that is, enabling this option will make it harder to inject
|
|
+ and execute 'foreign' code in kernel memory itself.
|
|
+
|
|
+ Note that on x86_64 kernels there is a known regression when
|
|
+ this feature and KVM/VMX are both enabled in the host kernel.
|
|
+
|
|
+choice
|
|
+ prompt "Return Address Instrumentation Method"
|
|
+ default PAX_KERNEXEC_PLUGIN_METHOD_BTS
|
|
+ depends on PAX_KERNEXEC_PLUGIN
|
|
+ help
|
|
+ Select the method used to instrument function pointer dereferences.
|
|
+ Note that binary modules cannot be instrumented by this approach.
|
|
+
|
|
+ config PAX_KERNEXEC_PLUGIN_METHOD_BTS
|
|
+ bool "bts"
|
|
+ help
|
|
+ This method is compatible with binary only modules but has
|
|
+ a higher runtime overhead.
|
|
+
|
|
+ config PAX_KERNEXEC_PLUGIN_METHOD_OR
|
|
+ bool "or"
|
|
+ depends on !PARAVIRT
|
|
+ help
|
|
+ This method is incompatible with binary only modules but has
|
|
+ a lower runtime overhead.
|
|
+endchoice
|
|
+
|
|
+config PAX_KERNEXEC_PLUGIN_METHOD
|
|
+ string
|
|
+ default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
|
|
+ default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
|
|
+ default ""
|
|
+
|
|
+config PAX_KERNEXEC_MODULE_TEXT
|
|
+ int "Minimum amount of memory reserved for module code"
|
|
+ default "4"
|
|
+ depends on PAX_KERNEXEC && X86_32 && MODULES
|
|
+ help
|
|
+ Due to implementation details the kernel must reserve a fixed
|
|
+ amount of memory for module code at compile time that cannot be
|
|
+ changed at runtime. Here you can specify the minimum amount
|
|
+ in MB that will be reserved. Due to the same implementation
|
|
+ details this size will always be rounded up to the next 2/4 MB
|
|
+ boundary (depends on PAE) so the actually available memory for
|
|
+ module code will usually be more than this minimum.
|
|
+
|
|
+ The default 4 MB should be enough for most users but if you have
|
|
+ an excessive number of modules (e.g., most distribution configs
|
|
+ compile many drivers as modules) or use huge modules such as
|
|
+ nvidia's kernel driver, you will need to adjust this amount.
|
|
+ A good rule of thumb is to look at your currently loaded kernel
|
|
+ modules and add up their sizes.
|
|
+
|
|
+endmenu
|
|
+
|
|
+menu "Address Space Layout Randomization"
|
|
+ depends on PAX
|
|
+
|
|
+config PAX_ASLR
|
|
+ bool "Address Space Layout Randomization"
|
|
+ help
|
|
+ Many if not most exploit techniques rely on the knowledge of
|
|
+ certain addresses in the attacked program. The following options
|
|
+ will allow the kernel to apply a certain amount of randomization
|
|
+ to specific parts of the program thereby forcing an attacker to
|
|
+ guess them in most cases. Any failed guess will most likely crash
|
|
+ the attacked program which allows the kernel to detect such attempts
|
|
+ and react on them. PaX itself provides no reaction mechanisms,
|
|
+ instead it is strongly encouraged that you make use of Nergal's
|
|
+ segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
|
|
+ (http://www.grsecurity.net/) built-in crash detection features or
|
|
+ develop one yourself.
|
|
+
|
|
+ By saying Y here you can choose to randomize the following areas:
|
|
+ - top of the task's kernel stack
|
|
+ - top of the task's userland stack
|
|
+ - base address for mmap() requests that do not specify one
|
|
+ (this includes all libraries)
|
|
+ - base address of the main executable
|
|
+
|
|
+ It is strongly recommended to say Y here as address space layout
|
|
+ randomization has negligible impact on performance yet it provides
|
|
+ a very effective protection.
|
|
+
|
|
+ NOTE: you can use the 'chpax' or 'paxctl' utilities to control
|
|
+ this feature on a per file basis.
|
|
+
|
|
+config PAX_RANDKSTACK
|
|
+ bool "Randomize kernel stack base"
|
|
+ depends on X86_TSC && X86
|
|
+ help
|
|
+ By saying Y here the kernel will randomize every task's kernel
|
|
+ stack on every system call. This will not only force an attacker
|
|
+ to guess it but also prevent him from making use of possible
|
|
+ leaked information about it.
|
|
+
|
|
+ Since the kernel stack is a rather scarce resource, randomization
|
|
+ may cause unexpected stack overflows, therefore you should very
|
|
+ carefully test your system. Note that once enabled in the kernel
|
|
+ configuration, this feature cannot be disabled on a per file basis.
|
|
+
|
|
+config PAX_RANDUSTACK
|
|
+ bool "Randomize user stack base"
|
|
+ depends on PAX_ASLR
|
|
+ help
|
|
+ By saying Y here the kernel will randomize every task's userland
|
|
+ stack. The randomization is done in two steps where the second
|
|
+ one may apply a big amount of shift to the top of the stack and
|
|
+ cause problems for programs that want to use lots of memory (more
|
|
+ than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
|
|
+ For this reason the second step can be controlled by 'chpax' or
|
|
+ 'paxctl' on a per file basis.
|
|
+
|
|
+config PAX_RANDMMAP
|
|
+ bool "Randomize mmap() base"
|
|
+ depends on PAX_ASLR
|
|
+ help
|
|
+ By saying Y here the kernel will use a randomized base address for
|
|
+ mmap() requests that do not specify one themselves. As a result
|
|
+ all dynamically loaded libraries will appear at random addresses
|
|
+ and therefore be harder to exploit by a technique where an attacker
|
|
+ attempts to execute library code for his purposes (e.g. spawn a
|
|
+ shell from an exploited program that is running at an elevated
|
|
+ privilege level).
|
|
+
|
|
+ Furthermore, if a program is relinked as a dynamic ELF file, its
|
|
+ base address will be randomized as well, completing the full
|
|
+ randomization of the address space layout. Attacking such programs
|
|
+ becomes a guess game. You can find an example of doing this at
|
|
+ http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
|
|
+ http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
|
|
+
|
|
+ NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
|
|
+ feature on a per file basis.
|
|
+
|
|
+endmenu
|
|
+
|
|
+menu "Miscellaneous hardening features"
|
|
+
|
|
+config PAX_MEMORY_SANITIZE
|
|
+ bool "Sanitize all freed memory"
|
|
+ depends on !HIBERNATION
|
|
+ help
|
|
+ By saying Y here the kernel will erase memory pages as soon as they
|
|
+ are freed. This in turn reduces the lifetime of data stored in the
|
|
+ pages, making it less likely that sensitive information such as
|
|
+ passwords, cryptographic secrets, etc stay in memory for too long.
|
|
+
|
|
+ This is especially useful for programs whose runtime is short, long
|
|
+ lived processes and the kernel itself benefit from this as long as
|
|
+ they operate on whole memory pages and ensure timely freeing of pages
|
|
+ that may hold sensitive information.
|
|
+
|
|
+ The tradeoff is performance impact, on a single CPU system kernel
|
|
+ compilation sees a 3% slowdown, other systems and workloads may vary
|
|
+ and you are advised to test this feature on your expected workload
|
|
+ before deploying it.
|
|
+
|
|
+ Note that this feature does not protect data stored in live pages,
|
|
+ e.g., process memory swapped to disk may stay there for a long time.
|
|
+
|
|
+config PAX_MEMORY_STACKLEAK
|
|
+ bool "Sanitize kernel stack"
|
|
+ depends on X86
|
|
+ help
|
|
+ By saying Y here the kernel will erase the kernel stack before it
|
|
+ returns from a system call. This in turn reduces the information
|
|
+ that a kernel stack leak bug can reveal.
|
|
+
|
|
+ Note that such a bug can still leak information that was put on
|
|
+ the stack by the current system call (the one eventually triggering
|
|
+ the bug) but traces of earlier system calls on the kernel stack
|
|
+ cannot leak anymore.
|
|
+
|
|
+ The tradeoff is performance impact, on a single CPU system kernel
|
|
+ compilation sees a 1% slowdown, other systems and workloads may vary
|
|
+ and you are advised to test this feature on your expected workload
|
|
+ before deploying it.
|
|
+
|
|
+ Note: full support for this feature requires gcc with plugin support
|
|
+ so make sure your compiler is at least gcc 4.5.0. Using older gcc
|
|
+ versions means that functions with large enough stack frames may
|
|
+ leave uninitialized memory behind that may be exposed to a later
|
|
+ syscall leaking the stack.
|
|
+
|
|
+config PAX_MEMORY_UDEREF
|
|
+ bool "Prevent invalid userland pointer dereference"
|
|
+ depends on X86 && !COMPAT_VDSO && !UML_X86 && !XEN
|
|
+ select PAX_PER_CPU_PGD if X86_64
|
|
+ help
|
|
+ By saying Y here the kernel will be prevented from dereferencing
|
|
+ userland pointers in contexts where the kernel expects only kernel
|
|
+ pointers. This is both a useful runtime debugging feature and a
|
|
+ security measure that prevents exploiting a class of kernel bugs.
|
|
+
|
|
+ The tradeoff is that some virtualization solutions may experience
|
|
+ a huge slowdown and therefore you should not enable this feature
|
|
+ for kernels meant to run in such environments. Whether a given VM
|
|
+ solution is affected or not is best determined by simply trying it
|
|
+ out, the performance impact will be obvious right on boot as this
|
|
+ mechanism engages from very early on. A good rule of thumb is that
|
|
+ VMs running on CPUs without hardware virtualization support (i.e.,
|
|
+ the majority of IA-32 CPUs) will likely experience the slowdown.
|
|
+
|
|
+config PAX_REFCOUNT
|
|
+ bool "Prevent various kernel object reference counter overflows"
|
|
+ depends on (ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86
|
|
+ help
|
|
+ By saying Y here the kernel will detect and prevent overflowing
|
|
+ various (but not all) kinds of object reference counters. Such
|
|
+ overflows can normally occur due to bugs only and are often, if
|
|
+ not always, exploitable.
|
|
+
|
|
+ The tradeoff is that data structures protected by an overflowed
|
|
+ refcount will never be freed and therefore will leak memory. Note
|
|
+ that this leak also happens even without this protection but in
|
|
+ that case the overflow can eventually trigger the freeing of the
|
|
+ data structure while it is still being used elsewhere, resulting
|
|
+ in the exploitable situation that this feature prevents.
|
|
+
|
|
+ Since this has a negligible performance impact, you should enable
|
|
+ this feature.
|
|
+
|
|
+config PAX_USERCOPY
|
|
+ bool "Harden heap object copies between kernel and userland"
|
|
+ depends on ARM || PPC || SPARC || X86
|
|
+ depends on SLAB || SLUB || SLOB
|
|
+ select PAX_USERCOPY_SLABS
|
|
+ help
|
|
+ By saying Y here the kernel will enforce the size of heap objects
|
|
+ when they are copied in either direction between the kernel and
|
|
+ userland, even if only a part of the heap object is copied.
|
|
+
|
|
+ Specifically, this checking prevents information leaking from the
|
|
+ kernel heap during kernel to userland copies (if the kernel heap
|
|
+ object is otherwise fully initialized) and prevents kernel heap
|
|
+ overflows during userland to kernel copies. Only objects belonging
|
|
+ to explictly marked slub types are allowed to be copied at all.
|
|
+
|
|
+ Note that the current implementation provides the strictest checks
|
|
+ for the SLUB allocator.
|
|
+
|
|
+ If frame pointers are enabled on x86, this option will also restrict
|
|
+ copies into and out of the kernel stack to local variables within a
|
|
+ single frame.
|
|
+
|
|
+ Since this has a negligible performance impact, you should enable
|
|
+ this feature.
|
|
+
|
|
+config PAX_CONSTIFY_PLUGIN
|
|
+ bool "Automatically constify eligible structures"
|
|
+ depends on !UML
|
|
+ help
|
|
+ By saying Y here the compiler will automatically constify a class
|
|
+ of types that contain only function pointers. This reduces the
|
|
+ kernel's attack surface and also produces a better memory layout.
|
|
+
|
|
+ Note that the implementation requires a gcc with plugin support,
|
|
+ i.e., gcc 4.5 or newer. You may need to install the supporting
|
|
+ headers explicitly in addition to the normal gcc package.
|
|
+
|
|
+ Note that if some code really has to modify constified variables
|
|
+ then the source code will have to be patched to allow it. Examples
|
|
+ can be found in PaX itself (the no_const attribute) and for some
|
|
+ out-of-tree modules at http://www.grsecurity.net/~paxguy1/ .
|
|
+
|
|
+config PAX_SIZE_OVERFLOW
|
|
+ bool "Prevent various integer overflows in function size parameters"
|
|
+ depends on X86
|
|
+ help
|
|
+ By saying Y here the kernel recomputes expressions of function
|
|
+ arguments marked by a size_overflow attribute with double integer
|
|
+ precision (DImode/TImode for 32/64 bit integer types).
|
|
+
|
|
+ The recomputed argument is checked against INT_MAX and an event
|
|
+ is logged on overflow and the triggering process is killed.
|
|
+
|
|
+ Homepage:
|
|
+ http://www.grsecurity.net/~ephox/overflow_plugin/
|
|
+
|
|
+config PAX_LATENT_ENTROPY
|
|
+ bool "Generate some entropy during boot"
|
|
+ help
|
|
+ By saying Y here the kernel will instrument early boot code to
|
|
+ extract some entropy from both original and artificially created
|
|
+ program state. This will help especially embedded systems where
|
|
+ there is little 'natural' source of entropy normally. The cost
|
|
+ is some slowdown of the boot process.
|
|
+
|
|
+ Note that entropy extracted this way is not cryptographically
|
|
+ secure!
|
|
+
|
|
+endmenu
|
|
+
|
|
+endmenu
|
|
+
|
|
config KEYS
|
|
bool "Enable access key retention support"
|
|
help
|
|
@@ -169,7 +812,7 @@ config INTEL_TXT
|
|
config LSM_MMAP_MIN_ADDR
|
|
int "Low address space for LSM to protect from user allocation"
|
|
depends on SECURITY && SECURITY_SELINUX
|
|
- default 32768 if ARM
|
|
+ default 32768 if ALPHA || ARM || PARISC || SPARC32
|
|
default 65536
|
|
help
|
|
This is the portion of low virtual memory which should be protected
|
|
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
|
|
index 3ccf7ac..d73ad64 100644
|
|
--- a/security/integrity/ima/ima.h
|
|
+++ b/security/integrity/ima/ima.h
|
|
@@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
|
|
extern spinlock_t ima_queue_lock;
|
|
|
|
struct ima_h_table {
|
|
- atomic_long_t len; /* number of stored measurements in the list */
|
|
- atomic_long_t violations;
|
|
+ atomic_long_unchecked_t len; /* number of stored measurements in the list */
|
|
+ atomic_long_unchecked_t violations;
|
|
struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
|
|
};
|
|
extern struct ima_h_table ima_htable;
|
|
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
|
|
index 88a2788..581ab92 100644
|
|
--- a/security/integrity/ima/ima_api.c
|
|
+++ b/security/integrity/ima/ima_api.c
|
|
@@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
|
|
int result;
|
|
|
|
/* can overflow, only indicator */
|
|
- atomic_long_inc(&ima_htable.violations);
|
|
+ atomic_long_inc_unchecked(&ima_htable.violations);
|
|
|
|
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
|
|
if (!entry) {
|
|
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
|
|
index e1aa2b4..52027bf5e 100644
|
|
--- a/security/integrity/ima/ima_fs.c
|
|
+++ b/security/integrity/ima/ima_fs.c
|
|
@@ -28,12 +28,12 @@
|
|
static int valid_policy = 1;
|
|
#define TMPBUFLEN 12
|
|
static ssize_t ima_show_htable_value(char __user *buf, size_t count,
|
|
- loff_t *ppos, atomic_long_t *val)
|
|
+ loff_t *ppos, atomic_long_unchecked_t *val)
|
|
{
|
|
char tmpbuf[TMPBUFLEN];
|
|
ssize_t len;
|
|
|
|
- len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
|
|
+ len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
|
|
return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
|
|
}
|
|
|
|
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
|
|
index 55a6271..ad829c3 100644
|
|
--- a/security/integrity/ima/ima_queue.c
|
|
+++ b/security/integrity/ima/ima_queue.c
|
|
@@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
|
|
INIT_LIST_HEAD(&qe->later);
|
|
list_add_tail_rcu(&qe->later, &ima_measurements);
|
|
|
|
- atomic_long_inc(&ima_htable.len);
|
|
+ atomic_long_inc_unchecked(&ima_htable.len);
|
|
key = ima_hash_key(entry->digest);
|
|
hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
|
|
return 0;
|
|
diff --git a/security/keys/compat.c.rej b/security/keys/compat.c.rej
|
|
new file mode 100644
|
|
index 0000000..7a74d9d
|
|
--- /dev/null
|
|
+++ b/security/keys/compat.c.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- security/keys/compat.c 2012-01-08 19:48:31.735470731 +0100
|
|
++++ security/keys/compat.c 2012-05-21 12:10:12.400049045 +0200
|
|
+@@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
|
|
+ if (ret == 0)
|
|
+ goto no_payload_free;
|
|
+
|
|
+- ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
|
|
++ ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
|
|
+
|
|
+ if (iov != iovstack)
|
|
+ kfree(iov);
|
|
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
|
|
index dfc8c22..dc57d2b 100644
|
|
--- a/security/keys/keyctl.c
|
|
+++ b/security/keys/keyctl.c
|
|
@@ -935,7 +935,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
|
|
/*
|
|
* Copy the iovec data from userspace
|
|
*/
|
|
-static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
|
|
+static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
|
|
unsigned ioc)
|
|
{
|
|
for (; ioc > 0; ioc--) {
|
|
@@ -957,7 +957,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
|
|
* If successful, 0 will be returned.
|
|
*/
|
|
long keyctl_instantiate_key_common(key_serial_t id,
|
|
- const struct iovec *payload_iov,
|
|
+ const struct iovec __user *payload_iov,
|
|
unsigned ioc,
|
|
size_t plen,
|
|
key_serial_t ringid)
|
|
@@ -1052,7 +1052,7 @@ long keyctl_instantiate_key(key_serial_t id,
|
|
[0].iov_len = plen
|
|
};
|
|
|
|
- return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
|
|
+ return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
|
|
}
|
|
|
|
return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
|
|
diff --git a/security/keys/keyctl.c.rej b/security/keys/keyctl.c.rej
|
|
new file mode 100644
|
|
index 0000000..60237fb
|
|
--- /dev/null
|
|
+++ b/security/keys/keyctl.c.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- security/keys/keyctl.c 2012-05-21 11:33:42.531930107 +0200
|
|
++++ security/keys/keyctl.c 2012-05-21 12:10:12.404049045 +0200
|
|
+@@ -1085,7 +1085,7 @@ long keyctl_instantiate_key_iov(key_seri
|
|
+ if (ret == 0)
|
|
+ goto no_payload_free;
|
|
+
|
|
+- ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
|
|
++ ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
|
|
+
|
|
+ if (iov != iovstack)
|
|
+ kfree(iov);
|
|
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
|
|
index d605f75..2bc6be9 100644
|
|
--- a/security/keys/keyring.c
|
|
+++ b/security/keys/keyring.c
|
|
@@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
|
|
ret = -EFAULT;
|
|
|
|
for (loop = 0; loop < klist->nkeys; loop++) {
|
|
+ key_serial_t serial;
|
|
key = klist->keys[loop];
|
|
+ serial = key->serial;
|
|
|
|
tmp = sizeof(key_serial_t);
|
|
if (tmp > buflen)
|
|
tmp = buflen;
|
|
|
|
- if (copy_to_user(buffer,
|
|
- &key->serial,
|
|
- tmp) != 0)
|
|
+ if (copy_to_user(buffer, &serial, tmp))
|
|
goto error;
|
|
|
|
buflen -= tmp;
|
|
diff --git a/security/security.c b/security/security.c
|
|
index cc355c0..8912394 100644
|
|
--- a/security/security.c
|
|
+++ b/security/security.c
|
|
@@ -20,6 +20,7 @@
|
|
#include <linux/ima.h>
|
|
#include <linux/evm.h>
|
|
#include <linux/fsnotify.h>
|
|
+#include <linux/mm.h>
|
|
#include <net/flow.h>
|
|
|
|
#define MAX_LSM_EVM_XATTR 2
|
|
@@ -28,8 +29,8 @@
|
|
static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
|
|
CONFIG_DEFAULT_SECURITY;
|
|
|
|
-static struct security_operations *security_ops;
|
|
-static struct security_operations default_security_ops = {
|
|
+static struct security_operations *security_ops __read_only;
|
|
+static struct security_operations default_security_ops __read_only = {
|
|
.name = "default",
|
|
};
|
|
|
|
@@ -70,7 +71,9 @@ int __init security_init(void)
|
|
|
|
void reset_security_ops(void)
|
|
{
|
|
+ pax_open_kernel();
|
|
security_ops = &default_security_ops;
|
|
+ pax_close_kernel();
|
|
}
|
|
|
|
/* Save user chosen LSM */
|
|
@@ -123,7 +126,9 @@ int __init register_security(struct security_operations *ops)
|
|
if (security_ops != &default_security_ops)
|
|
return -EAGAIN;
|
|
|
|
+ pax_open_kernel();
|
|
security_ops = ops;
|
|
+ pax_close_kernel();
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/security/selinux/hooks.c.rej b/security/selinux/hooks.c.rej
|
|
new file mode 100644
|
|
index 0000000..1e2910c
|
|
--- /dev/null
|
|
+++ b/security/selinux/hooks.c.rej
|
|
@@ -0,0 +1,10 @@
|
|
+diff a/security/selinux/hooks.c b/security/selinux/hooks.c (rejected hunks)
|
|
+@@ -5801,7 +5801,7 @@ static int selinux_bprm_check_security (struct linux_binprm *bprm)
|
|
+ #endif
|
|
+ /* TmmSecure end */
|
|
+
|
|
+-static struct security_operations selinux_ops = {
|
|
++static struct security_operations selinux_ops __read_only = {
|
|
+ .name = "selinux",
|
|
+
|
|
+ .binder_set_context_mgr = selinux_binder_set_context_mgr,
|
|
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
|
|
index d1c980ce..d4933e8 100644
|
|
--- a/security/selinux/include/xfrm.h
|
|
+++ b/security/selinux/include/xfrm.h
|
|
@@ -51,7 +51,7 @@ int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid);
|
|
|
|
static inline void selinux_xfrm_notify_policyload(void)
|
|
{
|
|
- atomic_inc(&flow_cache_genid);
|
|
+ atomic_inc_unchecked(&flow_cache_genid);
|
|
}
|
|
#else
|
|
static inline int selinux_xfrm_enabled(void)
|
|
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
|
|
index 82df24d..9e50f95 100644
|
|
--- a/security/smack/smack_lsm.c
|
|
+++ b/security/smack/smack_lsm.c
|
|
@@ -3502,7 +3502,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
|
|
return 0;
|
|
}
|
|
|
|
-struct security_operations smack_ops = {
|
|
+struct security_operations smack_ops __read_only = {
|
|
.name = "smack",
|
|
|
|
.ptrace_access_check = smack_ptrace_access_check,
|
|
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
|
|
index 620d37c..e2ad89b 100644
|
|
--- a/security/tomoyo/tomoyo.c
|
|
+++ b/security/tomoyo/tomoyo.c
|
|
@@ -501,7 +501,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
|
|
* tomoyo_security_ops is a "struct security_operations" which is used for
|
|
* registering TOMOYO.
|
|
*/
|
|
-static struct security_operations tomoyo_security_ops = {
|
|
+static struct security_operations tomoyo_security_ops __read_only = {
|
|
.name = "tomoyo",
|
|
.cred_alloc_blank = tomoyo_cred_alloc_blank,
|
|
.cred_prepare = tomoyo_cred_prepare,
|
|
diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
|
|
index 270790d..c67dfcb 100644
|
|
--- a/sound/aoa/codecs/onyx.c
|
|
+++ b/sound/aoa/codecs/onyx.c
|
|
@@ -54,7 +54,7 @@ struct onyx {
|
|
spdif_locked:1,
|
|
analog_locked:1,
|
|
original_mute:2;
|
|
- int open_count;
|
|
+ local_t open_count;
|
|
struct codec_info *codec_info;
|
|
|
|
/* mutex serializes concurrent access to the device
|
|
@@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
|
|
struct onyx *onyx = cii->codec_data;
|
|
|
|
mutex_lock(&onyx->mutex);
|
|
- onyx->open_count++;
|
|
+ local_inc(&onyx->open_count);
|
|
mutex_unlock(&onyx->mutex);
|
|
|
|
return 0;
|
|
@@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
|
|
struct onyx *onyx = cii->codec_data;
|
|
|
|
mutex_lock(&onyx->mutex);
|
|
- onyx->open_count--;
|
|
- if (!onyx->open_count)
|
|
+ if (local_dec_and_test(&onyx->open_count))
|
|
onyx->spdif_locked = onyx->analog_locked = 0;
|
|
mutex_unlock(&onyx->mutex);
|
|
|
|
diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
|
|
index ffd2025..df062c9 100644
|
|
--- a/sound/aoa/codecs/onyx.h
|
|
+++ b/sound/aoa/codecs/onyx.h
|
|
@@ -11,6 +11,7 @@
|
|
#include <linux/i2c.h>
|
|
#include <asm/pmac_low_i2c.h>
|
|
#include <asm/prom.h>
|
|
+#include <asm/local.h>
|
|
|
|
/* PCM3052 register definitions */
|
|
|
|
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
|
|
index 4c1cc51..16040040 100644
|
|
--- a/sound/core/oss/pcm_oss.c
|
|
+++ b/sound/core/oss/pcm_oss.c
|
|
@@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
|
|
if (in_kernel) {
|
|
mm_segment_t fs;
|
|
fs = snd_enter_user();
|
|
- ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
|
|
+ ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
|
|
snd_leave_user(fs);
|
|
} else {
|
|
- ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
|
|
+ ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
|
|
}
|
|
if (ret != -EPIPE && ret != -ESTRPIPE)
|
|
break;
|
|
@@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
|
|
if (in_kernel) {
|
|
mm_segment_t fs;
|
|
fs = snd_enter_user();
|
|
- ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
|
|
+ ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
|
|
snd_leave_user(fs);
|
|
} else {
|
|
- ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
|
|
+ ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
|
|
}
|
|
if (ret == -EPIPE) {
|
|
if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
|
|
@@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
|
|
struct snd_pcm_plugin_channel *channels;
|
|
size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
|
|
if (!in_kernel) {
|
|
- if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
|
|
+ if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
|
|
return -EFAULT;
|
|
buf = runtime->oss.buffer;
|
|
}
|
|
@@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
|
|
}
|
|
} else {
|
|
tmp = snd_pcm_oss_write2(substream,
|
|
- (const char __force *)buf,
|
|
+ (const char __force_kernel *)buf,
|
|
runtime->oss.period_bytes, 0);
|
|
if (tmp <= 0)
|
|
goto err;
|
|
@@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
|
|
struct snd_pcm_runtime *runtime = substream->runtime;
|
|
snd_pcm_sframes_t frames, frames1;
|
|
#ifdef CONFIG_SND_PCM_OSS_PLUGINS
|
|
- char __user *final_dst = (char __force __user *)buf;
|
|
+ char __user *final_dst = (char __force_user *)buf;
|
|
if (runtime->oss.plugin_first) {
|
|
struct snd_pcm_plugin_channel *channels;
|
|
size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
|
|
@@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
|
|
xfer += tmp;
|
|
runtime->oss.buffer_used -= tmp;
|
|
} else {
|
|
- tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
|
|
+ tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
|
|
runtime->oss.period_bytes, 0);
|
|
if (tmp <= 0)
|
|
goto err;
|
|
@@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
|
|
size1);
|
|
size1 /= runtime->channels; /* frames */
|
|
fs = snd_enter_user();
|
|
- snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
|
|
+ snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
|
|
snd_leave_user(fs);
|
|
}
|
|
} else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
|
|
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
|
|
index 7076a8e..a828d7e 100644
|
|
--- a/sound/core/pcm_compat.c
|
|
+++ b/sound/core/pcm_compat.c
|
|
@@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
|
|
int err;
|
|
|
|
fs = snd_enter_user();
|
|
- err = snd_pcm_delay(substream, &delay);
|
|
+ err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
|
|
snd_leave_user(fs);
|
|
if (err < 0)
|
|
return err;
|
|
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
|
|
index ff808cc..8087eb0 100644
|
|
--- a/sound/core/pcm_native.c
|
|
+++ b/sound/core/pcm_native.c
|
|
@@ -2864,11 +2864,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
|
|
switch (substream->stream) {
|
|
case SNDRV_PCM_STREAM_PLAYBACK:
|
|
result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
|
|
- (void __user *)arg);
|
|
+ (void __force_user *)arg);
|
|
break;
|
|
case SNDRV_PCM_STREAM_CAPTURE:
|
|
result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
|
|
- (void __user *)arg);
|
|
+ (void __force_user *)arg);
|
|
break;
|
|
default:
|
|
result = -EINVAL;
|
|
diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
|
|
index 5cf8d65..912a79c 100644
|
|
--- a/sound/core/seq/seq_device.c
|
|
+++ b/sound/core/seq/seq_device.c
|
|
@@ -64,7 +64,7 @@ struct ops_list {
|
|
int argsize; /* argument size */
|
|
|
|
/* operators */
|
|
- struct snd_seq_dev_ops ops;
|
|
+ struct snd_seq_dev_ops *ops;
|
|
|
|
/* registred devices */
|
|
struct list_head dev_list; /* list of devices */
|
|
@@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
|
|
|
|
mutex_lock(&ops->reg_mutex);
|
|
/* copy driver operators */
|
|
- ops->ops = *entry;
|
|
+ ops->ops = entry;
|
|
ops->driver |= DRIVER_LOADED;
|
|
ops->argsize = argsize;
|
|
|
|
@@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
|
|
dev->name, ops->id, ops->argsize, dev->argsize);
|
|
return -EINVAL;
|
|
}
|
|
- if (ops->ops.init_device(dev) >= 0) {
|
|
+ if (ops->ops->init_device(dev) >= 0) {
|
|
dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
|
|
ops->num_init_devices++;
|
|
} else {
|
|
@@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
|
|
dev->name, ops->id, ops->argsize, dev->argsize);
|
|
return -EINVAL;
|
|
}
|
|
- if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
|
|
+ if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
|
|
dev->status = SNDRV_SEQ_DEVICE_FREE;
|
|
dev->driver_data = NULL;
|
|
ops->num_init_devices--;
|
|
diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
|
|
index 621e60e..f4543f5 100644
|
|
--- a/sound/drivers/mts64.c
|
|
+++ b/sound/drivers/mts64.c
|
|
@@ -29,6 +29,7 @@
|
|
#include <sound/initval.h>
|
|
#include <sound/rawmidi.h>
|
|
#include <sound/control.h>
|
|
+#include <asm/local.h>
|
|
|
|
#define CARD_NAME "Miditerminal 4140"
|
|
#define DRIVER_NAME "MTS64"
|
|
@@ -67,7 +68,7 @@ struct mts64 {
|
|
struct pardevice *pardev;
|
|
int pardev_claimed;
|
|
|
|
- int open_count;
|
|
+ local_t open_count;
|
|
int current_midi_output_port;
|
|
int current_midi_input_port;
|
|
u8 mode[MTS64_NUM_INPUT_PORTS];
|
|
@@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
|
|
{
|
|
struct mts64 *mts = substream->rmidi->private_data;
|
|
|
|
- if (mts->open_count == 0) {
|
|
+ if (local_read(&mts->open_count) == 0) {
|
|
/* We don't need a spinlock here, because this is just called
|
|
if the device has not been opened before.
|
|
So there aren't any IRQs from the device */
|
|
@@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
|
|
|
|
msleep(50);
|
|
}
|
|
- ++(mts->open_count);
|
|
+ local_inc(&mts->open_count);
|
|
|
|
return 0;
|
|
}
|
|
@@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
|
|
struct mts64 *mts = substream->rmidi->private_data;
|
|
unsigned long flags;
|
|
|
|
- --(mts->open_count);
|
|
- if (mts->open_count == 0) {
|
|
+ if (local_dec_return(&mts->open_count) == 0) {
|
|
/* We need the spinlock_irqsave here because we can still
|
|
have IRQs at this point */
|
|
spin_lock_irqsave(&mts->lock, flags);
|
|
@@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
|
|
|
|
msleep(500);
|
|
|
|
- } else if (mts->open_count < 0)
|
|
- mts->open_count = 0;
|
|
+ } else if (local_read(&mts->open_count) < 0)
|
|
+ local_set(&mts->open_count, 0);
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
|
|
index b953fb4..1999c01 100644
|
|
--- a/sound/drivers/opl4/opl4_lib.c
|
|
+++ b/sound/drivers/opl4/opl4_lib.c
|
|
@@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
|
|
MODULE_DESCRIPTION("OPL4 driver");
|
|
MODULE_LICENSE("GPL");
|
|
|
|
-static void inline snd_opl4_wait(struct snd_opl4 *opl4)
|
|
+static inline void snd_opl4_wait(struct snd_opl4 *opl4)
|
|
{
|
|
int timeout = 10;
|
|
while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
|
|
diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
|
|
index 3e32bd3..46fc152 100644
|
|
--- a/sound/drivers/portman2x4.c
|
|
+++ b/sound/drivers/portman2x4.c
|
|
@@ -48,6 +48,7 @@
|
|
#include <sound/initval.h>
|
|
#include <sound/rawmidi.h>
|
|
#include <sound/control.h>
|
|
+#include <asm/local.h>
|
|
|
|
#define CARD_NAME "Portman 2x4"
|
|
#define DRIVER_NAME "portman"
|
|
@@ -85,7 +86,7 @@ struct portman {
|
|
struct pardevice *pardev;
|
|
int pardev_claimed;
|
|
|
|
- int open_count;
|
|
+ local_t open_count;
|
|
int mode[PORTMAN_NUM_INPUT_PORTS];
|
|
struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
|
|
};
|
|
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
|
|
index 87657dd..a8268d4 100644
|
|
--- a/sound/firewire/amdtp.c
|
|
+++ b/sound/firewire/amdtp.c
|
|
@@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
|
|
ptr = s->pcm_buffer_pointer + data_blocks;
|
|
if (ptr >= pcm->runtime->buffer_size)
|
|
ptr -= pcm->runtime->buffer_size;
|
|
- ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
|
|
+ ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
|
|
|
|
s->pcm_period_pointer += data_blocks;
|
|
if (s->pcm_period_pointer >= pcm->runtime->period_size) {
|
|
@@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
|
|
*/
|
|
void amdtp_out_stream_update(struct amdtp_out_stream *s)
|
|
{
|
|
- ACCESS_ONCE(s->source_node_id_field) =
|
|
+ ACCESS_ONCE_RW(s->source_node_id_field) =
|
|
(fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
|
|
}
|
|
EXPORT_SYMBOL(amdtp_out_stream_update);
|
|
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
|
|
index 537a9cb..8e8c8e9 100644
|
|
--- a/sound/firewire/amdtp.h
|
|
+++ b/sound/firewire/amdtp.h
|
|
@@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
|
|
static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
|
|
struct snd_pcm_substream *pcm)
|
|
{
|
|
- ACCESS_ONCE(s->pcm) = pcm;
|
|
+ ACCESS_ONCE_RW(s->pcm) = pcm;
|
|
}
|
|
|
|
/**
|
|
diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
|
|
index d428ffe..751ef78 100644
|
|
--- a/sound/firewire/isight.c
|
|
+++ b/sound/firewire/isight.c
|
|
@@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
|
|
ptr += count;
|
|
if (ptr >= runtime->buffer_size)
|
|
ptr -= runtime->buffer_size;
|
|
- ACCESS_ONCE(isight->buffer_pointer) = ptr;
|
|
+ ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
|
|
|
|
isight->period_counter += count;
|
|
if (isight->period_counter >= runtime->period_size) {
|
|
@@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
|
|
if (err < 0)
|
|
return err;
|
|
|
|
- ACCESS_ONCE(isight->pcm_active) = true;
|
|
+ ACCESS_ONCE_RW(isight->pcm_active) = true;
|
|
|
|
return 0;
|
|
}
|
|
@@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
|
|
{
|
|
struct isight *isight = substream->private_data;
|
|
|
|
- ACCESS_ONCE(isight->pcm_active) = false;
|
|
+ ACCESS_ONCE_RW(isight->pcm_active) = false;
|
|
|
|
mutex_lock(&isight->mutex);
|
|
isight_stop_streaming(isight);
|
|
@@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
|
|
|
|
switch (cmd) {
|
|
case SNDRV_PCM_TRIGGER_START:
|
|
- ACCESS_ONCE(isight->pcm_running) = true;
|
|
+ ACCESS_ONCE_RW(isight->pcm_running) = true;
|
|
break;
|
|
case SNDRV_PCM_TRIGGER_STOP:
|
|
- ACCESS_ONCE(isight->pcm_running) = false;
|
|
+ ACCESS_ONCE_RW(isight->pcm_running) = false;
|
|
break;
|
|
default:
|
|
return -EINVAL;
|
|
diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
|
|
index 7bd5e33..1fcab12 100644
|
|
--- a/sound/isa/cmi8330.c
|
|
+++ b/sound/isa/cmi8330.c
|
|
@@ -172,7 +172,7 @@ struct snd_cmi8330 {
|
|
|
|
struct snd_pcm *pcm;
|
|
struct snd_cmi8330_stream {
|
|
- struct snd_pcm_ops ops;
|
|
+ snd_pcm_ops_no_const ops;
|
|
snd_pcm_open_callback_t open;
|
|
void *private_data; /* sb or wss */
|
|
} streams[2];
|
|
diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
|
|
index 733b014..56ce96f 100644
|
|
--- a/sound/oss/sb_audio.c
|
|
+++ b/sound/oss/sb_audio.c
|
|
@@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
|
|
buf16 = (signed short *)(localbuf + localoffs);
|
|
while (c)
|
|
{
|
|
- locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
|
|
+ locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
|
|
if (copy_from_user(lbuf8,
|
|
userbuf+useroffs + p,
|
|
locallen))
|
|
diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
|
|
index 09d4648..cf234c7 100644
|
|
--- a/sound/oss/swarm_cs4297a.c
|
|
+++ b/sound/oss/swarm_cs4297a.c
|
|
@@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
|
|
{
|
|
struct cs4297a_state *s;
|
|
u32 pwr, id;
|
|
- mm_segment_t fs;
|
|
int rval;
|
|
#ifndef CONFIG_BCM_CS4297A_CSWARM
|
|
u64 cfg;
|
|
@@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
|
|
if (!rval) {
|
|
char *sb1250_duart_present;
|
|
|
|
+#if 0
|
|
+ mm_segment_t fs;
|
|
fs = get_fs();
|
|
set_fs(KERNEL_DS);
|
|
-#if 0
|
|
val = SOUND_MASK_LINE;
|
|
mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
|
|
for (i = 0; i < ARRAY_SIZE(initvol); i++) {
|
|
val = initvol[i].vol;
|
|
mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
|
|
}
|
|
+ set_fs(fs);
|
|
// cs4297a_write_ac97(s, 0x18, 0x0808);
|
|
#else
|
|
// cs4297a_write_ac97(s, 0x5e, 0x180);
|
|
cs4297a_write_ac97(s, 0x02, 0x0808);
|
|
cs4297a_write_ac97(s, 0x18, 0x0808);
|
|
#endif
|
|
- set_fs(fs);
|
|
|
|
list_add(&s->list, &cs4297a_devs);
|
|
|
|
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
|
|
index 56b4f74..7cfd41a 100644
|
|
--- a/sound/pci/hda/hda_codec.h
|
|
+++ b/sound/pci/hda/hda_codec.h
|
|
@@ -611,7 +611,7 @@ struct hda_bus_ops {
|
|
/* notify power-up/down from codec to controller */
|
|
void (*pm_notify)(struct hda_bus *bus);
|
|
#endif
|
|
-};
|
|
+} __no_const;
|
|
|
|
/* template to pass to the bus constructor */
|
|
struct hda_bus_template {
|
|
@@ -713,6 +713,7 @@ struct hda_codec_ops {
|
|
#endif
|
|
void (*reboot_notify)(struct hda_codec *codec);
|
|
};
|
|
+typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
|
|
|
|
/* record for amp information cache */
|
|
struct hda_cache_head {
|
|
@@ -743,7 +744,7 @@ struct hda_pcm_ops {
|
|
struct snd_pcm_substream *substream);
|
|
int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
|
|
struct snd_pcm_substream *substream);
|
|
-};
|
|
+} __no_const;
|
|
|
|
/* PCM information for each substream */
|
|
struct hda_pcm_stream {
|
|
@@ -801,7 +802,7 @@ struct hda_codec {
|
|
const char *modelname; /* model name for preset */
|
|
|
|
/* set by patch */
|
|
- struct hda_codec_ops patch_ops;
|
|
+ hda_codec_ops_no_const patch_ops;
|
|
|
|
/* PCM to create, set by patch_ops.build_pcms callback */
|
|
unsigned int num_pcms;
|
|
diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
|
|
index 0da778a..bc38b84 100644
|
|
--- a/sound/pci/ice1712/ice1712.h
|
|
+++ b/sound/pci/ice1712/ice1712.h
|
|
@@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
|
|
unsigned int mask_flags; /* total mask bits */
|
|
struct snd_akm4xxx_ops {
|
|
void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
|
|
- } ops;
|
|
+ } __no_const ops;
|
|
};
|
|
|
|
struct snd_ice1712_spdif {
|
|
@@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
|
|
int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
|
|
void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
|
|
int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
|
|
- } ops;
|
|
+ } __no_const ops;
|
|
};
|
|
|
|
|
|
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
|
|
index a8159b81..5f006a5 100644
|
|
--- a/sound/pci/ymfpci/ymfpci_main.c
|
|
+++ b/sound/pci/ymfpci/ymfpci_main.c
|
|
@@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
|
|
if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
|
|
break;
|
|
}
|
|
- if (atomic_read(&chip->interrupt_sleep_count)) {
|
|
- atomic_set(&chip->interrupt_sleep_count, 0);
|
|
+ if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
|
|
+ atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
|
|
wake_up(&chip->interrupt_sleep);
|
|
}
|
|
__end:
|
|
@@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
|
|
continue;
|
|
init_waitqueue_entry(&wait, current);
|
|
add_wait_queue(&chip->interrupt_sleep, &wait);
|
|
- atomic_inc(&chip->interrupt_sleep_count);
|
|
+ atomic_inc_unchecked(&chip->interrupt_sleep_count);
|
|
schedule_timeout_uninterruptible(msecs_to_jiffies(50));
|
|
remove_wait_queue(&chip->interrupt_sleep, &wait);
|
|
}
|
|
@@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
|
|
snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
|
|
spin_unlock(&chip->reg_lock);
|
|
|
|
- if (atomic_read(&chip->interrupt_sleep_count)) {
|
|
- atomic_set(&chip->interrupt_sleep_count, 0);
|
|
+ if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
|
|
+ atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
|
|
wake_up(&chip->interrupt_sleep);
|
|
}
|
|
}
|
|
@@ -2398,7 +2398,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
|
|
spin_lock_init(&chip->reg_lock);
|
|
spin_lock_init(&chip->voice_lock);
|
|
init_waitqueue_head(&chip->interrupt_sleep);
|
|
- atomic_set(&chip->interrupt_sleep_count, 0);
|
|
+ atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
|
|
chip->card = card;
|
|
chip->pci = pci;
|
|
chip->irq = -1;
|
|
diff --git a/sound/soc/soc-pcm.c.rej b/sound/soc/soc-pcm.c.rej
|
|
new file mode 100644
|
|
index 0000000..9673a52
|
|
--- /dev/null
|
|
+++ b/sound/soc/soc-pcm.c.rej
|
|
@@ -0,0 +1,11 @@
|
|
+--- sound/soc/soc-pcm.c 2012-05-21 11:33:44.919930237 +0200
|
|
++++ sound/soc/soc-pcm.c 2012-05-21 12:10:12.480049049 +0200
|
|
+@@ -641,7 +641,7 @@ int soc_new_pcm(struct snd_soc_pcm_runti
|
|
+ struct snd_soc_platform *platform = rtd->platform;
|
|
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
|
|
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
|
|
+- struct snd_pcm_ops *soc_pcm_ops = &rtd->ops;
|
|
++ snd_pcm_ops_no_const *soc_pcm_ops = &rtd->ops;
|
|
+ struct snd_pcm *pcm;
|
|
+ char new_name[64];
|
|
+ int ret = 0, playback = 0, capture = 0;
|
|
diff --git a/sound/usb/card.h.rej b/sound/usb/card.h.rej
|
|
new file mode 100644
|
|
index 0000000..eae6a2b
|
|
--- /dev/null
|
|
+++ b/sound/usb/card.h.rej
|
|
@@ -0,0 +1,18 @@
|
|
+diff a/sound/usb/card.h b/sound/usb/card.h (rejected hunks)
|
|
+@@ -45,6 +45,7 @@ struct snd_urb_ops {
|
|
+ int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
|
|
+ int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
|
|
+ };
|
|
++typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
|
|
+
|
|
+ struct snd_usb_substream {
|
|
+ struct snd_usb_stream *stream;
|
|
+@@ -96,7 +97,7 @@ struct snd_usb_substream {
|
|
+ struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
|
|
+ spinlock_t lock;
|
|
+
|
|
+- struct snd_urb_ops ops; /* callbacks (must be filled at init) */
|
|
++ snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
|
|
+ int last_frame_number; /* stored frame number */
|
|
+ int last_delay; /* stored delay */
|
|
+ };
|
|
diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
|
|
new file mode 100644
|
|
index 0000000..50f2f2f6
|
|
--- /dev/null
|
|
+++ b/tools/gcc/.gitignore
|
|
@@ -0,0 +1 @@
|
|
+size_overflow_hash.h
|
|
diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
|
|
new file mode 100644
|
|
index 0000000..ea94c27
|
|
--- /dev/null
|
|
+++ b/tools/gcc/Makefile
|
|
@@ -0,0 +1,43 @@
|
|
+#CC := gcc
|
|
+#PLUGIN_SOURCE_FILES := pax_plugin.c
|
|
+#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
|
|
+GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
|
|
+#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
|
|
+
|
|
+ifeq ($(PLUGINCC),$(HOSTCC))
|
|
+HOSTLIBS := hostlibs
|
|
+HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
|
|
+else
|
|
+HOSTLIBS := hostcxxlibs
|
|
+HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu++98 -ggdb -Wno-unused-parameter
|
|
+endif
|
|
+
|
|
+$(HOSTLIBS)-$(CONFIG_PAX_CONSTIFY_PLUGIN) := constify_plugin.so
|
|
+$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
|
|
+$(HOSTLIBS)-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
|
|
+$(HOSTLIBS)-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
|
|
+$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
|
|
+$(HOSTLIBS)-y += colorize_plugin.so
|
|
+$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
|
|
+$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
|
|
+
|
|
+always := $($(HOSTLIBS)-y)
|
|
+
|
|
+constify_plugin-objs := constify_plugin.o
|
|
+stackleak_plugin-objs := stackleak_plugin.o
|
|
+kallocstat_plugin-objs := kallocstat_plugin.o
|
|
+kernexec_plugin-objs := kernexec_plugin.o
|
|
+checker_plugin-objs := checker_plugin.o
|
|
+colorize_plugin-objs := colorize_plugin.o
|
|
+size_overflow_plugin-objs := size_overflow_plugin.o
|
|
+latent_entropy_plugin-objs := latent_entropy_plugin.o
|
|
+
|
|
+$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h
|
|
+
|
|
+quiet_cmd_build_size_overflow_hash = GENHASH $@
|
|
+ cmd_build_size_overflow_hash = \
|
|
+ $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -d $< -o $@
|
|
+$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
|
|
+ $(call if_changed,build_size_overflow_hash)
|
|
+
|
|
+targets += size_overflow_hash.h
|
|
diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
|
|
new file mode 100644
|
|
index 0000000..d41b5af
|
|
--- /dev/null
|
|
+++ b/tools/gcc/checker_plugin.c
|
|
@@ -0,0 +1,171 @@
|
|
+/*
|
|
+ * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
|
|
+ * Licensed under the GPL v2
|
|
+ *
|
|
+ * Note: the choice of the license means that the compilation process is
|
|
+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
|
|
+ * but for the kernel it doesn't matter since it doesn't link against
|
|
+ * any of the gcc libraries
|
|
+ *
|
|
+ * gcc plugin to implement various sparse (source code checker) features
|
|
+ *
|
|
+ * TODO:
|
|
+ * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
|
|
+ *
|
|
+ * BUGS:
|
|
+ * - none known
|
|
+ */
|
|
+#include "gcc-plugin.h"
|
|
+#include "config.h"
|
|
+#include "system.h"
|
|
+#include "coretypes.h"
|
|
+#include "tree.h"
|
|
+#include "tree-pass.h"
|
|
+#include "flags.h"
|
|
+#include "intl.h"
|
|
+#include "toplev.h"
|
|
+#include "plugin.h"
|
|
+//#include "expr.h" where are you...
|
|
+#include "diagnostic.h"
|
|
+#include "plugin-version.h"
|
|
+#include "tm.h"
|
|
+#include "function.h"
|
|
+#include "basic-block.h"
|
|
+#include "gimple.h"
|
|
+#include "rtl.h"
|
|
+#include "emit-rtl.h"
|
|
+#include "tree-flow.h"
|
|
+#include "target.h"
|
|
+
|
|
+extern void c_register_addr_space (const char *str, addr_space_t as);
|
|
+extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
|
|
+extern enum machine_mode default_addr_space_address_mode (addr_space_t);
|
|
+extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
|
|
+extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
|
|
+extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
|
|
+
|
|
+extern void print_gimple_stmt(FILE *, gimple, int, int);
|
|
+extern rtx emit_move_insn(rtx x, rtx y);
|
|
+
|
|
+int plugin_is_GPL_compatible;
|
|
+
|
|
+static struct plugin_info checker_plugin_info = {
|
|
+ .version = "201111150100",
|
|
+};
|
|
+
|
|
+#define ADDR_SPACE_KERNEL 0
|
|
+#define ADDR_SPACE_FORCE_KERNEL 1
|
|
+#define ADDR_SPACE_USER 2
|
|
+#define ADDR_SPACE_FORCE_USER 3
|
|
+#define ADDR_SPACE_IOMEM 0
|
|
+#define ADDR_SPACE_FORCE_IOMEM 0
|
|
+#define ADDR_SPACE_PERCPU 0
|
|
+#define ADDR_SPACE_FORCE_PERCPU 0
|
|
+#define ADDR_SPACE_RCU 0
|
|
+#define ADDR_SPACE_FORCE_RCU 0
|
|
+
|
|
+static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
|
|
+{
|
|
+ return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
|
|
+}
|
|
+
|
|
+static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
|
|
+{
|
|
+ return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
|
|
+}
|
|
+
|
|
+static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
|
|
+{
|
|
+ return default_addr_space_valid_pointer_mode(mode, as);
|
|
+}
|
|
+
|
|
+static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
|
|
+{
|
|
+ return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
|
|
+}
|
|
+
|
|
+static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
|
|
+{
|
|
+ return default_addr_space_legitimize_address(x, oldx, mode, as);
|
|
+}
|
|
+
|
|
+static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
|
|
+{
|
|
+ if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
|
|
+ return true;
|
|
+
|
|
+ if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
|
|
+ return true;
|
|
+
|
|
+ if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
|
|
+ return true;
|
|
+
|
|
+ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
|
|
+ return true;
|
|
+
|
|
+ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
|
|
+ return true;
|
|
+
|
|
+ if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
|
|
+ return true;
|
|
+
|
|
+ if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
|
|
+ return true;
|
|
+
|
|
+ return subset == superset;
|
|
+}
|
|
+
|
|
+static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
|
|
+{
|
|
+// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
|
|
+// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
|
|
+
|
|
+ return op;
|
|
+}
|
|
+
|
|
+static void register_checker_address_spaces(void *event_data, void *data)
|
|
+{
|
|
+ c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
|
|
+ c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
|
|
+ c_register_addr_space("__user", ADDR_SPACE_USER);
|
|
+ c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
|
|
+// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
|
|
+// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
|
|
+// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
|
|
+// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
|
|
+// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
|
|
+// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
|
|
+
|
|
+ targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
|
|
+ targetm.addr_space.address_mode = checker_addr_space_address_mode;
|
|
+ targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
|
|
+ targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
|
|
+// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
|
|
+ targetm.addr_space.subset_p = checker_addr_space_subset_p;
|
|
+ targetm.addr_space.convert = checker_addr_space_convert;
|
|
+}
|
|
+
|
|
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
|
|
+{
|
|
+ const char * const plugin_name = plugin_info->base_name;
|
|
+ const int argc = plugin_info->argc;
|
|
+ const struct plugin_argument * const argv = plugin_info->argv;
|
|
+ int i;
|
|
+
|
|
+ if (!plugin_default_version_check(version, &gcc_version)) {
|
|
+ error(G_("incompatible gcc/plugin versions"));
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
|
|
+
|
|
+ for (i = 0; i < argc; ++i)
|
|
+ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
|
|
+
|
|
+ if (TARGET_64BIT == 0)
|
|
+ return 0;
|
|
+
|
|
+ register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
|
|
new file mode 100644
|
|
index 0000000..846aeb0
|
|
--- /dev/null
|
|
+++ b/tools/gcc/colorize_plugin.c
|
|
@@ -0,0 +1,148 @@
|
|
+/*
|
|
+ * Copyright 2012 by PaX Team <pageexec@freemail.hu>
|
|
+ * Licensed under the GPL v2
|
|
+ *
|
|
+ * Note: the choice of the license means that the compilation process is
|
|
+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
|
|
+ * but for the kernel it doesn't matter since it doesn't link against
|
|
+ * any of the gcc libraries
|
|
+ *
|
|
+ * gcc plugin to colorize diagnostic output
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include "gcc-plugin.h"
|
|
+#include "config.h"
|
|
+#include "system.h"
|
|
+#include "coretypes.h"
|
|
+#include "tree.h"
|
|
+#include "tree-pass.h"
|
|
+#include "flags.h"
|
|
+#include "intl.h"
|
|
+#include "toplev.h"
|
|
+#include "plugin.h"
|
|
+#include "diagnostic.h"
|
|
+#include "plugin-version.h"
|
|
+#include "tm.h"
|
|
+
|
|
+int plugin_is_GPL_compatible;
|
|
+
|
|
+static struct plugin_info colorize_plugin_info = {
|
|
+ .version = "201203092200",
|
|
+ .help = NULL,
|
|
+};
|
|
+
|
|
+#define GREEN "\033[32m\033[2m"
|
|
+#define LIGHTGREEN "\033[32m\033[1m"
|
|
+#define YELLOW "\033[33m\033[2m"
|
|
+#define LIGHTYELLOW "\033[33m\033[1m"
|
|
+#define RED "\033[31m\033[2m"
|
|
+#define LIGHTRED "\033[31m\033[1m"
|
|
+#define BLUE "\033[34m\033[2m"
|
|
+#define LIGHTBLUE "\033[34m\033[1m"
|
|
+#define BRIGHT "\033[m\033[1m"
|
|
+#define NORMAL "\033[m"
|
|
+
|
|
+static diagnostic_starter_fn old_starter;
|
|
+static diagnostic_finalizer_fn old_finalizer;
|
|
+
|
|
+static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
|
|
+{
|
|
+ const char *color;
|
|
+ char *newprefix;
|
|
+
|
|
+ switch (diagnostic->kind) {
|
|
+ case DK_NOTE:
|
|
+ color = LIGHTBLUE;
|
|
+ break;
|
|
+
|
|
+ case DK_PEDWARN:
|
|
+ case DK_WARNING:
|
|
+ color = LIGHTYELLOW;
|
|
+ break;
|
|
+
|
|
+ case DK_ERROR:
|
|
+ case DK_FATAL:
|
|
+ case DK_ICE:
|
|
+ case DK_PERMERROR:
|
|
+ case DK_SORRY:
|
|
+ color = LIGHTRED;
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ color = NORMAL;
|
|
+ }
|
|
+
|
|
+ old_starter(context, diagnostic);
|
|
+ if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
|
|
+ return;
|
|
+ pp_destroy_prefix(context->printer);
|
|
+ pp_set_prefix(context->printer, newprefix);
|
|
+}
|
|
+
|
|
+static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
|
|
+{
|
|
+ old_finalizer(context, diagnostic);
|
|
+}
|
|
+
|
|
+static void colorize_arm(void)
|
|
+{
|
|
+ old_starter = diagnostic_starter(global_dc);
|
|
+ old_finalizer = diagnostic_finalizer(global_dc);
|
|
+
|
|
+ diagnostic_starter(global_dc) = start_colorize;
|
|
+ diagnostic_finalizer(global_dc) = finalize_colorize;
|
|
+}
|
|
+
|
|
+static unsigned int execute_colorize_rearm(void)
|
|
+{
|
|
+ if (diagnostic_starter(global_dc) == start_colorize)
|
|
+ return 0;
|
|
+
|
|
+ colorize_arm();
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
|
|
+ .pass = {
|
|
+ .type = SIMPLE_IPA_PASS,
|
|
+ .name = "colorize_rearm",
|
|
+ .gate = NULL,
|
|
+ .execute = execute_colorize_rearm,
|
|
+ .sub = NULL,
|
|
+ .next = NULL,
|
|
+ .static_pass_number = 0,
|
|
+ .tv_id = TV_NONE,
|
|
+ .properties_required = 0,
|
|
+ .properties_provided = 0,
|
|
+ .properties_destroyed = 0,
|
|
+ .todo_flags_start = 0,
|
|
+ .todo_flags_finish = 0
|
|
+ }
|
|
+};
|
|
+
|
|
+static void colorize_start_unit(void *gcc_data, void *user_data)
|
|
+{
|
|
+ colorize_arm();
|
|
+}
|
|
+
|
|
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
|
|
+{
|
|
+ const char * const plugin_name = plugin_info->base_name;
|
|
+ struct register_pass_info colorize_rearm_pass_info = {
|
|
+ .pass = &pass_ipa_colorize_rearm.pass,
|
|
+ .reference_pass_name = "*free_lang_data",
|
|
+ .ref_pass_instance_number = 1,
|
|
+ .pos_op = PASS_POS_INSERT_AFTER
|
|
+ };
|
|
+
|
|
+ if (!plugin_default_version_check(version, &gcc_version)) {
|
|
+ error(G_("incompatible gcc/plugin versions"));
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
|
|
+ register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
|
|
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
|
|
+ return 0;
|
|
+}
|
|
diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
|
|
new file mode 100644
|
|
index 0000000..048d4fff
|
|
--- /dev/null
|
|
+++ b/tools/gcc/constify_plugin.c
|
|
@@ -0,0 +1,328 @@
|
|
+/*
|
|
+ * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
|
|
+ * Copyright 2011 by PaX Team <pageexec@freemail.hu>
|
|
+ * Licensed under the GPL v2, or (at your option) v3
|
|
+ *
|
|
+ * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
|
|
+ *
|
|
+ * Homepage:
|
|
+ * http://www.grsecurity.net/~ephox/const_plugin/
|
|
+ *
|
|
+ * Usage:
|
|
+ * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
|
|
+ * $ gcc -fplugin=constify_plugin.so test.c -O2
|
|
+ */
|
|
+
|
|
+#include "gcc-plugin.h"
|
|
+#include "config.h"
|
|
+#include "system.h"
|
|
+#include "coretypes.h"
|
|
+#include "tree.h"
|
|
+#include "tree-pass.h"
|
|
+#include "flags.h"
|
|
+#include "intl.h"
|
|
+#include "toplev.h"
|
|
+#include "plugin.h"
|
|
+#include "diagnostic.h"
|
|
+#include "plugin-version.h"
|
|
+#include "tm.h"
|
|
+#include "function.h"
|
|
+#include "basic-block.h"
|
|
+#include "gimple.h"
|
|
+#include "rtl.h"
|
|
+#include "emit-rtl.h"
|
|
+#include "tree-flow.h"
|
|
+
|
|
+#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
|
|
+
|
|
+int plugin_is_GPL_compatible;
|
|
+
|
|
+static struct plugin_info const_plugin_info = {
|
|
+ .version = "201205300030",
|
|
+ .help = "no-constify\tturn off constification\n",
|
|
+};
|
|
+
|
|
+static void deconstify_tree(tree node);
|
|
+
|
|
+static void deconstify_type(tree type)
|
|
+{
|
|
+ tree field;
|
|
+
|
|
+ for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
|
|
+ tree type = TREE_TYPE(field);
|
|
+
|
|
+ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
|
|
+ continue;
|
|
+ if (!TYPE_READONLY(type))
|
|
+ continue;
|
|
+
|
|
+ deconstify_tree(field);
|
|
+ }
|
|
+ TYPE_READONLY(type) = 0;
|
|
+ C_TYPE_FIELDS_READONLY(type) = 0;
|
|
+}
|
|
+
|
|
+static void deconstify_tree(tree node)
|
|
+{
|
|
+ tree old_type, new_type, field;
|
|
+
|
|
+ old_type = TREE_TYPE(node);
|
|
+
|
|
+ gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
|
|
+
|
|
+ new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
|
|
+ TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
|
|
+ for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
|
|
+ DECL_FIELD_CONTEXT(field) = new_type;
|
|
+
|
|
+ deconstify_type(new_type);
|
|
+
|
|
+ TREE_READONLY(node) = 0;
|
|
+ TREE_TYPE(node) = new_type;
|
|
+}
|
|
+
|
|
+static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
|
|
+{
|
|
+ tree type;
|
|
+
|
|
+ *no_add_attrs = true;
|
|
+ if (TREE_CODE(*node) == FUNCTION_DECL) {
|
|
+ error("%qE attribute does not apply to functions", name);
|
|
+ return NULL_TREE;
|
|
+ }
|
|
+
|
|
+ if (TREE_CODE(*node) == VAR_DECL) {
|
|
+ error("%qE attribute does not apply to variables", name);
|
|
+ return NULL_TREE;
|
|
+ }
|
|
+
|
|
+ if (TYPE_P(*node)) {
|
|
+ if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
|
|
+ *no_add_attrs = false;
|
|
+ else
|
|
+ error("%qE attribute applies to struct and union types only", name);
|
|
+ return NULL_TREE;
|
|
+ }
|
|
+
|
|
+ type = TREE_TYPE(*node);
|
|
+
|
|
+ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
|
|
+ error("%qE attribute applies to struct and union types only", name);
|
|
+ return NULL_TREE;
|
|
+ }
|
|
+
|
|
+ if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
|
|
+ error("%qE attribute is already applied to the type", name);
|
|
+ return NULL_TREE;
|
|
+ }
|
|
+
|
|
+ if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
|
|
+ error("%qE attribute used on type that is not constified", name);
|
|
+ return NULL_TREE;
|
|
+ }
|
|
+
|
|
+ if (TREE_CODE(*node) == TYPE_DECL) {
|
|
+ deconstify_tree(*node);
|
|
+ return NULL_TREE;
|
|
+ }
|
|
+
|
|
+ return NULL_TREE;
|
|
+}
|
|
+
|
|
+static void constify_type(tree type)
|
|
+{
|
|
+ TYPE_READONLY(type) = 1;
|
|
+ C_TYPE_FIELDS_READONLY(type) = 1;
|
|
+}
|
|
+
|
|
+static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
|
|
+{
|
|
+ *no_add_attrs = true;
|
|
+ if (!TYPE_P(*node)) {
|
|
+ error("%qE attribute applies to types only", name);
|
|
+ return NULL_TREE;
|
|
+ }
|
|
+
|
|
+ if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
|
|
+ error("%qE attribute applies to struct and union types only", name);
|
|
+ return NULL_TREE;
|
|
+ }
|
|
+
|
|
+ *no_add_attrs = false;
|
|
+ constify_type(*node);
|
|
+ return NULL_TREE;
|
|
+}
|
|
+
|
|
+static struct attribute_spec no_const_attr = {
|
|
+ .name = "no_const",
|
|
+ .min_length = 0,
|
|
+ .max_length = 0,
|
|
+ .decl_required = false,
|
|
+ .type_required = false,
|
|
+ .function_type_required = false,
|
|
+ .handler = handle_no_const_attribute,
|
|
+#if BUILDING_GCC_VERSION >= 4007
|
|
+ .affects_type_identity = true
|
|
+#endif
|
|
+};
|
|
+
|
|
+static struct attribute_spec do_const_attr = {
|
|
+ .name = "do_const",
|
|
+ .min_length = 0,
|
|
+ .max_length = 0,
|
|
+ .decl_required = false,
|
|
+ .type_required = false,
|
|
+ .function_type_required = false,
|
|
+ .handler = handle_do_const_attribute,
|
|
+#if BUILDING_GCC_VERSION >= 4007
|
|
+ .affects_type_identity = true
|
|
+#endif
|
|
+};
|
|
+
|
|
+static void register_attributes(void *event_data, void *data)
|
|
+{
|
|
+ register_attribute(&no_const_attr);
|
|
+ register_attribute(&do_const_attr);
|
|
+}
|
|
+
|
|
+static bool is_fptr(tree field)
|
|
+{
|
|
+ tree ptr = TREE_TYPE(field);
|
|
+
|
|
+ if (TREE_CODE(ptr) != POINTER_TYPE)
|
|
+ return false;
|
|
+
|
|
+ return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
|
|
+}
|
|
+
|
|
+static bool walk_struct(tree node)
|
|
+{
|
|
+ tree field;
|
|
+
|
|
+ if (TYPE_FIELDS(node) == NULL_TREE)
|
|
+ return false;
|
|
+
|
|
+ if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
|
|
+ gcc_assert(!TYPE_READONLY(node));
|
|
+ deconstify_type(node);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
|
|
+ tree type = TREE_TYPE(field);
|
|
+ enum tree_code code = TREE_CODE(type);
|
|
+ if (code == RECORD_TYPE || code == UNION_TYPE) {
|
|
+ if (!(walk_struct(type)))
|
|
+ return false;
|
|
+ } else if (!is_fptr(field) && !TREE_READONLY(field))
|
|
+ return false;
|
|
+ }
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static void finish_type(void *event_data, void *data)
|
|
+{
|
|
+ tree type = (tree)event_data;
|
|
+
|
|
+ if (type == NULL_TREE)
|
|
+ return;
|
|
+
|
|
+ if (TYPE_READONLY(type))
|
|
+ return;
|
|
+
|
|
+ if (walk_struct(type))
|
|
+ constify_type(type);
|
|
+}
|
|
+
|
|
+static unsigned int check_local_variables(void);
|
|
+
|
|
+struct gimple_opt_pass pass_local_variable = {
|
|
+ {
|
|
+ .type = GIMPLE_PASS,
|
|
+ .name = "check_local_variables",
|
|
+ .gate = NULL,
|
|
+ .execute = check_local_variables,
|
|
+ .sub = NULL,
|
|
+ .next = NULL,
|
|
+ .static_pass_number = 0,
|
|
+ .tv_id = TV_NONE,
|
|
+ .properties_required = 0,
|
|
+ .properties_provided = 0,
|
|
+ .properties_destroyed = 0,
|
|
+ .todo_flags_start = 0,
|
|
+ .todo_flags_finish = 0
|
|
+ }
|
|
+};
|
|
+
|
|
+static unsigned int check_local_variables(void)
|
|
+{
|
|
+ tree var;
|
|
+ referenced_var_iterator rvi;
|
|
+
|
|
+#if BUILDING_GCC_VERSION == 4005
|
|
+ FOR_EACH_REFERENCED_VAR(var, rvi) {
|
|
+#else
|
|
+ FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
|
|
+#endif
|
|
+ tree type = TREE_TYPE(var);
|
|
+
|
|
+ if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
|
|
+ continue;
|
|
+
|
|
+ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
|
|
+ continue;
|
|
+
|
|
+ if (!TYPE_READONLY(type))
|
|
+ continue;
|
|
+
|
|
+// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
|
|
+// continue;
|
|
+
|
|
+// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
|
|
+// continue;
|
|
+
|
|
+ if (walk_struct(type)) {
|
|
+ error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
|
|
+ return 1;
|
|
+ }
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
|
|
+{
|
|
+ const char * const plugin_name = plugin_info->base_name;
|
|
+ const int argc = plugin_info->argc;
|
|
+ const struct plugin_argument * const argv = plugin_info->argv;
|
|
+ int i;
|
|
+ bool constify = true;
|
|
+
|
|
+ struct register_pass_info local_variable_pass_info = {
|
|
+ .pass = &pass_local_variable.pass,
|
|
+ .reference_pass_name = "*referenced_vars",
|
|
+ .ref_pass_instance_number = 1,
|
|
+ .pos_op = PASS_POS_INSERT_AFTER
|
|
+ };
|
|
+
|
|
+ if (!plugin_default_version_check(version, &gcc_version)) {
|
|
+ error(G_("incompatible gcc/plugin versions"));
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < argc; ++i) {
|
|
+ if (!(strcmp(argv[i].key, "no-constify"))) {
|
|
+ constify = false;
|
|
+ continue;
|
|
+ }
|
|
+ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
|
|
+ }
|
|
+
|
|
+ register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
|
|
+ if (constify) {
|
|
+ register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
|
|
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
|
|
+ }
|
|
+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/tools/gcc/generate_size_overflow_hash.sh b/tools/gcc/generate_size_overflow_hash.sh
|
|
new file mode 100644
|
|
index 0000000..a0fe8b2
|
|
--- /dev/null
|
|
+++ b/tools/gcc/generate_size_overflow_hash.sh
|
|
@@ -0,0 +1,94 @@
|
|
+#!/bin/bash
|
|
+
|
|
+# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c).
|
|
+
|
|
+header1="size_overflow_hash.h"
|
|
+database="size_overflow_hash.data"
|
|
+n=65536
|
|
+
|
|
+usage() {
|
|
+cat <<EOF
|
|
+usage: $0 options
|
|
+OPTIONS:
|
|
+ -h|--help help
|
|
+ -o header file
|
|
+ -d database file
|
|
+ -n hash array size
|
|
+EOF
|
|
+ return 0
|
|
+}
|
|
+
|
|
+while true
|
|
+do
|
|
+ case "$1" in
|
|
+ -h|--help) usage && exit 0;;
|
|
+ -n) n=$2; shift 2;;
|
|
+ -o) header1="$2"; shift 2;;
|
|
+ -d) database="$2"; shift 2;;
|
|
+ --) shift 1; break ;;
|
|
+ *) break ;;
|
|
+ esac
|
|
+done
|
|
+
|
|
+create_defines() {
|
|
+ for i in `seq 1 10`
|
|
+ do
|
|
+ echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1"
|
|
+ done
|
|
+ echo >> "$header1"
|
|
+}
|
|
+
|
|
+create_structs () {
|
|
+ rm -f "$header1"
|
|
+
|
|
+ create_defines
|
|
+
|
|
+ cat "$database" | while read data
|
|
+ do
|
|
+ data_array=($data)
|
|
+ struct_hash_name="${data_array[0]}"
|
|
+ funcn="${data_array[1]}"
|
|
+ params="${data_array[2]}"
|
|
+ next="${data_array[5]}"
|
|
+
|
|
+ echo "struct size_overflow_hash $struct_hash_name = {" >> "$header1"
|
|
+
|
|
+ echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1"
|
|
+ echo -en "\t.param\t= " >> "$header1"
|
|
+ line=
|
|
+ for param_num in ${params//-/ };
|
|
+ do
|
|
+ line="${line}PARAM"$param_num"|"
|
|
+ done
|
|
+
|
|
+ echo -e "${line%?},\n};\n" >> "$header1"
|
|
+ done
|
|
+}
|
|
+
|
|
+create_headers () {
|
|
+ echo "struct size_overflow_hash *size_overflow_hash[$n] = {" >> "$header1"
|
|
+}
|
|
+
|
|
+create_array_elements () {
|
|
+ index=0
|
|
+ grep -v "nohasharray" $database | sort -n -k 4 | while read data
|
|
+ do
|
|
+ data_array=($data)
|
|
+ i="${data_array[3]}"
|
|
+ hash="${data_array[4]}"
|
|
+ while [[ $index -lt $i ]]
|
|
+ do
|
|
+ echo -e "\t["$index"]\t= NULL," >> "$header1"
|
|
+ index=$(($index + 1))
|
|
+ done
|
|
+ index=$(($index + 1))
|
|
+ echo -e "\t["$i"]\t= &"$hash"," >> "$header1"
|
|
+ done
|
|
+ echo '};' >> $header1
|
|
+}
|
|
+
|
|
+create_structs
|
|
+create_headers
|
|
+create_array_elements
|
|
+
|
|
+exit 0
|
|
diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
|
|
new file mode 100644
|
|
index 0000000..ab272e1
|
|
--- /dev/null
|
|
+++ b/tools/gcc/kallocstat_plugin.c
|
|
@@ -0,0 +1,167 @@
|
|
+/*
|
|
+ * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
|
|
+ * Licensed under the GPL v2
|
|
+ *
|
|
+ * Note: the choice of the license means that the compilation process is
|
|
+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
|
|
+ * but for the kernel it doesn't matter since it doesn't link against
|
|
+ * any of the gcc libraries
|
|
+ *
|
|
+ * gcc plugin to find the distribution of k*alloc sizes
|
|
+ *
|
|
+ * TODO:
|
|
+ *
|
|
+ * BUGS:
|
|
+ * - none known
|
|
+ */
|
|
+#include "gcc-plugin.h"
|
|
+#include "config.h"
|
|
+#include "system.h"
|
|
+#include "coretypes.h"
|
|
+#include "tree.h"
|
|
+#include "tree-pass.h"
|
|
+#include "flags.h"
|
|
+#include "intl.h"
|
|
+#include "toplev.h"
|
|
+#include "plugin.h"
|
|
+//#include "expr.h" where are you...
|
|
+#include "diagnostic.h"
|
|
+#include "plugin-version.h"
|
|
+#include "tm.h"
|
|
+#include "function.h"
|
|
+#include "basic-block.h"
|
|
+#include "gimple.h"
|
|
+#include "rtl.h"
|
|
+#include "emit-rtl.h"
|
|
+
|
|
+extern void print_gimple_stmt(FILE *, gimple, int, int);
|
|
+
|
|
+int plugin_is_GPL_compatible;
|
|
+
|
|
+static const char * const kalloc_functions[] = {
|
|
+ "__kmalloc",
|
|
+ "kmalloc",
|
|
+ "kmalloc_large",
|
|
+ "kmalloc_node",
|
|
+ "kmalloc_order",
|
|
+ "kmalloc_order_trace",
|
|
+ "kmalloc_slab",
|
|
+ "kzalloc",
|
|
+ "kzalloc_node",
|
|
+};
|
|
+
|
|
+static struct plugin_info kallocstat_plugin_info = {
|
|
+ .version = "201111150100",
|
|
+};
|
|
+
|
|
+static unsigned int execute_kallocstat(void);
|
|
+
|
|
+static struct gimple_opt_pass kallocstat_pass = {
|
|
+ .pass = {
|
|
+ .type = GIMPLE_PASS,
|
|
+ .name = "kallocstat",
|
|
+ .gate = NULL,
|
|
+ .execute = execute_kallocstat,
|
|
+ .sub = NULL,
|
|
+ .next = NULL,
|
|
+ .static_pass_number = 0,
|
|
+ .tv_id = TV_NONE,
|
|
+ .properties_required = 0,
|
|
+ .properties_provided = 0,
|
|
+ .properties_destroyed = 0,
|
|
+ .todo_flags_start = 0,
|
|
+ .todo_flags_finish = 0
|
|
+ }
|
|
+};
|
|
+
|
|
+static bool is_kalloc(const char *fnname)
|
|
+{
|
|
+ size_t i;
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
|
|
+ if (!strcmp(fnname, kalloc_functions[i]))
|
|
+ return true;
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static unsigned int execute_kallocstat(void)
|
|
+{
|
|
+ basic_block bb;
|
|
+
|
|
+ // 1. loop through BBs and GIMPLE statements
|
|
+ FOR_EACH_BB(bb) {
|
|
+ gimple_stmt_iterator gsi;
|
|
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
|
|
+ // gimple match:
|
|
+ tree fndecl, size;
|
|
+ gimple call_stmt;
|
|
+ const char *fnname;
|
|
+
|
|
+ // is it a call
|
|
+ call_stmt = gsi_stmt(gsi);
|
|
+ if (!is_gimple_call(call_stmt))
|
|
+ continue;
|
|
+ fndecl = gimple_call_fndecl(call_stmt);
|
|
+ if (fndecl == NULL_TREE)
|
|
+ continue;
|
|
+ if (TREE_CODE(fndecl) != FUNCTION_DECL)
|
|
+ continue;
|
|
+
|
|
+ // is it a call to k*alloc
|
|
+ fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
|
|
+ if (!is_kalloc(fnname))
|
|
+ continue;
|
|
+
|
|
+ // is the size arg the result of a simple const assignment
|
|
+ size = gimple_call_arg(call_stmt, 0);
|
|
+ while (true) {
|
|
+ gimple def_stmt;
|
|
+ expanded_location xloc;
|
|
+ size_t size_val;
|
|
+
|
|
+ if (TREE_CODE(size) != SSA_NAME)
|
|
+ break;
|
|
+ def_stmt = SSA_NAME_DEF_STMT(size);
|
|
+ if (!def_stmt || !is_gimple_assign(def_stmt))
|
|
+ break;
|
|
+ if (gimple_num_ops(def_stmt) != 2)
|
|
+ break;
|
|
+ size = gimple_assign_rhs1(def_stmt);
|
|
+ if (!TREE_CONSTANT(size))
|
|
+ continue;
|
|
+ xloc = expand_location(gimple_location(def_stmt));
|
|
+ if (!xloc.file)
|
|
+ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
|
|
+ size_val = TREE_INT_CST_LOW(size);
|
|
+ fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
|
|
+ break;
|
|
+ }
|
|
+//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
|
|
+//debug_tree(gimple_call_fn(call_stmt));
|
|
+//print_node(stderr, "pax", fndecl, 4);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
|
|
+{
|
|
+ const char * const plugin_name = plugin_info->base_name;
|
|
+ struct register_pass_info kallocstat_pass_info = {
|
|
+ .pass = &kallocstat_pass.pass,
|
|
+ .reference_pass_name = "ssa",
|
|
+ .ref_pass_instance_number = 1,
|
|
+ .pos_op = PASS_POS_INSERT_AFTER
|
|
+ };
|
|
+
|
|
+ if (!plugin_default_version_check(version, &gcc_version)) {
|
|
+ error(G_("incompatible gcc/plugin versions"));
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
|
|
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
|
|
new file mode 100644
|
|
index 0000000..98011fa
|
|
--- /dev/null
|
|
+++ b/tools/gcc/kernexec_plugin.c
|
|
@@ -0,0 +1,427 @@
|
|
+/*
|
|
+ * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
|
|
+ * Licensed under the GPL v2
|
|
+ *
|
|
+ * Note: the choice of the license means that the compilation process is
|
|
+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
|
|
+ * but for the kernel it doesn't matter since it doesn't link against
|
|
+ * any of the gcc libraries
|
|
+ *
|
|
+ * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
|
|
+ *
|
|
+ * TODO:
|
|
+ *
|
|
+ * BUGS:
|
|
+ * - none known
|
|
+ */
|
|
+#include "gcc-plugin.h"
|
|
+#include "config.h"
|
|
+#include "system.h"
|
|
+#include "coretypes.h"
|
|
+#include "tree.h"
|
|
+#include "tree-pass.h"
|
|
+#include "flags.h"
|
|
+#include "intl.h"
|
|
+#include "toplev.h"
|
|
+#include "plugin.h"
|
|
+//#include "expr.h" where are you...
|
|
+#include "diagnostic.h"
|
|
+#include "plugin-version.h"
|
|
+#include "tm.h"
|
|
+#include "function.h"
|
|
+#include "basic-block.h"
|
|
+#include "gimple.h"
|
|
+#include "rtl.h"
|
|
+#include "emit-rtl.h"
|
|
+#include "tree-flow.h"
|
|
+
|
|
+extern void print_gimple_stmt(FILE *, gimple, int, int);
|
|
+extern rtx emit_move_insn(rtx x, rtx y);
|
|
+
|
|
+int plugin_is_GPL_compatible;
|
|
+
|
|
+static struct plugin_info kernexec_plugin_info = {
|
|
+ .version = "201111291120",
|
|
+ .help = "method=[bts|or]\tinstrumentation method\n"
|
|
+};
|
|
+
|
|
+static unsigned int execute_kernexec_reload(void);
|
|
+static unsigned int execute_kernexec_fptr(void);
|
|
+static unsigned int execute_kernexec_retaddr(void);
|
|
+static bool kernexec_cmodel_check(void);
|
|
+
|
|
+static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
|
|
+static void (*kernexec_instrument_retaddr)(rtx);
|
|
+
|
|
+static struct gimple_opt_pass kernexec_reload_pass = {
|
|
+ .pass = {
|
|
+ .type = GIMPLE_PASS,
|
|
+ .name = "kernexec_reload",
|
|
+ .gate = kernexec_cmodel_check,
|
|
+ .execute = execute_kernexec_reload,
|
|
+ .sub = NULL,
|
|
+ .next = NULL,
|
|
+ .static_pass_number = 0,
|
|
+ .tv_id = TV_NONE,
|
|
+ .properties_required = 0,
|
|
+ .properties_provided = 0,
|
|
+ .properties_destroyed = 0,
|
|
+ .todo_flags_start = 0,
|
|
+ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
|
|
+ }
|
|
+};
|
|
+
|
|
+static struct gimple_opt_pass kernexec_fptr_pass = {
|
|
+ .pass = {
|
|
+ .type = GIMPLE_PASS,
|
|
+ .name = "kernexec_fptr",
|
|
+ .gate = kernexec_cmodel_check,
|
|
+ .execute = execute_kernexec_fptr,
|
|
+ .sub = NULL,
|
|
+ .next = NULL,
|
|
+ .static_pass_number = 0,
|
|
+ .tv_id = TV_NONE,
|
|
+ .properties_required = 0,
|
|
+ .properties_provided = 0,
|
|
+ .properties_destroyed = 0,
|
|
+ .todo_flags_start = 0,
|
|
+ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
|
|
+ }
|
|
+};
|
|
+
|
|
+static struct rtl_opt_pass kernexec_retaddr_pass = {
|
|
+ .pass = {
|
|
+ .type = RTL_PASS,
|
|
+ .name = "kernexec_retaddr",
|
|
+ .gate = kernexec_cmodel_check,
|
|
+ .execute = execute_kernexec_retaddr,
|
|
+ .sub = NULL,
|
|
+ .next = NULL,
|
|
+ .static_pass_number = 0,
|
|
+ .tv_id = TV_NONE,
|
|
+ .properties_required = 0,
|
|
+ .properties_provided = 0,
|
|
+ .properties_destroyed = 0,
|
|
+ .todo_flags_start = 0,
|
|
+ .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
|
|
+ }
|
|
+};
|
|
+
|
|
+static bool kernexec_cmodel_check(void)
|
|
+{
|
|
+ tree section;
|
|
+
|
|
+ if (ix86_cmodel != CM_KERNEL)
|
|
+ return false;
|
|
+
|
|
+ section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
|
|
+ if (!section || !TREE_VALUE(section))
|
|
+ return true;
|
|
+
|
|
+ section = TREE_VALUE(TREE_VALUE(section));
|
|
+ if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
|
|
+ return true;
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
|
|
+ */
|
|
+static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
|
|
+{
|
|
+ gimple asm_movabs_stmt;
|
|
+
|
|
+ // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
|
|
+ asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
|
|
+ gimple_asm_set_volatile(asm_movabs_stmt, true);
|
|
+ gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
|
|
+ update_stmt(asm_movabs_stmt);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * find all asm() stmts that clobber r10 and add a reload of r10
|
|
+ */
|
|
+static unsigned int execute_kernexec_reload(void)
|
|
+{
|
|
+ basic_block bb;
|
|
+
|
|
+ // 1. loop through BBs and GIMPLE statements
|
|
+ FOR_EACH_BB(bb) {
|
|
+ gimple_stmt_iterator gsi;
|
|
+
|
|
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
|
|
+ // gimple match: __asm__ ("" : : : "r10");
|
|
+ gimple asm_stmt;
|
|
+ size_t nclobbers;
|
|
+
|
|
+ // is it an asm ...
|
|
+ asm_stmt = gsi_stmt(gsi);
|
|
+ if (gimple_code(asm_stmt) != GIMPLE_ASM)
|
|
+ continue;
|
|
+
|
|
+ // ... clobbering r10
|
|
+ nclobbers = gimple_asm_nclobbers(asm_stmt);
|
|
+ while (nclobbers--) {
|
|
+ tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
|
|
+ if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
|
|
+ continue;
|
|
+ kernexec_reload_fptr_mask(&gsi);
|
|
+//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
|
|
+ * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
|
|
+ */
|
|
+static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
|
|
+{
|
|
+ gimple assign_intptr, assign_new_fptr, call_stmt;
|
|
+ tree intptr, old_fptr, new_fptr, kernexec_mask;
|
|
+
|
|
+ call_stmt = gsi_stmt(*gsi);
|
|
+ old_fptr = gimple_call_fn(call_stmt);
|
|
+
|
|
+ // create temporary unsigned long variable used for bitops and cast fptr to it
|
|
+ intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
|
|
+ add_referenced_var(intptr);
|
|
+ mark_sym_for_renaming(intptr);
|
|
+ assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
|
|
+ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
|
|
+ update_stmt(assign_intptr);
|
|
+
|
|
+ // apply logical or to temporary unsigned long and bitmask
|
|
+ kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
|
|
+// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
|
|
+ assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
|
|
+ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
|
|
+ update_stmt(assign_intptr);
|
|
+
|
|
+ // cast temporary unsigned long back to a temporary fptr variable
|
|
+ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
|
|
+ add_referenced_var(new_fptr);
|
|
+ mark_sym_for_renaming(new_fptr);
|
|
+ assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
|
|
+ gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
|
|
+ update_stmt(assign_new_fptr);
|
|
+
|
|
+ // replace call stmt fn with the new fptr
|
|
+ gimple_call_set_fn(call_stmt, new_fptr);
|
|
+ update_stmt(call_stmt);
|
|
+}
|
|
+
|
|
+static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
|
|
+{
|
|
+ gimple asm_or_stmt, call_stmt;
|
|
+ tree old_fptr, new_fptr, input, output;
|
|
+ VEC(tree, gc) *inputs = NULL;
|
|
+ VEC(tree, gc) *outputs = NULL;
|
|
+
|
|
+ call_stmt = gsi_stmt(*gsi);
|
|
+ old_fptr = gimple_call_fn(call_stmt);
|
|
+
|
|
+ // create temporary fptr variable
|
|
+ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
|
|
+ add_referenced_var(new_fptr);
|
|
+ mark_sym_for_renaming(new_fptr);
|
|
+
|
|
+ // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
|
|
+ input = build_tree_list(NULL_TREE, build_string(2, "0"));
|
|
+ input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
|
|
+ output = build_tree_list(NULL_TREE, build_string(3, "=r"));
|
|
+ output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
|
|
+ VEC_safe_push(tree, gc, inputs, input);
|
|
+ VEC_safe_push(tree, gc, outputs, output);
|
|
+ asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
|
|
+ gimple_asm_set_volatile(asm_or_stmt, true);
|
|
+ gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
|
|
+ update_stmt(asm_or_stmt);
|
|
+
|
|
+ // replace call stmt fn with the new fptr
|
|
+ gimple_call_set_fn(call_stmt, new_fptr);
|
|
+ update_stmt(call_stmt);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
|
|
+ */
|
|
+static unsigned int execute_kernexec_fptr(void)
|
|
+{
|
|
+ basic_block bb;
|
|
+
|
|
+ // 1. loop through BBs and GIMPLE statements
|
|
+ FOR_EACH_BB(bb) {
|
|
+ gimple_stmt_iterator gsi;
|
|
+
|
|
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
|
|
+ // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
|
|
+ tree fn;
|
|
+ gimple call_stmt;
|
|
+
|
|
+ // is it a call ...
|
|
+ call_stmt = gsi_stmt(gsi);
|
|
+ if (!is_gimple_call(call_stmt))
|
|
+ continue;
|
|
+ fn = gimple_call_fn(call_stmt);
|
|
+ if (TREE_CODE(fn) == ADDR_EXPR)
|
|
+ continue;
|
|
+ if (TREE_CODE(fn) != SSA_NAME)
|
|
+ gcc_unreachable();
|
|
+
|
|
+ // ... through a function pointer
|
|
+ fn = SSA_NAME_VAR(fn);
|
|
+ if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
|
|
+ continue;
|
|
+ fn = TREE_TYPE(fn);
|
|
+ if (TREE_CODE(fn) != POINTER_TYPE)
|
|
+ continue;
|
|
+ fn = TREE_TYPE(fn);
|
|
+ if (TREE_CODE(fn) != FUNCTION_TYPE)
|
|
+ continue;
|
|
+
|
|
+ kernexec_instrument_fptr(&gsi);
|
|
+
|
|
+//debug_tree(gimple_call_fn(call_stmt));
|
|
+//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
|
|
+static void kernexec_instrument_retaddr_bts(rtx insn)
|
|
+{
|
|
+ rtx btsq;
|
|
+ rtvec argvec, constraintvec, labelvec;
|
|
+ int line;
|
|
+
|
|
+ // create asm volatile("btsq $63,(%%rsp)":::)
|
|
+ argvec = rtvec_alloc(0);
|
|
+ constraintvec = rtvec_alloc(0);
|
|
+ labelvec = rtvec_alloc(0);
|
|
+ line = expand_location(RTL_LOCATION(insn)).line;
|
|
+ btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
|
|
+ MEM_VOLATILE_P(btsq) = 1;
|
|
+// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
|
|
+ emit_insn_before(btsq, insn);
|
|
+}
|
|
+
|
|
+// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
|
|
+static void kernexec_instrument_retaddr_or(rtx insn)
|
|
+{
|
|
+ rtx orq;
|
|
+ rtvec argvec, constraintvec, labelvec;
|
|
+ int line;
|
|
+
|
|
+ // create asm volatile("orq %%r10,(%%rsp)":::)
|
|
+ argvec = rtvec_alloc(0);
|
|
+ constraintvec = rtvec_alloc(0);
|
|
+ labelvec = rtvec_alloc(0);
|
|
+ line = expand_location(RTL_LOCATION(insn)).line;
|
|
+ orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
|
|
+ MEM_VOLATILE_P(orq) = 1;
|
|
+// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
|
|
+ emit_insn_before(orq, insn);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * find all asm level function returns and forcibly set the highest bit of the return address
|
|
+ */
|
|
+static unsigned int execute_kernexec_retaddr(void)
|
|
+{
|
|
+ rtx insn;
|
|
+
|
|
+ // 1. find function returns
|
|
+ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
|
|
+ // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
|
|
+ // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
|
|
+ rtx body;
|
|
+
|
|
+ // is it a retn
|
|
+ if (!JUMP_P(insn))
|
|
+ continue;
|
|
+ body = PATTERN(insn);
|
|
+ if (GET_CODE(body) == PARALLEL)
|
|
+ body = XVECEXP(body, 0, 0);
|
|
+ if (GET_CODE(body) != RETURN)
|
|
+ continue;
|
|
+ kernexec_instrument_retaddr(insn);
|
|
+ }
|
|
+
|
|
+// print_simple_rtl(stderr, get_insns());
|
|
+// print_rtl(stderr, get_insns());
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
|
|
+{
|
|
+ const char * const plugin_name = plugin_info->base_name;
|
|
+ const int argc = plugin_info->argc;
|
|
+ const struct plugin_argument * const argv = plugin_info->argv;
|
|
+ int i;
|
|
+ struct register_pass_info kernexec_reload_pass_info = {
|
|
+ .pass = &kernexec_reload_pass.pass,
|
|
+ .reference_pass_name = "ssa",
|
|
+ .ref_pass_instance_number = 1,
|
|
+ .pos_op = PASS_POS_INSERT_AFTER
|
|
+ };
|
|
+ struct register_pass_info kernexec_fptr_pass_info = {
|
|
+ .pass = &kernexec_fptr_pass.pass,
|
|
+ .reference_pass_name = "ssa",
|
|
+ .ref_pass_instance_number = 1,
|
|
+ .pos_op = PASS_POS_INSERT_AFTER
|
|
+ };
|
|
+ struct register_pass_info kernexec_retaddr_pass_info = {
|
|
+ .pass = &kernexec_retaddr_pass.pass,
|
|
+ .reference_pass_name = "pro_and_epilogue",
|
|
+ .ref_pass_instance_number = 1,
|
|
+ .pos_op = PASS_POS_INSERT_AFTER
|
|
+ };
|
|
+
|
|
+ if (!plugin_default_version_check(version, &gcc_version)) {
|
|
+ error(G_("incompatible gcc/plugin versions"));
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
|
|
+
|
|
+ if (TARGET_64BIT == 0)
|
|
+ return 0;
|
|
+
|
|
+ for (i = 0; i < argc; ++i) {
|
|
+ if (!strcmp(argv[i].key, "method")) {
|
|
+ if (!argv[i].value) {
|
|
+ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
|
|
+ continue;
|
|
+ }
|
|
+ if (!strcmp(argv[i].value, "bts")) {
|
|
+ kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
|
|
+ kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
|
|
+ } else if (!strcmp(argv[i].value, "or")) {
|
|
+ kernexec_instrument_fptr = kernexec_instrument_fptr_or;
|
|
+ kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
|
|
+ fix_register("r10", 1, 1);
|
|
+ } else
|
|
+ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
|
|
+ continue;
|
|
+ }
|
|
+ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
|
|
+ }
|
|
+ if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
|
|
+ error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
|
|
+
|
|
+ if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
|
|
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
|
|
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
|
|
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
|
|
new file mode 100644
|
|
index 0000000..b8008f7
|
|
--- /dev/null
|
|
+++ b/tools/gcc/latent_entropy_plugin.c
|
|
@@ -0,0 +1,295 @@
|
|
+/*
|
|
+ * Copyright 2012 by the PaX Team <pageexec@freemail.hu>
|
|
+ * Licensed under the GPL v2
|
|
+ *
|
|
+ * Note: the choice of the license means that the compilation process is
|
|
+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
|
|
+ * but for the kernel it doesn't matter since it doesn't link against
|
|
+ * any of the gcc libraries
|
|
+ *
|
|
+ * gcc plugin to help generate a little bit of entropy from program state,
|
|
+ * used during boot in the kernel
|
|
+ *
|
|
+ * TODO:
|
|
+ * - add ipa pass to identify not explicitly marked candidate functions
|
|
+ * - mix in more program state (function arguments/return values, loop variables, etc)
|
|
+ * - more instrumentation control via attribute parameters
|
|
+ *
|
|
+ * BUGS:
|
|
+ * - LTO needs -flto-partition=none for now
|
|
+ */
|
|
+#include "gcc-plugin.h"
|
|
+#include "config.h"
|
|
+#include "system.h"
|
|
+#include "coretypes.h"
|
|
+#include "tree.h"
|
|
+#include "tree-pass.h"
|
|
+#include "flags.h"
|
|
+#include "intl.h"
|
|
+#include "toplev.h"
|
|
+#include "plugin.h"
|
|
+//#include "expr.h" where are you...
|
|
+#include "diagnostic.h"
|
|
+#include "plugin-version.h"
|
|
+#include "tm.h"
|
|
+#include "function.h"
|
|
+#include "basic-block.h"
|
|
+#include "gimple.h"
|
|
+#include "rtl.h"
|
|
+#include "emit-rtl.h"
|
|
+#include "tree-flow.h"
|
|
+
|
|
+int plugin_is_GPL_compatible;
|
|
+
|
|
+static tree latent_entropy_decl;
|
|
+
|
|
+static struct plugin_info latent_entropy_plugin_info = {
|
|
+ .version = "201207271820",
|
|
+ .help = NULL
|
|
+};
|
|
+
|
|
+static unsigned int execute_latent_entropy(void);
|
|
+static bool gate_latent_entropy(void);
|
|
+
|
|
+static struct gimple_opt_pass latent_entropy_pass = {
|
|
+ .pass = {
|
|
+ .type = GIMPLE_PASS,
|
|
+ .name = "latent_entropy",
|
|
+ .gate = gate_latent_entropy,
|
|
+ .execute = execute_latent_entropy,
|
|
+ .sub = NULL,
|
|
+ .next = NULL,
|
|
+ .static_pass_number = 0,
|
|
+ .tv_id = TV_NONE,
|
|
+ .properties_required = PROP_gimple_leh | PROP_cfg,
|
|
+ .properties_provided = 0,
|
|
+ .properties_destroyed = 0,
|
|
+ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
|
|
+ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
|
|
+ }
|
|
+};
|
|
+
|
|
+static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
|
|
+{
|
|
+ if (TREE_CODE(*node) != FUNCTION_DECL) {
|
|
+ *no_add_attrs = true;
|
|
+ error("%qE attribute only applies to functions", name);
|
|
+ }
|
|
+ return NULL_TREE;
|
|
+}
|
|
+
|
|
+static struct attribute_spec latent_entropy_attr = {
|
|
+ .name = "latent_entropy",
|
|
+ .min_length = 0,
|
|
+ .max_length = 0,
|
|
+ .decl_required = true,
|
|
+ .type_required = false,
|
|
+ .function_type_required = false,
|
|
+ .handler = handle_latent_entropy_attribute,
|
|
+#if BUILDING_GCC_VERSION >= 4007
|
|
+ .affects_type_identity = false
|
|
+#endif
|
|
+};
|
|
+
|
|
+static void register_attributes(void *event_data, void *data)
|
|
+{
|
|
+ register_attribute(&latent_entropy_attr);
|
|
+}
|
|
+
|
|
+static bool gate_latent_entropy(void)
|
|
+{
|
|
+ tree latent_entropy_attr;
|
|
+
|
|
+ latent_entropy_attr = lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl));
|
|
+ return latent_entropy_attr != NULL_TREE;
|
|
+}
|
|
+
|
|
+static unsigned HOST_WIDE_INT seed;
|
|
+static unsigned HOST_WIDE_INT get_random_const(void)
|
|
+{
|
|
+ seed = (seed >> 1U) ^ (-(seed & 1ULL) & 0xD800000000000000ULL);
|
|
+ return seed;
|
|
+}
|
|
+
|
|
+static enum tree_code get_op(tree *rhs)
|
|
+{
|
|
+ static enum tree_code op;
|
|
+ unsigned HOST_WIDE_INT random_const;
|
|
+
|
|
+ random_const = get_random_const();
|
|
+
|
|
+ switch (op) {
|
|
+ case BIT_XOR_EXPR:
|
|
+ op = PLUS_EXPR;
|
|
+ break;
|
|
+
|
|
+ case PLUS_EXPR:
|
|
+ if (rhs) {
|
|
+ op = LROTATE_EXPR;
|
|
+ random_const &= HOST_BITS_PER_WIDE_INT - 1;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ case LROTATE_EXPR:
|
|
+ default:
|
|
+ op = BIT_XOR_EXPR;
|
|
+ break;
|
|
+ }
|
|
+ if (rhs)
|
|
+ *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
|
|
+ return op;
|
|
+}
|
|
+
|
|
+static void perturb_local_entropy(basic_block bb, tree local_entropy)
|
|
+{
|
|
+ gimple_stmt_iterator gsi;
|
|
+ gimple assign;
|
|
+ tree addxorrol, rhs;
|
|
+ enum tree_code op;
|
|
+
|
|
+ op = get_op(&rhs);
|
|
+ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
|
|
+ assign = gimple_build_assign(local_entropy, addxorrol);
|
|
+ find_referenced_vars_in(assign);
|
|
+//debug_bb(bb);
|
|
+ gsi = gsi_after_labels(bb);
|
|
+ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
|
|
+ update_stmt(assign);
|
|
+}
|
|
+
|
|
+static void perturb_latent_entropy(basic_block bb, tree rhs)
|
|
+{
|
|
+ gimple_stmt_iterator gsi;
|
|
+ gimple assign;
|
|
+ tree addxorrol, temp;
|
|
+
|
|
+ // 1. create temporary copy of latent_entropy
|
|
+ temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
|
|
+ add_referenced_var(temp);
|
|
+ mark_sym_for_renaming(temp);
|
|
+
|
|
+ // 2. read...
|
|
+ assign = gimple_build_assign(temp, latent_entropy_decl);
|
|
+ find_referenced_vars_in(assign);
|
|
+ gsi = gsi_after_labels(bb);
|
|
+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
|
|
+ update_stmt(assign);
|
|
+
|
|
+ // 3. ...modify...
|
|
+ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
|
|
+ assign = gimple_build_assign(temp, addxorrol);
|
|
+ find_referenced_vars_in(assign);
|
|
+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
|
|
+ update_stmt(assign);
|
|
+
|
|
+ // 4. ...write latent_entropy
|
|
+ assign = gimple_build_assign(latent_entropy_decl, temp);
|
|
+ find_referenced_vars_in(assign);
|
|
+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
|
|
+ update_stmt(assign);
|
|
+}
|
|
+
|
|
+static unsigned int execute_latent_entropy(void)
|
|
+{
|
|
+ basic_block bb;
|
|
+ gimple assign;
|
|
+ gimple_stmt_iterator gsi;
|
|
+ tree local_entropy;
|
|
+
|
|
+ if (!latent_entropy_decl) {
|
|
+ struct varpool_node *node;
|
|
+
|
|
+ for (node = varpool_nodes; node; node = node->next) {
|
|
+ tree var = node->decl;
|
|
+ if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
|
|
+ continue;
|
|
+ latent_entropy_decl = var;
|
|
+// debug_tree(var);
|
|
+ break;
|
|
+ }
|
|
+ if (!latent_entropy_decl) {
|
|
+// debug_tree(current_function_decl);
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
|
|
+
|
|
+ // 1. create local entropy variable
|
|
+ local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
|
|
+ add_referenced_var(local_entropy);
|
|
+ mark_sym_for_renaming(local_entropy);
|
|
+
|
|
+ // 2. initialize local entropy variable
|
|
+ bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
|
|
+ if (dom_info_available_p(CDI_DOMINATORS))
|
|
+ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
|
|
+ gsi = gsi_start_bb(bb);
|
|
+
|
|
+ assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
|
|
+// gimple_set_location(assign, loc);
|
|
+ find_referenced_vars_in(assign);
|
|
+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
|
|
+ update_stmt(assign);
|
|
+ bb = bb->next_bb;
|
|
+
|
|
+ // 3. instrument each BB with an operation on the local entropy variable
|
|
+ while (bb != EXIT_BLOCK_PTR) {
|
|
+ perturb_local_entropy(bb, local_entropy);
|
|
+ bb = bb->next_bb;
|
|
+ };
|
|
+
|
|
+ // 4. mix local entropy into the global entropy variable
|
|
+ perturb_latent_entropy(EXIT_BLOCK_PTR->prev_bb, local_entropy);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void start_unit_callback(void *gcc_data, void *user_data)
|
|
+{
|
|
+#if BUILDING_GCC_VERSION >= 4007
|
|
+ seed = get_random_seed(false);
|
|
+#else
|
|
+ sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
|
|
+ seed *= seed;
|
|
+#endif
|
|
+
|
|
+ if (in_lto_p)
|
|
+ return;
|
|
+
|
|
+ // extern u64 latent_entropy
|
|
+ latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), unsigned_intDI_type_node);
|
|
+
|
|
+ TREE_STATIC(latent_entropy_decl) = 1;
|
|
+ TREE_PUBLIC(latent_entropy_decl) = 1;
|
|
+ TREE_USED(latent_entropy_decl) = 1;
|
|
+ TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
|
|
+ DECL_EXTERNAL(latent_entropy_decl) = 1;
|
|
+ DECL_ARTIFICIAL(latent_entropy_decl) = 0;
|
|
+ DECL_INITIAL(latent_entropy_decl) = NULL;
|
|
+// DECL_ASSEMBLER_NAME(latent_entropy_decl);
|
|
+// varpool_finalize_decl(latent_entropy_decl);
|
|
+// varpool_mark_needed_node(latent_entropy_decl);
|
|
+}
|
|
+
|
|
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
|
|
+{
|
|
+ const char * const plugin_name = plugin_info->base_name;
|
|
+ struct register_pass_info latent_entropy_pass_info = {
|
|
+ .pass = &latent_entropy_pass.pass,
|
|
+ .reference_pass_name = "optimized",
|
|
+ .ref_pass_instance_number = 1,
|
|
+ .pos_op = PASS_POS_INSERT_BEFORE
|
|
+ };
|
|
+
|
|
+ if (!plugin_default_version_check(version, &gcc_version)) {
|
|
+ error(G_("incompatible gcc/plugin versions"));
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
|
|
+ register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
|
|
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
|
|
+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
|
|
new file mode 100644
|
|
index 0000000..ac47bec
|
|
--- /dev/null
|
|
+++ b/tools/gcc/size_overflow_hash.data
|
|
@@ -0,0 +1,3028 @@
|
|
+_000001_hash alloc_dr 2 65495 _000001_hash NULL
|
|
+_000002_hash __copy_from_user 3 10918 _000002_hash NULL
|
|
+_000003_hash copy_from_user 3 17559 _000003_hash NULL
|
|
+_000004_hash __copy_from_user_inatomic 3 4365 _000004_hash NULL
|
|
+_000005_hash __copy_from_user_nocache 3 39351 _000005_hash NULL
|
|
+_000006_hash __copy_to_user_inatomic 3 19214 _000006_hash NULL
|
|
+_000007_hash do_xip_mapping_read 5 60297 _000007_hash NULL
|
|
+_000008_hash hugetlbfs_read 3 11268 _000008_hash NULL
|
|
+_000009_hash kmalloc 1 60432 _002597_hash NULL nohasharray
|
|
+_000010_hash kmalloc_array 1-2 9444 _000010_hash NULL
|
|
+_000012_hash kmalloc_slab 1 11917 _000012_hash NULL
|
|
+_000013_hash kmemdup 2 64015 _000013_hash NULL
|
|
+_000014_hash __krealloc 2 14857 _000331_hash NULL nohasharray
|
|
+_000015_hash memdup_user 2 59590 _000015_hash NULL
|
|
+_000016_hash module_alloc 1 63630 _000016_hash NULL
|
|
+_000017_hash read_default_ldt 2 14302 _000017_hash NULL
|
|
+_000018_hash read_kcore 3 63488 _000018_hash NULL
|
|
+_000019_hash read_ldt 2 47570 _000019_hash NULL
|
|
+_000020_hash read_zero 3 19366 _000020_hash NULL
|
|
+_000021_hash __vmalloc_node 1 39308 _000021_hash NULL
|
|
+_000022_hash vm_map_ram 2 23078 _001054_hash NULL nohasharray
|
|
+_000023_hash aa_simple_write_to_buffer 4-3 49683 _000023_hash NULL
|
|
+_000024_hash ablkcipher_copy_iv 3 64140 _000024_hash NULL
|
|
+_000025_hash ablkcipher_next_slow 4 47274 _000025_hash NULL
|
|
+_000026_hash acpi_battery_write_alarm 3 1240 _000026_hash NULL
|
|
+_000027_hash acpi_os_allocate 1 14892 _000027_hash NULL
|
|
+_000028_hash acpi_system_write_wakeup_device 3 34853 _000028_hash NULL
|
|
+_000029_hash adu_write 3 30487 _000029_hash NULL
|
|
+_000030_hash aer_inject_write 3 52399 _000030_hash NULL
|
|
+_000031_hash afs_alloc_flat_call 2-3 36399 _000031_hash NULL
|
|
+_000033_hash afs_proc_cells_write 3 61139 _000033_hash NULL
|
|
+_000034_hash afs_proc_rootcell_write 3 15822 _000034_hash NULL
|
|
+_000035_hash agp_3_5_isochronous_node_enable 3 49465 _000035_hash NULL
|
|
+_000036_hash agp_alloc_page_array 1 22554 _000036_hash NULL
|
|
+_000037_hash ah_alloc_tmp 2 54378 _000037_hash NULL
|
|
+_000038_hash ahash_setkey_unaligned 3 33521 _000038_hash NULL
|
|
+_000039_hash alg_setkey 3 31485 _000039_hash NULL
|
|
+_000040_hash aligned_kmalloc 1 3628 _000040_hash NULL
|
|
+_000041_hash alloc_context 1 3194 _000041_hash NULL
|
|
+_000042_hash alloc_ep_req 2 54860 _000042_hash NULL
|
|
+_000043_hash alloc_fdmem 1 27083 _000043_hash NULL
|
|
+_000044_hash alloc_flex_gd 1 57259 _000044_hash NULL
|
|
+_000045_hash alloc_sglist 1-3-2 22960 _000045_hash NULL
|
|
+_000046_hash aoedev_flush 2 44398 _000046_hash NULL
|
|
+_000047_hash append_to_buffer 3 63550 _000047_hash NULL
|
|
+_000048_hash asix_read_cmd 5 13245 _000048_hash NULL
|
|
+_000049_hash asix_write_cmd 5 58192 _000049_hash NULL
|
|
+_000050_hash asn1_octets_decode 2 9991 _000050_hash NULL
|
|
+_000051_hash asn1_oid_decode 2 4999 _000051_hash NULL
|
|
+_000052_hash at76_set_card_command 4 4471 _000052_hash NULL
|
|
+_000053_hash ath6kl_add_bss_if_needed 6 24317 _000053_hash NULL
|
|
+_000054_hash ath6kl_debug_roam_tbl_event 3 5224 _000054_hash NULL
|
|
+_000055_hash ath6kl_mgmt_powersave_ap 6 13791 _000055_hash NULL
|
|
+_000056_hash ath6kl_send_go_probe_resp 3 21113 _000056_hash NULL
|
|
+_000057_hash ath6kl_set_ap_probe_resp_ies 3 50539 _000057_hash NULL
|
|
+_000058_hash ath6kl_set_assoc_req_ies 3 43185 _000058_hash NULL
|
|
+_000059_hash ath6kl_wmi_bssinfo_event_rx 3 2275 _000059_hash NULL
|
|
+_000060_hash ath6kl_wmi_send_action_cmd 7 58860 _000060_hash NULL
|
|
+_000061_hash __ath6kl_wmi_send_mgmt_cmd 7 38971 _000061_hash NULL
|
|
+_000062_hash attach_hdlc_protocol 3 19986 _000062_hash NULL
|
|
+_000063_hash audio_write 4 54261 _001597_hash NULL nohasharray
|
|
+_000064_hash audit_unpack_string 3 13748 _000064_hash NULL
|
|
+_000065_hash av7110_vbi_write 3 34384 _000065_hash NULL
|
|
+_000066_hash ax25_setsockopt 5 42740 _000066_hash NULL
|
|
+_000067_hash b43_debugfs_write 3 34838 _000067_hash NULL
|
|
+_000068_hash b43legacy_debugfs_write 3 28556 _000068_hash NULL
|
|
+_000069_hash bch_alloc 1 4593 _000069_hash NULL
|
|
+_000070_hash befs_nls2utf 3 17163 _000070_hash NULL
|
|
+_000071_hash befs_utf2nls 3 25628 _000071_hash NULL
|
|
+_000072_hash bfad_debugfs_write_regrd 3 15218 _000072_hash NULL
|
|
+_000073_hash bfad_debugfs_write_regwr 3 61841 _000073_hash NULL
|
|
+_000074_hash bio_alloc_map_data 1-2 50782 _000074_hash NULL
|
|
+_000076_hash bio_kmalloc 2 54672 _000076_hash NULL
|
|
+_000077_hash blkcipher_copy_iv 3 24075 _000077_hash NULL
|
|
+_000078_hash blkcipher_next_slow 4 52733 _000078_hash NULL
|
|
+_000079_hash bl_pipe_downcall 3 34264 _000079_hash NULL
|
|
+_000080_hash bnad_debugfs_write_regrd 3 6706 _000080_hash NULL
|
|
+_000081_hash bnad_debugfs_write_regwr 3 57500 _000081_hash NULL
|
|
+_000082_hash bnx2fc_cmd_mgr_alloc 2-3 24873 _000082_hash NULL
|
|
+_000084_hash bnx2_nvram_write 4 7790 _000084_hash NULL
|
|
+_000085_hash brcmf_sdbrcm_downloadvars 3 42064 _000085_hash NULL
|
|
+_000086_hash btmrvl_gpiogap_write 3 35053 _000086_hash NULL
|
|
+_000087_hash btmrvl_hscfgcmd_write 3 27143 _000087_hash NULL
|
|
+_000088_hash btmrvl_hscmd_write 3 27089 _000088_hash NULL
|
|
+_000089_hash btmrvl_hsmode_write 3 42252 _000089_hash NULL
|
|
+_000090_hash btmrvl_pscmd_write 3 29504 _000090_hash NULL
|
|
+_000091_hash btmrvl_psmode_write 3 3703 _000091_hash NULL
|
|
+_000092_hash btrfs_alloc_delayed_item 1 11678 _000092_hash NULL
|
|
+_000093_hash cache_do_downcall 3 6926 _000093_hash NULL
|
|
+_000094_hash cachefiles_cook_key 2 33274 _000094_hash NULL
|
|
+_000095_hash cachefiles_daemon_write 3 43535 _000095_hash NULL
|
|
+_000096_hash capi_write 3 35104 _000096_hash NULL
|
|
+_000097_hash carl9170_debugfs_write 3 50857 _000097_hash NULL
|
|
+_000098_hash cciss_allocate_sg_chain_blocks 2-3 5368 _000098_hash NULL
|
|
+_000100_hash cciss_proc_write 3 10259 _000100_hash NULL
|
|
+_000101_hash cdrom_read_cdda_old 4 27664 _000101_hash NULL
|
|
+_000102_hash ceph_alloc_page_vector 1 18710 _000102_hash NULL
|
|
+_000103_hash ceph_buffer_new 1 35974 _000103_hash NULL
|
|
+_000104_hash ceph_copy_user_to_page_vector 4 656 _000104_hash NULL
|
|
+_000105_hash ceph_get_direct_page_vector 2 41917 _000105_hash NULL
|
|
+_000106_hash ceph_msg_new 2 5846 _000106_hash NULL
|
|
+_000107_hash ceph_setxattr 4 18913 _000107_hash NULL
|
|
+_000108_hash cfi_read_pri 3 24366 _000108_hash NULL
|
|
+_000109_hash cgroup_write_string 5 10900 _000109_hash NULL
|
|
+_000110_hash cgroup_write_X64 5 54514 _000110_hash NULL
|
|
+_000111_hash change_xattr 5 61390 _000111_hash NULL
|
|
+_000112_hash check_load_and_stores 2 2143 _000112_hash NULL
|
|
+_000113_hash cifs_idmap_key_instantiate 3 54503 _000113_hash NULL
|
|
+_000114_hash cifs_security_flags_proc_write 3 5484 _000114_hash NULL
|
|
+_000115_hash cifs_setxattr 4 23957 _000115_hash NULL
|
|
+_000116_hash cifs_spnego_key_instantiate 3 23588 _000116_hash NULL
|
|
+_000117_hash ci_ll_write 4 3740 _000117_hash NULL
|
|
+_000118_hash cld_pipe_downcall 3 15058 _000118_hash NULL
|
|
+_000119_hash clear_refs_write 3 61904 _000119_hash NULL
|
|
+_000120_hash clusterip_proc_write 3 44729 _000120_hash NULL
|
|
+_000121_hash cm4040_write 3 58079 _000121_hash NULL
|
|
+_000122_hash cm_copy_private_data 2 3649 _000122_hash NULL
|
|
+_000123_hash cmm_write 3 2896 _000123_hash NULL
|
|
+_000124_hash cm_write 3 36858 _000124_hash NULL
|
|
+_000125_hash coda_psdev_write 3 1711 _000125_hash NULL
|
|
+_000126_hash codec_reg_read_file 3 36280 _000126_hash NULL
|
|
+_000127_hash command_file_write 3 31318 _000127_hash NULL
|
|
+_000128_hash command_write 3 58841 _000128_hash NULL
|
|
+_000129_hash comm_write 3 44537 _001532_hash NULL nohasharray
|
|
+_000130_hash concat_writev 3 21451 _000130_hash NULL
|
|
+_000131_hash copy_and_check 3 19089 _000131_hash NULL
|
|
+_000132_hash copy_from_user_toio 3 31966 _000132_hash NULL
|
|
+_000133_hash copy_items 6 50140 _000133_hash NULL
|
|
+_000134_hash copy_macs 4 45534 _000134_hash NULL
|
|
+_000135_hash __copy_to_user 3 17551 _000135_hash NULL
|
|
+_000136_hash copy_vm86_regs_from_user 3 45340 _000136_hash NULL
|
|
+_000137_hash cosa_write 3 1774 _000137_hash NULL
|
|
+_000138_hash create_entry 2 33479 _000138_hash NULL
|
|
+_000139_hash create_queues 2-3 9088 _000139_hash NULL
|
|
+_000141_hash create_xattr 5 54106 _000141_hash NULL
|
|
+_000142_hash create_xattr_datum 5 33356 _000142_hash NULL
|
|
+_000143_hash csum_partial_copy_fromiovecend 3-4 9957 _000143_hash NULL
|
|
+_000145_hash ctrl_out 3-5 8712 _000145_hash NULL
|
|
+_000147_hash cx24116_writeregN 4 41975 _000147_hash NULL
|
|
+_000148_hash cxacru_cm_get_array 4 4412 _000148_hash NULL
|
|
+_000149_hash cxgbi_alloc_big_mem 1 4707 _000149_hash NULL
|
|
+_000150_hash dac960_user_command_proc_write 3 3071 _000150_hash NULL
|
|
+_000151_hash datablob_format 2 39571 _002156_hash NULL nohasharray
|
|
+_000152_hash dccp_feat_clone_sp_val 3 11942 _000152_hash NULL
|
|
+_000153_hash dccp_setsockopt_ccid 4 30701 _000153_hash NULL
|
|
+_000154_hash dccp_setsockopt_cscov 2 37766 _000154_hash NULL
|
|
+_000155_hash dccp_setsockopt_service 4 65336 _000155_hash NULL
|
|
+_000156_hash ddb_output_write 3 31902 _000156_hash NULL
|
|
+_000157_hash ddebug_proc_write 3 18055 _000157_hash NULL
|
|
+_000158_hash dev_config 3 8506 _000158_hash NULL
|
|
+_000159_hash device_write 3 45156 _000159_hash NULL
|
|
+_000160_hash devm_kzalloc 2 4966 _000160_hash NULL
|
|
+_000161_hash devres_alloc 2 551 _000161_hash NULL
|
|
+_000162_hash dfs_file_write 3 41196 _000162_hash NULL
|
|
+_000163_hash direct_entry 3 38836 _000163_hash NULL
|
|
+_000164_hash dispatch_proc_write 3 44320 _000164_hash NULL
|
|
+_000165_hash diva_os_copy_from_user 4 7792 _000165_hash NULL
|
|
+_000166_hash dlm_alloc_pagevec 1 54296 _000166_hash NULL
|
|
+_000167_hash dlmfs_file_read 3 28385 _000167_hash NULL
|
|
+_000168_hash dlmfs_file_write 3 6892 _000168_hash NULL
|
|
+_000169_hash dm_read 3 15674 _000169_hash NULL
|
|
+_000170_hash dm_write 3 2513 _000170_hash NULL
|
|
+_000171_hash __dn_setsockopt 5 13060 _000171_hash NULL
|
|
+_000172_hash dns_query 3 9676 _000172_hash NULL
|
|
+_000173_hash dns_resolver_instantiate 3 63314 _000173_hash NULL
|
|
+_000174_hash do_add_counters 3 3992 _000174_hash NULL
|
|
+_000175_hash __do_config_autodelink 3 58763 _000175_hash NULL
|
|
+_000176_hash do_ip_setsockopt 5 41852 _000176_hash NULL
|
|
+_000177_hash do_ipv6_setsockopt 5 18215 _000177_hash NULL
|
|
+_000178_hash do_ip_vs_set_ctl 4 48641 _000178_hash NULL
|
|
+_000179_hash do_kimage_alloc 3 64827 _000179_hash NULL
|
|
+_000180_hash do_register_entry 4 29478 _000180_hash NULL
|
|
+_000181_hash do_tty_write 5 44896 _000181_hash NULL
|
|
+_000182_hash do_update_counters 4 2259 _000182_hash NULL
|
|
+_000183_hash dsp_write 2 46218 _000183_hash NULL
|
|
+_000184_hash dup_to_netobj 3 26363 _000184_hash NULL
|
|
+_000185_hash dvb_aplay 3 56296 _000185_hash NULL
|
|
+_000186_hash dvb_ca_en50221_io_write 3 43533 _000186_hash NULL
|
|
+_000187_hash dvbdmx_write 3 19423 _000187_hash NULL
|
|
+_000188_hash dvb_play 3 50814 _000188_hash NULL
|
|
+_000189_hash dw210x_op_rw 6 39915 _000189_hash NULL
|
|
+_000190_hash dwc3_link_state_write 3 12641 _000190_hash NULL
|
|
+_000191_hash dwc3_mode_write 3 51997 _000191_hash NULL
|
|
+_000192_hash dwc3_testmode_write 3 30516 _000192_hash NULL
|
|
+_000193_hash ecryptfs_copy_filename 4 11868 _000193_hash NULL
|
|
+_000194_hash ecryptfs_miscdev_write 3 26847 _000194_hash NULL
|
|
+_000195_hash ecryptfs_send_miscdev 2 64816 _000195_hash NULL
|
|
+_000196_hash efx_tsoh_heap_alloc 2 58545 _000196_hash NULL
|
|
+_000197_hash emi26_writememory 4 57908 _000197_hash NULL
|
|
+_000198_hash emi62_writememory 4 29731 _000198_hash NULL
|
|
+_000199_hash encrypted_instantiate 3 3168 _000199_hash NULL
|
|
+_000200_hash encrypted_update 3 13414 _000200_hash NULL
|
|
+_000201_hash ep0_write 3 14536 _001328_hash NULL nohasharray
|
|
+_000202_hash ep_read 3 58813 _000202_hash NULL
|
|
+_000203_hash ep_write 3 59008 _000203_hash NULL
|
|
+_000204_hash erst_dbg_write 3 46715 _000204_hash NULL
|
|
+_000205_hash esp_alloc_tmp 2 40558 _000205_hash NULL
|
|
+_000206_hash exofs_read_lookup_dev_table 3 17733 _000206_hash NULL
|
|
+_000207_hash ext4_kvmalloc 1 14796 _000207_hash NULL
|
|
+_000208_hash ezusb_writememory 4 45976 _000208_hash NULL
|
|
+_000209_hash fanotify_write 3 64623 _000209_hash NULL
|
|
+_000210_hash fd_copyin 3 56247 _000210_hash NULL
|
|
+_000211_hash ffs_epfile_io 3 64886 _000211_hash NULL
|
|
+_000212_hash ffs_prepare_buffer 2 59892 _000212_hash NULL
|
|
+_000213_hash f_hidg_write 3 7932 _000213_hash NULL
|
|
+_000214_hash file_read_actor 4 1401 _000214_hash NULL
|
|
+_000215_hash fill_write_buffer 3 3142 _000215_hash NULL
|
|
+_000216_hash fl_create 5 56435 _000216_hash NULL
|
|
+_000217_hash ftdi_elan_write 3 57309 _000217_hash NULL
|
|
+_000218_hash fuse_conn_limit_write 3 30777 _000218_hash NULL
|
|
+_000219_hash fw_iso_buffer_init 3 54582 _000219_hash NULL
|
|
+_000220_hash garmin_write_bulk 3 58191 _000220_hash NULL
|
|
+_000221_hash garp_attr_create 3 3883 _000221_hash NULL
|
|
+_000222_hash get_arg 3 5694 _000222_hash NULL
|
|
+_000223_hash getdqbuf 1 62908 _000223_hash NULL
|
|
+_000224_hash get_fdb_entries 3 41916 _000224_hash NULL
|
|
+_000225_hash get_indirect_ea 4 51869 _000225_hash NULL
|
|
+_000226_hash get_registers 3 26187 _000226_hash NULL
|
|
+_000227_hash get_scq 2 10897 _000227_hash NULL
|
|
+_000228_hash get_server_iovec 2 16804 _000228_hash NULL
|
|
+_000229_hash get_ucode_user 3 38202 _000229_hash NULL
|
|
+_000230_hash get_user_cpu_mask 2 14861 _000230_hash NULL
|
|
+_000231_hash gfs2_alloc_sort_buffer 1 18275 _000231_hash NULL
|
|
+_000232_hash gfs2_glock_nq_m 1 20347 _000232_hash NULL
|
|
+_000233_hash gigaset_initcs 2 43753 _000233_hash NULL
|
|
+_000234_hash gigaset_initdriver 2 1060 _000234_hash NULL
|
|
+_000235_hash gs_alloc_req 2 58883 _000235_hash NULL
|
|
+_000236_hash gs_buf_alloc 2 25067 _000236_hash NULL
|
|
+_000237_hash gsm_data_alloc 3 42437 _000237_hash NULL
|
|
+_000238_hash gss_pipe_downcall 3 23182 _000238_hash NULL
|
|
+_000239_hash handle_request 9 10024 _000239_hash NULL
|
|
+_000240_hash hash_new 1 62224 _000240_hash NULL
|
|
+_000241_hash hashtab_create 3 33769 _000241_hash NULL
|
|
+_000242_hash hcd_buffer_alloc 2 27495 _000242_hash NULL
|
|
+_000243_hash hci_sock_setsockopt 5 28993 _000243_hash NULL
|
|
+_000244_hash heap_init 2 49617 _000244_hash NULL
|
|
+_000245_hash hest_ghes_dev_register 1 46766 _000245_hash NULL
|
|
+_000246_hash hidraw_get_report 3 45609 _000246_hash NULL
|
|
+_000247_hash hidraw_report_event 3 49578 _000509_hash NULL nohasharray
|
|
+_000248_hash hidraw_send_report 3 23449 _000248_hash NULL
|
|
+_000249_hash hpfs_translate_name 3 41497 _000249_hash NULL
|
|
+_000250_hash hysdn_conf_write 3 52145 _000250_hash NULL
|
|
+_000251_hash hysdn_log_write 3 48694 _000251_hash NULL
|
|
+_000252_hash __i2400mu_send_barker 3 23652 _000252_hash NULL
|
|
+_000253_hash i2cdev_read 3 1206 _000253_hash NULL
|
|
+_000254_hash i2cdev_write 3 23310 _000254_hash NULL
|
|
+_000255_hash i2o_parm_field_get 5 34477 _000255_hash NULL
|
|
+_000256_hash i2o_parm_table_get 6 61635 _000256_hash NULL
|
|
+_000257_hash ib_copy_from_udata 3 59502 _000257_hash NULL
|
|
+_000258_hash ib_ucm_alloc_data 3 36885 _000258_hash NULL
|
|
+_000259_hash ib_umad_write 3 47993 _000259_hash NULL
|
|
+_000260_hash ib_uverbs_unmarshall_recv 5 12251 _000260_hash NULL
|
|
+_000261_hash icn_writecmd 2 38629 _000261_hash NULL
|
|
+_000262_hash ide_driver_proc_write 3 32493 _000262_hash NULL
|
|
+_000263_hash ide_settings_proc_write 3 35110 _000263_hash NULL
|
|
+_000264_hash idetape_chrdev_write 3 53976 _000264_hash NULL
|
|
+_000265_hash idmap_pipe_downcall 3 14591 _000265_hash NULL
|
|
+_000266_hash ieee80211_build_probe_req 7-5 27660 _000266_hash NULL
|
|
+_000267_hash ieee80211_if_write 3 34894 _000267_hash NULL
|
|
+_000268_hash if_write 3 51756 _000268_hash NULL
|
|
+_000269_hash ilo_write 3 64378 _000269_hash NULL
|
|
+_000270_hash ima_write_policy 3 40548 _000270_hash NULL
|
|
+_000271_hash init_data_container 1 60709 _000271_hash NULL
|
|
+_000272_hash init_send_hfcd 1 34586 _000272_hash NULL
|
|
+_000273_hash insert_dent 7 65034 _000273_hash NULL
|
|
+_000274_hash interpret_user_input 2 19393 _000274_hash NULL
|
|
+_000275_hash int_proc_write 3 39542 _000275_hash NULL
|
|
+_000276_hash ioctl_private_iw_point 7 1273 _000276_hash NULL
|
|
+_000277_hash iov_iter_copy_from_user 4 31942 _000277_hash NULL
|
|
+_000278_hash iov_iter_copy_from_user_atomic 4 56368 _000278_hash NULL
|
|
+_000279_hash iowarrior_write 3 18604 _000279_hash NULL
|
|
+_000280_hash ipc_alloc 1 1192 _000280_hash NULL
|
|
+_000281_hash ipc_rcu_alloc 1 21208 _000281_hash NULL
|
|
+_000282_hash ip_options_get_from_user 4 64958 _000282_hash NULL
|
|
+_000283_hash ipv6_renew_option 3 38813 _000283_hash NULL
|
|
+_000284_hash ip_vs_conn_fill_param_sync 6 29771 _002404_hash NULL nohasharray
|
|
+_000285_hash ip_vs_create_timeout_table 2 64478 _000285_hash NULL
|
|
+_000286_hash ipw_queue_tx_init 3 49161 _000286_hash NULL
|
|
+_000287_hash irda_setsockopt 5 19824 _000287_hash NULL
|
|
+_000288_hash irias_new_octseq_value 2 13596 _000288_hash NULL
|
|
+_000289_hash ir_lirc_transmit_ir 3 64403 _000289_hash NULL
|
|
+_000290_hash irnet_ctrl_write 3 24139 _000290_hash NULL
|
|
+_000291_hash isdn_add_channels 3 40905 _000291_hash NULL
|
|
+_000292_hash isdn_ppp_fill_rq 2 41428 _000292_hash NULL
|
|
+_000293_hash isdn_ppp_write 4 29109 _000293_hash NULL
|
|
+_000294_hash isdn_read 3 50021 _000294_hash NULL
|
|
+_000295_hash isdn_v110_open 3 2418 _000295_hash NULL
|
|
+_000296_hash isdn_writebuf_stub 4 52383 _000296_hash NULL
|
|
+_000297_hash islpci_mgt_transmit 5 34133 _000297_hash NULL
|
|
+_000298_hash iso_callback 3 43208 _000298_hash NULL
|
|
+_000299_hash iso_packets_buffer_init 3 29061 _000299_hash NULL
|
|
+_000300_hash it821x_firmware_command 3 8628 _000300_hash NULL
|
|
+_000301_hash ivtv_buf_copy_from_user 4 25502 _000301_hash NULL
|
|
+_000302_hash iwch_alloc_fastreg_pbl 2 40153 _000302_hash NULL
|
|
+_000303_hash iwl_calib_set 3 34400 _002188_hash NULL nohasharray
|
|
+_000304_hash jbd2_journal_init_revoke_table 1 36336 _000304_hash NULL
|
|
+_000305_hash jffs2_alloc_full_dirent 1 60179 _001111_hash NULL nohasharray
|
|
+_000306_hash journal_init_revoke_table 1 56331 _000306_hash NULL
|
|
+_000307_hash kcalloc 1-2 27770 _000307_hash NULL
|
|
+_000309_hash keyctl_instantiate_key_common 4 47889 _000309_hash NULL
|
|
+_000310_hash keyctl_update_key 3 26061 _000310_hash NULL
|
|
+_000311_hash __kfifo_alloc 2-3 22173 _000311_hash NULL
|
|
+_000313_hash kfifo_copy_from_user 3 5091 _000313_hash NULL
|
|
+_000314_hash kmalloc_node 1 50163 _000314_hash NULL
|
|
+_000315_hash kmalloc_parameter 1 65279 _000315_hash NULL
|
|
+_000316_hash kmem_alloc 1 31920 _000316_hash NULL
|
|
+_000317_hash kobj_map 2-3 9566 _000317_hash NULL
|
|
+_000319_hash kone_receive 4 4690 _000319_hash NULL
|
|
+_000320_hash kone_send 4 63435 _000320_hash NULL
|
|
+_000321_hash krealloc 2 14908 _000321_hash NULL
|
|
+_000322_hash kvmalloc 1 32646 _000322_hash NULL
|
|
+_000323_hash kvm_read_guest_atomic 4 10765 _000323_hash NULL
|
|
+_000324_hash kvm_read_guest_cached 4 39666 _000324_hash NULL
|
|
+_000325_hash kvm_read_guest_page 5 18074 _000325_hash NULL
|
|
+_000326_hash kzalloc 1 54740 _000326_hash NULL
|
|
+_000327_hash l2cap_sock_setsockopt 5 50207 _000327_hash NULL
|
|
+_000328_hash l2cap_sock_setsockopt_old 4 29346 _000328_hash NULL
|
|
+_000329_hash lane2_associate_req 4 45398 _000329_hash NULL
|
|
+_000330_hash lbs_debugfs_write 3 48413 _000330_hash NULL
|
|
+_000331_hash lcd_write 3 14857 _000331_hash &_000014_hash
|
|
+_000332_hash ldm_frag_add 2 5611 _000332_hash NULL
|
|
+_000333_hash __lgread 4 31668 _000333_hash NULL
|
|
+_000334_hash libipw_alloc_txb 1-3-2 27579 _000334_hash NULL
|
|
+_000335_hash link_send_sections_long 4 46556 _000335_hash NULL
|
|
+_000336_hash listxattr 3 12769 _000336_hash NULL
|
|
+_000337_hash LoadBitmap 2 19658 _000337_hash NULL
|
|
+_000338_hash load_msg 2 95 _000338_hash NULL
|
|
+_000339_hash lpfc_debugfs_dif_err_write 3 17424 _000339_hash NULL
|
|
+_000340_hash lp_write 3 9511 _000340_hash NULL
|
|
+_000341_hash mb_cache_create 2 17307 _000341_hash NULL
|
|
+_000342_hash mce_write 3 26201 _000342_hash NULL
|
|
+_000343_hash mcs7830_get_reg 3 33308 _000343_hash NULL
|
|
+_000344_hash mcs7830_set_reg 3 31413 _000344_hash NULL
|
|
+_000345_hash memcpy_fromiovec 3 55247 _000345_hash NULL
|
|
+_000346_hash memcpy_fromiovecend 3-4 2707 _000346_hash NULL
|
|
+_000348_hash mempool_kmalloc 2 53831 _000348_hash NULL
|
|
+_000349_hash mempool_resize 2 47983 _001821_hash NULL nohasharray
|
|
+_000350_hash mem_rw 3 22085 _000350_hash NULL
|
|
+_000351_hash mgmt_control 3 7349 _000351_hash NULL
|
|
+_000352_hash mgmt_pending_add 5 46976 _000352_hash NULL
|
|
+_000353_hash mlx4_ib_alloc_fast_reg_page_list 2 46119 _000353_hash NULL
|
|
+_000354_hash mmc_alloc_sg 1 21504 _000354_hash NULL
|
|
+_000355_hash mmc_send_bus_test 4 18285 _000355_hash NULL
|
|
+_000356_hash mmc_send_cxd_data 5 38655 _000356_hash NULL
|
|
+_000357_hash module_alloc_update_bounds 1 47205 _000357_hash NULL
|
|
+_000358_hash move_addr_to_kernel 2 32673 _000358_hash NULL
|
|
+_000359_hash mpi_alloc_limb_space 1 23190 _000359_hash NULL
|
|
+_000360_hash mpi_resize 2 44674 _000360_hash NULL
|
|
+_000361_hash mptctl_getiocinfo 2 28545 _000361_hash NULL
|
|
+_000362_hash mtdchar_readoob 4 31200 _000362_hash NULL
|
|
+_000363_hash mtdchar_write 3 56831 _002688_hash NULL nohasharray
|
|
+_000364_hash mtdchar_writeoob 4 3393 _000364_hash NULL
|
|
+_000365_hash mtd_device_parse_register 5 5024 _000365_hash NULL
|
|
+_000366_hash mtf_test_write 3 18844 _000366_hash NULL
|
|
+_000367_hash mtrr_write 3 59622 _000367_hash NULL
|
|
+_000368_hash musb_test_mode_write 3 33518 _000368_hash NULL
|
|
+_000369_hash mwifiex_get_common_rates 3 17131 _000369_hash NULL
|
|
+_000370_hash mwifiex_update_curr_bss_params 5 16908 _000370_hash NULL
|
|
+_000371_hash nand_bch_init 2-3 16280 _001341_hash NULL nohasharray
|
|
+_000373_hash ncp_file_write 3 3813 _000373_hash NULL
|
|
+_000374_hash ncp__vol2io 5 4804 _000374_hash NULL
|
|
+_000375_hash nes_alloc_fast_reg_page_list 2 33523 _000375_hash NULL
|
|
+_000376_hash nfc_targets_found 3 29886 _000376_hash NULL
|
|
+_000377_hash nfs4_acl_new 1 49806 _000377_hash NULL
|
|
+_000378_hash nfs4_write_cached_acl 4 15070 _000378_hash NULL
|
|
+_000379_hash nfsd_cache_update 3 59574 _000379_hash NULL
|
|
+_000380_hash nfsd_symlink 6 63442 _000380_hash NULL
|
|
+_000381_hash nfs_idmap_get_desc 2-4 42990 _000381_hash NULL
|
|
+_000383_hash nfs_readdir_make_qstr 3 12509 _000383_hash NULL
|
|
+_000384_hash note_last_dentry 3 12285 _000384_hash NULL
|
|
+_000385_hash ntfs_copy_from_user 3-5 15072 _000385_hash NULL
|
|
+_000387_hash __ntfs_copy_from_user_iovec_inatomic 3-4 38153 _000387_hash NULL
|
|
+_000389_hash ntfs_ucstonls 3 23097 _000389_hash NULL
|
|
+_000390_hash nvme_alloc_iod 1 56027 _000390_hash NULL
|
|
+_000391_hash nvram_write 3 3894 _000391_hash NULL
|
|
+_000392_hash o2hb_debug_create 4 18744 _000392_hash NULL
|
|
+_000393_hash o2net_send_message_vec 4 879 _001792_hash NULL nohasharray
|
|
+_000394_hash ocfs2_control_cfu 2 37750 _000394_hash NULL
|
|
+_000395_hash oom_adjust_write 3 41116 _000395_hash NULL
|
|
+_000396_hash oom_score_adj_write 3 42594 _000396_hash NULL
|
|
+_000397_hash opera1_xilinx_rw 5 31453 _000397_hash NULL
|
|
+_000398_hash oprofilefs_ulong_from_user 3 57251 _000398_hash NULL
|
|
+_000399_hash opticon_write 4 60775 _000399_hash NULL
|
|
+_000400_hash orig_node_add_if 2 32833 _000400_hash NULL
|
|
+_000401_hash orig_node_del_if 2 28371 _000401_hash NULL
|
|
+_000402_hash p9_check_zc_errors 4 15534 _000402_hash NULL
|
|
+_000403_hash packet_buffer_init 2 1607 _000403_hash NULL
|
|
+_000404_hash packet_setsockopt 5 17662 _000404_hash NULL
|
|
+_000405_hash parse_command 2 37079 _000405_hash NULL
|
|
+_000406_hash pcbit_writecmd 2 12332 _000406_hash NULL
|
|
+_000407_hash pcmcia_replace_cis 3 57066 _000407_hash NULL
|
|
+_000408_hash pgctrl_write 3 50453 _000408_hash NULL
|
|
+_000409_hash pg_write 3 40766 _000409_hash NULL
|
|
+_000410_hash pidlist_allocate 1 64404 _000410_hash NULL
|
|
+_000411_hash pipe_iov_copy_from_user 3 23102 _000411_hash NULL
|
|
+_000412_hash pipe_iov_copy_to_user 3 3447 _000412_hash NULL
|
|
+_000413_hash pkt_add 3 39897 _000413_hash NULL
|
|
+_000414_hash pktgen_if_write 3 55628 _000414_hash NULL
|
|
+_000415_hash platform_device_add_data 3 310 _000415_hash NULL
|
|
+_000416_hash platform_device_add_resources 3 13289 _000416_hash NULL
|
|
+_000417_hash pm_qos_power_write 3 52513 _000417_hash NULL
|
|
+_000418_hash pnpbios_proc_write 3 19758 _000418_hash NULL
|
|
+_000419_hash pool_allocate 3 42012 _000419_hash NULL
|
|
+_000420_hash posix_acl_alloc 1 48063 _000420_hash NULL
|
|
+_000421_hash ppp_cp_parse_cr 4 5214 _000421_hash NULL
|
|
+_000422_hash ppp_write 3 34034 _000422_hash NULL
|
|
+_000423_hash pp_read 3 33210 _000423_hash NULL
|
|
+_000424_hash pp_write 3 39554 _000424_hash NULL
|
|
+_000425_hash printer_req_alloc 2 62687 _001807_hash NULL nohasharray
|
|
+_000426_hash printer_write 3 60276 _000426_hash NULL
|
|
+_000427_hash prism2_set_genericelement 3 29277 _000427_hash NULL
|
|
+_000428_hash __probe_kernel_read 3 61119 _000428_hash NULL
|
|
+_000429_hash __probe_kernel_write 3 29842 _000429_hash NULL
|
|
+_000430_hash proc_coredump_filter_write 3 25625 _000430_hash NULL
|
|
+_000431_hash _proc_do_string 2 6376 _000431_hash NULL
|
|
+_000432_hash process_vm_rw_pages 5-6 15954 _000432_hash NULL
|
|
+_000434_hash proc_loginuid_write 3 63648 _000434_hash NULL
|
|
+_000435_hash proc_pid_attr_write 3 63845 _000435_hash NULL
|
|
+_000436_hash proc_scsi_devinfo_write 3 32064 _000436_hash NULL
|
|
+_000437_hash proc_scsi_write 3 29142 _000437_hash NULL
|
|
+_000438_hash proc_scsi_write_proc 3 267 _000438_hash NULL
|
|
+_000439_hash pstore_mkfile 5 50830 _000439_hash NULL
|
|
+_000440_hash pti_char_write 3 60960 _000440_hash NULL
|
|
+_000441_hash ptrace_writedata 4 45021 _000441_hash NULL
|
|
+_000442_hash pt_write 3 40159 _000442_hash NULL
|
|
+_000443_hash pvr2_ioread_set_sync_key 3 59882 _000443_hash NULL
|
|
+_000444_hash pvr2_stream_buffer_count 2 33719 _000444_hash NULL
|
|
+_000445_hash qdisc_class_hash_alloc 1 18262 _000445_hash NULL
|
|
+_000446_hash r3964_write 4 57662 _000446_hash NULL
|
|
+_000447_hash raw_seticmpfilter 3 6888 _000447_hash NULL
|
|
+_000448_hash raw_setsockopt 5 45800 _000448_hash NULL
|
|
+_000449_hash rawv6_seticmpfilter 5 12137 _000449_hash NULL
|
|
+_000450_hash ray_cs_essid_proc_write 3 17875 _000450_hash NULL
|
|
+_000451_hash rbd_add 3 16366 _000451_hash NULL
|
|
+_000452_hash rbd_snap_add 4 19678 _000452_hash NULL
|
|
+_000453_hash rdma_set_ib_paths 3 45592 _000453_hash NULL
|
|
+_000454_hash rds_page_copy_user 4 35691 _000454_hash NULL
|
|
+_000455_hash read 3 9397 _000455_hash NULL
|
|
+_000456_hash read_buf 2 20469 _000456_hash NULL
|
|
+_000457_hash read_cis_cache 4 29735 _000457_hash NULL
|
|
+_000458_hash realloc_buffer 2 25816 _000458_hash NULL
|
|
+_000459_hash realloc_packet_buffer 2 25569 _000459_hash NULL
|
|
+_000460_hash receive_DataRequest 3 9904 _000460_hash NULL
|
|
+_000461_hash recent_mt_proc_write 3 8206 _000461_hash NULL
|
|
+_000462_hash regmap_access_read_file 3 37223 _000462_hash NULL
|
|
+_000463_hash regmap_bulk_write 4 59049 _000463_hash NULL
|
|
+_000464_hash regmap_map_read_file 3 37685 _000464_hash NULL
|
|
+_000465_hash regset_tls_set 4 18459 _000465_hash NULL
|
|
+_000466_hash reg_w_buf 3 27724 _000466_hash NULL
|
|
+_000467_hash reg_w_ixbuf 4 34736 _000467_hash NULL
|
|
+_000468_hash remote_settings_file_write 3 22987 _000468_hash NULL
|
|
+_000469_hash request_key_auth_new 3 38092 _000469_hash NULL
|
|
+_000470_hash restore_i387_fxsave 2 17528 _000470_hash NULL
|
|
+_000471_hash revalidate 2 19043 _000471_hash NULL
|
|
+_000472_hash rfcomm_sock_setsockopt 5 18254 _000472_hash NULL
|
|
+_000473_hash rndis_add_response 2 58544 _000473_hash NULL
|
|
+_000474_hash rndis_set_oid 4 6547 _000474_hash NULL
|
|
+_000475_hash rngapi_reset 3 34366 _002911_hash NULL nohasharray
|
|
+_000476_hash roccat_common_receive 4 53407 _000476_hash NULL
|
|
+_000477_hash roccat_common_send 4 12284 _000477_hash NULL
|
|
+_000478_hash rpc_malloc 2 43573 _000478_hash NULL
|
|
+_000479_hash rt2x00debug_write_bbp 3 8212 _000479_hash NULL
|
|
+_000480_hash rt2x00debug_write_csr 3 64753 _000480_hash NULL
|
|
+_000481_hash rt2x00debug_write_eeprom 3 23091 _000481_hash NULL
|
|
+_000482_hash rt2x00debug_write_rf 3 38195 _000482_hash NULL
|
|
+_000483_hash rts51x_read_mem 4 26577 _000483_hash NULL
|
|
+_000484_hash rts51x_read_status 4 11830 _000484_hash NULL
|
|
+_000485_hash rts51x_write_mem 4 17598 _000485_hash NULL
|
|
+_000486_hash rw_copy_check_uvector 3 34271 _000486_hash NULL
|
|
+_000487_hash rxrpc_request_key 3 27235 _000487_hash NULL
|
|
+_000488_hash rxrpc_server_keyring 3 16431 _000488_hash NULL
|
|
+_000489_hash savemem 3 58129 _000489_hash NULL
|
|
+_000490_hash sb16_copy_from_user 10-7-6 55836 _000490_hash NULL
|
|
+_000493_hash sched_autogroup_write 3 10984 _000493_hash NULL
|
|
+_000494_hash scsi_mode_select 6 37330 _000494_hash NULL
|
|
+_000495_hash scsi_tgt_copy_sense 3 26933 _000495_hash NULL
|
|
+_000496_hash sctp_auth_create_key 1 51641 _000496_hash NULL
|
|
+_000497_hash sctp_getsockopt_delayed_ack 2 9232 _000497_hash NULL
|
|
+_000498_hash sctp_getsockopt_local_addrs 2 25178 _000498_hash NULL
|
|
+_000499_hash sctp_make_abort_user 3 29654 _000499_hash NULL
|
|
+_000500_hash sctp_setsockopt_active_key 3 43755 _000500_hash NULL
|
|
+_000501_hash sctp_setsockopt_adaptation_layer 3 26935 _001925_hash NULL nohasharray
|
|
+_000502_hash sctp_setsockopt_associnfo 3 51684 _000502_hash NULL
|
|
+_000503_hash sctp_setsockopt_auth_chunk 3 30843 _000503_hash NULL
|
|
+_000504_hash sctp_setsockopt_auth_key 3 3793 _000504_hash NULL
|
|
+_000505_hash sctp_setsockopt_autoclose 3 5775 _000505_hash NULL
|
|
+_000506_hash sctp_setsockopt_bindx 3 49870 _000506_hash NULL
|
|
+_000507_hash __sctp_setsockopt_connectx 3 46949 _000507_hash NULL
|
|
+_000508_hash sctp_setsockopt_context 3 31091 _000508_hash NULL
|
|
+_000509_hash sctp_setsockopt_default_send_param 3 49578 _000509_hash &_000247_hash
|
|
+_000510_hash sctp_setsockopt_delayed_ack 3 40129 _000510_hash NULL
|
|
+_000511_hash sctp_setsockopt_del_key 3 42304 _002281_hash NULL nohasharray
|
|
+_000512_hash sctp_setsockopt_events 3 18862 _000512_hash NULL
|
|
+_000513_hash sctp_setsockopt_hmac_ident 3 11687 _000513_hash NULL
|
|
+_000514_hash sctp_setsockopt_initmsg 3 1383 _000514_hash NULL
|
|
+_000515_hash sctp_setsockopt_maxburst 3 28041 _000515_hash NULL
|
|
+_000516_hash sctp_setsockopt_maxseg 3 11829 _000516_hash NULL
|
|
+_000517_hash sctp_setsockopt_peer_addr_params 3 734 _000517_hash NULL
|
|
+_000518_hash sctp_setsockopt_peer_primary_addr 3 13440 _000518_hash NULL
|
|
+_000519_hash sctp_setsockopt_rtoinfo 3 30941 _000519_hash NULL
|
|
+_000520_hash security_context_to_sid_core 2 29248 _000520_hash NULL
|
|
+_000521_hash sel_commit_bools_write 3 46077 _000521_hash NULL
|
|
+_000522_hash sel_write_avc_cache_threshold 3 2256 _000522_hash NULL
|
|
+_000523_hash sel_write_bool 3 46996 _000523_hash NULL
|
|
+_000524_hash sel_write_checkreqprot 3 60774 _000524_hash NULL
|
|
+_000525_hash sel_write_disable 3 10511 _000525_hash NULL
|
|
+_000526_hash sel_write_enforce 3 48998 _000526_hash NULL
|
|
+_000527_hash sel_write_load 3 63830 _000527_hash NULL
|
|
+_000528_hash send_bulk_static_data 3 61932 _000528_hash NULL
|
|
+_000529_hash send_control_msg 6 48498 _000529_hash NULL
|
|
+_000530_hash set_aoe_iflist 2 42737 _000530_hash NULL
|
|
+_000531_hash setkey_unaligned 3 39474 _000531_hash NULL
|
|
+_000532_hash set_registers 3 53582 _000532_hash NULL
|
|
+_000533_hash setsockopt 5 54539 _000533_hash NULL
|
|
+_000534_hash setup_req 3 5848 _000534_hash NULL
|
|
+_000535_hash setup_window 7 59178 _000535_hash NULL
|
|
+_000536_hash setxattr 4 37006 _000536_hash NULL
|
|
+_000537_hash sfq_alloc 1 2861 _000537_hash NULL
|
|
+_000538_hash sg_kmalloc 1 50240 _000538_hash NULL
|
|
+_000539_hash sgl_map_user_pages 2 30610 _000539_hash NULL
|
|
+_000540_hash shash_setkey_unaligned 3 8620 _000540_hash NULL
|
|
+_000541_hash shmem_xattr_alloc 2 61190 _000541_hash NULL
|
|
+_000542_hash sierra_setup_urb 5 46029 _000542_hash NULL
|
|
+_000543_hash simple_transaction_get 3 50633 _000543_hash NULL
|
|
+_000544_hash simple_write_to_buffer 2-5 3122 _000544_hash NULL
|
|
+_000546_hash sisusb_send_bulk_msg 3 17864 _000546_hash NULL
|
|
+_000547_hash skb_add_data 3 48363 _000547_hash NULL
|
|
+_000548_hash skb_do_copy_data_nocache 5 12465 _000548_hash NULL
|
|
+_000549_hash sl_alloc_bufs 2 50380 _000549_hash NULL
|
|
+_000550_hash sl_realloc_bufs 2 64086 _000550_hash NULL
|
|
+_000551_hash smk_write_ambient 3 45691 _000551_hash NULL
|
|
+_000552_hash smk_write_cipso 3 17989 _000552_hash NULL
|
|
+_000553_hash smk_write_direct 3 46363 _000553_hash NULL
|
|
+_000554_hash smk_write_doi 3 49621 _000554_hash NULL
|
|
+_000555_hash smk_write_load_list 3 52280 _000555_hash NULL
|
|
+_000556_hash smk_write_logging 3 2618 _000556_hash NULL
|
|
+_000557_hash smk_write_netlbladdr 3 42525 _000557_hash NULL
|
|
+_000558_hash smk_write_onlycap 3 14400 _000558_hash NULL
|
|
+_000559_hash snd_ctl_elem_user_tlv 3 11695 _000559_hash NULL
|
|
+_000560_hash snd_emu10k1_fx8010_read 5 9605 _000560_hash NULL
|
|
+_000561_hash snd_emu10k1_synth_copy_from_user 3-5 9061 _000561_hash NULL
|
|
+_000563_hash snd_gus_dram_poke 4 18525 _000563_hash NULL
|
|
+_000564_hash snd_hdsp_playback_copy 5 20676 _000564_hash NULL
|
|
+_000565_hash snd_info_entry_write 3 63474 _000565_hash NULL
|
|
+_000566_hash snd_korg1212_copy_from 6 36169 _000566_hash NULL
|
|
+_000567_hash snd_mem_proc_write 3 9786 _000567_hash NULL
|
|
+_000568_hash snd_midi_channel_init_set 1 30092 _000568_hash NULL
|
|
+_000569_hash snd_midi_event_new 1 9893 _000750_hash NULL nohasharray
|
|
+_000570_hash snd_opl4_mem_proc_write 5 9670 _000570_hash NULL
|
|
+_000571_hash snd_pcm_aio_read 3 13900 _000571_hash NULL
|
|
+_000572_hash snd_pcm_aio_write 3 28738 _000572_hash NULL
|
|
+_000573_hash snd_pcm_oss_write1 3 10872 _000573_hash NULL
|
|
+_000574_hash snd_pcm_oss_write2 3 27332 _000574_hash NULL
|
|
+_000575_hash snd_rawmidi_kernel_write1 4 56847 _000575_hash NULL
|
|
+_000576_hash snd_rme9652_playback_copy 5 20970 _000576_hash NULL
|
|
+_000577_hash snd_sb_csp_load_user 3 45190 _000577_hash NULL
|
|
+_000578_hash snd_usb_ctl_msg 8 8436 _000578_hash NULL
|
|
+_000579_hash sock_bindtodevice 3 50942 _000579_hash NULL
|
|
+_000580_hash sock_kmalloc 2 62205 _000580_hash NULL
|
|
+_000581_hash spidev_write 3 44510 _000581_hash NULL
|
|
+_000582_hash squashfs_read_table 3 16945 _000582_hash NULL
|
|
+_000583_hash srpt_alloc_ioctx 2-3 51042 _000583_hash NULL
|
|
+_000585_hash srpt_alloc_ioctx_ring 2 49330 _000585_hash NULL
|
|
+_000586_hash st5481_setup_isocpipes 6-4 61340 _000586_hash NULL
|
|
+_000587_hash sta_agg_status_write 3 45164 _000587_hash NULL
|
|
+_000588_hash svc_setsockopt 5 36876 _000588_hash NULL
|
|
+_000589_hash sys_add_key 4 61288 _000589_hash NULL
|
|
+_000590_hash sys_modify_ldt 3 18824 _000590_hash NULL
|
|
+_000591_hash sys_semtimedop 3 4486 _000591_hash NULL
|
|
+_000592_hash sys_setdomainname 2 4373 _000592_hash NULL
|
|
+_000593_hash sys_sethostname 2 42962 _000593_hash NULL
|
|
+_000594_hash tda10048_writeregbulk 4 11050 _000594_hash NULL
|
|
+_000595_hash tipc_log_resize 1 34803 _000595_hash NULL
|
|
+_000596_hash tomoyo_write_self 3 45161 _000596_hash NULL
|
|
+_000597_hash tower_write 3 8580 _000597_hash NULL
|
|
+_000598_hash tpm_write 3 50798 _000598_hash NULL
|
|
+_000599_hash trusted_instantiate 3 4710 _000599_hash NULL
|
|
+_000600_hash trusted_update 3 12664 _000600_hash NULL
|
|
+_000601_hash tt_changes_fill_buffer 3 62649 _000601_hash NULL
|
|
+_000602_hash tty_buffer_alloc 2 45437 _000602_hash NULL
|
|
+_000603_hash __tun_chr_ioctl 4 22300 _000603_hash NULL
|
|
+_000604_hash ubi_more_leb_change_data 4 63534 _000604_hash NULL
|
|
+_000605_hash ubi_more_update_data 4 39189 _000605_hash NULL
|
|
+_000606_hash ubi_resize_volume 2 50172 _000606_hash NULL
|
|
+_000607_hash udf_alloc_i_data 2 35786 _000607_hash NULL
|
|
+_000608_hash uea_idma_write 3 64139 _000608_hash NULL
|
|
+_000609_hash uea_request 4 47613 _000609_hash NULL
|
|
+_000610_hash uea_send_modem_cmd 3 3888 _000610_hash NULL
|
|
+_000611_hash uio_write 3 43202 _000611_hash NULL
|
|
+_000612_hash um_idi_write 3 18293 _000612_hash NULL
|
|
+_000613_hash us122l_ctl_msg 8 13330 _000613_hash NULL
|
|
+_000614_hash usb_alloc_urb 1 43436 _000614_hash NULL
|
|
+_000615_hash usblp_new_writeurb 2 22894 _000615_hash NULL
|
|
+_000616_hash usblp_write 3 23178 _000616_hash NULL
|
|
+_000617_hash usbtest_alloc_urb 3-5 34446 _000617_hash NULL
|
|
+_000619_hash usbtmc_write 3 64340 _000619_hash NULL
|
|
+_000620_hash user_instantiate 3 26131 _000620_hash NULL
|
|
+_000621_hash user_update 3 41332 _000621_hash NULL
|
|
+_000622_hash uvc_simplify_fraction 3 31303 _000622_hash NULL
|
|
+_000623_hash uwb_rc_cmd_done 4 35892 _000623_hash NULL
|
|
+_000624_hash uwb_rc_neh_grok_event 3 55799 _000624_hash NULL
|
|
+_000625_hash v9fs_alloc_rdir_buf 2 42150 _000625_hash NULL
|
|
+_000626_hash __vb2_perform_fileio 3 63033 _000626_hash NULL
|
|
+_000627_hash vc_do_resize 3-4 48842 _000627_hash NULL
|
|
+_000629_hash vcs_write 3 3910 _000629_hash NULL
|
|
+_000630_hash vfd_write 3 14717 _000630_hash NULL
|
|
+_000631_hash vga_arb_write 3 36112 _000631_hash NULL
|
|
+_000632_hash vga_switcheroo_debugfs_write 3 33984 _000632_hash NULL
|
|
+_000633_hash vhci_get_user 3 45039 _000633_hash NULL
|
|
+_000634_hash video_proc_write 3 6724 _000634_hash NULL
|
|
+_000635_hash vlsi_alloc_ring 3-4 57003 _000635_hash NULL
|
|
+_000637_hash __vmalloc 1 61168 _000637_hash NULL
|
|
+_000638_hash vmalloc_32 1 1135 _000638_hash NULL
|
|
+_000639_hash vmalloc_32_user 1 37519 _000639_hash NULL
|
|
+_000640_hash vmalloc_exec 1 36132 _000640_hash NULL
|
|
+_000641_hash vmalloc_node 1 58700 _000641_hash NULL
|
|
+_000642_hash __vmalloc_node_flags 1 30352 _000642_hash NULL
|
|
+_000643_hash vmalloc_user 1 32308 _000643_hash NULL
|
|
+_000644_hash vol_cdev_direct_write 3 20751 _000644_hash NULL
|
|
+_000645_hash vp_request_msix_vectors 2 28849 _000645_hash NULL
|
|
+_000646_hash vring_add_indirect 3-4 20737 _000646_hash NULL
|
|
+_000648_hash vring_new_virtqueue 1 9671 _000648_hash NULL
|
|
+_000649_hash vxge_os_dma_malloc 2 46184 _000649_hash NULL
|
|
+_000650_hash vxge_os_dma_malloc_async 3 56348 _000650_hash NULL
|
|
+_000651_hash wdm_write 3 53735 _000651_hash NULL
|
|
+_000652_hash wiimote_hid_send 3 48528 _000652_hash NULL
|
|
+_000653_hash wl1273_fm_fops_write 3 60621 _000653_hash NULL
|
|
+_000654_hash wlc_phy_loadsampletable_nphy 3 64367 _000654_hash NULL
|
|
+_000655_hash write 3 62671 _000655_hash NULL
|
|
+_000656_hash write_flush 3 50803 _000656_hash NULL
|
|
+_000657_hash write_rio 3 54837 _000657_hash NULL
|
|
+_000658_hash x25_asy_change_mtu 2 26928 _000658_hash NULL
|
|
+_000659_hash xdi_copy_from_user 4 8395 _000659_hash NULL
|
|
+_000660_hash xfrm_dst_alloc_copy 3 3034 _000660_hash NULL
|
|
+_000661_hash xfrm_user_policy 4 62573 _000661_hash NULL
|
|
+_000662_hash xfs_attrmulti_attr_set 4 59346 _000662_hash NULL
|
|
+_000663_hash xfs_handle_to_dentry 3 12135 _000663_hash NULL
|
|
+_000664_hash __xip_file_write 3 2733 _000664_hash NULL
|
|
+_000665_hash xprt_rdma_allocate 2 31372 _000665_hash NULL
|
|
+_000666_hash zd_usb_iowrite16v_async 3 23984 _000666_hash NULL
|
|
+_000667_hash zd_usb_read_fw 4 22049 _000667_hash NULL
|
|
+_000668_hash zerocopy_sg_from_iovec 3 11828 _000668_hash NULL
|
|
+_000669_hash zoran_write 3 22404 _000669_hash NULL
|
|
+_000671_hash acpi_ex_allocate_name_string 2 7685 _002855_hash NULL nohasharray
|
|
+_000672_hash acpi_os_allocate_zeroed 1 37422 _000672_hash NULL
|
|
+_000673_hash acpi_ut_initialize_buffer 2 47143 _002314_hash NULL nohasharray
|
|
+_000674_hash ad7879_spi_xfer 3 36311 _000674_hash NULL
|
|
+_000675_hash add_new_gdb 3 27643 _000675_hash NULL
|
|
+_000676_hash add_numbered_child 5 14273 _000676_hash NULL
|
|
+_000677_hash add_res_range 4 21310 _000677_hash NULL
|
|
+_000678_hash addtgt 3 54703 _000678_hash NULL
|
|
+_000679_hash add_uuid 4 49831 _000679_hash NULL
|
|
+_000680_hash afs_cell_alloc 2 24052 _000680_hash NULL
|
|
+_000681_hash aggr_recv_addba_req_evt 4 38037 _000681_hash NULL
|
|
+_000682_hash agp_create_memory 1 1075 _000682_hash NULL
|
|
+_000683_hash agp_create_user_memory 1 62955 _000683_hash NULL
|
|
+_000684_hash alg_setsockopt 5 20985 _000684_hash NULL
|
|
+_000685_hash alloc_async 1 14208 _000685_hash NULL
|
|
+_000686_hash ___alloc_bootmem_nopanic 1 53626 _000686_hash NULL
|
|
+_000687_hash alloc_buf 1 34532 _000687_hash NULL
|
|
+_000688_hash alloc_chunk 1 49575 _000688_hash NULL
|
|
+_000689_hash alloc_context 1 41283 _000689_hash NULL
|
|
+_000690_hash alloc_ctrl_packet 1 44667 _000690_hash NULL
|
|
+_000691_hash alloc_data_packet 1 46698 _000691_hash NULL
|
|
+_000692_hash alloc_dca_provider 2 59670 _000692_hash NULL
|
|
+_000693_hash __alloc_dev_table 2 54343 _000693_hash NULL
|
|
+_000694_hash alloc_ep 1 17269 _000694_hash NULL
|
|
+_000695_hash __alloc_extent_buffer 3 15093 _000695_hash NULL
|
|
+_000696_hash alloc_group_attrs 2 9194 _000719_hash NULL nohasharray
|
|
+_000697_hash alloc_large_system_hash 2 64490 _000697_hash NULL
|
|
+_000698_hash alloc_netdev_mqs 1 30030 _000698_hash NULL
|
|
+_000699_hash __alloc_objio_seg 1 7203 _000699_hash NULL
|
|
+_000700_hash alloc_ring 2-4 15345 _000700_hash NULL
|
|
+_000701_hash alloc_ring 2-4 39151 _000701_hash NULL
|
|
+_000704_hash alloc_session 1-2 64171 _000704_hash NULL
|
|
+_000708_hash alloc_smp_req 1 51337 _000708_hash NULL
|
|
+_000709_hash alloc_smp_resp 1 3566 _000709_hash NULL
|
|
+_000710_hash alloc_ts_config 1 45775 _000710_hash NULL
|
|
+_000711_hash alloc_upcall 2 62186 _000711_hash NULL
|
|
+_000712_hash altera_drscan 2 48698 _000712_hash NULL
|
|
+_000713_hash altera_irscan 2 62396 _000713_hash NULL
|
|
+_000714_hash altera_set_dr_post 2 54291 _000714_hash NULL
|
|
+_000715_hash altera_set_dr_pre 2 64862 _000715_hash NULL
|
|
+_000716_hash altera_set_ir_post 2 20948 _000716_hash NULL
|
|
+_000717_hash altera_set_ir_pre 2 54103 _000717_hash NULL
|
|
+_000718_hash altera_swap_dr 2 50090 _000718_hash NULL
|
|
+_000719_hash altera_swap_ir 2 9194 _000719_hash &_000696_hash
|
|
+_000720_hash amd_create_gatt_pages 1 20537 _000720_hash NULL
|
|
+_000721_hash aoechr_write 3 62883 _001352_hash NULL nohasharray
|
|
+_000722_hash applesmc_create_nodes 2 49392 _000722_hash NULL
|
|
+_000723_hash array_zalloc 1-2 7519 _000723_hash NULL
|
|
+_000725_hash arvo_sysfs_read 6 31617 _000725_hash NULL
|
|
+_000726_hash arvo_sysfs_write 6 3311 _000726_hash NULL
|
|
+_000727_hash asd_store_update_bios 4 10165 _000727_hash NULL
|
|
+_000728_hash ata_host_alloc 2 46094 _000728_hash NULL
|
|
+_000729_hash atalk_sendmsg 4 21677 _000729_hash NULL
|
|
+_000730_hash ath6kl_cfg80211_connect_event 7-9-8 13443 _000730_hash NULL
|
|
+_000731_hash ath6kl_mgmt_tx 9 21153 _000731_hash NULL
|
|
+_000732_hash ath6kl_wmi_roam_tbl_event_rx 3 43440 _000732_hash NULL
|
|
+_000733_hash ath6kl_wmi_send_mgmt_cmd 7 17347 _000733_hash NULL
|
|
+_000734_hash ath_descdma_setup 5 12257 _000734_hash NULL
|
|
+_000735_hash ath_rx_edma_init 2 65483 _000735_hash NULL
|
|
+_000736_hash ati_create_gatt_pages 1 4722 _003142_hash NULL nohasharray
|
|
+_000737_hash au0828_init_isoc 2-3 61917 _000737_hash NULL
|
|
+_000739_hash audit_init_entry 1 38644 _000739_hash NULL
|
|
+_000740_hash ax25_sendmsg 4 62770 _000740_hash NULL
|
|
+_000741_hash b1_alloc_card 1 36155 _000741_hash NULL
|
|
+_000742_hash b43_nphy_load_samples 3 36481 _000742_hash NULL
|
|
+_000743_hash bio_copy_user_iov 4 37660 _000743_hash NULL
|
|
+_000744_hash __bio_map_kern 2-3 47379 _000744_hash NULL
|
|
+_000746_hash blk_register_region 1-2 51424 _000746_hash NULL
|
|
+_000748_hash bm_entry_write 3 28338 _000748_hash NULL
|
|
+_000749_hash bm_realloc_pages 2 9431 _000749_hash NULL
|
|
+_000750_hash bm_register_write 3 9893 _000750_hash &_000569_hash
|
|
+_000751_hash bm_status_write 3 12964 _000751_hash NULL
|
|
+_000752_hash br_mdb_rehash 2 42643 _000752_hash NULL
|
|
+_000753_hash btrfs_copy_from_user 3 43806 _000753_hash NULL
|
|
+_000754_hash btrfs_insert_delayed_dir_index 4 63720 _000754_hash NULL
|
|
+_000755_hash __btrfs_map_block 3 49839 _000755_hash NULL
|
|
+_000756_hash __c4iw_init_resource_fifo 3 8334 _000756_hash NULL
|
|
+_000757_hash cache_downcall 3 13666 _000757_hash NULL
|
|
+_000758_hash cache_slow_downcall 2 8570 _000758_hash NULL
|
|
+_000759_hash ca_extend 2 64541 _000759_hash NULL
|
|
+_000760_hash caif_seqpkt_sendmsg 4 22961 _000760_hash NULL
|
|
+_000761_hash caif_stream_sendmsg 4 9110 _000761_hash NULL
|
|
+_000762_hash carl9170_cmd_buf 3 950 _000762_hash NULL
|
|
+_000763_hash cdev_add 2-3 38176 _000763_hash NULL
|
|
+_000765_hash cdrom_read_cdda 4 50478 _000765_hash NULL
|
|
+_000766_hash ceph_dns_resolve_name 1 62488 _000766_hash NULL
|
|
+_000767_hash ceph_msgpool_get 2 54258 _000767_hash NULL
|
|
+_000768_hash cfg80211_connect_result 4-6 56515 _000768_hash NULL
|
|
+_000770_hash cfg80211_disconnected 4 57 _000770_hash NULL
|
|
+_000771_hash cfg80211_inform_bss 8 19332 _000771_hash NULL
|
|
+_000772_hash cfg80211_inform_bss_frame 4 41078 _000772_hash NULL
|
|
+_000773_hash cfg80211_mlme_register_mgmt 5 19852 _000773_hash NULL
|
|
+_000774_hash cfg80211_roamed_bss 4-6 50198 _000774_hash NULL
|
|
+_000776_hash cifs_readdata_alloc 1 50318 _000776_hash NULL
|
|
+_000777_hash cifs_readv_from_socket 3 19109 _000777_hash NULL
|
|
+_000778_hash cifs_writedata_alloc 1 32880 _003176_hash NULL nohasharray
|
|
+_000779_hash cnic_alloc_dma 3 34641 _000779_hash NULL
|
|
+_000780_hash configfs_write_file 3 61621 _000780_hash NULL
|
|
+_000781_hash construct_key 3 11329 _000781_hash NULL
|
|
+_000782_hash context_alloc 3 24645 _000782_hash NULL
|
|
+_000783_hash copy_to_user 3 57835 _000783_hash NULL
|
|
+_000784_hash create_attr_set 1 22861 _000784_hash NULL
|
|
+_000785_hash create_bounce_buffer 3 39155 _000785_hash NULL
|
|
+_000786_hash create_gpadl_header 2 19064 _000786_hash NULL
|
|
+_000787_hash _create_sg_bios 4 31244 _000787_hash NULL
|
|
+_000788_hash cryptd_alloc_instance 2-3 18048 _000788_hash NULL
|
|
+_000790_hash crypto_ahash_setkey 3 55134 _000790_hash NULL
|
|
+_000791_hash crypto_alloc_instance2 3 25277 _000791_hash NULL
|
|
+_000792_hash crypto_shash_setkey 3 60483 _000792_hash NULL
|
|
+_000793_hash cx231xx_init_bulk 3-2 47024 _000793_hash NULL
|
|
+_000794_hash cx231xx_init_isoc 2-3 56453 _000794_hash NULL
|
|
+_000796_hash cx231xx_init_vbi_isoc 2-3 28053 _000796_hash NULL
|
|
+_000798_hash cxgb_alloc_mem 1 24007 _000798_hash NULL
|
|
+_000799_hash cxgbi_device_portmap_create 3 25747 _000799_hash NULL
|
|
+_000800_hash cxgbi_device_register 1-2 36746 _000800_hash NULL
|
|
+_000802_hash __cxio_init_resource_fifo 3 23447 _000802_hash NULL
|
|
+_000803_hash dccp_sendmsg 4 56058 _000803_hash NULL
|
|
+_000804_hash ddp_make_gl 1 12179 _000804_hash NULL
|
|
+_000805_hash depth_write 3 3021 _000805_hash NULL
|
|
+_000806_hash dev_irnet_write 3 11398 _000806_hash NULL
|
|
+_000807_hash dev_set_alias 3 50084 _000807_hash NULL
|
|
+_000808_hash dev_write 3 7708 _000808_hash NULL
|
|
+_000809_hash dfs_global_file_write 3 6112 _000809_hash NULL
|
|
+_000810_hash dgram_sendmsg 4 45679 _000810_hash NULL
|
|
+_000811_hash disconnect 4 32521 _000811_hash NULL
|
|
+_000812_hash dma_attach 6-7 50831 _000812_hash NULL
|
|
+_000814_hash dn_sendmsg 4 38390 _000814_hash NULL
|
|
+_000815_hash do_dccp_setsockopt 5 54377 _000815_hash NULL
|
|
+_000816_hash do_jffs2_setxattr 5 25910 _000816_hash NULL
|
|
+_000817_hash do_msgsnd 4 1387 _000817_hash NULL
|
|
+_000818_hash do_raw_setsockopt 5 55215 _000818_hash NULL
|
|
+_000819_hash do_readv_writev 4 51849 _000819_hash NULL
|
|
+_000820_hash do_sync 1 9604 _000820_hash NULL
|
|
+_000821_hash dup_array 3 33551 _000821_hash NULL
|
|
+_000822_hash dvb_audio_write 3 51275 _000822_hash NULL
|
|
+_000823_hash dvb_ca_en50221_init 4 45718 _000823_hash NULL
|
|
+_000824_hash dvb_video_write 3 754 _000824_hash NULL
|
|
+_000825_hash econet_sendmsg 4 51430 _000825_hash NULL
|
|
+_000826_hash ecryptfs_decode_and_decrypt_filename 5 10379 _000826_hash NULL
|
|
+_000827_hash ecryptfs_encrypt_and_encode_filename 6 2109 _000827_hash NULL
|
|
+_000828_hash ecryptfs_send_message_locked 2 31801 _000828_hash NULL
|
|
+_000829_hash edac_device_alloc_ctl_info 1 5941 _000829_hash NULL
|
|
+_000830_hash edac_mc_alloc 1 54846 _000830_hash NULL
|
|
+_000831_hash edac_pci_alloc_ctl_info 1 63388 _000831_hash NULL
|
|
+_000832_hash efivar_create_sysfs_entry 2 19485 _000832_hash NULL
|
|
+_000833_hash em28xx_alloc_isoc 4 46892 _000833_hash NULL
|
|
+_000834_hash enable_write 3 30456 _000834_hash NULL
|
|
+_000835_hash enclosure_register 3 57412 _000835_hash NULL
|
|
+_000836_hash ext4_kvzalloc 1 47605 _000836_hash NULL
|
|
+_000837_hash extend_netdev_table 2 31680 _000837_hash NULL
|
|
+_000838_hash __feat_register_sp 6 64712 _000838_hash NULL
|
|
+_000839_hash __ffs_ep0_read_events 3 48868 _000839_hash NULL
|
|
+_000840_hash ffs_ep0_write 3 9438 _000840_hash NULL
|
|
+_000841_hash ffs_epfile_read 3 18775 _000841_hash NULL
|
|
+_000842_hash ffs_epfile_write 3 48014 _000842_hash NULL
|
|
+_000843_hash fib_info_hash_alloc 1 9075 _000843_hash NULL
|
|
+_000844_hash fillonedir 3 41746 _000844_hash NULL
|
|
+_000845_hash flexcop_device_kmalloc 1 54793 _000845_hash NULL
|
|
+_000846_hash frame_alloc 4 15981 _000846_hash NULL
|
|
+_000847_hash fw_node_create 2 9559 _000847_hash NULL
|
|
+_000848_hash garmin_read_process 3 27509 _000848_hash NULL
|
|
+_000849_hash garp_request_join 4 7471 _000849_hash NULL
|
|
+_000850_hash get_derived_key 4 61100 _000850_hash NULL
|
|
+_000851_hash get_entry 4 16003 _000851_hash NULL
|
|
+_000852_hash get_free_de 2 33714 _000852_hash NULL
|
|
+_000853_hash get_new_cssid 2 51665 _000853_hash NULL
|
|
+_000854_hash getxattr 4 24398 _000854_hash NULL
|
|
+_000855_hash gspca_dev_probe2 4 59833 _000855_hash NULL
|
|
+_000856_hash hcd_alloc_coherent 5 55862 _000856_hash NULL
|
|
+_000857_hash hci_sock_sendmsg 4 37420 _000857_hash NULL
|
|
+_000858_hash hid_register_field 2-3 4874 _000858_hash NULL
|
|
+_000860_hash hid_report_raw_event 4 7024 _000860_hash NULL
|
|
+_000861_hash hpi_alloc_control_cache 1 35351 _000861_hash NULL
|
|
+_000862_hash hugetlbfs_read_actor 2-5-4 34547 _000862_hash NULL
|
|
+_000865_hash hvc_alloc 4 12579 _000865_hash NULL
|
|
+_000866_hash __hwahc_dev_set_key 5 46328 _000866_hash NULL
|
|
+_000867_hash i2400m_zrealloc_2x 3 54166 _001430_hash NULL nohasharray
|
|
+_000868_hash ib_alloc_device 1 26483 _000868_hash NULL
|
|
+_000869_hash ib_create_send_mad 5 1196 _000869_hash NULL
|
|
+_000870_hash ibmasm_new_command 2 25714 _000870_hash NULL
|
|
+_000871_hash ib_send_cm_drep 3 50186 _000871_hash NULL
|
|
+_000872_hash ib_send_cm_mra 4 60202 _000872_hash NULL
|
|
+_000873_hash ib_send_cm_rtu 3 63138 _000873_hash NULL
|
|
+_000874_hash ieee80211_key_alloc 3 19065 _000874_hash NULL
|
|
+_000875_hash ieee80211_mgmt_tx 9 46860 _000875_hash NULL
|
|
+_000876_hash ieee80211_send_probe_req 6-4 6924 _000876_hash NULL
|
|
+_000877_hash if_writecmd 2 815 _000877_hash NULL
|
|
+_000878_hash init_bch 1-2 64130 _000878_hash NULL
|
|
+_000880_hash init_ipath 1 48187 _000880_hash NULL
|
|
+_000881_hash init_list_set 2-3 39188 _000881_hash NULL
|
|
+_000883_hash init_q 4 132 _000883_hash NULL
|
|
+_000884_hash init_state 2 60165 _000884_hash NULL
|
|
+_000885_hash init_tag_map 3 57515 _000885_hash NULL
|
|
+_000886_hash input_ff_create 2 21240 _000886_hash NULL
|
|
+_000887_hash input_mt_init_slots 2 31183 _000887_hash NULL
|
|
+_000888_hash interfaces 2 38859 _000888_hash NULL
|
|
+_000889_hash ioat2_alloc_ring 2 11172 _000889_hash NULL
|
|
+_000890_hash ip_generic_getfrag 3-4 12187 _000890_hash NULL
|
|
+_000892_hash ipr_alloc_ucode_buffer 1 40199 _000892_hash NULL
|
|
+_000893_hash ip_set_alloc 1 57953 _000893_hash NULL
|
|
+_000894_hash ipv6_flowlabel_opt 3 58135 _001125_hash NULL nohasharray
|
|
+_000895_hash ipv6_renew_options 5 28867 _000895_hash NULL
|
|
+_000896_hash ipxrtr_route_packet 4 54036 _000896_hash NULL
|
|
+_000897_hash irda_sendmsg 4 4388 _000897_hash NULL
|
|
+_000898_hash irda_sendmsg_dgram 4 38563 _000898_hash NULL
|
|
+_000899_hash irda_sendmsg_ultra 4 42047 _000899_hash NULL
|
|
+_000900_hash irias_add_octseq_attrib 4 29983 _000900_hash NULL
|
|
+_000901_hash irq_alloc_generic_chip 2 26650 _000901_hash NULL
|
|
+_000902_hash irq_domain_add_linear 2 29236 _000902_hash NULL
|
|
+_000903_hash iscsi_alloc_session 3 49390 _000903_hash NULL
|
|
+_000904_hash iscsi_create_conn 2 50425 _000904_hash NULL
|
|
+_000905_hash iscsi_create_endpoint 1 15193 _000905_hash NULL
|
|
+_000906_hash iscsi_create_iface 5 38510 _000906_hash NULL
|
|
+_000907_hash iscsi_decode_text_input 4 58292 _000907_hash NULL
|
|
+_000908_hash iscsi_pool_init 2-4 54913 _000908_hash NULL
|
|
+_000910_hash iscsit_dump_data_payload 2 38683 _000910_hash NULL
|
|
+_000911_hash isdn_write 3 45863 _000911_hash NULL
|
|
+_000912_hash isku_receive 4 54130 _000912_hash NULL
|
|
+_000913_hash isku_send 4 41542 _000913_hash NULL
|
|
+_000914_hash islpci_mgt_transaction 5 23610 _000914_hash NULL
|
|
+_000915_hash iso_sched_alloc 1 13377 _002079_hash NULL nohasharray
|
|
+_000916_hash ivtv_v4l2_write 3 39226 _000916_hash NULL
|
|
+_000917_hash iwl_trans_txq_alloc 3 36147 _000917_hash NULL
|
|
+_000918_hash iwmct_fw_parser_init 4 37876 _000918_hash NULL
|
|
+_000919_hash iwm_notif_send 6 12295 _000919_hash NULL
|
|
+_000920_hash iwm_ntf_calib_res 3 11686 _000920_hash NULL
|
|
+_000921_hash iwm_umac_set_config_var 4 17320 _000921_hash NULL
|
|
+_000922_hash ixgbe_alloc_q_vector 3-5 45428 _000922_hash NULL
|
|
+_000924_hash jbd2_journal_init_revoke 2 51088 _000924_hash NULL
|
|
+_000925_hash jffs2_write_dirent 5 37311 _000925_hash NULL
|
|
+_000926_hash journal_init_revoke 2 56933 _000926_hash NULL
|
|
+_000927_hash keyctl_instantiate_key 3 41855 _000927_hash NULL
|
|
+_000928_hash keyctl_instantiate_key_iov 3 16969 _000928_hash NULL
|
|
+_000929_hash __kfifo_from_user 3 20399 _000929_hash NULL
|
|
+_000930_hash kimage_crash_alloc 3 3233 _000930_hash NULL
|
|
+_000931_hash kimage_normal_alloc 3 31140 _000931_hash NULL
|
|
+_000932_hash kmem_realloc 2 37489 _000932_hash NULL
|
|
+_000933_hash kmem_zalloc 1 11510 _000933_hash NULL
|
|
+_000934_hash koneplus_send 4 18226 _000934_hash NULL
|
|
+_000935_hash koneplus_sysfs_read 6 42792 _000935_hash NULL
|
|
+_000936_hash kovaplus_send 4 10009 _000936_hash NULL
|
|
+_000937_hash kvm_read_guest_page_mmu 6 37611 _000937_hash NULL
|
|
+_000938_hash kvm_set_irq_routing 3 48704 _000938_hash NULL
|
|
+_000939_hash kvm_write_guest_cached 4 11106 _000939_hash NULL
|
|
+_000940_hash kvm_write_guest_page 5 63555 _002809_hash NULL nohasharray
|
|
+_000941_hash l2cap_skbuff_fromiovec 3-4 35003 _000941_hash NULL
|
|
+_000943_hash l2tp_ip_sendmsg 4 50411 _000943_hash NULL
|
|
+_000944_hash l2tp_session_create 1 25286 _000944_hash NULL
|
|
+_000945_hash lc_create 3 48662 _000945_hash NULL
|
|
+_000946_hash leaf_dealloc 3 29566 _000946_hash NULL
|
|
+_000947_hash linear_conf 2 23485 _000947_hash NULL
|
|
+_000948_hash lirc_buffer_init 2-3 53282 _000948_hash NULL
|
|
+_000950_hash llc_ui_sendmsg 4 24987 _000950_hash NULL
|
|
+_000951_hash lpfc_sli4_queue_alloc 3 62646 _000951_hash NULL
|
|
+_000952_hash mce_request_packet 3 1073 _000952_hash NULL
|
|
+_000953_hash mdiobus_alloc_size 1 52259 _000953_hash NULL
|
|
+_000954_hash media_entity_init 2-4 15870 _001556_hash NULL nohasharray
|
|
+_000956_hash memstick_alloc_host 1 142 _000956_hash NULL
|
|
+_000957_hash mesh_table_alloc 1 22305 _000957_hash NULL
|
|
+_000958_hash mfd_add_devices 4 56753 _000958_hash NULL
|
|
+_000959_hash mISDN_sock_sendmsg 4 41035 _000959_hash NULL
|
|
+_000960_hash mmc_alloc_host 1 48097 _000960_hash NULL
|
|
+_000961_hash mmc_test_alloc_mem 3 28102 _000961_hash NULL
|
|
+_000962_hash mpi_alloc 1 18094 _000962_hash NULL
|
|
+_000963_hash mpihelp_mul_karatsuba_case 5-3 23918 _000963_hash NULL
|
|
+_000964_hash mpihelp_mul_n 4 16405 _000964_hash NULL
|
|
+_000965_hash mpi_set_bit 2 15104 _000965_hash NULL
|
|
+_000966_hash mpi_set_highbit 2 37327 _001420_hash NULL nohasharray
|
|
+_000967_hash mtd_concat_create 2 14416 _000967_hash NULL
|
|
+_000968_hash mvumi_alloc_mem_resource 3 47750 _000968_hash NULL
|
|
+_000969_hash mwifiex_11n_create_rx_reorder_tbl 4 63806 _000969_hash NULL
|
|
+_000970_hash mwifiex_alloc_sdio_mpa_buffers 2-3 60961 _000970_hash NULL
|
|
+_000972_hash mwl8k_cmd_set_beacon 4 23110 _000972_hash NULL
|
|
+_000973_hash neigh_hash_alloc 1 17595 _000973_hash NULL
|
|
+_000974_hash netlink_sendmsg 4 33708 _001172_hash NULL nohasharray
|
|
+_000975_hash netxen_alloc_sds_rings 2 13417 _000975_hash NULL
|
|
+_000976_hash new_bind_ctl 2 35324 _000976_hash NULL
|
|
+_000977_hash new_dir 3 31919 _000977_hash NULL
|
|
+_000978_hash new_tape_buffer 2 32866 _000978_hash NULL
|
|
+_000979_hash nfc_llcp_build_tlv 3 19536 _000979_hash NULL
|
|
+_000980_hash nfc_llcp_send_i_frame 3 59130 _000980_hash NULL
|
|
+_000981_hash nfs4_alloc_slots 1 2454 _000981_hash NULL
|
|
+_000982_hash nfsctl_transaction_write 3 64800 _000982_hash NULL
|
|
+_000983_hash nfs_idmap_request_key 3 30208 _000983_hash NULL
|
|
+_000984_hash nfs_readdata_alloc 1 9990 _000984_hash NULL
|
|
+_000985_hash nfs_writedata_alloc 1 62868 _000985_hash NULL
|
|
+_000986_hash nl_pid_hash_zalloc 1 23314 _000986_hash NULL
|
|
+_000987_hash nr_sendmsg 4 53656 _000987_hash NULL
|
|
+_000988_hash nsm_create_handle 4 38060 _000988_hash NULL
|
|
+_000989_hash ntfs_copy_from_user_iovec 3-6 49829 _000989_hash NULL
|
|
+_000991_hash ntfs_file_buffered_write 4-6 41442 _000991_hash NULL
|
|
+_000993_hash __ntfs_malloc 1 34022 _000993_hash NULL
|
|
+_000994_hash nvme_alloc_queue 3 46865 _000994_hash NULL
|
|
+_000995_hash ocfs2_acl_from_xattr 2 21604 _000995_hash NULL
|
|
+_000996_hash ocfs2_control_message 3 19564 _000996_hash NULL
|
|
+_000997_hash opera1_usb_i2c_msgxfer 4 64521 _000997_hash NULL
|
|
+_000998_hash _ore_get_io_state 3 2166 _000998_hash NULL
|
|
+_000999_hash orig_hash_add_if 2 53676 _000999_hash NULL
|
|
+_001000_hash orig_hash_del_if 2 45080 _001000_hash NULL
|
|
+_001001_hash orinoco_set_key 5-7 17878 _001001_hash NULL
|
|
+_001003_hash osdmap_set_max_osd 2 57630 _001003_hash NULL
|
|
+_001004_hash _osd_realloc_seg 3 54352 _001004_hash NULL
|
|
+_001005_hash OSDSetBlock 2-4 38986 _001005_hash NULL
|
|
+_001007_hash osst_execute 7-6 17607 _001007_hash NULL
|
|
+_001008_hash osst_write 3 31581 _001008_hash NULL
|
|
+_001009_hash otp_read 2-5-4 10594 _001009_hash NULL
|
|
+_001012_hash ovs_vport_alloc 1 33475 _001012_hash NULL
|
|
+_001013_hash packet_sendmsg_spkt 4 28885 _001013_hash NULL
|
|
+_001014_hash pair_device 4 61175 _001708_hash NULL nohasharray
|
|
+_001015_hash pccard_store_cis 6 18176 _001015_hash NULL
|
|
+_001016_hash pci_add_cap_save_buffer 3 3426 _001016_hash NULL
|
|
+_001017_hash pcnet32_realloc_rx_ring 3 36598 _001017_hash NULL
|
|
+_001018_hash pcnet32_realloc_tx_ring 3 38428 _001018_hash NULL
|
|
+_001019_hash pcpu_mem_zalloc 1 22948 _001019_hash NULL
|
|
+_001020_hash pep_sendmsg 4 62524 _001020_hash NULL
|
|
+_001021_hash pfkey_sendmsg 4 47394 _001021_hash NULL
|
|
+_001022_hash pidlist_resize 2 496 _001022_hash NULL
|
|
+_001023_hash pin_code_reply 4 46510 _001023_hash NULL
|
|
+_001024_hash ping_getfrag 3-4 8360 _001024_hash NULL
|
|
+_001026_hash pipe_set_size 2 5204 _001026_hash NULL
|
|
+_001027_hash pkt_bio_alloc 1 48284 _001027_hash NULL
|
|
+_001028_hash platform_create_bundle 4-6 12785 _001028_hash NULL
|
|
+_001030_hash play_iframe 3 8219 _001030_hash NULL
|
|
+_001031_hash pm8001_store_update_fw 4 55716 _001031_hash NULL
|
|
+_001032_hash pmcraid_alloc_sglist 1 9864 _001032_hash NULL
|
|
+_001033_hash pn533_dep_link_up 5 7659 _001033_hash NULL
|
|
+_001034_hash pnp_alloc 1 24869 _001419_hash NULL nohasharray
|
|
+_001035_hash pn_sendmsg 4 12640 _001035_hash NULL
|
|
+_001036_hash pppoe_sendmsg 4 48039 _001036_hash NULL
|
|
+_001037_hash pppol2tp_sendmsg 4 56420 _001037_hash NULL
|
|
+_001038_hash process_vm_rw 3-5 47533 _001038_hash NULL
|
|
+_001040_hash process_vm_rw_single_vec 1-2 26213 _001040_hash NULL
|
|
+_001042_hash proc_write 3 51003 _001042_hash NULL
|
|
+_001043_hash profile_load 3 58267 _001043_hash NULL
|
|
+_001044_hash profile_remove 3 8556 _001044_hash NULL
|
|
+_001045_hash profile_replace 3 14652 _001045_hash NULL
|
|
+_001046_hash pscsi_get_bio 1 56103 _001046_hash NULL
|
|
+_001047_hash pyra_send 4 12061 _001047_hash NULL
|
|
+_001048_hash qc_capture 3 19298 _001048_hash NULL
|
|
+_001049_hash qla4xxx_alloc_work 2 44813 _001049_hash NULL
|
|
+_001050_hash qlcnic_alloc_msix_entries 2 46160 _001050_hash NULL
|
|
+_001051_hash qlcnic_alloc_sds_rings 2 26795 _001051_hash NULL
|
|
+_001052_hash queue_received_packet 5 9657 _001052_hash NULL
|
|
+_001053_hash raw_send_hdrinc 4 58803 _001053_hash NULL
|
|
+_001054_hash raw_sendmsg 4 23078 _001054_hash &_000022_hash
|
|
+_001055_hash rawsock_sendmsg 4 60010 _001055_hash NULL
|
|
+_001056_hash rawv6_send_hdrinc 3 35425 _001056_hash NULL
|
|
+_001057_hash rb_alloc 1 3102 _001057_hash NULL
|
|
+_001058_hash rbd_alloc_coll 1 33678 _001058_hash NULL
|
|
+_001059_hash rbd_create_rw_ops 2 4605 _001059_hash NULL
|
|
+_001060_hash rds_ib_inc_copy_to_user 3 55007 _001060_hash NULL
|
|
+_001061_hash rds_iw_inc_copy_to_user 3 29214 _001061_hash NULL
|
|
+_001062_hash rds_message_alloc 1 10517 _001062_hash NULL
|
|
+_001063_hash rds_message_copy_from_user 3 45510 _001063_hash NULL
|
|
+_001064_hash rds_message_inc_copy_to_user 3 26540 _001064_hash NULL
|
|
+_001065_hash redrat3_transmit_ir 3 64244 _001065_hash NULL
|
|
+_001066_hash regcache_rbtree_insert_to_block 5 58009 _001066_hash NULL
|
|
+_001067_hash _regmap_raw_write 4 42652 _001067_hash NULL
|
|
+_001068_hash regmap_register_patch 3 21681 _001068_hash NULL
|
|
+_001069_hash relay_alloc_page_array 1 52735 _001069_hash NULL
|
|
+_001070_hash remove_uuid 4 64505 _001070_hash NULL
|
|
+_001071_hash reshape_ring 2 29147 _001071_hash NULL
|
|
+_001072_hash RESIZE_IF_NEEDED 2 56286 _001072_hash NULL
|
|
+_001073_hash resize_stripes 2 61650 _001073_hash NULL
|
|
+_001074_hash rfcomm_sock_sendmsg 4 37661 _001074_hash NULL
|
|
+_001075_hash rose_sendmsg 4 20249 _001075_hash NULL
|
|
+_001076_hash rxrpc_send_data 5 21553 _001076_hash NULL
|
|
+_001077_hash rxrpc_setsockopt 5 50286 _001077_hash NULL
|
|
+_001078_hash saa7146_vmalloc_build_pgtable 2 19780 _001078_hash NULL
|
|
+_001079_hash saa7164_buffer_alloc_user 2 9627 _001079_hash NULL
|
|
+_001081_hash sco_send_frame 3 41815 _001081_hash NULL
|
|
+_001082_hash scsi_host_alloc 2 63041 _001082_hash NULL
|
|
+_001083_hash scsi_tgt_kspace_exec 8 9522 _001083_hash NULL
|
|
+_001084_hash sctp_sendmsg 4 61919 _001084_hash NULL
|
|
+_001085_hash sctp_setsockopt 5 44788 _001085_hash NULL
|
|
+_001086_hash sctp_setsockopt_connectx 3 6073 _001086_hash NULL
|
|
+_001087_hash sctp_setsockopt_connectx_old 3 22631 _001087_hash NULL
|
|
+_001088_hash sctp_tsnmap_init 2 36446 _001088_hash NULL
|
|
+_001089_hash sctp_user_addto_chunk 2-3 62047 _001089_hash NULL
|
|
+_001091_hash security_context_to_sid 2 19839 _001091_hash NULL
|
|
+_001092_hash security_context_to_sid_default 2 3492 _001092_hash NULL
|
|
+_001093_hash security_context_to_sid_force 2 20724 _001093_hash NULL
|
|
+_001094_hash selinux_transaction_write 3 59038 _001094_hash NULL
|
|
+_001095_hash sel_write_access 3 51704 _001095_hash NULL
|
|
+_001096_hash sel_write_create 3 11353 _001096_hash NULL
|
|
+_001097_hash sel_write_member 3 28800 _001097_hash NULL
|
|
+_001098_hash sel_write_relabel 3 55195 _001098_hash NULL
|
|
+_001099_hash sel_write_user 3 45060 _001099_hash NULL
|
|
+_001100_hash __seq_open_private 3 40715 _001100_hash NULL
|
|
+_001101_hash serverworks_create_gatt_pages 1 46582 _001101_hash NULL
|
|
+_001102_hash set_connectable 4 56458 _001102_hash NULL
|
|
+_001103_hash set_dev_class 4 39645 _001697_hash NULL nohasharray
|
|
+_001104_hash set_discoverable 4 48141 _001104_hash NULL
|
|
+_001105_hash setkey 3 14987 _001105_hash NULL
|
|
+_001106_hash set_le 4 30581 _001106_hash NULL
|
|
+_001107_hash set_link_security 4 4502 _001107_hash NULL
|
|
+_001108_hash set_local_name 4 55757 _001108_hash NULL
|
|
+_001109_hash set_powered 4 12129 _001109_hash NULL
|
|
+_001110_hash set_ssp 4 62411 _001110_hash NULL
|
|
+_001111_hash sg_build_sgat 3 60179 _001111_hash &_000305_hash
|
|
+_001112_hash sg_read_oxfer 3 51724 _001112_hash NULL
|
|
+_001113_hash shmem_xattr_set 4 11843 _001113_hash NULL
|
|
+_001114_hash simple_alloc_urb 3 60420 _001114_hash NULL
|
|
+_001115_hash sisusb_send_bridge_packet 2 11649 _001115_hash NULL
|
|
+_001116_hash sisusb_send_packet 2 20891 _001116_hash NULL
|
|
+_001117_hash skb_add_data_nocache 4 4682 _001117_hash NULL
|
|
+_001118_hash skb_copy_datagram_from_iovec 2-5-4 52014 _001118_hash NULL
|
|
+_001121_hash skb_copy_to_page_nocache 6 58624 _001121_hash NULL
|
|
+_001122_hash sk_chk_filter 2 42095 _001122_hash NULL
|
|
+_001123_hash skcipher_sendmsg 4 30290 _001123_hash NULL
|
|
+_001124_hash sl_change_mtu 2 7396 _001124_hash NULL
|
|
+_001125_hash slhc_init 1-2 58135 _001125_hash &_000894_hash
|
|
+_001127_hash sm501_create_subdev 3-4 48668 _001127_hash NULL
|
|
+_001129_hash smk_write_access 3 49561 _001129_hash NULL
|
|
+_001130_hash snapshot_write 3 28351 _001130_hash NULL
|
|
+_001131_hash snd_ac97_pcm_assign 2 30218 _001131_hash NULL
|
|
+_001132_hash snd_card_create 4 64418 _001411_hash NULL nohasharray
|
|
+_001133_hash snd_emux_create_port 3 42533 _001133_hash NULL
|
|
+_001134_hash snd_gus_dram_write 4 38784 _001134_hash NULL
|
|
+_001135_hash snd_midi_channel_alloc_set 1 28153 _001135_hash NULL
|
|
+_001136_hash _snd_pcm_lib_alloc_vmalloc_buffer 2 17820 _001136_hash NULL
|
|
+_001137_hash snd_pcm_oss_sync1 2 45298 _001137_hash NULL
|
|
+_001138_hash snd_pcm_oss_write 3 38108 _001138_hash NULL
|
|
+_001139_hash snd_pcm_plugin_build 5 25505 _001139_hash NULL
|
|
+_001140_hash snd_rawmidi_kernel_write 3 25106 _001140_hash NULL
|
|
+_001141_hash snd_rawmidi_write 3 28008 _001141_hash NULL
|
|
+_001142_hash snd_rme32_playback_copy 5 43732 _001142_hash NULL
|
|
+_001143_hash snd_rme96_playback_copy 5 13111 _001143_hash NULL
|
|
+_001144_hash snd_seq_device_new 4 31753 _001144_hash NULL
|
|
+_001145_hash snd_seq_oss_readq_new 2 14283 _001145_hash NULL
|
|
+_001146_hash snd_vx_create 4 40948 _001146_hash NULL
|
|
+_001147_hash sock_setsockopt 5 50088 _001147_hash NULL
|
|
+_001148_hash sound_write 3 5102 _001148_hash NULL
|
|
+_001149_hash _sp2d_alloc 1 16944 _001149_hash NULL
|
|
+_001150_hash spi_alloc_master 2 45223 _001150_hash NULL
|
|
+_001151_hash spidev_message 3 5518 _001151_hash NULL
|
|
+_001152_hash spi_register_board_info 2 35651 _001152_hash NULL
|
|
+_001153_hash squashfs_cache_init 2 41656 _001153_hash NULL
|
|
+_001154_hash squashfs_read_data 6 59440 _001154_hash NULL
|
|
+_001155_hash srp_alloc_iu 2 44227 _001155_hash NULL
|
|
+_001156_hash srp_iu_pool_alloc 2 17920 _001156_hash NULL
|
|
+_001157_hash srp_ring_alloc 2 26760 _001157_hash NULL
|
|
+_001159_hash start_isoc_chain 2 565 _001159_hash NULL
|
|
+_001160_hash stk_prepare_sio_buffers 2 57168 _001160_hash NULL
|
|
+_001161_hash store_iwmct_log_level 4 60209 _001161_hash NULL
|
|
+_001162_hash store_iwmct_log_level_fw 4 1974 _001162_hash NULL
|
|
+_001163_hash st_write 3 16874 _001163_hash NULL
|
|
+_001164_hash svc_pool_map_alloc_arrays 2 47181 _001164_hash NULL
|
|
+_001165_hash symtab_init 2 61050 _001165_hash NULL
|
|
+_001166_hash sys_bind 3 10799 _001166_hash NULL
|
|
+_001167_hash sys_connect 3 15291 _001167_hash NULL
|
|
+_001168_hash sys_flistxattr 3 41407 _001168_hash NULL
|
|
+_001169_hash sys_fsetxattr 4 49736 _001169_hash NULL
|
|
+_001170_hash sysfs_write_file 3 57116 _001170_hash NULL
|
|
+_001171_hash sys_ipc 3 4889 _001171_hash NULL
|
|
+_001172_hash sys_keyctl 4 33708 _001172_hash &_000974_hash
|
|
+_001173_hash sys_listxattr 3 27833 _001173_hash NULL
|
|
+_001174_hash sys_llistxattr 3 4532 _001174_hash NULL
|
|
+_001175_hash sys_lsetxattr 4 61177 _001175_hash NULL
|
|
+_001176_hash sys_mq_timedsend 3 57661 _001176_hash NULL
|
|
+_001177_hash sys_sched_setaffinity 2 32046 _001177_hash NULL
|
|
+_001178_hash sys_semop 3 39457 _001178_hash NULL
|
|
+_001179_hash sys_sendto 6 20809 _001179_hash NULL
|
|
+_001180_hash sys_setxattr 4 37880 _001180_hash NULL
|
|
+_001181_hash t4_alloc_mem 1 32342 _001181_hash NULL
|
|
+_001182_hash tcf_hash_create 4 54360 _001182_hash NULL
|
|
+_001183_hash __team_options_register 3 63941 _001183_hash NULL
|
|
+_001184_hash test_unaligned_bulk 3 52333 _001184_hash NULL
|
|
+_001185_hash tifm_alloc_adapter 1 10903 _001185_hash NULL
|
|
+_001186_hash timeout_write 3 50991 _001186_hash NULL
|
|
+_001187_hash tipc_link_send_sections_fast 4 37920 _001187_hash NULL
|
|
+_001188_hash tipc_subseq_alloc 1 5957 _001188_hash NULL
|
|
+_001189_hash tm6000_read_write_usb 7 50774 _002917_hash NULL nohasharray
|
|
+_001190_hash tnode_alloc 1 49407 _001190_hash NULL
|
|
+_001191_hash tomoyo_commit_ok 2 20167 _001191_hash NULL
|
|
+_001192_hash tomoyo_scan_bprm 2-4 15642 _001192_hash NULL
|
|
+_001194_hash tps65910_i2c_write 3 39531 _001194_hash NULL
|
|
+_001195_hash ts_write 3 64336 _001195_hash NULL
|
|
+_001196_hash ttusb2_msg 4 3100 _001196_hash NULL
|
|
+_001197_hash tty_write 3 5494 _001197_hash NULL
|
|
+_001198_hash ubi_dbg_check_all_ff 4 59810 _001198_hash NULL
|
|
+_001199_hash ubi_dbg_check_write 5 48525 _001199_hash NULL
|
|
+_001200_hash ubifs_setxattr 4 59650 _001370_hash NULL nohasharray
|
|
+_001201_hash udf_sb_alloc_partition_maps 2 62313 _001201_hash NULL
|
|
+_001202_hash udplite_getfrag 3-4 14479 _001202_hash NULL
|
|
+_001204_hash ulong_write_file 3 26485 _001204_hash NULL
|
|
+_001205_hash unix_dgram_sendmsg 4 45699 _001205_hash NULL
|
|
+_001206_hash unix_stream_sendmsg 4 61455 _001206_hash NULL
|
|
+_001207_hash unlink_queued 3-4 645 _001207_hash NULL
|
|
+_001208_hash update_pmkid 4 2481 _001208_hash NULL
|
|
+_001209_hash usb_alloc_coherent 2 65444 _001209_hash NULL
|
|
+_001210_hash uvc_alloc_buffers 2 9656 _001210_hash NULL
|
|
+_001211_hash uvc_alloc_entity 3-4 20836 _001211_hash NULL
|
|
+_001212_hash v4l2_ctrl_new 7 38725 _001212_hash NULL
|
|
+_001213_hash v4l2_event_subscribe 3 19510 _001213_hash NULL
|
|
+_001214_hash vb2_read 3 42703 _001214_hash NULL
|
|
+_001215_hash vb2_write 3 31948 _001215_hash NULL
|
|
+_001216_hash vc_resize 2-3 3585 _001216_hash NULL
|
|
+_001218_hash __vhost_add_used_n 3 26554 _001218_hash NULL
|
|
+_001219_hash __videobuf_alloc_vb 1 27062 _001219_hash NULL
|
|
+_001220_hash videobuf_dma_init_kernel 3 6963 _001220_hash NULL
|
|
+_001221_hash virtqueue_add_buf 3-4 59470 _001221_hash NULL
|
|
+_001223_hash vmalloc 1 15464 _001223_hash NULL
|
|
+_001224_hash vmalloc_to_sg 2 58354 _001224_hash NULL
|
|
+_001225_hash vol_cdev_write 3 40915 _001225_hash NULL
|
|
+_001226_hash vxge_device_register 4 7752 _001226_hash NULL
|
|
+_001227_hash __vxge_hw_channel_allocate 3 55462 _001227_hash NULL
|
|
+_001228_hash vzalloc 1 47421 _001228_hash NULL
|
|
+_001229_hash vzalloc_node 1 23424 _001229_hash NULL
|
|
+_001230_hash wa_nep_queue 2 8858 _001230_hash NULL
|
|
+_001231_hash __wa_xfer_setup_segs 2 56725 _001231_hash NULL
|
|
+_001232_hash wiphy_new 2 2482 _001232_hash NULL
|
|
+_001233_hash wpan_phy_alloc 1 48056 _001233_hash NULL
|
|
+_001234_hash wusb_ccm_mac 7 32199 _001234_hash NULL
|
|
+_001235_hash x25_sendmsg 4 12487 _001235_hash NULL
|
|
+_001236_hash xfrm_hash_alloc 1 10997 _001236_hash NULL
|
|
+_001237_hash _xfs_buf_get_pages 2 46811 _001237_hash NULL
|
|
+_001238_hash xfs_da_buf_make 1 55845 _001238_hash NULL
|
|
+_001239_hash xfs_da_grow_inode_int 3 21785 _001239_hash NULL
|
|
+_001240_hash xfs_dir_cilookup_result 3 64288 _003130_hash NULL nohasharray
|
|
+_001241_hash xfs_iext_add_indirect_multi 3 32400 _001241_hash NULL
|
|
+_001242_hash xfs_iext_inline_to_direct 2 12384 _001242_hash NULL
|
|
+_001243_hash xfs_iroot_realloc 2 46826 _001243_hash NULL
|
|
+_001244_hash xhci_alloc_stream_info 3 63902 _001244_hash NULL
|
|
+_001245_hash xlog_recover_add_to_trans 4 62839 _001245_hash NULL
|
|
+_001246_hash xprt_alloc 2 1475 _001246_hash NULL
|
|
+_001247_hash xt_alloc_table_info 1 57903 _001247_hash NULL
|
|
+_001248_hash _zd_iowrite32v_async_locked 3 39034 _001248_hash NULL
|
|
+_001249_hash zd_usb_iowrite16v 3 49744 _001249_hash NULL
|
|
+_001250_hash acpi_ds_build_internal_package_obj 3 58271 _001250_hash NULL
|
|
+_001251_hash acpi_system_read_event 3 55362 _001251_hash NULL
|
|
+_001252_hash acpi_ut_create_buffer_object 1 42030 _001252_hash NULL
|
|
+_001253_hash acpi_ut_create_package_object 1 17594 _001253_hash NULL
|
|
+_001254_hash acpi_ut_create_string_object 1 15360 _001254_hash NULL
|
|
+_001255_hash ad7879_spi_multi_read 3 8218 _001255_hash NULL
|
|
+_001256_hash add_child 4 45201 _001256_hash NULL
|
|
+_001257_hash add_port 2 54941 _001257_hash NULL
|
|
+_001258_hash adu_read 3 24177 _001258_hash NULL
|
|
+_001259_hash afs_cell_create 2 27346 _001259_hash NULL
|
|
+_001260_hash agp_generic_alloc_user 1 9470 _001260_hash NULL
|
|
+_001261_hash alloc_agpphysmem_i8xx 1 39427 _001261_hash NULL
|
|
+_001262_hash allocate_cnodes 1 5329 _001262_hash NULL
|
|
+_001263_hash ___alloc_bootmem 1 11410 _001263_hash NULL
|
|
+_001264_hash __alloc_bootmem_nopanic 1 65397 _001264_hash NULL
|
|
+_001265_hash alloc_bulk_urbs_generic 5 12127 _001265_hash NULL
|
|
+_001266_hash alloc_candev 1-2 7776 _001266_hash NULL
|
|
+_001268_hash ____alloc_ei_netdev 1 51475 _001268_hash NULL
|
|
+_001269_hash alloc_etherdev_mqs 1 36450 _001269_hash NULL
|
|
+_001270_hash alloc_extent_buffer 3 52824 _001270_hash NULL
|
|
+_001271_hash alloc_fcdev 1 18780 _001271_hash NULL
|
|
+_001272_hash alloc_fddidev 1 15382 _001272_hash NULL
|
|
+_001273_hash alloc_hippi_dev 1 51320 _001273_hash NULL
|
|
+_001274_hash alloc_irdadev 1 19140 _001274_hash NULL
|
|
+_001275_hash alloc_ltalkdev 1 38071 _001275_hash NULL
|
|
+_001276_hash alloc_one_pg_vec_page 1 10747 _001276_hash NULL
|
|
+_001277_hash alloc_orinocodev 1 21371 _001277_hash NULL
|
|
+_001279_hash alloc_trdev 1 16399 _001279_hash NULL
|
|
+_001280_hash async_setkey 3 35521 _001280_hash NULL
|
|
+_001281_hash ata_host_alloc_pinfo 3 17325 _001281_hash NULL
|
|
+_001284_hash ath6kl_connect_event 7-9-8 14267 _001284_hash NULL
|
|
+_001285_hash ath6kl_fwlog_block_read 3 49836 _001285_hash NULL
|
|
+_001286_hash ath6kl_fwlog_read 3 32101 _001286_hash NULL
|
|
+_001287_hash ath_rx_init 2 43564 _001287_hash NULL
|
|
+_001288_hash ath_tx_init 2 60515 _001288_hash NULL
|
|
+_001289_hash atm_get_addr 3 31221 _001289_hash NULL
|
|
+_001290_hash av7110_ipack_init 2 46655 _001290_hash NULL
|
|
+_001291_hash bdx_rxdb_create 1 46525 _001291_hash NULL
|
|
+_001292_hash bdx_tx_db_init 2 41719 _001292_hash NULL
|
|
+_001293_hash bio_map_kern 3 64751 _001293_hash NULL
|
|
+_001294_hash bits_to_user 3 47733 _001294_hash NULL
|
|
+_001295_hash __blk_queue_init_tags 2 9778 _001295_hash NULL
|
|
+_001296_hash blk_queue_resize_tags 2 28670 _001296_hash NULL
|
|
+_001297_hash blk_rq_map_user_iov 5 16772 _001297_hash NULL
|
|
+_001298_hash bm_init 2 13529 _001298_hash NULL
|
|
+_001299_hash brcmf_alloc_wdev 1 60347 _001299_hash NULL
|
|
+_001300_hash btrfs_insert_dir_item 4 59304 _001300_hash NULL
|
|
+_001301_hash btrfs_map_block 3 64379 _001301_hash NULL
|
|
+_001302_hash c4_add_card 3 54968 _001302_hash NULL
|
|
+_001303_hash cache_read 3 24790 _001303_hash NULL
|
|
+_001304_hash cache_write 3 13589 _001304_hash NULL
|
|
+_001305_hash calc_hmac 3 32010 _001305_hash NULL
|
|
+_001306_hash ccid_getsockopt_builtin_ccids 2 53634 _001306_hash NULL
|
|
+_001307_hash ceph_copy_page_vector_to_user 4 31270 _001307_hash NULL
|
|
+_001308_hash ceph_read_dir 3 17005 _001308_hash NULL
|
|
+_001309_hash cfg80211_roamed 5-7 32632 _001309_hash NULL
|
|
+_001311_hash ci_ll_init 3 12930 _001311_hash NULL
|
|
+_001312_hash coda_psdev_read 3 35029 _001312_hash NULL
|
|
+_001313_hash construct_key_and_link 4 8321 _001313_hash NULL
|
|
+_001314_hash copy_counters_to_user 5 17027 _001824_hash NULL nohasharray
|
|
+_001315_hash copy_entries_to_user 1 52367 _001315_hash NULL
|
|
+_001316_hash copy_from_buf 4 27308 _001316_hash NULL
|
|
+_001317_hash copy_oldmem_page 3 26164 _001317_hash NULL
|
|
+_001318_hash copy_to_user_fromio 3 57432 _001318_hash NULL
|
|
+_001319_hash cryptd_hash_setkey 3 42781 _001319_hash NULL
|
|
+_001320_hash crypto_authenc_esn_setkey 3 6985 _001320_hash NULL
|
|
+_001321_hash crypto_authenc_setkey 3 80 _001321_hash NULL
|
|
+_001322_hash cx18_copy_buf_to_user 4 22735 _001322_hash NULL
|
|
+_001324_hash cxgbi_ddp_reserve 4 30091 _001324_hash NULL
|
|
+_001325_hash datablob_hmac_append 3 40038 _001325_hash NULL
|
|
+_001326_hash datablob_hmac_verify 4 24786 _001326_hash NULL
|
|
+_001327_hash dataflash_read_fact_otp 3-2 33204 _001327_hash NULL
|
|
+_001328_hash dataflash_read_user_otp 3-2 14536 _001328_hash &_000201_hash
|
|
+_001329_hash dccp_feat_register_sp 5 17914 _001329_hash NULL
|
|
+_001330_hash ddb_input_read 3 9743 _001330_hash NULL
|
|
+_001331_hash dev_read 3 56369 _001331_hash NULL
|
|
+_001332_hash diva_os_copy_to_user 4 48508 _001332_hash NULL
|
|
+_001333_hash diva_os_malloc 2 16406 _001333_hash NULL
|
|
+_001334_hash dlm_dir_lookup 4 56662 _001334_hash NULL
|
|
+_001335_hash dm_vcalloc 1-2 16814 _001335_hash NULL
|
|
+_001337_hash do_proc_readlink 3 14096 _001337_hash NULL
|
|
+_001338_hash do_readlink 2 43518 _001338_hash NULL
|
|
+_001339_hash __do_replace 5 37227 _001339_hash NULL
|
|
+_001340_hash do_sigpending 2 9766 _001340_hash NULL
|
|
+_001341_hash drbd_setsockopt 5 16280 _001341_hash &_000371_hash
|
|
+_001342_hash dsp_buffer_alloc 2 11684 _001342_hash NULL
|
|
+_001343_hash dump_midi 3 51040 _001343_hash NULL
|
|
+_001344_hash dvb_dmxdev_set_buffer_size 2 55643 _001344_hash NULL
|
|
+_001345_hash dvb_dvr_set_buffer_size 2 9840 _001345_hash NULL
|
|
+_001346_hash dvb_ringbuffer_pkt_read_user 3-5-2 4303 _001346_hash NULL
|
|
+_001348_hash dvb_ringbuffer_read_user 3 56702 _001348_hash NULL
|
|
+_001349_hash ecryptfs_filldir 3 6622 _001349_hash NULL
|
|
+_001350_hash ecryptfs_readlink 3 40775 _001350_hash NULL
|
|
+_001351_hash ecryptfs_send_message 2 18322 _001351_hash NULL
|
|
+_001352_hash em28xx_init_isoc 4 62883 _001352_hash &_000721_hash
|
|
+_001353_hash et61x251_read 3 25420 _001353_hash NULL
|
|
+_001354_hash ext4_add_new_descs 3 19509 _001354_hash NULL
|
|
+_001355_hash fat_ioctl_filldir 3 36621 _001355_hash NULL
|
|
+_001356_hash fd_copyout 3 59323 _001356_hash NULL
|
|
+_001357_hash f_hidg_read 3 6238 _001357_hash NULL
|
|
+_001358_hash filldir 3 55137 _001358_hash NULL
|
|
+_001359_hash filldir64 3 46469 _001359_hash NULL
|
|
+_001360_hash fops_read 3 40672 _001360_hash NULL
|
|
+_001361_hash from_buffer 3 18625 _001361_hash NULL
|
|
+_001362_hash fsm_init 2 16134 _001362_hash NULL
|
|
+_001363_hash get_subdir 3 62581 _001363_hash NULL
|
|
+_001364_hash gspca_dev_probe 4 2570 _001364_hash NULL
|
|
+_001365_hash handle_received_packet 3 22457 _001365_hash NULL
|
|
+_001366_hash hash_setkey 3 48310 _001366_hash NULL
|
|
+_001367_hash hdlcdrv_register 2 6792 _001367_hash NULL
|
|
+_001368_hash hdpvr_read 3 9273 _001368_hash NULL
|
|
+_001369_hash hid_input_report 4 32458 _001369_hash NULL
|
|
+_001370_hash hidraw_read 3 59650 _001370_hash &_001200_hash
|
|
+_001371_hash HiSax_readstatus 2 15752 _001371_hash NULL
|
|
+_001373_hash __hwahc_op_set_gtk 4 42038 _001373_hash NULL
|
|
+_001374_hash __hwahc_op_set_ptk 5 36510 _001374_hash NULL
|
|
+_001375_hash ib_copy_to_udata 3 27525 _001375_hash NULL
|
|
+_001376_hash idetape_chrdev_read 3 2097 _001376_hash NULL
|
|
+_001377_hash ieee80211_alloc_hw 1 43829 _001377_hash NULL
|
|
+_001378_hash ieee80211_bss_info_update 4 13991 _001378_hash NULL
|
|
+_001379_hash ilo_read 3 32531 _001379_hash NULL
|
|
+_001380_hash init_map_ipmac 3-4 63896 _001380_hash NULL
|
|
+_001382_hash init_tid_tabs 2-4-3 13252 _001382_hash NULL
|
|
+_001385_hash iowarrior_read 3 53483 _001385_hash NULL
|
|
+_001386_hash ipv6_getsockopt_sticky 5 56711 _001386_hash NULL
|
|
+_001387_hash ipwireless_send_packet 4 8328 _001387_hash NULL
|
|
+_001388_hash ipx_sendmsg 4 1362 _001388_hash NULL
|
|
+_001389_hash iscsi_conn_setup 2 35159 _001389_hash NULL
|
|
+_001390_hash iscsi_create_session 3 51647 _001390_hash NULL
|
|
+_001391_hash iscsi_host_alloc 2 36671 _001391_hash NULL
|
|
+_001392_hash iscsi_session_setup 4-5 196 _001392_hash NULL
|
|
+_001394_hash iscsit_find_cmd_from_itt_or_dump 3 17194 _001701_hash NULL nohasharray
|
|
+_001395_hash isdn_ppp_read 4 50356 _001395_hash NULL
|
|
+_001396_hash isku_sysfs_read 6 58806 _001396_hash NULL
|
|
+_001397_hash isku_sysfs_write 6 49767 _001397_hash NULL
|
|
+_001398_hash iso_alloc_urb 4-5 45206 _001398_hash NULL
|
|
+_001400_hash ivtv_copy_buf_to_user 4 6159 _001400_hash NULL
|
|
+_001401_hash iwm_rx_handle 3 24899 _001401_hash NULL
|
|
+_001402_hash iwm_wdev_alloc 1 38415 _001402_hash NULL
|
|
+_001403_hash jbd2_alloc 1 41359 _001403_hash NULL
|
|
+_001404_hash jffs2_do_link 6 42048 _001404_hash NULL
|
|
+_001405_hash jffs2_do_unlink 4 62020 _001405_hash NULL
|
|
+_001406_hash jffs2_security_setxattr 4 62107 _001406_hash NULL
|
|
+_001407_hash jffs2_trusted_setxattr 4 17048 _001407_hash NULL
|
|
+_001408_hash jffs2_user_setxattr 4 10182 _001408_hash NULL
|
|
+_001409_hash kernel_setsockopt 5 35913 _001409_hash NULL
|
|
+_001410_hash keyctl_describe_key 3 36853 _001410_hash NULL
|
|
+_001411_hash keyctl_get_security 3 64418 _001411_hash &_001132_hash
|
|
+_001412_hash keyring_read 3 13438 _001412_hash NULL
|
|
+_001413_hash kfifo_copy_to_user 3 20646 _001413_hash NULL
|
|
+_001414_hash kmem_zalloc_large 1 56128 _001414_hash NULL
|
|
+_001415_hash kmp_init 2 41373 _001415_hash NULL
|
|
+_001416_hash koneplus_sysfs_write 6 35993 _001416_hash NULL
|
|
+_001417_hash kvm_clear_guest_page 4 2308 _001417_hash NULL
|
|
+_001418_hash kvm_read_nested_guest_page 5 13337 _001418_hash NULL
|
|
+_001419_hash l2cap_create_basic_pdu 3 24869 _001419_hash &_001034_hash
|
|
+_001420_hash l2cap_create_connless_pdu 3 37327 _001420_hash &_000966_hash
|
|
+_001421_hash l2cap_create_iframe_pdu 3 51801 _001421_hash NULL
|
|
+_001422_hash __lgwrite 4 57669 _001422_hash NULL
|
|
+_001423_hash libfc_host_alloc 2 7917 _001423_hash NULL
|
|
+_001424_hash llcp_sock_sendmsg 4 1092 _001424_hash NULL
|
|
+_001425_hash macvtap_get_user 4 28185 _001425_hash NULL
|
|
+_001426_hash mcam_v4l_read 3 36513 _001426_hash NULL
|
|
+_001427_hash mce_async_out 3 58056 _001427_hash NULL
|
|
+_001428_hash mce_flush_rx_buffer 2 14976 _001428_hash NULL
|
|
+_001429_hash mdc800_device_read 3 22896 _001429_hash NULL
|
|
+_001430_hash memcpy_toiovec 3 54166 _001430_hash &_000867_hash
|
|
+_001431_hash memcpy_toiovecend 3-4 19736 _001431_hash NULL
|
|
+_001433_hash mgt_set_varlen 4 60916 _001433_hash NULL
|
|
+_001434_hash mlx4_en_create_rx_ring 3 62498 _001434_hash NULL
|
|
+_001435_hash mlx4_en_create_tx_ring 4 48501 _001435_hash NULL
|
|
+_001436_hash mon_bin_get_event 4 52863 _001436_hash NULL
|
|
+_001437_hash mousedev_read 3 47123 _001437_hash NULL
|
|
+_001438_hash move_addr_to_user 2 2868 _001438_hash NULL
|
|
+_001439_hash mpihelp_mul 5-3 27805 _001439_hash NULL
|
|
+_001441_hash mpi_lshift_limbs 2 9337 _001441_hash NULL
|
|
+_001442_hash msnd_fifo_alloc 2 23179 _001442_hash NULL
|
|
+_001443_hash mtdswap_init 2 55719 _001443_hash NULL
|
|
+_001444_hash neigh_hash_grow 2 17283 _001444_hash NULL
|
|
+_001445_hash nfs4_realloc_slot_table 2 22859 _001445_hash NULL
|
|
+_001446_hash nfs_idmap_get_key 2 39616 _001446_hash NULL
|
|
+_001447_hash nsm_get_handle 4 52089 _001447_hash NULL
|
|
+_001448_hash ntfs_malloc_nofs 1 49572 _001448_hash NULL
|
|
+_001449_hash ntfs_malloc_nofs_nofail 1 63631 _001449_hash NULL
|
|
+_001450_hash nvme_create_queue 3 170 _001450_hash NULL
|
|
+_001451_hash ocfs2_control_write 3 54737 _001451_hash NULL
|
|
+_001452_hash orinoco_add_extscan_result 3 18207 _001452_hash NULL
|
|
+_001454_hash override_release 2 52032 _001454_hash NULL
|
|
+_001455_hash packet_snd 3 13634 _001455_hash NULL
|
|
+_001456_hash pcbit_stat 2 27364 _001456_hash NULL
|
|
+_001457_hash pcpu_extend_area_map 2 12589 _001457_hash NULL
|
|
+_001458_hash pg_read 3 17276 _001458_hash NULL
|
|
+_001459_hash picolcd_debug_eeprom_read 3 14549 _001459_hash NULL
|
|
+_001460_hash pkt_alloc_packet_data 1 37928 _001460_hash NULL
|
|
+_001461_hash pmcraid_build_passthrough_ioadls 2 62034 _001461_hash NULL
|
|
+_001462_hash pms_capture 4 27142 _001462_hash NULL
|
|
+_001463_hash posix_clock_register 2 5662 _001463_hash NULL
|
|
+_001464_hash printer_read 3 54851 _001464_hash NULL
|
|
+_001465_hash __proc_file_read 3 54978 _001465_hash NULL
|
|
+_001466_hash pt_read 3 49136 _001466_hash NULL
|
|
+_001467_hash put_cmsg 4 36589 _001467_hash NULL
|
|
+_001468_hash pvr2_ioread_read 3 10720 _001505_hash NULL nohasharray
|
|
+_001469_hash pwc_video_read 3 51735 _001469_hash NULL
|
|
+_001470_hash px_raw_event 4 49371 _001470_hash NULL
|
|
+_001471_hash qcam_read 3 13977 _001471_hash NULL
|
|
+_001472_hash rawv6_sendmsg 4 20080 _001472_hash NULL
|
|
+_001473_hash rds_sendmsg 4 40976 _001473_hash NULL
|
|
+_001474_hash read_flush 3 43851 _001474_hash NULL
|
|
+_001475_hash read_profile 3 27859 _001475_hash NULL
|
|
+_001476_hash read_vmcore 3 26501 _001476_hash NULL
|
|
+_001477_hash redirected_tty_write 3 65297 _001477_hash NULL
|
|
+_001478_hash __register_chrdev 2-3 54223 _001478_hash NULL
|
|
+_001480_hash regmap_raw_write 4 53803 _001480_hash NULL
|
|
+_001481_hash reiserfs_allocate_list_bitmaps 3 21732 _001481_hash NULL
|
|
+_001482_hash reiserfs_resize 2 34377 _001482_hash NULL
|
|
+_001483_hash request_key_auth_read 3 24109 _001483_hash NULL
|
|
+_001484_hash rfkill_fop_read 3 54711 _001484_hash NULL
|
|
+_001485_hash rng_dev_read 3 41581 _001485_hash NULL
|
|
+_001486_hash roccat_read 3 41093 _001486_hash NULL
|
|
+_001487_hash sco_sock_sendmsg 4 62542 _001487_hash NULL
|
|
+_001488_hash scsi_register 2 49094 _001488_hash NULL
|
|
+_001489_hash sctp_getsockopt_events 2 3607 _001489_hash NULL
|
|
+_001490_hash sctp_getsockopt_maxburst 2 42941 _001490_hash NULL
|
|
+_001491_hash sctp_getsockopt_maxseg 2 10737 _001491_hash NULL
|
|
+_001492_hash sctpprobe_read 3 17741 _001492_hash NULL
|
|
+_001493_hash sdhci_alloc_host 2 7509 _001493_hash NULL
|
|
+_001494_hash selinux_inode_post_setxattr 4 26037 _001494_hash NULL
|
|
+_001495_hash selinux_inode_setsecurity 4 18148 _001495_hash NULL
|
|
+_001496_hash selinux_inode_setxattr 4 10708 _001496_hash NULL
|
|
+_001497_hash selinux_secctx_to_secid 2 63744 _001497_hash NULL
|
|
+_001498_hash selinux_setprocattr 4 55611 _001498_hash NULL
|
|
+_001499_hash sel_write_context 3 25726 _002397_hash NULL nohasharray
|
|
+_001500_hash seq_copy_in_user 3 18543 _001500_hash NULL
|
|
+_001501_hash seq_open_net 4 8968 _001594_hash NULL nohasharray
|
|
+_001502_hash seq_open_private 3 61589 _001502_hash NULL
|
|
+_001503_hash set_arg 3 42824 _001503_hash NULL
|
|
+_001504_hash sg_read 3 25799 _001504_hash NULL
|
|
+_001505_hash shash_async_setkey 3 10720 _001505_hash &_001468_hash
|
|
+_001506_hash shash_compat_setkey 3 12267 _001506_hash NULL
|
|
+_001507_hash shmem_setxattr 4 55867 _001507_hash NULL
|
|
+_001508_hash simple_read_from_buffer 2-5 55957 _001508_hash NULL
|
|
+_001511_hash sm_checker_extend 2 23615 _001511_hash NULL
|
|
+_001512_hash sn9c102_read 3 29305 _001512_hash NULL
|
|
+_001513_hash snd_es1938_capture_copy 5 25930 _001513_hash NULL
|
|
+_001514_hash snd_gus_dram_peek 4 9062 _001514_hash NULL
|
|
+_001515_hash snd_hdsp_capture_copy 5 4011 _001515_hash NULL
|
|
+_001516_hash snd_korg1212_copy_to 6 92 _001516_hash NULL
|
|
+_001517_hash snd_opl4_mem_proc_read 5 63774 _001517_hash NULL
|
|
+_001518_hash snd_pcm_alloc_vmalloc_buffer 2 44595 _001518_hash NULL
|
|
+_001519_hash snd_pcm_oss_read1 3 63771 _001519_hash NULL
|
|
+_001520_hash snd_rawmidi_kernel_read1 4 36740 _001520_hash NULL
|
|
+_001521_hash snd_rme9652_capture_copy 5 10287 _001521_hash NULL
|
|
+_001522_hash srp_target_alloc 3 37288 _001522_hash NULL
|
|
+_001523_hash stk_allocate_buffers 2 16291 _001523_hash NULL
|
|
+_001524_hash store_ifalias 4 35088 _001524_hash NULL
|
|
+_001525_hash store_msg 3 56417 _001525_hash NULL
|
|
+_001526_hash str_to_user 2 11411 _001526_hash NULL
|
|
+_001527_hash subbuf_read_actor 3 2071 _001527_hash NULL
|
|
+_001528_hash sys_fgetxattr 4 25166 _001528_hash NULL
|
|
+_001529_hash sys_gethostname 2 49698 _001529_hash NULL
|
|
+_001530_hash sys_getxattr 4 37418 _001530_hash NULL
|
|
+_001531_hash sys_kexec_load 2 14222 _001531_hash NULL
|
|
+_001532_hash sys_msgsnd 3 44537 _001532_hash &_000129_hash
|
|
+_001533_hash sys_process_vm_readv 3-5 19090 _003178_hash NULL nohasharray
|
|
+_001535_hash sys_process_vm_writev 3-5 4928 _001535_hash NULL
|
|
+_001537_hash sys_sched_getaffinity 2 60033 _001537_hash NULL
|
|
+_001538_hash sys_setsockopt 5 35320 _001538_hash NULL
|
|
+_001539_hash t3_init_l2t 1 8261 _001539_hash NULL
|
|
+_001540_hash team_options_register 3 20091 _001540_hash NULL
|
|
+_001541_hash tipc_send2name 6 16809 _001541_hash NULL
|
|
+_001542_hash tipc_send2port 5 63935 _001542_hash NULL
|
|
+_001543_hash tipc_send 4 51238 _001543_hash NULL
|
|
+_001544_hash tm6000_i2c_recv_regs16 5 2949 _001544_hash NULL
|
|
+_001545_hash tm6000_i2c_recv_regs 5 46215 _001545_hash NULL
|
|
+_001546_hash tm6000_i2c_send_regs 5 20250 _001546_hash NULL
|
|
+_001547_hash tnode_new 3 44757 _001547_hash NULL
|
|
+_001548_hash tomoyo_read_self 3 33539 _001548_hash NULL
|
|
+_001549_hash tomoyo_update_domain 2 5498 _001549_hash NULL
|
|
+_001550_hash tomoyo_update_policy 2 40458 _001550_hash NULL
|
|
+_001551_hash tpm_read 3 50344 _001551_hash NULL
|
|
+_001552_hash TSS_rawhmac 3 17486 _001552_hash NULL
|
|
+_001553_hash tt3650_ci_msg 4 57219 _001553_hash NULL
|
|
+_001554_hash tun_get_user 3 33178 _001554_hash NULL
|
|
+_001555_hash ubi_dbg_dump_flash 4 3870 _001555_hash NULL
|
|
+_001556_hash ubi_io_write 4-5 15870 _001556_hash &_000954_hash
|
|
+_001558_hash uio_read 3 49300 _001558_hash NULL
|
|
+_001559_hash unix_seqpacket_sendmsg 4 27893 _001559_hash NULL
|
|
+_001560_hash unlink1 3 63059 _001560_hash NULL
|
|
+_001562_hash usb_allocate_stream_buffers 3 8964 _001562_hash NULL
|
|
+_001563_hash usbdev_read 3 45114 _001563_hash NULL
|
|
+_001564_hash usblp_read 3 57342 _001564_hash NULL
|
|
+_001565_hash usbtmc_read 3 32377 _001565_hash NULL
|
|
+_001566_hash usbvision_v4l2_read 3 34386 _001566_hash NULL
|
|
+_001567_hash _usb_writeN_sync 4 31682 _001567_hash NULL
|
|
+_001568_hash user_read 3 51881 _001568_hash NULL
|
|
+_001569_hash v4l_stk_read 3 39672 _001569_hash NULL
|
|
+_001570_hash vcs_read 3 8017 _001570_hash NULL
|
|
+_001571_hash vdma_mem_alloc 1 6171 _001571_hash NULL
|
|
+_001572_hash venus_create 4 20555 _001572_hash NULL
|
|
+_001573_hash venus_link 5 32165 _001573_hash NULL
|
|
+_001574_hash venus_lookup 4 8121 _001574_hash NULL
|
|
+_001575_hash venus_mkdir 4 8967 _001575_hash NULL
|
|
+_001576_hash venus_remove 4 59781 _001576_hash NULL
|
|
+_001577_hash venus_rename 4-5 17707 _001577_hash NULL
|
|
+_001579_hash venus_rmdir 4 45564 _001579_hash NULL
|
|
+_001580_hash venus_symlink 4-6 23570 _001580_hash NULL
|
|
+_001582_hash vfs_readlink 3 54368 _001582_hash NULL
|
|
+_001583_hash vfs_readv 3 38011 _001583_hash NULL
|
|
+_001584_hash vfs_writev 3 25278 _001584_hash NULL
|
|
+_001585_hash vga_arb_read 3 4886 _001585_hash NULL
|
|
+_001586_hash vhci_put_user 4 12604 _001586_hash NULL
|
|
+_001587_hash vhost_add_used_n 3 10760 _001587_hash NULL
|
|
+_001588_hash __videobuf_copy_to_user 4 15423 _001588_hash NULL
|
|
+_001589_hash videobuf_pages_to_sg 2 3708 _001589_hash NULL
|
|
+_001590_hash videobuf_vmalloc_to_sg 2 4548 _001590_hash NULL
|
|
+_001591_hash virtnet_send_command 5-6 61993 _001591_hash NULL
|
|
+_001593_hash vmbus_establish_gpadl 3 4495 _001593_hash NULL
|
|
+_001594_hash vol_cdev_read 3 8968 _001594_hash &_001501_hash
|
|
+_001595_hash w9966_v4l_read 3 31148 _001595_hash NULL
|
|
+_001596_hash wdm_read 3 6549 _001596_hash NULL
|
|
+_001597_hash wusb_prf 7 54261 _001597_hash &_000063_hash
|
|
+_001598_hash xdi_copy_to_user 4 48900 _001598_hash NULL
|
|
+_001599_hash xfs_buf_get_uncached 2 51477 _001599_hash NULL
|
|
+_001600_hash xfs_efd_init 3 5463 _001600_hash NULL
|
|
+_001601_hash xfs_efi_init 2 5476 _001601_hash NULL
|
|
+_001602_hash xfs_iext_realloc_direct 2 20521 _001602_hash NULL
|
|
+_001603_hash xfs_iext_realloc_indirect 2 59211 _001603_hash NULL
|
|
+_001604_hash xfs_inumbers_fmt 3 12817 _001604_hash NULL
|
|
+_001605_hash xlog_recover_add_to_cont_trans 4 44102 _001605_hash NULL
|
|
+_001606_hash xz_dec_lzma2_create 2 36353 _002745_hash NULL nohasharray
|
|
+_001607_hash _zd_iowrite32v_locked 3 44725 _001607_hash NULL
|
|
+_001608_hash aat2870_reg_read_file 3 12221 _001608_hash NULL
|
|
+_001609_hash add_sctp_bind_addr 3 12269 _001609_hash NULL
|
|
+_001610_hash aes_decrypt_fail_read 3 54815 _001610_hash NULL
|
|
+_001611_hash aes_decrypt_interrupt_read 3 19910 _001611_hash NULL
|
|
+_001612_hash aes_decrypt_packets_read 3 10155 _001612_hash NULL
|
|
+_001613_hash aes_encrypt_fail_read 3 32562 _001613_hash NULL
|
|
+_001614_hash aes_encrypt_interrupt_read 3 39919 _001614_hash NULL
|
|
+_001615_hash aes_encrypt_packets_read 3 48666 _001615_hash NULL
|
|
+_001616_hash afs_cell_lookup 2 8482 _001616_hash NULL
|
|
+_001617_hash agp_allocate_memory 2 58761 _001617_hash NULL
|
|
+_001618_hash __alloc_bootmem 1 31498 _001618_hash NULL
|
|
+_001619_hash __alloc_bootmem_low 1 43423 _001619_hash NULL
|
|
+_001620_hash __alloc_bootmem_node_nopanic 2 6432 _001620_hash NULL
|
|
+_001621_hash alloc_cc770dev 1 48186 _001621_hash NULL
|
|
+_001622_hash __alloc_ei_netdev 1 29338 _001622_hash NULL
|
|
+_001623_hash __alloc_eip_netdev 1 51549 _001623_hash NULL
|
|
+_001624_hash alloc_libipw 1 22708 _001624_hash NULL
|
|
+_001625_hash alloc_pg_vec 2 8533 _001625_hash NULL
|
|
+_001626_hash alloc_sja1000dev 1 17868 _001626_hash NULL
|
|
+_001627_hash alloc_targets 2 8074 _001627_hash NULL
|
|
+_001630_hash ath6kl_disconnect_timeout_read 3 3650 _001630_hash NULL
|
|
+_001631_hash ath6kl_endpoint_stats_read 3 41554 _001631_hash NULL
|
|
+_001632_hash ath6kl_fwlog_mask_read 3 2050 _001632_hash NULL
|
|
+_001633_hash ath6kl_keepalive_read 3 44303 _001633_hash NULL
|
|
+_001634_hash ath6kl_listen_int_read 3 10355 _001634_hash NULL
|
|
+_001635_hash ath6kl_lrssi_roam_read 3 61022 _001635_hash NULL
|
|
+_001636_hash ath6kl_regdump_read 3 14393 _001636_hash NULL
|
|
+_001637_hash ath6kl_regread_read 3 25884 _001637_hash NULL
|
|
+_001638_hash ath6kl_regwrite_read 3 48747 _001638_hash NULL
|
|
+_001639_hash ath6kl_roam_table_read 3 26166 _001639_hash NULL
|
|
+_001640_hash ath9k_debugfs_read_buf 3 25316 _001640_hash NULL
|
|
+_001641_hash atk_debugfs_ggrp_read 3 29522 _001641_hash NULL
|
|
+_001642_hash b43_debugfs_read 3 24425 _001642_hash NULL
|
|
+_001643_hash b43legacy_debugfs_read 3 2473 _001643_hash NULL
|
|
+_001644_hash bcm_recvmsg 4 43992 _001644_hash NULL
|
|
+_001645_hash bfad_debugfs_read 3 13119 _001645_hash NULL
|
|
+_001646_hash bfad_debugfs_read_regrd 3 57830 _001646_hash NULL
|
|
+_001647_hash blk_init_tags 1 30592 _001647_hash NULL
|
|
+_001648_hash blk_queue_init_tags 2 44355 _002686_hash NULL nohasharray
|
|
+_001649_hash blk_rq_map_kern 4 47004 _001649_hash NULL
|
|
+_001650_hash bm_entry_read 3 10976 _001650_hash NULL
|
|
+_001651_hash bm_status_read 3 19583 _001651_hash NULL
|
|
+_001652_hash bnad_debugfs_read 3 50665 _001652_hash NULL
|
|
+_001653_hash bnad_debugfs_read_regrd 3 51308 _001653_hash NULL
|
|
+_001654_hash btmrvl_curpsmode_read 3 46939 _001654_hash NULL
|
|
+_001655_hash btmrvl_gpiogap_read 3 4718 _001655_hash NULL
|
|
+_001656_hash btmrvl_hscfgcmd_read 3 56303 _001656_hash NULL
|
|
+_001657_hash btmrvl_hscmd_read 3 1614 _001657_hash NULL
|
|
+_001658_hash btmrvl_hsmode_read 3 1647 _001658_hash NULL
|
|
+_001659_hash btmrvl_hsstate_read 3 920 _001659_hash NULL
|
|
+_001660_hash btmrvl_pscmd_read 3 24308 _001660_hash NULL
|
|
+_001661_hash btmrvl_psmode_read 3 22395 _001661_hash NULL
|
|
+_001662_hash btmrvl_psstate_read 3 50683 _001662_hash NULL
|
|
+_001663_hash btmrvl_txdnldready_read 3 413 _001663_hash NULL
|
|
+_001664_hash btrfs_add_link 5 9973 _001664_hash NULL
|
|
+_001665_hash btrfs_discard_extent 2 38547 _001665_hash NULL
|
|
+_001666_hash btrfs_find_create_tree_block 3 55812 _001666_hash NULL
|
|
+_001667_hash btrfsic_map_block 2 56751 _001667_hash NULL
|
|
+_001668_hash caif_stream_recvmsg 4 13173 _001668_hash NULL
|
|
+_001669_hash carl9170_alloc 1 27 _001669_hash NULL
|
|
+_001670_hash carl9170_debugfs_read 3 47738 _001670_hash NULL
|
|
+_001671_hash cgroup_read_s64 5 19570 _001671_hash NULL
|
|
+_001672_hash cgroup_read_u64 5 45532 _001672_hash NULL
|
|
+_001673_hash channel_type_read 3 47308 _001673_hash NULL
|
|
+_001674_hash codec_list_read_file 3 24910 _001674_hash NULL
|
|
+_001675_hash configfs_read_file 3 1683 _001675_hash NULL
|
|
+_001676_hash cpuset_common_file_read 5 8800 _001676_hash NULL
|
|
+_001677_hash create_subvol 4 2347 _001677_hash NULL
|
|
+_001678_hash cx18_copy_mdl_to_user 4 45549 _001678_hash NULL
|
|
+_001679_hash dai_list_read_file 3 25421 _001679_hash NULL
|
|
+_001680_hash dapm_bias_read_file 3 64715 _001680_hash NULL
|
|
+_001681_hash dapm_widget_power_read_file 3 59950 _001754_hash NULL nohasharray
|
|
+_001684_hash dbgfs_frame 3 45917 _001684_hash NULL
|
|
+_001685_hash dbgfs_state 3 38894 _001685_hash NULL
|
|
+_001686_hash debugfs_read 3 62535 _001686_hash NULL
|
|
+_001687_hash debug_output 3 18575 _001687_hash NULL
|
|
+_001688_hash debug_read 3 19322 _001688_hash NULL
|
|
+_001689_hash dfs_file_read 3 18116 _001689_hash NULL
|
|
+_001690_hash dma_memcpy_pg_to_iovec 6 1725 _001690_hash NULL
|
|
+_001691_hash dma_memcpy_to_iovec 5 12173 _001691_hash NULL
|
|
+_001692_hash dma_rx_errors_read 3 52045 _001692_hash NULL
|
|
+_001693_hash dma_rx_requested_read 3 65354 _001693_hash NULL
|
|
+_001694_hash dma_show_regs 3 35266 _001694_hash NULL
|
|
+_001695_hash dma_tx_errors_read 3 46060 _001695_hash NULL
|
|
+_001696_hash dma_tx_requested_read 3 16110 _001775_hash NULL nohasharray
|
|
+_001697_hash dm_exception_table_init 2 39645 _001697_hash &_001103_hash
|
|
+_001698_hash dn_recvmsg 4 17213 _001698_hash NULL
|
|
+_001699_hash dns_resolver_read 3 54658 _001699_hash NULL
|
|
+_001700_hash do_msgrcv 4 5590 _001700_hash NULL
|
|
+_001701_hash driver_state_read 3 17194 _001701_hash &_001394_hash
|
|
+_001702_hash dvb_demux_do_ioctl 3 34871 _001702_hash NULL
|
|
+_001703_hash dvb_dmxdev_buffer_read 4 20682 _001703_hash NULL
|
|
+_001704_hash dvb_dvr_do_ioctl 3 43355 _001704_hash NULL
|
|
+_001705_hash econet_recvmsg 4 40978 _001705_hash NULL
|
|
+_001706_hash event_calibration_read 3 21083 _001706_hash NULL
|
|
+_001707_hash event_heart_beat_read 3 48961 _001707_hash NULL
|
|
+_001708_hash event_oom_late_read 3 61175 _001708_hash &_001014_hash
|
|
+_001709_hash event_phy_transmit_error_read 3 10471 _001709_hash NULL
|
|
+_001710_hash event_rx_mem_empty_read 3 40363 _001710_hash NULL
|
|
+_001711_hash event_rx_mismatch_read 3 38518 _001711_hash NULL
|
|
+_001712_hash event_rx_pool_read 3 25792 _001712_hash NULL
|
|
+_001713_hash event_tx_stuck_read 3 19305 _001713_hash NULL
|
|
+_001714_hash excessive_retries_read 3 60425 _001714_hash NULL
|
|
+_001715_hash fallback_on_nodma_alloc 2 35332 _001715_hash NULL
|
|
+_001716_hash filter_read 3 61692 _001716_hash NULL
|
|
+_001717_hash format_devstat_counter 3 32550 _001717_hash NULL
|
|
+_001718_hash fragmentation_threshold_read 3 61718 _001718_hash NULL
|
|
+_001719_hash fuse_conn_limit_read 3 20084 _001719_hash NULL
|
|
+_001720_hash fuse_conn_waiting_read 3 49762 _001720_hash NULL
|
|
+_001721_hash generic_readlink 3 32654 _001721_hash NULL
|
|
+_001722_hash gpio_power_read 3 36059 _001722_hash NULL
|
|
+_001723_hash hash_recvmsg 4 50924 _001723_hash NULL
|
|
+_001724_hash ht40allow_map_read 3 55209 _002830_hash NULL nohasharray
|
|
+_001725_hash hwflags_read 3 52318 _001725_hash NULL
|
|
+_001726_hash hysdn_conf_read 3 42324 _003159_hash NULL nohasharray
|
|
+_001727_hash i2400m_rx_stats_read 3 57706 _001727_hash NULL
|
|
+_001728_hash i2400m_tx_stats_read 3 28527 _001728_hash NULL
|
|
+_001729_hash idmouse_read 3 63374 _001729_hash NULL
|
|
+_001730_hash ieee80211_if_read 3 6785 _001730_hash NULL
|
|
+_001731_hash ieee80211_rx_bss_info 3 61630 _001731_hash NULL
|
|
+_001732_hash ikconfig_read_current 3 1658 _001732_hash NULL
|
|
+_001733_hash il3945_sta_dbgfs_stats_table_read 3 48802 _001733_hash NULL
|
|
+_001734_hash il3945_ucode_general_stats_read 3 46111 _001734_hash NULL
|
|
+_001735_hash il3945_ucode_rx_stats_read 3 3048 _001735_hash NULL
|
|
+_001736_hash il3945_ucode_tx_stats_read 3 36016 _001736_hash NULL
|
|
+_001737_hash il4965_rs_sta_dbgfs_rate_scale_data_read 3 37792 _001737_hash NULL
|
|
+_001738_hash il4965_rs_sta_dbgfs_scale_table_read 3 38564 _001738_hash NULL
|
|
+_001739_hash il4965_rs_sta_dbgfs_stats_table_read 3 49206 _001739_hash NULL
|
|
+_001740_hash il4965_ucode_general_stats_read 3 56277 _001740_hash NULL
|
|
+_001741_hash il4965_ucode_rx_stats_read 3 61948 _001741_hash NULL
|
|
+_001742_hash il4965_ucode_tx_stats_read 3 12064 _001742_hash NULL
|
|
+_001743_hash il_dbgfs_chain_noise_read 3 38044 _001743_hash NULL
|
|
+_001744_hash il_dbgfs_channels_read 3 25005 _001744_hash NULL
|
|
+_001745_hash il_dbgfs_disable_ht40_read 3 42386 _001745_hash NULL
|
|
+_001746_hash il_dbgfs_fh_reg_read 3 40993 _001746_hash NULL
|
|
+_001747_hash il_dbgfs_force_reset_read 3 57517 _001747_hash NULL
|
|
+_001748_hash il_dbgfs_interrupt_read 3 3351 _001748_hash NULL
|
|
+_001749_hash il_dbgfs_missed_beacon_read 3 59956 _001749_hash NULL
|
|
+_001750_hash il_dbgfs_nvm_read 3 12288 _001750_hash NULL
|
|
+_001751_hash il_dbgfs_power_save_status_read 3 43165 _001751_hash NULL
|
|
+_001752_hash il_dbgfs_qos_read 3 33615 _001752_hash NULL
|
|
+_001753_hash il_dbgfs_rxon_filter_flags_read 3 19281 _001753_hash NULL
|
|
+_001754_hash il_dbgfs_rxon_flags_read 3 59950 _001754_hash &_001681_hash
|
|
+_001755_hash il_dbgfs_rx_queue_read 3 11221 _001755_hash NULL
|
|
+_001756_hash il_dbgfs_rx_stats_read 3 15243 _001756_hash NULL
|
|
+_001757_hash il_dbgfs_sensitivity_read 3 2370 _001757_hash NULL
|
|
+_001758_hash il_dbgfs_sram_read 3 62296 _001758_hash NULL
|
|
+_001759_hash il_dbgfs_stations_read 3 21532 _001759_hash NULL
|
|
+_001760_hash il_dbgfs_status_read 3 58388 _001760_hash NULL
|
|
+_001761_hash il_dbgfs_tx_queue_read 3 55668 _001761_hash NULL
|
|
+_001762_hash il_dbgfs_tx_stats_read 3 32913 _001762_hash NULL
|
|
+_001763_hash ima_show_htable_value 2 57136 _001763_hash NULL
|
|
+_001765_hash ipw_write 3 59807 _001765_hash NULL
|
|
+_001766_hash irda_recvmsg_stream 4 35280 _001766_hash NULL
|
|
+_001767_hash iscsi_tcp_conn_setup 2 16376 _001767_hash NULL
|
|
+_001768_hash isr_cmd_cmplt_read 3 53439 _001768_hash NULL
|
|
+_001769_hash isr_commands_read 3 41398 _001769_hash NULL
|
|
+_001770_hash isr_decrypt_done_read 3 49490 _001770_hash NULL
|
|
+_001771_hash isr_dma0_done_read 3 8574 _001771_hash NULL
|
|
+_001772_hash isr_dma1_done_read 3 48159 _001772_hash NULL
|
|
+_001773_hash isr_fiqs_read 3 34687 _001773_hash NULL
|
|
+_001774_hash isr_host_acknowledges_read 3 54136 _001774_hash NULL
|
|
+_001775_hash isr_hw_pm_mode_changes_read 3 16110 _001775_hash &_001696_hash
|
|
+_001776_hash isr_irqs_read 3 9181 _001776_hash NULL
|
|
+_001777_hash isr_low_rssi_read 3 64789 _001777_hash NULL
|
|
+_001778_hash isr_pci_pm_read 3 30271 _001778_hash NULL
|
|
+_001779_hash isr_rx_headers_read 3 38325 _001779_hash NULL
|
|
+_001780_hash isr_rx_mem_overflow_read 3 43025 _001780_hash NULL
|
|
+_001781_hash isr_rx_procs_read 3 31804 _001781_hash NULL
|
|
+_001782_hash isr_rx_rdys_read 3 35283 _001782_hash NULL
|
|
+_001783_hash isr_tx_exch_complete_read 3 16103 _001783_hash NULL
|
|
+_001784_hash isr_tx_procs_read 3 23084 _001784_hash NULL
|
|
+_001785_hash isr_wakeups_read 3 49607 _001785_hash NULL
|
|
+_001786_hash ivtv_read 3 57796 _001786_hash NULL
|
|
+_001787_hash iwl_dbgfs_bt_traffic_read 3 35534 _001787_hash NULL
|
|
+_001788_hash iwl_dbgfs_chain_noise_read 3 46355 _001788_hash NULL
|
|
+_001789_hash iwl_dbgfs_channels_read 3 6784 _001789_hash NULL
|
|
+_001790_hash iwl_dbgfs_current_sleep_command_read 3 2081 _001790_hash NULL
|
|
+_001791_hash iwl_dbgfs_disable_ht40_read 3 35761 _001791_hash NULL
|
|
+_001792_hash iwl_dbgfs_fh_reg_read 3 879 _001792_hash &_000393_hash
|
|
+_001793_hash iwl_dbgfs_force_reset_read 3 62628 _001793_hash NULL
|
|
+_001794_hash iwl_dbgfs_interrupt_read 3 23574 _001794_hash NULL
|
|
+_001795_hash iwl_dbgfs_log_event_read 3 2107 _001795_hash NULL
|
|
+_001796_hash iwl_dbgfs_missed_beacon_read 3 50584 _001796_hash NULL
|
|
+_001797_hash iwl_dbgfs_nvm_read 3 23845 _001797_hash NULL
|
|
+_001798_hash iwl_dbgfs_plcp_delta_read 3 55407 _001798_hash NULL
|
|
+_001799_hash iwl_dbgfs_power_save_status_read 3 54392 _001799_hash NULL
|
|
+_001800_hash iwl_dbgfs_protection_mode_read 3 13943 _001800_hash NULL
|
|
+_001801_hash iwl_dbgfs_qos_read 3 11753 _001801_hash NULL
|
|
+_001802_hash iwl_dbgfs_reply_tx_error_read 3 19205 _001802_hash NULL
|
|
+_001803_hash iwl_dbgfs_rx_handlers_read 3 18708 _001803_hash NULL
|
|
+_001804_hash iwl_dbgfs_rxon_filter_flags_read 3 28832 _001804_hash NULL
|
|
+_001805_hash iwl_dbgfs_rxon_flags_read 3 20795 _001805_hash NULL
|
|
+_001806_hash iwl_dbgfs_rx_queue_read 3 19943 _001806_hash NULL
|
|
+_001807_hash iwl_dbgfs_rx_statistics_read 3 62687 _001807_hash &_000425_hash
|
|
+_001808_hash iwl_dbgfs_sensitivity_read 3 63116 _003026_hash NULL nohasharray
|
|
+_001809_hash iwl_dbgfs_sleep_level_override_read 3 3038 _001809_hash NULL
|
|
+_001810_hash iwl_dbgfs_sram_read 3 44505 _001810_hash NULL
|
|
+_001811_hash iwl_dbgfs_stations_read 3 9309 _001811_hash NULL
|
|
+_001812_hash iwl_dbgfs_status_read 3 5171 _001812_hash NULL
|
|
+_001813_hash iwl_dbgfs_temperature_read 3 29224 _001813_hash NULL
|
|
+_001814_hash iwl_dbgfs_thermal_throttling_read 3 38779 _001814_hash NULL
|
|
+_001815_hash iwl_dbgfs_traffic_log_read 3 58870 _001815_hash NULL
|
|
+_001816_hash iwl_dbgfs_tx_queue_read 3 4635 _001816_hash NULL
|
|
+_001817_hash iwl_dbgfs_tx_statistics_read 3 314 _001817_hash NULL
|
|
+_001818_hash iwl_dbgfs_ucode_bt_stats_read 3 42820 _001818_hash NULL
|
|
+_001819_hash iwl_dbgfs_ucode_general_stats_read 3 49199 _001819_hash NULL
|
|
+_001820_hash iwl_dbgfs_ucode_rx_stats_read 3 58023 _001820_hash NULL
|
|
+_001821_hash iwl_dbgfs_ucode_tracing_read 3 47983 _001821_hash &_000349_hash
|
|
+_001822_hash iwl_dbgfs_ucode_tx_stats_read 3 31611 _001822_hash NULL
|
|
+_001823_hash iwl_dbgfs_wowlan_sram_read 3 540 _001823_hash NULL
|
|
+_001824_hash iwm_if_alloc 1 17027 _001824_hash &_001314_hash
|
|
+_001825_hash kernel_readv 3 35617 _001825_hash NULL
|
|
+_001826_hash key_algorithm_read 3 57946 _001826_hash NULL
|
|
+_001827_hash key_icverrors_read 3 20895 _001827_hash NULL
|
|
+_001828_hash key_key_read 3 3241 _001828_hash NULL
|
|
+_001829_hash key_replays_read 3 62746 _001829_hash NULL
|
|
+_001830_hash key_rx_spec_read 3 12736 _001830_hash NULL
|
|
+_001831_hash key_tx_spec_read 3 4862 _001831_hash NULL
|
|
+_001832_hash __kfifo_to_user 3 36555 _002199_hash NULL nohasharray
|
|
+_001833_hash __kfifo_to_user_r 3 39123 _001833_hash NULL
|
|
+_001834_hash kmem_zalloc_greedy 2-3 65268 _001834_hash NULL
|
|
+_001836_hash l2cap_chan_send 3 49995 _001836_hash NULL
|
|
+_001837_hash l2cap_sar_segment_sdu 3 27701 _001837_hash NULL
|
|
+_001838_hash lbs_debugfs_read 3 30721 _001838_hash NULL
|
|
+_001839_hash lbs_dev_info 3 51023 _001839_hash NULL
|
|
+_001840_hash lbs_host_sleep_read 3 31013 _001840_hash NULL
|
|
+_001841_hash lbs_rdbbp_read 3 45805 _001841_hash NULL
|
|
+_001842_hash lbs_rdmac_read 3 418 _001842_hash NULL
|
|
+_001843_hash lbs_rdrf_read 3 41431 _001843_hash NULL
|
|
+_001844_hash lbs_sleepparams_read 3 10840 _001844_hash NULL
|
|
+_001845_hash lbs_threshold_read 5 21046 _001845_hash NULL
|
|
+_001846_hash libfc_vport_create 2 4415 _001846_hash NULL
|
|
+_001847_hash lkdtm_debugfs_read 3 45752 _001847_hash NULL
|
|
+_001848_hash llcp_sock_recvmsg 4 13556 _001848_hash NULL
|
|
+_001849_hash long_retry_limit_read 3 59766 _001849_hash NULL
|
|
+_001850_hash lpfc_debugfs_dif_err_read 3 36303 _001850_hash NULL
|
|
+_001851_hash lpfc_debugfs_read 3 16566 _001851_hash NULL
|
|
+_001852_hash lpfc_idiag_baracc_read 3 58466 _002447_hash NULL nohasharray
|
|
+_001853_hash lpfc_idiag_ctlacc_read 3 33943 _001853_hash NULL
|
|
+_001854_hash lpfc_idiag_drbacc_read 3 15948 _001854_hash NULL
|
|
+_001855_hash lpfc_idiag_extacc_read 3 48301 _001855_hash NULL
|
|
+_001856_hash lpfc_idiag_mbxacc_read 3 28061 _001856_hash NULL
|
|
+_001857_hash lpfc_idiag_pcicfg_read 3 50334 _001857_hash NULL
|
|
+_001858_hash lpfc_idiag_queacc_read 3 13950 _001858_hash NULL
|
|
+_001859_hash lpfc_idiag_queinfo_read 3 55662 _001859_hash NULL
|
|
+_001860_hash mac80211_format_buffer 2 41010 _001860_hash NULL
|
|
+_001861_hash macvtap_put_user 4 55609 _001861_hash NULL
|
|
+_001862_hash macvtap_sendmsg 4 30629 _001862_hash NULL
|
|
+_001863_hash mic_calc_failure_read 3 59700 _001863_hash NULL
|
|
+_001864_hash mic_rx_pkts_read 3 27972 _001864_hash NULL
|
|
+_001865_hash minstrel_stats_read 3 17290 _001865_hash NULL
|
|
+_001866_hash mmc_ext_csd_read 3 13205 _001866_hash NULL
|
|
+_001867_hash mon_bin_read 3 6841 _001867_hash NULL
|
|
+_001868_hash mon_stat_read 3 25238 _001868_hash NULL
|
|
+_001870_hash mqueue_read_file 3 6228 _001870_hash NULL
|
|
+_001871_hash mwifiex_debug_read 3 53074 _001871_hash NULL
|
|
+_001872_hash mwifiex_getlog_read 3 54269 _001872_hash NULL
|
|
+_001873_hash mwifiex_info_read 3 53447 _001873_hash NULL
|
|
+_001874_hash mwifiex_rdeeprom_read 3 51429 _001874_hash NULL
|
|
+_001875_hash mwifiex_regrdwr_read 3 34472 _001875_hash NULL
|
|
+_001876_hash nfsd_vfs_read 6 62605 _003003_hash NULL nohasharray
|
|
+_001877_hash nfsd_vfs_write 6 54577 _001877_hash NULL
|
|
+_001878_hash nfs_idmap_lookup_id 2 10660 _001878_hash NULL
|
|
+_001879_hash o2hb_debug_read 3 37851 _001879_hash NULL
|
|
+_001880_hash o2net_debug_read 3 52105 _001880_hash NULL
|
|
+_001881_hash ocfs2_control_read 3 56405 _001881_hash NULL
|
|
+_001882_hash ocfs2_debug_read 3 14507 _001882_hash NULL
|
|
+_001883_hash ocfs2_readlink 3 50656 _001883_hash NULL
|
|
+_001884_hash oom_adjust_read 3 25127 _001884_hash NULL
|
|
+_001885_hash oom_score_adj_read 3 39921 _002116_hash NULL nohasharray
|
|
+_001886_hash oprofilefs_str_to_user 3 42182 _001886_hash NULL
|
|
+_001887_hash oprofilefs_ulong_to_user 3 11582 _001887_hash NULL
|
|
+_001888_hash _osd_req_list_objects 6 4204 _001888_hash NULL
|
|
+_001889_hash osd_req_read_kern 5 59990 _001889_hash NULL
|
|
+_001890_hash osd_req_write_kern 5 53486 _001890_hash NULL
|
|
+_001891_hash p54_init_common 1 23850 _001891_hash NULL
|
|
+_001892_hash packet_sendmsg 4 24954 _001892_hash NULL
|
|
+_001893_hash page_readlink 3 23346 _001893_hash NULL
|
|
+_001894_hash pcf50633_write_block 3 2124 _001894_hash NULL
|
|
+_001895_hash platform_list_read_file 3 34734 _001895_hash NULL
|
|
+_001896_hash pm860x_bulk_write 3 43875 _001896_hash NULL
|
|
+_001897_hash pm_qos_power_read 3 55891 _001897_hash NULL
|
|
+_001898_hash pms_read 3 53873 _001898_hash NULL
|
|
+_001899_hash port_show_regs 3 5904 _001899_hash NULL
|
|
+_001900_hash proc_coredump_filter_read 3 39153 _001900_hash NULL
|
|
+_001901_hash proc_fdinfo_read 3 62043 _001901_hash NULL
|
|
+_001902_hash proc_info_read 3 63344 _001902_hash NULL
|
|
+_001903_hash proc_loginuid_read 3 15631 _001903_hash NULL
|
|
+_001904_hash proc_pid_attr_read 3 10173 _001904_hash NULL
|
|
+_001905_hash proc_pid_readlink 3 52186 _001905_hash NULL
|
|
+_001906_hash proc_read 3 43614 _001906_hash NULL
|
|
+_001907_hash proc_self_readlink 3 38094 _001907_hash NULL
|
|
+_001908_hash proc_sessionid_read 3 6911 _002038_hash NULL nohasharray
|
|
+_001909_hash provide_user_output 3 41105 _001909_hash NULL
|
|
+_001910_hash ps_pspoll_max_apturn_read 3 6699 _001910_hash NULL
|
|
+_001911_hash ps_pspoll_timeouts_read 3 11776 _001911_hash NULL
|
|
+_001912_hash ps_pspoll_utilization_read 3 5361 _001912_hash NULL
|
|
+_001913_hash pstore_file_read 3 57288 _001913_hash NULL
|
|
+_001914_hash ps_upsd_max_apturn_read 3 19918 _001914_hash NULL
|
|
+_001915_hash ps_upsd_max_sptime_read 3 63362 _001915_hash NULL
|
|
+_001916_hash ps_upsd_timeouts_read 3 28924 _001916_hash NULL
|
|
+_001917_hash ps_upsd_utilization_read 3 51669 _001917_hash NULL
|
|
+_001918_hash pvr2_v4l2_read 3 18006 _001918_hash NULL
|
|
+_001919_hash pwr_disable_ps_read 3 13176 _001919_hash NULL
|
|
+_001920_hash pwr_elp_enter_read 3 5324 _001920_hash NULL
|
|
+_001921_hash pwr_enable_ps_read 3 17686 _001921_hash NULL
|
|
+_001922_hash pwr_fix_tsf_ps_read 3 26627 _001922_hash NULL
|
|
+_001923_hash pwr_missing_bcns_read 3 25824 _001923_hash NULL
|
|
+_001924_hash pwr_power_save_off_read 3 18355 _001924_hash NULL
|
|
+_001925_hash pwr_ps_enter_read 3 26935 _001925_hash &_000501_hash
|
|
+_001926_hash pwr_rcvd_awake_beacons_read 3 50505 _001926_hash NULL
|
|
+_001927_hash pwr_rcvd_beacons_read 3 52836 _001927_hash NULL
|
|
+_001928_hash pwr_tx_without_ps_read 3 48423 _001928_hash NULL
|
|
+_001929_hash pwr_tx_with_ps_read 3 60851 _001929_hash NULL
|
|
+_001930_hash pwr_wake_on_host_read 3 26321 _001930_hash NULL
|
|
+_001931_hash pwr_wake_on_timer_exp_read 3 22640 _001931_hash NULL
|
|
+_001932_hash queues_read 3 24877 _001932_hash NULL
|
|
+_001933_hash raw_recvmsg 4 17277 _001933_hash NULL
|
|
+_001934_hash rcname_read 3 25919 _001934_hash NULL
|
|
+_001935_hash read_4k_modal_eeprom 3 30212 _001935_hash NULL
|
|
+_001936_hash read_9287_modal_eeprom 3 59327 _001936_hash NULL
|
|
+_001937_hash reada_find_extent 2 63486 _001937_hash NULL
|
|
+_001938_hash read_def_modal_eeprom 3 14041 _001938_hash NULL
|
|
+_001939_hash read_enabled_file_bool 3 37744 _001939_hash NULL
|
|
+_001940_hash read_file_ani 3 23161 _001940_hash NULL
|
|
+_001941_hash read_file_antenna 3 13574 _001941_hash NULL
|
|
+_001942_hash read_file_base_eeprom 3 42168 _001942_hash NULL
|
|
+_001943_hash read_file_beacon 3 32595 _001943_hash NULL
|
|
+_001944_hash read_file_blob 3 57406 _001944_hash NULL
|
|
+_001945_hash read_file_bool 3 4180 _001945_hash NULL
|
|
+_001946_hash read_file_credit_dist_stats 3 54367 _001946_hash NULL
|
|
+_001947_hash read_file_debug 3 58256 _001947_hash NULL
|
|
+_001948_hash read_file_disable_ani 3 6536 _001948_hash NULL
|
|
+_001949_hash read_file_dma 3 9530 _001949_hash NULL
|
|
+_001950_hash read_file_dump_nfcal 3 18766 _001950_hash NULL
|
|
+_001951_hash read_file_frameerrors 3 64001 _001951_hash NULL
|
|
+_001952_hash read_file_interrupt 3 61742 _001959_hash NULL nohasharray
|
|
+_001953_hash read_file_misc 3 9948 _001953_hash NULL
|
|
+_001954_hash read_file_modal_eeprom 3 39909 _001954_hash NULL
|
|
+_001955_hash read_file_queue 3 40895 _001955_hash NULL
|
|
+_001956_hash read_file_rcstat 3 22854 _001956_hash NULL
|
|
+_001957_hash read_file_recv 3 48232 _001957_hash NULL
|
|
+_001958_hash read_file_regidx 3 33370 _001958_hash NULL
|
|
+_001959_hash read_file_regval 3 61742 _001959_hash &_001952_hash
|
|
+_001960_hash read_file_reset 3 52310 _001960_hash NULL
|
|
+_001961_hash read_file_rx_chainmask 3 41605 _001961_hash NULL
|
|
+_001962_hash read_file_slot 3 50111 _001962_hash NULL
|
|
+_001963_hash read_file_stations 3 35795 _001963_hash NULL
|
|
+_001964_hash read_file_tgt_int_stats 3 20697 _001964_hash NULL
|
|
+_001965_hash read_file_tgt_rx_stats 3 33944 _001965_hash NULL
|
|
+_001966_hash read_file_tgt_stats 3 8959 _001966_hash NULL
|
|
+_001967_hash read_file_tgt_tx_stats 3 51847 _001967_hash NULL
|
|
+_001968_hash read_file_tx_chainmask 3 3829 _001968_hash NULL
|
|
+_001969_hash read_file_war_stats 3 292 _001969_hash NULL
|
|
+_001970_hash read_file_xmit 3 21487 _001970_hash NULL
|
|
+_001971_hash read_from_oldmem 2 3337 _001971_hash NULL
|
|
+_001972_hash read_oldmem 3 55658 _001972_hash NULL
|
|
+_001973_hash regmap_name_read_file 3 39379 _001973_hash NULL
|
|
+_001974_hash repair_io_failure 4 4815 _001974_hash NULL
|
|
+_001975_hash request_key_and_link 4 42693 _001975_hash NULL
|
|
+_001976_hash res_counter_read 4 33499 _001976_hash NULL
|
|
+_001977_hash retry_count_read 3 52129 _001977_hash NULL
|
|
+_001978_hash rs_sta_dbgfs_rate_scale_data_read 3 47165 _001978_hash NULL
|
|
+_001979_hash rs_sta_dbgfs_scale_table_read 3 40262 _001979_hash NULL
|
|
+_001980_hash rs_sta_dbgfs_stats_table_read 3 56573 _001980_hash NULL
|
|
+_001981_hash rts_threshold_read 3 44384 _001981_hash NULL
|
|
+_001982_hash rx_dropped_read 3 44799 _001982_hash NULL
|
|
+_001983_hash rx_fcs_err_read 3 62844 _001983_hash NULL
|
|
+_001984_hash rx_hdr_overflow_read 3 64407 _001984_hash NULL
|
|
+_001985_hash rx_hw_stuck_read 3 57179 _001985_hash NULL
|
|
+_001986_hash rx_out_of_mem_read 3 10157 _001986_hash NULL
|
|
+_001987_hash rx_path_reset_read 3 23801 _001987_hash NULL
|
|
+_001988_hash rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read 3 55106 _001988_hash NULL
|
|
+_001989_hash rxpipe_descr_host_int_trig_rx_data_read 3 22001 _003089_hash NULL nohasharray
|
|
+_001990_hash rxpipe_missed_beacon_host_int_trig_rx_data_read 3 63405 _001990_hash NULL
|
|
+_001991_hash rxpipe_rx_prep_beacon_drop_read 3 2403 _001991_hash NULL
|
|
+_001992_hash rxpipe_tx_xfr_host_int_trig_rx_data_read 3 35538 _001992_hash NULL
|
|
+_001993_hash rx_reset_counter_read 3 58001 _001993_hash NULL
|
|
+_001994_hash rx_xfr_hint_trig_read 3 40283 _001994_hash NULL
|
|
+_001995_hash s5m_bulk_write 3 4833 _001995_hash NULL
|
|
+_001996_hash scrub_setup_recheck_block 3-4 56245 _001996_hash NULL
|
|
+_001998_hash scsi_adjust_queue_depth 3 12802 _001998_hash NULL
|
|
+_001999_hash selinux_inode_notifysecctx 3 36896 _001999_hash NULL
|
|
+_002000_hash sel_read_avc_cache_threshold 3 33942 _002000_hash NULL
|
|
+_002001_hash sel_read_avc_hash_stats 3 1984 _002001_hash NULL
|
|
+_002002_hash sel_read_bool 3 24236 _002002_hash NULL
|
|
+_002003_hash sel_read_checkreqprot 3 33068 _002003_hash NULL
|
|
+_002004_hash sel_read_class 3 12669 _002541_hash NULL nohasharray
|
|
+_002005_hash sel_read_enforce 3 2828 _002005_hash NULL
|
|
+_002006_hash sel_read_handle_status 3 56139 _002006_hash NULL
|
|
+_002007_hash sel_read_handle_unknown 3 57933 _002007_hash NULL
|
|
+_002008_hash sel_read_initcon 3 32362 _002008_hash NULL
|
|
+_002009_hash sel_read_mls 3 25369 _002009_hash NULL
|
|
+_002010_hash sel_read_perm 3 42302 _002010_hash NULL
|
|
+_002011_hash sel_read_policy 3 55947 _002011_hash NULL
|
|
+_002012_hash sel_read_policycap 3 28544 _002012_hash NULL
|
|
+_002013_hash sel_read_policyvers 3 55 _002013_hash NULL
|
|
+_002014_hash send_msg 4 37323 _002014_hash NULL
|
|
+_002015_hash send_packet 4 52960 _002015_hash NULL
|
|
+_002016_hash short_retry_limit_read 3 4687 _002016_hash NULL
|
|
+_002017_hash simple_attr_read 3 24738 _002017_hash NULL
|
|
+_002018_hash simple_transaction_read 3 17076 _002018_hash NULL
|
|
+_002019_hash skb_copy_datagram_const_iovec 2-5-4 48102 _002019_hash NULL
|
|
+_002022_hash skb_copy_datagram_iovec 2-4 5806 _002022_hash NULL
|
|
+_002024_hash smk_read_ambient 3 61220 _002024_hash NULL
|
|
+_002025_hash smk_read_direct 3 15803 _002025_hash NULL
|
|
+_002026_hash smk_read_doi 3 30813 _002026_hash NULL
|
|
+_002027_hash smk_read_logging 3 37804 _002027_hash NULL
|
|
+_002028_hash smk_read_onlycap 3 3855 _002028_hash NULL
|
|
+_002029_hash snapshot_read 3 22601 _002029_hash NULL
|
|
+_002030_hash snd_cs4281_BA0_read 5 6847 _002030_hash NULL
|
|
+_002031_hash snd_cs4281_BA1_read 5 20323 _002031_hash NULL
|
|
+_002032_hash snd_cs46xx_io_read 5 45734 _002032_hash NULL
|
|
+_002033_hash snd_gus_dram_read 4 56686 _002033_hash NULL
|
|
+_002034_hash snd_pcm_oss_read 3 28317 _002034_hash NULL
|
|
+_002035_hash snd_rme32_capture_copy 5 39653 _002035_hash NULL
|
|
+_002036_hash snd_rme96_capture_copy 5 58484 _002036_hash NULL
|
|
+_002037_hash snd_soc_hw_bulk_write_raw 4 14245 _002037_hash NULL
|
|
+_002038_hash spi_show_regs 3 6911 _002038_hash &_001908_hash
|
|
+_002039_hash sta_agg_status_read 3 14058 _002039_hash NULL
|
|
+_002040_hash sta_connected_time_read 3 17435 _002040_hash NULL
|
|
+_002041_hash sta_flags_read 3 56710 _002041_hash NULL
|
|
+_002042_hash sta_ht_capa_read 3 10366 _002042_hash NULL
|
|
+_002043_hash sta_last_seq_ctrl_read 3 19106 _002043_hash NULL
|
|
+_002044_hash sta_num_ps_buf_frames_read 3 1488 _002044_hash NULL
|
|
+_002045_hash st_read 3 51251 _002045_hash NULL
|
|
+_002046_hash supply_map_read_file 3 10608 _002046_hash NULL
|
|
+_002047_hash sysfs_read_file 3 42113 _002047_hash NULL
|
|
+_002048_hash sys_lgetxattr 4 45531 _002048_hash NULL
|
|
+_002049_hash sys_preadv 3 17100 _002049_hash NULL
|
|
+_002050_hash sys_pwritev 3 41722 _002050_hash NULL
|
|
+_002051_hash sys_readv 3 50664 _002051_hash NULL
|
|
+_002052_hash sys_rt_sigpending 2 24961 _002052_hash NULL
|
|
+_002053_hash sys_writev 3 28384 _002053_hash NULL
|
|
+_002054_hash test_iso_queue 5 62534 _002054_hash NULL
|
|
+_002055_hash ts_read 3 44687 _002055_hash NULL
|
|
+_002056_hash TSS_authhmac 3 12839 _002056_hash NULL
|
|
+_002057_hash TSS_checkhmac1 5 31429 _002057_hash NULL
|
|
+_002058_hash TSS_checkhmac2 5-7 40520 _002058_hash NULL
|
|
+_002060_hash tt3650_ci_msg_locked 4 8013 _002060_hash NULL
|
|
+_002061_hash tun_sendmsg 4 10337 _002061_hash NULL
|
|
+_002062_hash tx_internal_desc_overflow_read 3 47300 _002062_hash NULL
|
|
+_002063_hash tx_queue_len_read 3 1463 _002063_hash NULL
|
|
+_002064_hash tx_queue_status_read 3 44978 _002064_hash NULL
|
|
+_002065_hash ubi_io_write_data 4-5 40305 _002065_hash NULL
|
|
+_002067_hash uhci_debug_read 3 5911 _002067_hash NULL
|
|
+_002068_hash unix_stream_recvmsg 4 35210 _002068_hash NULL
|
|
+_002069_hash uvc_debugfs_stats_read 3 56651 _002069_hash NULL
|
|
+_002070_hash vhost_add_used_and_signal_n 4 8038 _002070_hash NULL
|
|
+_002071_hash vifs_state_read 3 33762 _002071_hash NULL
|
|
+_002072_hash vmbus_open 2-3 12154 _002072_hash NULL
|
|
+_002074_hash waiters_read 3 40902 _002074_hash NULL
|
|
+_002075_hash wep_addr_key_count_read 3 20174 _002075_hash NULL
|
|
+_002076_hash wep_decrypt_fail_read 3 58567 _002076_hash NULL
|
|
+_002077_hash wep_default_key_count_read 3 43035 _002077_hash NULL
|
|
+_002078_hash wep_interrupt_read 3 41492 _002078_hash NULL
|
|
+_002079_hash wep_key_not_found_read 3 13377 _002079_hash &_000915_hash
|
|
+_002080_hash wep_packets_read 3 18751 _002080_hash NULL
|
|
+_002081_hash wl1271_format_buffer 2 20834 _002081_hash NULL
|
|
+_002082_hash wm8994_bulk_write 3 13615 _002082_hash NULL
|
|
+_002083_hash wusb_prf_256 7 29203 _002083_hash NULL
|
|
+_002084_hash wusb_prf_64 7 51065 _002084_hash NULL
|
|
+_002085_hash xfs_buf_read_uncached 4 27519 _002085_hash NULL
|
|
+_002086_hash xfs_iext_add 3 41422 _002086_hash NULL
|
|
+_002087_hash xfs_iext_remove_direct 3 40744 _002087_hash NULL
|
|
+_002088_hash xfs_trans_get_efd 3 51148 _002088_hash NULL
|
|
+_002089_hash xfs_trans_get_efi 2 7898 _002089_hash NULL
|
|
+_002090_hash xlog_get_bp 2 23229 _002090_hash NULL
|
|
+_002091_hash xz_dec_init 2 29029 _002091_hash NULL
|
|
+_002092_hash aac_change_queue_depth 2 825 _002092_hash NULL
|
|
+_002093_hash agp_allocate_memory_wrap 1 16576 _002093_hash NULL
|
|
+_002094_hash arcmsr_adjust_disk_queue_depth 2 16756 _002094_hash NULL
|
|
+_002095_hash atalk_recvmsg 4 22053 _002095_hash NULL
|
|
+_002097_hash atomic_read_file 3 16227 _002097_hash NULL
|
|
+_002098_hash ax25_recvmsg 4 64441 _002098_hash NULL
|
|
+_002099_hash beacon_interval_read 3 7091 _002099_hash NULL
|
|
+_002100_hash btrfs_init_new_buffer 4 55761 _002100_hash NULL
|
|
+_002101_hash btrfs_mksubvol 3 39479 _002101_hash NULL
|
|
+_002102_hash bt_sock_recvmsg 4 12316 _002102_hash NULL
|
|
+_002103_hash bt_sock_stream_recvmsg 4 52518 _002103_hash NULL
|
|
+_002104_hash caif_seqpkt_recvmsg 4 32241 _002104_hash NULL
|
|
+_002105_hash cpu_type_read 3 36540 _002105_hash NULL
|
|
+_002106_hash cx18_read 3 23699 _002106_hash NULL
|
|
+_002107_hash dccp_recvmsg 4 16056 _002107_hash NULL
|
|
+_002108_hash depth_read 3 31112 _002108_hash NULL
|
|
+_002109_hash dfs_global_file_read 3 7787 _002109_hash NULL
|
|
+_002110_hash dgram_recvmsg 4 23104 _002110_hash NULL
|
|
+_002111_hash dma_skb_copy_datagram_iovec 3-5 21516 _002111_hash NULL
|
|
+_002113_hash dtim_interval_read 3 654 _002113_hash NULL
|
|
+_002114_hash dynamic_ps_timeout_read 3 10110 _002114_hash NULL
|
|
+_002115_hash enable_read 3 2117 _002115_hash NULL
|
|
+_002116_hash exofs_read_kern 6 39921 _002116_hash &_001885_hash
|
|
+_002117_hash fc_change_queue_depth 2 36841 _002117_hash NULL
|
|
+_002118_hash forced_ps_read 3 31685 _002118_hash NULL
|
|
+_002119_hash frequency_read 3 64031 _003106_hash NULL nohasharray
|
|
+_002120_hash get_alua_req 3 4166 _002120_hash NULL
|
|
+_002121_hash get_rdac_req 3 45882 _002121_hash NULL
|
|
+_002122_hash hci_sock_recvmsg 4 7072 _002122_hash NULL
|
|
+_002123_hash hpsa_change_queue_depth 2 15449 _002123_hash NULL
|
|
+_002124_hash hptiop_adjust_disk_queue_depth 2 20122 _002124_hash NULL
|
|
+_002125_hash ide_queue_pc_tail 5 11673 _002125_hash NULL
|
|
+_002126_hash ide_raw_taskfile 4 42355 _002126_hash NULL
|
|
+_002127_hash idetape_queue_rw_tail 3 29562 _002127_hash NULL
|
|
+_002128_hash ieee80211_if_read_aid 3 9705 _002128_hash NULL
|
|
+_002129_hash ieee80211_if_read_auto_open_plinks 3 38268 _002129_hash NULL
|
|
+_002130_hash ieee80211_if_read_ave_beacon 3 64924 _002130_hash NULL
|
|
+_002131_hash ieee80211_if_read_bssid 3 35161 _002131_hash NULL
|
|
+_002132_hash ieee80211_if_read_channel_type 3 23884 _002132_hash NULL
|
|
+_002133_hash ieee80211_if_read_dot11MeshConfirmTimeout 3 60670 _002133_hash NULL
|
|
+_002134_hash ieee80211_if_read_dot11MeshGateAnnouncementProtocol 3 14486 _002134_hash NULL
|
|
+_002135_hash ieee80211_if_read_dot11MeshHoldingTimeout 3 47356 _002135_hash NULL
|
|
+_002136_hash ieee80211_if_read_dot11MeshHWMPactivePathTimeout 3 7368 _002136_hash NULL
|
|
+_002137_hash ieee80211_if_read_dot11MeshHWMPmaxPREQretries 3 59829 _002137_hash NULL
|
|
+_002138_hash ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime 3 1589 _002138_hash NULL
|
|
+_002139_hash ieee80211_if_read_dot11MeshHWMPperrMinInterval 3 17346 _002139_hash NULL
|
|
+_002140_hash ieee80211_if_read_dot11MeshHWMPpreqMinInterval 3 24208 _002140_hash NULL
|
|
+_002141_hash ieee80211_if_read_dot11MeshHWMPRannInterval 3 2249 _002141_hash NULL
|
|
+_002142_hash ieee80211_if_read_dot11MeshHWMPRootMode 3 51441 _002142_hash NULL
|
|
+_002143_hash ieee80211_if_read_dot11MeshMaxPeerLinks 3 23878 _002143_hash NULL
|
|
+_002144_hash ieee80211_if_read_dot11MeshMaxRetries 3 12756 _002144_hash NULL
|
|
+_002145_hash ieee80211_if_read_dot11MeshRetryTimeout 3 52168 _002145_hash NULL
|
|
+_002146_hash ieee80211_if_read_dot11MeshTTL 3 58307 _002146_hash NULL
|
|
+_002147_hash ieee80211_if_read_dropped_frames_congestion 3 32603 _002147_hash NULL
|
|
+_002148_hash ieee80211_if_read_dropped_frames_no_route 3 33383 _002148_hash NULL
|
|
+_002149_hash ieee80211_if_read_dropped_frames_ttl 3 44500 _002149_hash NULL
|
|
+_002150_hash ieee80211_if_read_drop_unencrypted 3 37053 _002150_hash NULL
|
|
+_002151_hash ieee80211_if_read_dtim_count 3 38419 _002151_hash NULL
|
|
+_002152_hash ieee80211_if_read_element_ttl 3 18869 _002152_hash NULL
|
|
+_002153_hash ieee80211_if_read_estab_plinks 3 32533 _002153_hash NULL
|
|
+_002154_hash ieee80211_if_read_flags 3 57470 _002389_hash NULL nohasharray
|
|
+_002155_hash ieee80211_if_read_fwded_frames 3 36520 _002155_hash NULL
|
|
+_002156_hash ieee80211_if_read_fwded_mcast 3 39571 _002156_hash &_000151_hash
|
|
+_002157_hash ieee80211_if_read_fwded_unicast 3 59740 _002859_hash NULL nohasharray
|
|
+_002158_hash ieee80211_if_read_last_beacon 3 31257 _002158_hash NULL
|
|
+_002159_hash ieee80211_if_read_min_discovery_timeout 3 13946 _002159_hash NULL
|
|
+_002160_hash ieee80211_if_read_num_buffered_multicast 3 12716 _002160_hash NULL
|
|
+_002161_hash ieee80211_if_read_num_sta_authorized 3 56177 _002161_hash NULL
|
|
+_002162_hash ieee80211_if_read_num_sta_ps 3 34722 _002162_hash NULL
|
|
+_002163_hash ieee80211_if_read_path_refresh_time 3 25545 _002163_hash NULL
|
|
+_002164_hash ieee80211_if_read_peer 3 45233 _002164_hash NULL
|
|
+_002165_hash ieee80211_if_read_rc_rateidx_mask_2ghz 3 61570 _002165_hash NULL
|
|
+_002166_hash ieee80211_if_read_rc_rateidx_mask_5ghz 3 27183 _002166_hash NULL
|
|
+_002167_hash ieee80211_if_read_rc_rateidx_mcs_mask_2ghz 3 37675 _002167_hash NULL
|
|
+_002168_hash ieee80211_if_read_rc_rateidx_mcs_mask_5ghz 3 44423 _002168_hash NULL
|
|
+_002169_hash ieee80211_if_read_rssi_threshold 3 49260 _002169_hash NULL
|
|
+_002170_hash ieee80211_if_read_smps 3 27416 _002170_hash NULL
|
|
+_002171_hash ieee80211_if_read_state 3 9813 _002280_hash NULL nohasharray
|
|
+_002172_hash ieee80211_if_read_tkip_mic_test 3 19565 _002172_hash NULL
|
|
+_002173_hash ieee80211_if_read_tsf 3 16420 _002173_hash NULL
|
|
+_002174_hash ieee80211_if_read_uapsd_max_sp_len 3 15067 _002174_hash NULL
|
|
+_002175_hash ieee80211_if_read_uapsd_queues 3 55150 _002175_hash NULL
|
|
+_002176_hash ieee80211_rx_mgmt_beacon 3 24430 _002176_hash NULL
|
|
+_002177_hash ieee80211_rx_mgmt_probe_resp 3 6918 _002177_hash NULL
|
|
+_002178_hash ima_show_htable_violations 3 10619 _002178_hash NULL
|
|
+_002179_hash ima_show_measurements_count 3 23536 _002179_hash NULL
|
|
+_002180_hash insert_one_name 7 61668 _002180_hash NULL
|
|
+_002181_hash ipr_change_queue_depth 2 6431 _002181_hash NULL
|
|
+_002182_hash ip_recv_error 3 23109 _002182_hash NULL
|
|
+_002183_hash ipv6_recv_error 3 56347 _002183_hash NULL
|
|
+_002184_hash ipv6_recv_rxpmtu 3 7142 _002184_hash NULL
|
|
+_002185_hash ipx_recvmsg 4 44366 _002185_hash NULL
|
|
+_002186_hash irda_recvmsg_dgram 4 32631 _002186_hash NULL
|
|
+_002187_hash iscsi_change_queue_depth 2 23416 _002187_hash NULL
|
|
+_002188_hash ivtv_read_pos 3 34400 _002188_hash &_000303_hash
|
|
+_002189_hash key_conf_hw_key_idx_read 3 25003 _002189_hash NULL
|
|
+_002190_hash key_conf_keyidx_read 3 42443 _002190_hash NULL
|
|
+_002191_hash key_conf_keylen_read 3 49758 _002191_hash NULL
|
|
+_002192_hash key_flags_read 3 25931 _002192_hash NULL
|
|
+_002193_hash key_ifindex_read 3 31411 _002193_hash NULL
|
|
+_002194_hash key_tx_rx_count_read 3 44742 _002194_hash NULL
|
|
+_002195_hash l2cap_sock_sendmsg 4 63427 _002195_hash NULL
|
|
+_002196_hash l2tp_ip_recvmsg 4 22681 _002196_hash NULL
|
|
+_002197_hash llc_ui_recvmsg 4 3826 _002197_hash NULL
|
|
+_002198_hash lpfc_change_queue_depth 2 25905 _002198_hash NULL
|
|
+_002199_hash macvtap_do_read 4 36555 _002199_hash &_001832_hash
|
|
+_002200_hash megaraid_change_queue_depth 2 64815 _002200_hash NULL
|
|
+_002201_hash megasas_change_queue_depth 2 32747 _002201_hash NULL
|
|
+_002202_hash mptscsih_change_queue_depth 2 26036 _002202_hash NULL
|
|
+_002203_hash NCR_700_change_queue_depth 2 31742 _002203_hash NULL
|
|
+_002204_hash netlink_recvmsg 4 61600 _002204_hash NULL
|
|
+_002205_hash nfsctl_transaction_read 3 48250 _002205_hash NULL
|
|
+_002206_hash nfs_map_group_to_gid 3 15892 _002206_hash NULL
|
|
+_002207_hash nfs_map_name_to_uid 3 51132 _002207_hash NULL
|
|
+_002208_hash nr_recvmsg 4 12649 _002208_hash NULL
|
|
+_002209_hash osd_req_list_collection_objects 5 36664 _002209_hash NULL
|
|
+_002210_hash osd_req_list_partition_objects 5 56464 _002210_hash NULL
|
|
+_002212_hash packet_recv_error 3 16669 _002212_hash NULL
|
|
+_002213_hash packet_recvmsg 4 47700 _002213_hash NULL
|
|
+_002214_hash pep_recvmsg 4 19402 _002214_hash NULL
|
|
+_002215_hash pfkey_recvmsg 4 53604 _002215_hash NULL
|
|
+_002216_hash ping_recvmsg 4 25597 _002216_hash NULL
|
|
+_002217_hash pmcraid_change_queue_depth 2 9116 _002217_hash NULL
|
|
+_002218_hash pn_recvmsg 4 30887 _002218_hash NULL
|
|
+_002219_hash pointer_size_read 3 51863 _002219_hash NULL
|
|
+_002220_hash power_read 3 15939 _002220_hash NULL
|
|
+_002221_hash pppoe_recvmsg 4 15073 _002221_hash NULL
|
|
+_002222_hash pppol2tp_recvmsg 4 57742 _002222_hash NULL
|
|
+_002223_hash qla2x00_adjust_sdev_qdepth_up 2 20097 _002223_hash NULL
|
|
+_002224_hash qla2x00_change_queue_depth 2 24742 _002224_hash NULL
|
|
+_002225_hash raw_recvmsg 4 52529 _002225_hash NULL
|
|
+_002226_hash rawsock_recvmsg 4 12144 _002226_hash NULL
|
|
+_002227_hash rawv6_recvmsg 4 30265 _002227_hash NULL
|
|
+_002228_hash reada_add_block 2 54247 _002228_hash NULL
|
|
+_002229_hash readahead_tree_block 3 36285 _002229_hash NULL
|
|
+_002230_hash reada_tree_block_flagged 3 18402 _002230_hash NULL
|
|
+_002231_hash read_tree_block 3 841 _002231_hash NULL
|
|
+_002232_hash recover_peb 6-7 29238 _002232_hash NULL
|
|
+_002234_hash recv_msg 4 48709 _002234_hash NULL
|
|
+_002235_hash recv_stream 4 30138 _002235_hash NULL
|
|
+_002236_hash _req_append_segment 2 41031 _002236_hash NULL
|
|
+_002237_hash request_key_async 4 6990 _002237_hash NULL
|
|
+_002238_hash request_key_async_with_auxdata 4 46624 _002238_hash NULL
|
|
+_002239_hash request_key_with_auxdata 4 24515 _002239_hash NULL
|
|
+_002240_hash rose_recvmsg 4 2368 _002240_hash NULL
|
|
+_002241_hash rxrpc_recvmsg 4 26233 _002241_hash NULL
|
|
+_002242_hash rx_streaming_always_read 3 49401 _002242_hash NULL
|
|
+_002243_hash rx_streaming_interval_read 3 55291 _002243_hash NULL
|
|
+_002244_hash sas_change_queue_depth 2 18555 _002244_hash NULL
|
|
+_002245_hash scsi_activate_tcq 2 42640 _002245_hash NULL
|
|
+_002246_hash scsi_deactivate_tcq 2 47086 _002246_hash NULL
|
|
+_002247_hash scsi_execute 5 33596 _002247_hash NULL
|
|
+_002248_hash _scsih_adjust_queue_depth 2 1083 _002248_hash NULL
|
|
+_002249_hash scsi_init_shared_tag_map 2 59812 _002249_hash NULL
|
|
+_002250_hash scsi_track_queue_full 2 44239 _002250_hash NULL
|
|
+_002251_hash sctp_recvmsg 4 23265 _002251_hash NULL
|
|
+_002252_hash send_stream 4 3397 _002252_hash NULL
|
|
+_002253_hash skb_copy_and_csum_datagram_iovec 2 24466 _002253_hash NULL
|
|
+_002255_hash snd_gf1_mem_proc_dump 5 16926 _002255_hash NULL
|
|
+_002256_hash split_scan_timeout_read 3 20029 _002256_hash NULL
|
|
+_002257_hash sta_dev_read 3 14782 _002257_hash NULL
|
|
+_002258_hash sta_inactive_ms_read 3 25690 _002258_hash NULL
|
|
+_002259_hash sta_last_signal_read 3 31818 _002259_hash NULL
|
|
+_002260_hash stats_dot11ACKFailureCount_read 3 45558 _002260_hash NULL
|
|
+_002261_hash stats_dot11FCSErrorCount_read 3 28154 _002261_hash NULL
|
|
+_002262_hash stats_dot11RTSFailureCount_read 3 43948 _002262_hash NULL
|
|
+_002263_hash stats_dot11RTSSuccessCount_read 3 33065 _002263_hash NULL
|
|
+_002264_hash storvsc_connect_to_vsp 2 22 _002264_hash NULL
|
|
+_002265_hash suspend_dtim_interval_read 3 64971 _002265_hash NULL
|
|
+_002266_hash sys_msgrcv 3 959 _002266_hash NULL
|
|
+_002267_hash tcm_loop_change_queue_depth 2 42454 _002267_hash NULL
|
|
+_002268_hash tcp_copy_to_iovec 3 28344 _002268_hash NULL
|
|
+_002269_hash tcp_recvmsg 4 31238 _002269_hash NULL
|
|
+_002270_hash timeout_read 3 47915 _002270_hash NULL
|
|
+_002271_hash total_ps_buffered_read 3 16365 _002271_hash NULL
|
|
+_002272_hash tun_put_user 4 59849 _002272_hash NULL
|
|
+_002273_hash twa_change_queue_depth 2 48808 _002273_hash NULL
|
|
+_002274_hash tw_change_queue_depth 2 11116 _002274_hash NULL
|
|
+_002275_hash twl_change_queue_depth 2 41342 _002275_hash NULL
|
|
+_002276_hash ubi_eba_write_leb 5-6 19826 _002276_hash NULL
|
|
+_002278_hash ubi_eba_write_leb_st 5 27896 _002278_hash NULL
|
|
+_002279_hash udp_recvmsg 4 42558 _002279_hash NULL
|
|
+_002280_hash udpv6_recvmsg 4 9813 _002280_hash &_002171_hash
|
|
+_002281_hash ulong_read_file 3 42304 _002281_hash &_000511_hash
|
|
+_002282_hash unix_dgram_recvmsg 4 14952 _002282_hash NULL
|
|
+_002283_hash user_power_read 3 39414 _002283_hash NULL
|
|
+_002284_hash vcc_recvmsg 4 37198 _002284_hash NULL
|
|
+_002285_hash wep_iv_read 3 54744 _002285_hash NULL
|
|
+_002286_hash x25_recvmsg 4 42777 _002286_hash NULL
|
|
+_002287_hash xfs_iext_insert 3 18667 _002287_hash NULL
|
|
+_002288_hash xfs_iext_remove 3 50909 _002288_hash NULL
|
|
+_002289_hash xlog_find_verify_log_record 2 18870 _002289_hash NULL
|
|
+_002290_hash btrfs_alloc_free_block 3 29982 _002290_hash NULL
|
|
+_002291_hash cx18_read_pos 3 4683 _002291_hash NULL
|
|
+_002292_hash l2cap_sock_recvmsg 4 59886 _002292_hash NULL
|
|
+_002293_hash osd_req_list_dev_partitions 4 60027 _002293_hash NULL
|
|
+_002294_hash osd_req_list_partition_collections 5 38223 _002294_hash NULL
|
|
+_002295_hash osst_do_scsi 4 44410 _002295_hash NULL
|
|
+_002296_hash qla2x00_handle_queue_full 2 24365 _002296_hash NULL
|
|
+_002297_hash rfcomm_sock_recvmsg 4 22227 _002297_hash NULL
|
|
+_002298_hash scsi_execute_req 5 42088 _002298_hash NULL
|
|
+_002299_hash _scsih_change_queue_depth 2 26230 _002299_hash NULL
|
|
+_002300_hash spi_execute 5 28736 _002300_hash NULL
|
|
+_002301_hash submit_inquiry 3 42108 _002301_hash NULL
|
|
+_002302_hash tcp_dma_try_early_copy 3 37651 _002302_hash NULL
|
|
+_002303_hash tun_do_read 4 50800 _002303_hash NULL
|
|
+_002304_hash ubi_eba_atomic_leb_change 5 13041 _002304_hash NULL
|
|
+_002305_hash ubi_leb_write 4-5 41691 _002305_hash NULL
|
|
+_002307_hash unix_seqpacket_recvmsg 4 23062 _002307_hash NULL
|
|
+_002308_hash write_leb 5 36957 _002308_hash NULL
|
|
+_002309_hash ch_do_scsi 4 31171 _002309_hash NULL
|
|
+_002310_hash dbg_leb_write 4-5 20478 _002310_hash NULL
|
|
+_002312_hash scsi_mode_sense 5 16835 _002312_hash NULL
|
|
+_002313_hash scsi_vpd_inquiry 4 30040 _002313_hash NULL
|
|
+_002314_hash ses_recv_diag 4 47143 _002314_hash &_000673_hash
|
|
+_002315_hash ses_send_diag 4 64527 _002315_hash NULL
|
|
+_002316_hash spi_dv_device_echo_buffer 2-3 39846 _002316_hash NULL
|
|
+_002318_hash ubifs_leb_write 4-5 61226 _002318_hash NULL
|
|
+_002320_hash ubi_leb_change 4 14899 _002320_hash NULL
|
|
+_002321_hash ubi_write 4-5 30809 _002321_hash NULL
|
|
+_002322_hash dbg_leb_change 4 19969 _002322_hash NULL
|
|
+_002323_hash gluebi_write 3 27905 _002323_hash NULL
|
|
+_002324_hash scsi_get_vpd_page 4 51951 _002324_hash NULL
|
|
+_002325_hash sd_do_mode_sense 5 11507 _002325_hash NULL
|
|
+_002326_hash ubifs_leb_change 4 22399 _002436_hash NULL nohasharray
|
|
+_002327_hash ubifs_write_node 5 15088 _002327_hash NULL
|
|
+_002328_hash fixup_leb 3 43256 _002328_hash NULL
|
|
+_002329_hash recover_head 3 17904 _002329_hash NULL
|
|
+_002330_hash alloc_cpu_rmap 1 65363 _002330_hash NULL
|
|
+_002331_hash alloc_ebda_hpc 1-2 50046 _002331_hash NULL
|
|
+_002333_hash alloc_sched_domains 1 28972 _002333_hash NULL
|
|
+_002334_hash amthi_read 4 45831 _002334_hash NULL
|
|
+_002335_hash bcm_char_read 3 31750 _002335_hash NULL
|
|
+_002336_hash BcmCopySection 5 2035 _002336_hash NULL
|
|
+_002337_hash buffer_from_user 3 51826 _002337_hash NULL
|
|
+_002338_hash buffer_to_user 3 35439 _002338_hash NULL
|
|
+_002339_hash c4iw_init_resource_fifo 3 48090 _002339_hash NULL
|
|
+_002340_hash c4iw_init_resource_fifo_random 3 25547 _002340_hash NULL
|
|
+_002341_hash card_send_command 3 40757 _002341_hash NULL
|
|
+_002342_hash chd_dec_fetch_cdata 3 50926 _002342_hash NULL
|
|
+_002343_hash crystalhd_create_dio_pool 2 3427 _002343_hash NULL
|
|
+_002344_hash crystalhd_user_data 3 18407 _002344_hash NULL
|
|
+_002345_hash cxio_init_resource_fifo 3 28764 _002345_hash NULL
|
|
+_002346_hash cxio_init_resource_fifo_random 3 47151 _002346_hash NULL
|
|
+_002347_hash do_pages_stat 2 4437 _002347_hash NULL
|
|
+_002348_hash do_read_log_to_user 4 3236 _002348_hash NULL
|
|
+_002349_hash do_write_log_from_user 3 39362 _002349_hash NULL
|
|
+_002350_hash dt3155_read 3 59226 _002350_hash NULL
|
|
+_002351_hash easycap_alsa_vmalloc 2 14426 _002351_hash NULL
|
|
+_002352_hash evm_read_key 3 54674 _002352_hash NULL
|
|
+_002353_hash evm_write_key 3 27715 _002353_hash NULL
|
|
+_002354_hash fir16_create 3 5574 _002354_hash NULL
|
|
+_002355_hash iio_allocate_device 1 18821 _002355_hash NULL
|
|
+_002356_hash __iio_allocate_kfifo 2-3 55738 _002356_hash NULL
|
|
+_002358_hash __iio_allocate_sw_ring_buffer 3 4843 _002358_hash NULL
|
|
+_002359_hash iio_debugfs_read_reg 3 60908 _002359_hash NULL
|
|
+_002360_hash iio_debugfs_write_reg 3 22742 _002360_hash NULL
|
|
+_002361_hash iio_event_chrdev_read 3 54757 _002361_hash NULL
|
|
+_002362_hash iio_read_first_n_kfifo 2 57910 _002362_hash NULL
|
|
+_002363_hash iio_read_first_n_sw_rb 2 51911 _002363_hash NULL
|
|
+_002364_hash ioapic_setup_resources 1 35255 _002364_hash NULL
|
|
+_002365_hash keymap_store 4 45406 _002365_hash NULL
|
|
+_002366_hash kzalloc_node 1 24352 _002366_hash NULL
|
|
+_002367_hash line6_alloc_sysex_buffer 4 28225 _002367_hash NULL
|
|
+_002368_hash line6_dumpreq_initbuf 3 53123 _002368_hash NULL
|
|
+_002369_hash line6_midibuf_init 2 52425 _002369_hash NULL
|
|
+_002370_hash lirc_write 3 20604 _002370_hash NULL
|
|
+_002371_hash _malloc 1 54077 _002371_hash NULL
|
|
+_002372_hash mei_read 3 6507 _002372_hash NULL
|
|
+_002373_hash mei_write 3 4005 _002373_hash NULL
|
|
+_002374_hash mempool_create_node 1 44715 _002374_hash NULL
|
|
+_002375_hash msg_set 3 51725 _002375_hash NULL
|
|
+_002376_hash newpart 6 47485 _002376_hash NULL
|
|
+_002377_hash OS_kmalloc 1 36909 _002377_hash NULL
|
|
+_002378_hash pcpu_alloc_bootmem 2 62074 _002378_hash NULL
|
|
+_002379_hash pcpu_get_vm_areas 3 50085 _002379_hash NULL
|
|
+_002380_hash resource_from_user 3 30341 _002380_hash NULL
|
|
+_002381_hash sca3000_read_data 4 57064 _002381_hash NULL
|
|
+_002382_hash sca3000_read_first_n_hw_rb 2 11479 _002382_hash NULL
|
|
+_002383_hash send_midi_async 3 57463 _002383_hash NULL
|
|
+_002384_hash sep_create_dcb_dmatables_context 6 37551 _002384_hash NULL
|
|
+_002385_hash sep_create_dcb_dmatables_context_kernel 6 49728 _002385_hash NULL
|
|
+_002386_hash sep_create_msgarea_context 4 33829 _002386_hash NULL
|
|
+_002387_hash sep_lli_table_secure_dma 2-3 64042 _002387_hash NULL
|
|
+_002389_hash sep_lock_user_pages 2-3 57470 _002389_hash &_002154_hash
|
|
+_002391_hash sep_prepare_input_output_dma_table_in_dcb 4-5 63087 _002391_hash NULL
|
|
+_002393_hash sep_read 3 17161 _002393_hash NULL
|
|
+_002394_hash TransmitTcb 4 12989 _002394_hash NULL
|
|
+_002395_hash ValidateDSDParamsChecksum 3 63654 _002395_hash NULL
|
|
+_002396_hash Wb35Reg_BurstWrite 4 62327 _002396_hash NULL
|
|
+_002397_hash __alloc_bootmem_low_node 2 25726 _002397_hash &_001499_hash
|
|
+_002398_hash __alloc_bootmem_node 2 1992 _002398_hash NULL
|
|
+_002399_hash alloc_irq_cpu_rmap 1 28459 _002399_hash NULL
|
|
+_002400_hash alloc_ring 2-4 18278 _002400_hash NULL
|
|
+_002402_hash c4iw_init_resource 2-3 30393 _002402_hash NULL
|
|
+_002404_hash cxio_hal_init_resource 2-7-6 29771 _002404_hash &_000284_hash
|
|
+_002407_hash cxio_hal_init_rhdl_resource 1 25104 _002407_hash NULL
|
|
+_002408_hash disk_expand_part_tbl 2 30561 _002408_hash NULL
|
|
+_002409_hash InterfaceTransmitPacket 3 42058 _002409_hash NULL
|
|
+_002410_hash line6_dumpreq_init 3 34473 _002410_hash NULL
|
|
+_002411_hash mempool_create 1 29437 _002411_hash NULL
|
|
+_002412_hash pcpu_fc_alloc 2 11818 _002412_hash NULL
|
|
+_002413_hash pod_alloc_sysex_buffer 3 31651 _002413_hash NULL
|
|
+_002414_hash r8712_usbctrl_vendorreq 6 48489 _002414_hash NULL
|
|
+_002415_hash r871x_set_wpa_ie 3 7000 _002415_hash NULL
|
|
+_002416_hash sys_move_pages 2 42626 _002416_hash NULL
|
|
+_002417_hash variax_alloc_sysex_buffer 3 15237 _002417_hash NULL
|
|
+_002418_hash vme_user_write 3 15587 _002418_hash NULL
|
|
+_002419_hash add_partition 2 55588 _002419_hash NULL
|
|
+_002420_hash __alloc_bootmem_node_high 2 65076 _002420_hash NULL
|
|
+_002421_hash ceph_msgpool_init 3 33312 _002421_hash NULL
|
|
+_002423_hash mempool_create_kmalloc_pool 1 41650 _002423_hash NULL
|
|
+_002424_hash mempool_create_page_pool 1 30189 _002424_hash NULL
|
|
+_002425_hash mempool_create_slab_pool 1 62907 _002425_hash NULL
|
|
+_002426_hash variax_set_raw2 4 32374 _002426_hash NULL
|
|
+_002427_hash bioset_create 1 5580 _002427_hash NULL
|
|
+_002428_hash bioset_integrity_create 2 62708 _002428_hash NULL
|
|
+_002429_hash biovec_create_pools 2 9575 _002429_hash NULL
|
|
+_002430_hash i2o_pool_alloc 4 55485 _002430_hash NULL
|
|
+_002431_hash prison_create 1 43623 _002431_hash NULL
|
|
+_002432_hash unlink_simple 3 47506 _002432_hash NULL
|
|
+_002433_hash alloc_ieee80211 1 20063 _002433_hash NULL
|
|
+_002434_hash alloc_ieee80211_rsl 1 34564 _002434_hash NULL
|
|
+_002435_hash alloc_page_cgroup 1 2919 _002435_hash NULL
|
|
+_002436_hash alloc_private 2 22399 _002436_hash &_002326_hash
|
|
+_002437_hash alloc_rtllib 1 51136 _002437_hash NULL
|
|
+_002438_hash alloc_rx_desc_ring 2 18016 _002438_hash NULL
|
|
+_002439_hash alloc_subdevices 2 43300 _002439_hash NULL
|
|
+_002440_hash atomic_counters_read 3 48827 _002440_hash NULL
|
|
+_002441_hash atomic_stats_read 3 36228 _002441_hash NULL
|
|
+_002442_hash capabilities_read 3 58457 _002442_hash NULL
|
|
+_002443_hash comedi_read 3 13199 _002443_hash NULL
|
|
+_002444_hash comedi_write 3 47926 _002444_hash NULL
|
|
+_002445_hash compat_do_arpt_set_ctl 4 12184 _002445_hash NULL
|
|
+_002446_hash compat_do_ip6t_set_ctl 4 3184 _002446_hash NULL
|
|
+_002447_hash compat_do_ipt_set_ctl 4 58466 _002447_hash &_001852_hash
|
|
+_002448_hash compat_filldir 3 32999 _002448_hash NULL
|
|
+_002449_hash compat_filldir64 3 35354 _002449_hash NULL
|
|
+_002450_hash compat_fillonedir 3 15620 _002450_hash NULL
|
|
+_002451_hash compat_rw_copy_check_uvector 3 25242 _002451_hash NULL
|
|
+_002452_hash compat_sock_setsockopt 5 23 _002452_hash NULL
|
|
+_002453_hash compat_sys_kexec_load 2 35674 _002453_hash NULL
|
|
+_002454_hash compat_sys_keyctl 4 9639 _002454_hash NULL
|
|
+_002455_hash compat_sys_move_pages 2 5861 _002455_hash NULL
|
|
+_002456_hash compat_sys_mq_timedsend 3 31060 _002456_hash NULL
|
|
+_002457_hash compat_sys_msgrcv 2 7482 _002457_hash NULL
|
|
+_002458_hash compat_sys_msgsnd 2 10738 _002458_hash NULL
|
|
+_002459_hash compat_sys_semtimedop 3 3606 _002459_hash NULL
|
|
+_002460_hash __copy_in_user 3 34790 _002460_hash NULL
|
|
+_002461_hash copy_in_user 3 57502 _002461_hash NULL
|
|
+_002462_hash dev_counters_read 3 19216 _002462_hash NULL
|
|
+_002463_hash dev_names_read 3 38509 _002463_hash NULL
|
|
+_002464_hash do_arpt_set_ctl 4 51053 _002464_hash NULL
|
|
+_002465_hash do_ip6t_set_ctl 4 60040 _002465_hash NULL
|
|
+_002466_hash do_ipt_set_ctl 4 56238 _002466_hash NULL
|
|
+_002467_hash drbd_bm_resize 2 20522 _002467_hash NULL
|
|
+_002468_hash driver_names_read 3 60399 _002468_hash NULL
|
|
+_002469_hash driver_stats_read 3 8944 _002469_hash NULL
|
|
+_002470_hash __earlyonly_bootmem_alloc 2 23824 _002470_hash NULL
|
|
+_002471_hash evtchn_read 3 3569 _002471_hash NULL
|
|
+_002472_hash ext_sd_execute_read_data 9 48589 _002472_hash NULL
|
|
+_002473_hash ext_sd_execute_write_data 9 8175 _002473_hash NULL
|
|
+_002474_hash fat_compat_ioctl_filldir 3 36328 _002474_hash NULL
|
|
+_002475_hash firmwareUpload 3 32794 _002475_hash NULL
|
|
+_002476_hash flash_read 3 57843 _002476_hash NULL
|
|
+_002477_hash flash_write 3 62354 _002477_hash NULL
|
|
+_002478_hash gather_array 3 56641 _002478_hash NULL
|
|
+_002479_hash ghash_async_setkey 3 60001 _002479_hash NULL
|
|
+_002480_hash gntdev_alloc_map 2 35145 _002480_hash NULL
|
|
+_002481_hash gnttab_map 2 56439 _002481_hash NULL
|
|
+_002482_hash gru_alloc_gts 2-3 60056 _002482_hash NULL
|
|
+_002484_hash handle_eviocgbit 3 44193 _002484_hash NULL
|
|
+_002485_hash hid_parse_report 3 51737 _002485_hash NULL
|
|
+_002486_hash ieee80211_alloc_txb 1-2 52477 _002486_hash NULL
|
|
+_002487_hash ieee80211_wx_set_gen_ie 3 51399 _002487_hash NULL
|
|
+_002488_hash ieee80211_wx_set_gen_ie_rsl 3 3521 _002488_hash NULL
|
|
+_002489_hash init_cdev 1 8274 _002489_hash NULL
|
|
+_002490_hash init_per_cpu 1 17880 _002490_hash NULL
|
|
+_002491_hash ipath_create_cq 2 45586 _002491_hash NULL
|
|
+_002492_hash ipath_get_base_info 3 7043 _002492_hash NULL
|
|
+_002493_hash ipath_init_qp_table 2 25167 _002493_hash NULL
|
|
+_002494_hash ipath_resize_cq 2 712 _002494_hash NULL
|
|
+_002495_hash ni_gpct_device_construct 5 610 _002495_hash NULL
|
|
+_002496_hash options_write 3 47243 _002496_hash NULL
|
|
+_002497_hash portcntrs_1_read 3 47253 _002497_hash NULL
|
|
+_002498_hash portcntrs_2_read 3 56586 _002498_hash NULL
|
|
+_002499_hash portnames_read 3 41958 _002499_hash NULL
|
|
+_002500_hash ptc_proc_write 3 12076 _002500_hash NULL
|
|
+_002501_hash put_cmsg_compat 4 35937 _002501_hash NULL
|
|
+_002502_hash qib_alloc_devdata 2 51819 _002502_hash NULL
|
|
+_002503_hash qib_alloc_fast_reg_page_list 2 10507 _002503_hash NULL
|
|
+_002504_hash qib_cdev_init 1 34778 _002504_hash NULL
|
|
+_002505_hash qib_create_cq 2 27497 _002505_hash NULL
|
|
+_002506_hash qib_diag_write 3 62133 _002506_hash NULL
|
|
+_002507_hash qib_get_base_info 3 11369 _002507_hash NULL
|
|
+_002508_hash qib_resize_cq 2 53090 _002508_hash NULL
|
|
+_002509_hash qsfp_1_read 3 21915 _002509_hash NULL
|
|
+_002510_hash qsfp_2_read 3 31491 _002510_hash NULL
|
|
+_002511_hash queue_reply 3 22416 _002511_hash NULL
|
|
+_002512_hash Realloc 2 34961 _002512_hash NULL
|
|
+_002513_hash rfc4106_set_key 3 54519 _002513_hash NULL
|
|
+_002514_hash rtllib_alloc_txb 1-2 21687 _002514_hash NULL
|
|
+_002515_hash rtllib_wx_set_gen_ie 3 59808 _002515_hash NULL
|
|
+_002516_hash rts51x_transfer_data_partial 6 5735 _002516_hash NULL
|
|
+_002517_hash sparse_early_usemaps_alloc_node 4 9269 _002517_hash NULL
|
|
+_002518_hash split 2 11691 _002518_hash NULL
|
|
+_002519_hash stats_read_ul 3 32751 _002519_hash NULL
|
|
+_002520_hash store_debug_level 3 35652 _002520_hash NULL
|
|
+_002521_hash sys32_ipc 3 7238 _002521_hash NULL
|
|
+_002522_hash sys32_rt_sigpending 2 25814 _002522_hash NULL
|
|
+_002523_hash tunables_read 3 36385 _002523_hash NULL
|
|
+_002524_hash tunables_write 3 59563 _002524_hash NULL
|
|
+_002525_hash u32_array_read 3 2219 _002525_hash NULL
|
|
+_002526_hash usb_buffer_alloc 2 36276 _002526_hash NULL
|
|
+_002527_hash xenbus_file_write 3 6282 _002527_hash NULL
|
|
+_002528_hash xpc_kmalloc_cacheline_aligned 1 42895 _002528_hash NULL
|
|
+_002529_hash xpc_kzalloc_cacheline_aligned 1 65433 _002529_hash NULL
|
|
+_002530_hash xsd_read 3 15653 _002530_hash NULL
|
|
+_002531_hash compat_do_readv_writev 4 49102 _002531_hash NULL
|
|
+_002532_hash compat_keyctl_instantiate_key_iov 3 57431 _003117_hash NULL nohasharray
|
|
+_002533_hash compat_process_vm_rw 3-5 22254 _002533_hash NULL
|
|
+_002535_hash compat_sys_setsockopt 5 3326 _002535_hash NULL
|
|
+_002536_hash ipath_cdev_init 1 37752 _002536_hash NULL
|
|
+_002537_hash ms_read_multiple_pages 4-5 8052 _002537_hash NULL
|
|
+_002539_hash ms_write_multiple_pages 5-6 10362 _002539_hash NULL
|
|
+_002541_hash sparse_mem_maps_populate_node 4 12669 _002541_hash &_002004_hash
|
|
+_002542_hash vmemmap_alloc_block 1 43245 _002542_hash NULL
|
|
+_002543_hash xd_read_multiple_pages 4-5 11422 _002543_hash NULL
|
|
+_002545_hash xd_write_multiple_pages 5-6 53633 _002545_hash NULL
|
|
+_002546_hash compat_readv 3 30273 _002546_hash NULL
|
|
+_002547_hash compat_sys_process_vm_readv 3-5 15374 _002547_hash NULL
|
|
+_002549_hash compat_sys_process_vm_writev 3-5 41194 _002549_hash NULL
|
|
+_002551_hash compat_writev 3 60063 _002551_hash NULL
|
|
+_002552_hash ms_rw_multi_sector 4 7459 _002552_hash NULL
|
|
+_002553_hash sparse_early_mem_maps_alloc_node 4 36971 _002553_hash NULL
|
|
+_002554_hash vmemmap_alloc_block_buf 1 61126 _002554_hash NULL
|
|
+_002555_hash xd_rw 4 49020 _002555_hash NULL
|
|
+_002556_hash compat_sys_preadv64 3 24283 _002556_hash NULL
|
|
+_002557_hash compat_sys_pwritev64 3 51151 _002557_hash NULL
|
|
+_002558_hash compat_sys_readv 3 20911 _002558_hash NULL
|
|
+_002559_hash compat_sys_writev 3 5784 _002559_hash NULL
|
|
+_002560_hash ms_rw 4 17220 _002560_hash NULL
|
|
+_002561_hash compat_sys_preadv 3 583 _002561_hash NULL
|
|
+_002562_hash compat_sys_pwritev 3 17886 _002562_hash NULL
|
|
+_002563_hash alloc_apertures 1 56561 _002563_hash NULL
|
|
+_002564_hash bin_uuid 3 28999 _002564_hash NULL
|
|
+_002565_hash __copy_from_user_inatomic_nocache 3 49921 _002565_hash NULL
|
|
+_002566_hash do_dmabuf_dirty_sou 7 3017 _002566_hash NULL
|
|
+_002567_hash do_surface_dirty_sou 7 39678 _002567_hash NULL
|
|
+_002568_hash drm_agp_bind_pages 3 56748 _002568_hash NULL
|
|
+_002569_hash drm_calloc_large 1-2 65421 _002569_hash NULL
|
|
+_002571_hash drm_fb_helper_init 3-4 19044 _002571_hash NULL
|
|
+_002573_hash drm_ht_create 2 18853 _002573_hash NULL
|
|
+_002574_hash drm_malloc_ab 1-2 16831 _002574_hash NULL
|
|
+_002576_hash drm_mode_crtc_set_gamma_size 2 31881 _002576_hash NULL
|
|
+_002577_hash drm_plane_init 6 28731 _002577_hash NULL
|
|
+_002578_hash drm_property_create 4 51239 _002578_hash NULL
|
|
+_002579_hash drm_property_create_blob 2 7414 _002579_hash NULL
|
|
+_002580_hash drm_vblank_init 2 11362 _002580_hash NULL
|
|
+_002581_hash drm_vmalloc_dma 1 14550 _002581_hash NULL
|
|
+_002582_hash fb_alloc_cmap_gfp 2 20792 _002582_hash NULL
|
|
+_002583_hash fbcon_prepare_logo 5 6246 _002583_hash NULL
|
|
+_002584_hash fb_read 3 33506 _002584_hash NULL
|
|
+_002585_hash fb_write 3 46924 _002585_hash NULL
|
|
+_002586_hash framebuffer_alloc 1 59145 _002586_hash NULL
|
|
+_002587_hash i915_cache_sharing_read 3 24775 _002587_hash NULL
|
|
+_002588_hash i915_cache_sharing_write 3 57961 _002588_hash NULL
|
|
+_002589_hash i915_max_freq_read 3 20581 _002589_hash NULL
|
|
+_002590_hash i915_max_freq_write 3 11350 _002590_hash NULL
|
|
+_002591_hash i915_wedged_read 3 35474 _002591_hash NULL
|
|
+_002592_hash i915_wedged_write 3 47771 _002592_hash NULL
|
|
+_002593_hash p9_client_read 5 19750 _002593_hash NULL
|
|
+_002594_hash probe_kernel_write 3 17481 _002594_hash NULL
|
|
+_002595_hash sched_feat_write 3 55202 _002595_hash NULL
|
|
+_002596_hash sd_alloc_ctl_entry 1 29708 _002596_hash NULL
|
|
+_002597_hash tstats_write 3 60432 _002597_hash &_000009_hash
|
|
+_002598_hash ttm_bo_fbdev_io 4 9805 _002598_hash NULL
|
|
+_002599_hash ttm_bo_io 5 47000 _002599_hash NULL
|
|
+_002600_hash ttm_dma_page_pool_free 2 34135 _002600_hash NULL
|
|
+_002601_hash ttm_page_pool_free 2 61661 _002601_hash NULL
|
|
+_002602_hash vmw_execbuf_process 5 22885 _002602_hash NULL
|
|
+_002603_hash vmw_fifo_reserve 2 12141 _002603_hash NULL
|
|
+_002604_hash vmw_kms_present 9 38130 _002604_hash NULL
|
|
+_002605_hash vmw_kms_readback 6 5727 _002605_hash NULL
|
|
+_002606_hash do_dmabuf_dirty_ldu 6 52241 _002606_hash NULL
|
|
+_002607_hash drm_mode_create_tv_properties 2 23122 _002607_hash NULL
|
|
+_002608_hash drm_property_create_enum 5 29201 _002608_hash NULL
|
|
+_002609_hash fast_user_write 5 20494 _002609_hash NULL
|
|
+_002610_hash fb_alloc_cmap 2 6554 _002610_hash NULL
|
|
+_002611_hash i915_gem_execbuffer_relocate_slow 7 25355 _002611_hash NULL
|
|
+_002612_hash kgdb_hex2mem 3 24755 _002612_hash NULL
|
|
+_002613_hash ttm_object_device_init 2 10321 _002613_hash NULL
|
|
+_002614_hash ttm_object_file_init 2 27804 _002614_hash NULL
|
|
+_002615_hash vmw_cursor_update_image 3-4 16332 _002615_hash NULL
|
|
+_002617_hash vmw_gmr2_bind 3 21305 _002617_hash NULL
|
|
+_002618_hash vmw_cursor_update_dmabuf 3-4 32045 _002618_hash NULL
|
|
+_002620_hash vmw_gmr_bind 3 44130 _002620_hash NULL
|
|
+_002621_hash vmw_du_crtc_cursor_set 4-5 28479 _002621_hash NULL
|
|
+_002622_hash __module_alloc 1 50004 _002622_hash NULL
|
|
+_002623_hash module_alloc_update_bounds_rw 1 63233 _002623_hash NULL
|
|
+_002624_hash module_alloc_update_bounds_rx 1 58634 _002624_hash NULL
|
|
+_002625_hash acpi_system_write_alarm 3 40205 _002625_hash NULL
|
|
+_002626_hash create_table 2 16213 _002626_hash NULL
|
|
+_002627_hash mem_read 3 57631 _002627_hash NULL
|
|
+_002628_hash mem_write 3 22232 _002628_hash NULL
|
|
+_002629_hash proc_fault_inject_read 3 36802 _002629_hash NULL
|
|
+_002630_hash proc_fault_inject_write 3 21058 _002630_hash NULL
|
|
+_002631_hash v9fs_fid_readn 4 60544 _002631_hash NULL
|
|
+_002632_hash v9fs_file_read 3 40858 _002632_hash NULL
|
|
+_002633_hash __devres_alloc 2 25598 _002633_hash NULL
|
|
+_002634_hash alloc_dummy_extent_buffer 2 56374 _002634_hash NULL
|
|
+_002635_hash alloc_fdtable 1 17389 _002635_hash NULL
|
|
+_002636_hash alloc_large_system_hash 2 22391 _002636_hash NULL
|
|
+_002637_hash alloc_ldt 2 21972 _002637_hash NULL
|
|
+_002638_hash __alloc_skb 1 23940 _002638_hash NULL
|
|
+_002639_hash __ata_change_queue_depth 3 23484 _002639_hash NULL
|
|
+_002640_hash btrfs_alloc_free_block 3 8986 _002640_hash NULL
|
|
+_002641_hash btrfs_find_device_for_logical 2 44993 _002641_hash NULL
|
|
+_002642_hash ccid3_hc_rx_getsockopt 3 62331 _002642_hash NULL
|
|
+_002643_hash ccid3_hc_tx_getsockopt 3 16314 _002643_hash NULL
|
|
+_002644_hash cifs_readdata_alloc 1 26360 _002644_hash NULL
|
|
+_002645_hash cistpl_vers_1 4 15023 _002645_hash NULL
|
|
+_002646_hash cmm_read 3 57520 _002646_hash NULL
|
|
+_002647_hash cosa_read 3 25966 _002647_hash NULL
|
|
+_002648_hash dm_table_create 3 35687 _002648_hash NULL
|
|
+_002649_hash dpcm_state_read_file 3 65489 _002649_hash NULL
|
|
+_002651_hash edac_mc_alloc 4 3611 _002651_hash NULL
|
|
+_002652_hash ep0_read 3 38095 _002652_hash NULL
|
|
+_002653_hash event_buffer_read 3 48772 _002765_hash NULL nohasharray
|
|
+_002654_hash extend_netdev_table 2 21453 _002654_hash NULL
|
|
+_002655_hash extract_entropy_user 3 26952 _002655_hash NULL
|
|
+_002656_hash fcoe_ctlr_device_add 3 1793 _002656_hash NULL
|
|
+_002657_hash fd_do_readv 3 51297 _002657_hash NULL
|
|
+_002658_hash fd_do_writev 3 29329 _002658_hash NULL
|
|
+_002659_hash ffs_ep0_read 3 2672 _002659_hash NULL
|
|
+_002660_hash fill_readbuf 3 32464 _002660_hash NULL
|
|
+_002661_hash fw_iso_buffer_alloc 2 13704 _002661_hash NULL
|
|
+_002662_hash get_fd_set 1 3866 _002662_hash NULL
|
|
+_002663_hash hidraw_report_event 3 20503 _002663_hash NULL
|
|
+_002664_hash ieee80211_if_read_ht_opmode 3 29044 _002664_hash NULL
|
|
+_002665_hash ieee80211_if_read_num_mcast_sta 3 12419 _002665_hash NULL
|
|
+_002666_hash iwl_dbgfs_calib_disabled_read 3 22649 _002666_hash NULL
|
|
+_002667_hash iwl_dbgfs_rf_reset_read 3 26512 _002667_hash NULL
|
|
+_002668_hash ixgbe_alloc_q_vector 4-6 24439 _002668_hash NULL
|
|
+_002670_hash joydev_handle_JSIOCSAXMAP 3 48898 _002836_hash NULL nohasharray
|
|
+_002671_hash joydev_handle_JSIOCSBTNMAP 3 15643 _002671_hash NULL
|
|
+_002672_hash __kfifo_from_user_r 3 60345 _002672_hash NULL
|
|
+_002673_hash kstrtoint_from_user 2 8778 _002673_hash NULL
|
|
+_002674_hash kstrtol_from_user 2 10168 _002674_hash NULL
|
|
+_002675_hash kstrtoll_from_user 2 19500 _002675_hash NULL
|
|
+_002676_hash kstrtos16_from_user 2 28300 _002676_hash NULL
|
|
+_002677_hash kstrtos8_from_user 2 58268 _002677_hash NULL
|
|
+_002678_hash kstrtou16_from_user 2 54274 _002678_hash NULL
|
|
+_002679_hash kstrtou8_from_user 2 55599 _002679_hash NULL
|
|
+_002680_hash kstrtouint_from_user 2 10536 _002680_hash NULL
|
|
+_002681_hash kstrtoul_from_user 2 64569 _002681_hash NULL
|
|
+_002682_hash kstrtoull_from_user 2 63026 _002682_hash NULL
|
|
+_002683_hash l2cap_create_iframe_pdu 3 40055 _002683_hash NULL
|
|
+_002684_hash l2tp_ip6_recvmsg 4 62874 _002684_hash NULL
|
|
+_002685_hash mem_cgroup_read 5 22461 _002685_hash NULL
|
|
+_002686_hash nfs_fscache_get_super_cookie 3 44355 _002686_hash &_001648_hash
|
|
+_002687_hash nfs_pgarray_set 2 1085 _002687_hash NULL
|
|
+_002688_hash ntfs_rl_realloc 3 56831 _002688_hash &_000363_hash
|
|
+_002689_hash ntfs_rl_realloc_nofail 3 32173 _002689_hash NULL
|
|
+_002690_hash pn533_dep_link_up 5 22154 _002690_hash NULL
|
|
+_002691_hash port_fops_write 3 54627 _002691_hash NULL
|
|
+_002692_hash ptp_read 4 63251 _002692_hash NULL
|
|
+_002693_hash qla4xxx_change_queue_depth 2 1268 _002693_hash NULL
|
|
+_002694_hash reqsk_queue_alloc 2 40272 _002694_hash NULL
|
|
+_002695_hash resize_info_buffer 2 62889 _002695_hash NULL
|
|
+_002696_hash rfkill_fop_write 3 64808 _002696_hash NULL
|
|
+_002697_hash rt2x00debug_write_rfcsr 3 41473 _002697_hash NULL
|
|
+_002698_hash rvmalloc 1 46873 _002698_hash NULL
|
|
+_002699_hash rw_copy_check_uvector 3 45748 _002699_hash NULL
|
|
+_002700_hash sctp_getsockopt_active_key 2 45483 _002700_hash NULL
|
|
+_002701_hash sctp_getsockopt_adaptation_layer 2 45375 _002701_hash NULL
|
|
+_002702_hash sctp_getsockopt_assoc_ids 2 9043 _002702_hash NULL
|
|
+_002703_hash sctp_getsockopt_associnfo 2 58169 _002703_hash NULL
|
|
+_002704_hash sctp_getsockopt_assoc_number 2 6384 _002704_hash NULL
|
|
+_002705_hash sctp_getsockopt_auto_asconf 2 46584 _002705_hash NULL
|
|
+_002706_hash sctp_getsockopt_context 2 52490 _002706_hash NULL
|
|
+_002707_hash sctp_getsockopt_default_send_param 2 63056 _002707_hash NULL
|
|
+_002708_hash sctp_getsockopt_disable_fragments 2 12330 _002708_hash NULL
|
|
+_002709_hash sctp_getsockopt_fragment_interleave 2 51215 _002709_hash NULL
|
|
+_002710_hash sctp_getsockopt_initmsg 2 26042 _002710_hash NULL
|
|
+_002711_hash sctp_getsockopt_mappedv4 2 20044 _002711_hash NULL
|
|
+_002712_hash sctp_getsockopt_nodelay 2 9560 _002712_hash NULL
|
|
+_002713_hash sctp_getsockopt_partial_delivery_point 2 60952 _002713_hash NULL
|
|
+_002714_hash sctp_getsockopt_peeloff 2 59190 _002714_hash NULL
|
|
+_002715_hash sctp_getsockopt_peer_addr_info 2 6024 _002715_hash NULL
|
|
+_002716_hash sctp_getsockopt_peer_addr_params 2 53645 _002716_hash NULL
|
|
+_002717_hash sctp_getsockopt_primary_addr 2 24639 _002717_hash NULL
|
|
+_002718_hash sctp_getsockopt_rtoinfo 2 62027 _002718_hash NULL
|
|
+_002719_hash sctp_getsockopt_sctp_status 2 56540 _002719_hash NULL
|
|
+_002720_hash self_check_write 5 50856 _002720_hash NULL
|
|
+_002721_hash smk_read_mapped 3 7562 _002721_hash NULL
|
|
+_002722_hash smk_set_cipso 3 20379 _002722_hash NULL
|
|
+_002723_hash smk_user_access 3 24440 _002723_hash NULL
|
|
+_002724_hash smk_write_mapped 3 13519 _002724_hash NULL
|
|
+_002725_hash smk_write_rules_list 3 18565 _002725_hash NULL
|
|
+_002726_hash snd_mixart_BA0_read 5 45069 _002726_hash NULL
|
|
+_002727_hash snd_mixart_BA1_read 5 5082 _002727_hash NULL
|
|
+_002728_hash snd_pcm_oss_read2 3 54387 _002728_hash NULL
|
|
+_002729_hash syslog_print 2 307 _002729_hash NULL
|
|
+_002730_hash tcp_dma_try_early_copy 3 4457 _002730_hash NULL
|
|
+_002731_hash tcp_send_rcvq 3 11316 _002731_hash NULL
|
|
+_002732_hash tomoyo_init_log 2 61526 _002732_hash NULL
|
|
+_002733_hash ubi_dump_flash 4 46381 _002733_hash NULL
|
|
+_002734_hash ubi_eba_atomic_leb_change 5 60379 _002734_hash NULL
|
|
+_002735_hash ubi_eba_write_leb 5-6 36029 _002735_hash NULL
|
|
+_002737_hash ubi_eba_write_leb_st 5 44343 _002737_hash NULL
|
|
+_002738_hash ubi_self_check_all_ff 4 41959 _002738_hash NULL
|
|
+_002739_hash unix_bind 3 15668 _002739_hash NULL
|
|
+_002740_hash usbvision_rvmalloc 1 19655 _002740_hash NULL
|
|
+_002742_hash v4l2_ctrl_new 7 24927 _002742_hash NULL
|
|
+_002743_hash v4l2_event_subscribe 3 53687 _002743_hash NULL
|
|
+_002744_hash v9fs_direct_read 3 45546 _002744_hash NULL
|
|
+_002745_hash v9fs_file_readn 4 36353 _002745_hash &_001606_hash
|
|
+_002746_hash __videobuf_alloc_vb 1 5665 _002746_hash NULL
|
|
+_002747_hash wm8350_write 3 24480 _002747_hash NULL
|
|
+_002748_hash xfs_buf_read_uncached 3 42844 _002748_hash NULL
|
|
+_002749_hash yurex_write 3 8761 _002749_hash NULL
|
|
+_002750_hash alloc_skb 1 55439 _002750_hash NULL
|
|
+_002751_hash alloc_skb_fclone 1 3467 _002751_hash NULL
|
|
+_002752_hash ata_scsi_change_queue_depth 2 23126 _002752_hash NULL
|
|
+_002753_hash ath6kl_disconnect_timeout_write 3 794 _002753_hash NULL
|
|
+_002754_hash ath6kl_keepalive_write 3 45600 _002754_hash NULL
|
|
+_002755_hash ath6kl_lrssi_roam_write 3 8362 _002755_hash NULL
|
|
+_002756_hash ath6kl_regread_write 3 14220 _002756_hash NULL
|
|
+_002757_hash core_sys_select 1 47494 _002757_hash NULL
|
|
+_002758_hash do_syslog 3 56807 _002758_hash NULL
|
|
+_002759_hash expand_fdtable 2 39273 _002759_hash NULL
|
|
+_002760_hash fd_execute_cmd 3 1132 _002760_hash NULL
|
|
+_002761_hash get_chars 3 40373 _002761_hash NULL
|
|
+_002762_hash hid_report_raw_event 4 2762 _002762_hash NULL
|
|
+_002763_hash inet_csk_listen_start 2 38233 _002763_hash NULL
|
|
+_002764_hash kstrtou32_from_user 2 30361 _002764_hash NULL
|
|
+_002765_hash l2cap_segment_sdu 4 48772 _002765_hash &_002653_hash
|
|
+_002766_hash __netdev_alloc_skb 2 18595 _002766_hash NULL
|
|
+_002767_hash nfs_readdata_alloc 2 65015 _002767_hash NULL
|
|
+_002768_hash nfs_writedata_alloc 2 12133 _002768_hash NULL
|
|
+_002769_hash ntfs_rl_append 2-4 6037 _002769_hash NULL
|
|
+_002771_hash ntfs_rl_insert 2-4 4931 _002771_hash NULL
|
|
+_002773_hash ntfs_rl_replace 2-4 14136 _002773_hash NULL
|
|
+_002775_hash ntfs_rl_split 2-4 52328 _002775_hash NULL
|
|
+_002777_hash port_fops_read 3 49626 _002777_hash NULL
|
|
+_002778_hash random_read 3 13815 _002778_hash NULL
|
|
+_002779_hash sg_proc_write_adio 3 45704 _002779_hash NULL
|
|
+_002780_hash sg_proc_write_dressz 3 46316 _002780_hash NULL
|
|
+_002781_hash tcp_sendmsg 4 30296 _002781_hash NULL
|
|
+_002782_hash tomoyo_write_log2 2 34318 _002782_hash NULL
|
|
+_002783_hash ubi_leb_change 4 10289 _002783_hash NULL
|
|
+_002784_hash ubi_leb_write 4-5 5478 _002784_hash NULL
|
|
+_002786_hash urandom_read 3 30462 _002786_hash NULL
|
|
+_002787_hash v9fs_cached_file_read 3 2514 _002787_hash NULL
|
|
+_002788_hash __videobuf_alloc_cached 1 12740 _002788_hash NULL
|
|
+_002789_hash __videobuf_alloc_uncached 1 55711 _002789_hash NULL
|
|
+_002790_hash wm8350_block_write 3 19727 _002790_hash NULL
|
|
+_002791_hash alloc_tx 2 32143 _002791_hash NULL
|
|
+_002792_hash alloc_wr 1-2 24635 _002792_hash NULL
|
|
+_002794_hash ath6kl_endpoint_stats_write 3 59621 _002794_hash NULL
|
|
+_002795_hash ath6kl_fwlog_mask_write 3 24810 _002795_hash NULL
|
|
+_002796_hash ath9k_wmi_cmd 4 327 _002796_hash NULL
|
|
+_002797_hash atm_alloc_charge 2 19517 _002879_hash NULL nohasharray
|
|
+_002798_hash ax25_output 2 22736 _002798_hash NULL
|
|
+_002799_hash bcsp_prepare_pkt 3 12961 _002799_hash NULL
|
|
+_002800_hash bt_skb_alloc 1 6404 _002800_hash NULL
|
|
+_002801_hash capinc_tty_write 3 28539 _002801_hash NULL
|
|
+_002802_hash cfpkt_create_pfx 1-2 23594 _002802_hash NULL
|
|
+_002804_hash cmd_complete 6 51629 _002804_hash NULL
|
|
+_002805_hash cmtp_add_msgpart 4 9252 _002805_hash NULL
|
|
+_002806_hash cmtp_send_interopmsg 7 376 _002806_hash NULL
|
|
+_002807_hash cxgb3_get_cpl_reply_skb 2 10620 _002807_hash NULL
|
|
+_002808_hash dbg_leb_change 4 23555 _002808_hash NULL
|
|
+_002809_hash dbg_leb_write 4-5 63555 _002809_hash &_000940_hash
|
|
+_002811_hash dccp_listen_start 2 35918 _002811_hash NULL
|
|
+_002812_hash __dev_alloc_skb 1 28681 _002812_hash NULL
|
|
+_002813_hash diva_os_alloc_message_buffer 1 64568 _002813_hash NULL
|
|
+_002814_hash dn_alloc_skb 2 6631 _002814_hash NULL
|
|
+_002815_hash do_pselect 1 62061 _002815_hash NULL
|
|
+_002816_hash _fc_frame_alloc 1 43568 _002816_hash NULL
|
|
+_002817_hash find_skb 2 20431 _002817_hash NULL
|
|
+_002818_hash fm_send_cmd 5 39639 _002818_hash NULL
|
|
+_002819_hash gem_alloc_skb 2 51715 _002819_hash NULL
|
|
+_002820_hash get_packet 3 41914 _002820_hash NULL
|
|
+_002821_hash get_packet 3 5747 _002821_hash NULL
|
|
+_002822_hash get_packet_pg 4 28023 _002822_hash NULL
|
|
+_002823_hash get_skb 2 63008 _002823_hash NULL
|
|
+_002824_hash hidp_queue_report 3 1881 _002824_hash NULL
|
|
+_002825_hash __hidp_send_ctrl_message 4 28303 _002825_hash NULL
|
|
+_002826_hash hycapi_rx_capipkt 3 11602 _002826_hash NULL
|
|
+_002827_hash i2400m_net_rx 5 27170 _002827_hash NULL
|
|
+_002828_hash igmpv3_newpack 2 35912 _002828_hash NULL
|
|
+_002829_hash inet_listen 2 14723 _002829_hash NULL
|
|
+_002830_hash isdn_net_ciscohdlck_alloc_skb 2 55209 _002830_hash &_001724_hash
|
|
+_002831_hash isdn_ppp_ccp_xmit_reset 6 63297 _002831_hash NULL
|
|
+_002832_hash kmsg_read 3 46514 _002832_hash NULL
|
|
+_002833_hash _l2_alloc_skb 1 11883 _002833_hash NULL
|
|
+_002834_hash l3_alloc_skb 1 32289 _002834_hash NULL
|
|
+_002835_hash llc_alloc_frame 4 64366 _002835_hash NULL
|
|
+_002836_hash mac_drv_rx_init 2 48898 _002836_hash &_002670_hash
|
|
+_002837_hash mgmt_event 4 12810 _002837_hash NULL
|
|
+_002838_hash mI_alloc_skb 1 24770 _002838_hash NULL
|
|
+_002839_hash nci_skb_alloc 2 49757 _002839_hash NULL
|
|
+_002840_hash netdev_alloc_skb 2 62437 _002840_hash NULL
|
|
+_002841_hash __netdev_alloc_skb_ip_align 2 55067 _002841_hash NULL
|
|
+_002842_hash new_skb 1 21148 _002842_hash NULL
|
|
+_002843_hash nfc_alloc_recv_skb 1 10244 _002843_hash NULL
|
|
+_002844_hash nfcwilink_skb_alloc 1 16167 _002844_hash NULL
|
|
+_002845_hash nfulnl_alloc_skb 2 65207 _002845_hash NULL
|
|
+_002846_hash ni65_alloc_mem 3 10664 _002846_hash NULL
|
|
+_002847_hash pep_alloc_skb 3 46303 _002847_hash NULL
|
|
+_002848_hash pn_raw_send 2 54330 _002848_hash NULL
|
|
+_002849_hash __pskb_copy 2 9038 _002849_hash NULL
|
|
+_002850_hash refill_pool 2 19477 _002850_hash NULL
|
|
+_002851_hash rfcomm_wmalloc 2 58090 _002851_hash NULL
|
|
+_002852_hash rx 4 57944 _002852_hash NULL
|
|
+_002853_hash sctp_ulpevent_new 1 33377 _002853_hash NULL
|
|
+_002854_hash send_command 4 10832 _002854_hash NULL
|
|
+_002855_hash skb_copy_expand 2-3 7685 _002855_hash &_000671_hash
|
|
+_002857_hash sk_stream_alloc_skb 2 57622 _002857_hash NULL
|
|
+_002858_hash sock_alloc_send_pskb 2 21246 _002858_hash NULL
|
|
+_002859_hash sock_rmalloc 2 59740 _002859_hash &_002157_hash
|
|
+_002860_hash sock_wmalloc 2 16472 _002860_hash NULL
|
|
+_002861_hash solos_param_store 4 34755 _002861_hash NULL
|
|
+_002862_hash sys_select 1 38827 _002862_hash NULL
|
|
+_002863_hash sys_syslog 3 10746 _002863_hash NULL
|
|
+_002864_hash t4vf_pktgl_to_skb 2 39005 _002864_hash NULL
|
|
+_002865_hash tcp_collapse 5-6 63294 _002865_hash NULL
|
|
+_002867_hash tipc_cfg_reply_alloc 1 27606 _002867_hash NULL
|
|
+_002868_hash ubifs_leb_change 4 17789 _002868_hash NULL
|
|
+_002869_hash ubifs_leb_write 4-5 22679 _002869_hash NULL
|
|
+_002871_hash ulog_alloc_skb 1 23427 _002871_hash NULL
|
|
+_002872_hash _alloc_mISDN_skb 3 52232 _002872_hash NULL
|
|
+_002873_hash ath9k_multi_regread 4 65056 _002873_hash NULL
|
|
+_002874_hash ath_rxbuf_alloc 2 24745 _002874_hash NULL
|
|
+_002875_hash ax25_send_frame 2 19964 _002875_hash NULL
|
|
+_002876_hash bchannel_get_rxbuf 2 37213 _002876_hash NULL
|
|
+_002877_hash cfpkt_create 1 18197 _002877_hash NULL
|
|
+_002878_hash console_store 4 36007 _002878_hash NULL
|
|
+_002879_hash dev_alloc_skb 1 19517 _002879_hash &_002797_hash
|
|
+_002880_hash dn_nsp_do_disc 2-6 49474 _002880_hash NULL
|
|
+_002882_hash do_write_orph_node 2 64343 _002882_hash NULL
|
|
+_002883_hash dsp_cmx_send_member 2 15625 _002883_hash NULL
|
|
+_002884_hash fc_frame_alloc 2 1596 _002884_hash NULL
|
|
+_002885_hash fc_frame_alloc_fill 2 59394 _002885_hash NULL
|
|
+_002886_hash fmc_send_cmd 5 20435 _002886_hash NULL
|
|
+_002887_hash hci_send_cmd 3 43810 _002887_hash NULL
|
|
+_002888_hash hci_si_event 3 1404 _002888_hash NULL
|
|
+_002889_hash hfcpci_empty_bfifo 4 62323 _002889_hash NULL
|
|
+_002890_hash hidp_send_ctrl_message 4 43702 _002890_hash NULL
|
|
+_002891_hash hysdn_sched_rx 3 60533 _002891_hash NULL
|
|
+_002892_hash inet_dccp_listen 2 28565 _002892_hash NULL
|
|
+_002893_hash ip6_append_data 4-5 36490 _002893_hash NULL
|
|
+_002894_hash __ip_append_data 7-8 36191 _002894_hash NULL
|
|
+_002895_hash l1oip_socket_recv 6 56537 _002895_hash NULL
|
|
+_002896_hash l2cap_build_cmd 4 48676 _002896_hash NULL
|
|
+_002897_hash l2down_create 4 21755 _002897_hash NULL
|
|
+_002898_hash l2up_create 3 6430 _002898_hash NULL
|
|
+_002899_hash ldisc_receive 4 41516 _002899_hash NULL
|
|
+_002902_hash lro_gen_skb 6 2644 _002902_hash NULL
|
|
+_002903_hash macvtap_alloc_skb 2-4-3 50629 _002903_hash NULL
|
|
+_002906_hash mgmt_device_found 10 14146 _002906_hash NULL
|
|
+_002907_hash nci_send_cmd 3 58206 _002907_hash NULL
|
|
+_002908_hash netdev_alloc_skb_ip_align 2 40811 _002908_hash NULL
|
|
+_002909_hash nfcwilink_send_bts_cmd 3 10802 _002909_hash NULL
|
|
+_002910_hash nfqnl_mangle 2 14583 _002910_hash NULL
|
|
+_002911_hash p54_alloc_skb 3 34366 _002911_hash &_000475_hash
|
|
+_002912_hash packet_alloc_skb 2-5-4 62602 _002912_hash NULL
|
|
+_002915_hash pep_indicate 5 38611 _002915_hash NULL
|
|
+_002916_hash pep_reply 5 50582 _002916_hash NULL
|
|
+_002917_hash pipe_handler_request 5 50774 _002917_hash &_001189_hash
|
|
+_002918_hash ql_process_mac_rx_page 4 15543 _002918_hash NULL
|
|
+_002919_hash ql_process_mac_rx_skb 4 6689 _002919_hash NULL
|
|
+_002920_hash rfcomm_tty_write 3 51603 _002920_hash NULL
|
|
+_002921_hash send_mpa_reject 3 7135 _002921_hash NULL
|
|
+_002922_hash send_mpa_reply 3 32372 _002922_hash NULL
|
|
+_002923_hash set_rxd_buffer_pointer 8 9950 _002923_hash NULL
|
|
+_002924_hash sge_rx 3 50594 _002924_hash NULL
|
|
+_002925_hash skb_cow_data 2 11565 _002925_hash NULL
|
|
+_002926_hash smp_build_cmd 3 45853 _002926_hash NULL
|
|
+_002927_hash sock_alloc_send_skb 2 23720 _002927_hash NULL
|
|
+_002928_hash sys_pselect6 1 57449 _002928_hash NULL
|
|
+_002929_hash tcp_fragment 3 20436 _002929_hash NULL
|
|
+_002930_hash teiup_create 3 43201 _002930_hash NULL
|
|
+_002931_hash tg3_run_loopback 2 30093 _002931_hash NULL
|
|
+_002932_hash tun_alloc_skb 2-4-3 41216 _002932_hash NULL
|
|
+_002935_hash ubifs_write_node 5 11258 _002935_hash NULL
|
|
+_002936_hash use_pool 2 64607 _002936_hash NULL
|
|
+_002937_hash vxge_rx_alloc 3 52024 _002937_hash NULL
|
|
+_002938_hash add_packet 3 54433 _002938_hash NULL
|
|
+_002939_hash add_rx_skb 3 8257 _002939_hash NULL
|
|
+_002940_hash ath6kl_buf_alloc 1 57304 _002940_hash NULL
|
|
+_002941_hash bat_iv_ogm_aggregate_new 2 2620 _002941_hash NULL
|
|
+_002942_hash bnx2fc_process_l2_frame_compl 3 65072 _002942_hash NULL
|
|
+_002943_hash brcmu_pkt_buf_get_skb 1 5556 _002943_hash NULL
|
|
+_002944_hash br_send_bpdu 3 29669 _002944_hash NULL
|
|
+_002945_hash bt_skb_send_alloc 2 6581 _002945_hash NULL
|
|
+_002946_hash c4iw_reject_cr 3 28174 _002946_hash NULL
|
|
+_002947_hash carl9170_rx_copy_data 2 21656 _002947_hash NULL
|
|
+_002948_hash cfpkt_add_body 3 44630 _002948_hash NULL
|
|
+_002949_hash cfpkt_append 3 61206 _002949_hash NULL
|
|
+_002950_hash cosa_net_setup_rx 2 38594 _002950_hash NULL
|
|
+_002951_hash cxgb4_pktgl_to_skb 2 61899 _002951_hash NULL
|
|
+_002952_hash dn_alloc_send_pskb 2 4465 _002952_hash NULL
|
|
+_002953_hash dn_nsp_return_disc 2 60296 _002953_hash NULL
|
|
+_002954_hash dn_nsp_send_disc 2 23469 _002954_hash NULL
|
|
+_002955_hash dsp_tone_hw_message 3 17678 _002955_hash NULL
|
|
+_002956_hash dvb_net_sec 3 37884 _002956_hash NULL
|
|
+_002957_hash e1000_check_copybreak 3 62448 _002957_hash NULL
|
|
+_002958_hash fast_rx_path 3 59214 _002958_hash NULL
|
|
+_002959_hash fc_fcp_frame_alloc 2 12624 _002959_hash NULL
|
|
+_002960_hash fcoe_ctlr_send_keep_alive 3 15308 _002960_hash NULL
|
|
+_002961_hash fwnet_incoming_packet 3 40380 _002961_hash NULL
|
|
+_002962_hash fwnet_pd_new 4 39947 _002962_hash NULL
|
|
+_002963_hash got_frame 2 16028 _002963_hash NULL
|
|
+_002964_hash gsm_mux_rx_netchar 3 33336 _002964_hash NULL
|
|
+_002965_hash hdlcdev_rx 3 997 _002965_hash NULL
|
|
+_002966_hash hdlc_empty_fifo 2 18397 _002966_hash NULL
|
|
+_002967_hash hfc_empty_fifo 2 57972 _002967_hash NULL
|
|
+_002968_hash hfcpci_empty_fifo 4 2427 _002968_hash NULL
|
|
+_002969_hash hfcsusb_rx_frame 3 52745 _002969_hash NULL
|
|
+_002970_hash hidp_output_raw_report 3 5629 _002970_hash NULL
|
|
+_002971_hash hscx_empty_fifo 2 13360 _002971_hash NULL
|
|
+_002972_hash hysdn_rx_netpkt 3 16136 _002972_hash NULL
|
|
+_002973_hash ieee80211_fragment 4 33112 _002973_hash NULL
|
|
+_002974_hash ieee80211_probereq_get 4-6 29069 _002974_hash NULL
|
|
+_002976_hash ieee80211_send_auth 5 24121 _002976_hash NULL
|
|
+_002977_hash ieee80211_set_probe_resp 3 10077 _002977_hash NULL
|
|
+_002978_hash ieee80211_tdls_mgmt 8 9581 _002978_hash NULL
|
|
+_002979_hash ip6_ufo_append_data 5-7-6 4780 _002979_hash NULL
|
|
+_002982_hash ip_ufo_append_data 6-8-7 12775 _002982_hash NULL
|
|
+_002985_hash ipw_packet_received_skb 2 1230 _002985_hash NULL
|
|
+_002986_hash iwch_reject_cr 3 23901 _002986_hash NULL
|
|
+_002987_hash iwm_rx_packet_alloc 3 9898 _002987_hash NULL
|
|
+_002988_hash ixgb_check_copybreak 3 5847 _002988_hash NULL
|
|
+_002989_hash l1oip_socket_parse 4 4507 _002989_hash NULL
|
|
+_002990_hash l2cap_send_cmd 4 14548 _002990_hash NULL
|
|
+_002991_hash l2tp_ip6_sendmsg 4 7461 _002991_hash NULL
|
|
+_002993_hash lowpan_fragment_xmit 3-4 22095 _002993_hash NULL
|
|
+_002996_hash mcs_unwrap_fir 3 25733 _002996_hash NULL
|
|
+_002997_hash mcs_unwrap_mir 3 9455 _002997_hash NULL
|
|
+_002998_hash mld_newpack 2 50950 _002998_hash NULL
|
|
+_002999_hash nfc_alloc_send_skb 4 3167 _002999_hash NULL
|
|
+_003000_hash p54_download_eeprom 4 43842 _003000_hash NULL
|
|
+_003002_hash ppp_tx_cp 5 62044 _003002_hash NULL
|
|
+_003003_hash prism2_send_mgmt 4 62605 _003003_hash &_001876_hash
|
|
+_003004_hash prism2_sta_send_mgmt 5 43916 _003004_hash NULL
|
|
+_003005_hash _queue_data 4 54983 _003005_hash NULL
|
|
+_003006_hash read_dma 3 55086 _003006_hash NULL
|
|
+_003007_hash read_fifo 3 826 _003007_hash NULL
|
|
+_003008_hash receive_copy 3 12216 _003008_hash NULL
|
|
+_003009_hash rtl8169_try_rx_copy 3 705 _003009_hash NULL
|
|
+_003010_hash _rtl92s_firmware_downloadcode 3 14021 _003010_hash NULL
|
|
+_003011_hash rx_data 4 60442 _003011_hash NULL
|
|
+_003012_hash sis190_try_rx_copy 3 57069 _003012_hash NULL
|
|
+_003013_hash skge_rx_get 3 40598 _003013_hash NULL
|
|
+_003014_hash tcp_mark_head_lost 2 35895 _003014_hash NULL
|
|
+_003015_hash tcp_match_skb_to_sack 3-4 23568 _003015_hash NULL
|
|
+_003017_hash tso_fragment 3 29050 _003017_hash NULL
|
|
+_003018_hash tt_response_fill_table 1 57902 _003018_hash NULL
|
|
+_003020_hash udpv6_sendmsg 4 22316 _003020_hash NULL
|
|
+_003021_hash velocity_rx_copy 2 34583 _003021_hash NULL
|
|
+_003022_hash W6692_empty_Bfifo 2 47804 _003022_hash NULL
|
|
+_003023_hash zd_mac_rx 3 38296 _003023_hash NULL
|
|
+_003024_hash ath6kl_wmi_get_new_buf 1 52304 _003024_hash NULL
|
|
+_003025_hash bat_iv_ogm_queue_add 3 30870 _003025_hash NULL
|
|
+_003026_hash brcmf_alloc_pkt_and_read 2 63116 _003026_hash &_001808_hash
|
|
+_003027_hash brcmf_sdcard_recv_buf 6 38179 _003027_hash NULL
|
|
+_003028_hash brcmf_sdcard_rwdata 5 65041 _003028_hash NULL
|
|
+_003029_hash brcmf_sdcard_send_buf 6 7713 _003029_hash NULL
|
|
+_003030_hash carl9170_handle_mpdu 3 11056 _003030_hash NULL
|
|
+_003031_hash cfpkt_add_trail 3 27260 _003031_hash NULL
|
|
+_003032_hash cfpkt_pad_trail 2 55511 _003032_hash NULL
|
|
+_003033_hash dvb_net_sec_callback 2 28786 _003033_hash NULL
|
|
+_003034_hash fwnet_receive_packet 9 50537 _003034_hash NULL
|
|
+_003035_hash handle_rx_packet 3 58993 _003035_hash NULL
|
|
+_003036_hash HDLC_irq 2 8709 _003036_hash NULL
|
|
+_003037_hash hdlc_rpr_irq 2 10240 _003037_hash NULL
|
|
+_003043_hash ipwireless_network_packet_received 4 51277 _003043_hash NULL
|
|
+_003044_hash l2cap_bredr_sig_cmd 3 49065 _003044_hash NULL
|
|
+_003045_hash l2cap_sock_alloc_skb_cb 2 33532 _003045_hash NULL
|
|
+_003046_hash llcp_allocate_pdu 3 19866 _003046_hash NULL
|
|
+_003047_hash ppp_cp_event 6 2965 _003047_hash NULL
|
|
+_003048_hash receive_client_update_packet 3 49104 _003048_hash NULL
|
|
+_003049_hash receive_server_sync_packet 3 59021 _003049_hash NULL
|
|
+_003050_hash sky2_receive 2 13407 _003050_hash NULL
|
|
+_003051_hash tcp_sacktag_walk 5-6 49703 _003051_hash NULL
|
|
+_003053_hash tcp_write_xmit 2 64602 _003053_hash NULL
|
|
+_003054_hash ath6kl_wmi_add_wow_pattern_cmd 4 12842 _003054_hash NULL
|
|
+_003055_hash ath6kl_wmi_beginscan_cmd 8 25462 _003055_hash NULL
|
|
+_003056_hash ath6kl_wmi_send_probe_response_cmd 6 31728 _003056_hash NULL
|
|
+_003057_hash ath6kl_wmi_set_appie_cmd 5 39266 _003057_hash NULL
|
|
+_003058_hash ath6kl_wmi_set_ie_cmd 6 37260 _003058_hash NULL
|
|
+_003059_hash ath6kl_wmi_startscan_cmd 8 33674 _003059_hash NULL
|
|
+_003060_hash ath6kl_wmi_test_cmd 3 27312 _003060_hash NULL
|
|
+_003061_hash brcmf_sdbrcm_membytes 3-5 37324 _003061_hash NULL
|
|
+_003063_hash brcmf_sdbrcm_read_control 3 22721 _003063_hash NULL
|
|
+_003064_hash brcmf_tx_frame 3 20978 _003064_hash NULL
|
|
+_003065_hash __carl9170_rx 3 56784 _003065_hash NULL
|
|
+_003066_hash cfpkt_setlen 2 49343 _003066_hash NULL
|
|
+_003067_hash hdlc_irq_one 2 3944 _003067_hash NULL
|
|
+_003069_hash tcp_push_one 2 48816 _003069_hash NULL
|
|
+_003070_hash __tcp_push_pending_frames 2 48148 _003070_hash NULL
|
|
+_003071_hash brcmf_sdbrcm_bus_txctl 3 42492 _003071_hash NULL
|
|
+_003072_hash carl9170_rx 3 13272 _003072_hash NULL
|
|
+_003073_hash carl9170_rx_stream 3 1334 _003073_hash NULL
|
|
+_003074_hash tcp_push 3 10680 _003074_hash NULL
|
|
+_003075_hash create_log 2 8225 _003075_hash NULL
|
|
+_003076_hash expand_files 2 17080 _003076_hash NULL
|
|
+_003077_hash iio_device_alloc 1 41440 _003077_hash NULL
|
|
+_003078_hash OS_mem_token_alloc 1 14276 _003078_hash NULL
|
|
+_003079_hash packet_came 3 18072 _003079_hash NULL
|
|
+_003080_hash softsynth_write 3 3455 _003080_hash NULL
|
|
+_003081_hash alloc_fd 1 37637 _003081_hash NULL
|
|
+_003082_hash sys_dup3 2 33421 _003082_hash NULL
|
|
+_003083_hash do_fcntl 3 31468 _003083_hash NULL
|
|
+_003084_hash sys_dup2 2 25284 _003084_hash NULL
|
|
+_003085_hash sys_fcntl 3 19267 _003085_hash NULL
|
|
+_003086_hash sys_fcntl64 3 29031 _003086_hash NULL
|
|
+_003087_hash cmpk_message_handle_tx 4 54024 _003087_hash NULL
|
|
+_003088_hash comedi_buf_alloc 3 24822 _003088_hash NULL
|
|
+_003089_hash compat_rw_copy_check_uvector 3 22001 _003089_hash &_001989_hash
|
|
+_003090_hash compat_sys_fcntl64 3 60256 _003090_hash NULL
|
|
+_003091_hash evtchn_write 3 43278 _003091_hash NULL
|
|
+_003092_hash fw_download_code 3 13249 _003092_hash NULL
|
|
+_003093_hash fwSendNullPacket 2 54618 _003093_hash NULL
|
|
+_003095_hash ieee80211_authentication_req 3 63973 _003095_hash NULL
|
|
+_003097_hash rtllib_authentication_req 3 26713 _003097_hash NULL
|
|
+_003098_hash SendTxCommandPacket 3 42901 _003098_hash NULL
|
|
+_003099_hash snd_nm256_capture_copy 5 28622 _003099_hash NULL
|
|
+_003100_hash snd_nm256_playback_copy 5 38567 _003100_hash NULL
|
|
+_003101_hash tomoyo_init_log 2 14806 _003101_hash NULL
|
|
+_003102_hash usbdux_attach_common 4 51764 _003102_hash NULL
|
|
+_003103_hash compat_sys_fcntl 3 15654 _003103_hash NULL
|
|
+_003104_hash ieee80211_auth_challenge 3 18810 _003104_hash NULL
|
|
+_003105_hash ieee80211_rtl_auth_challenge 3 61897 _003105_hash NULL
|
|
+_003106_hash resize_async_buffer 4 64031 _003106_hash &_002119_hash
|
|
+_003107_hash rtllib_auth_challenge 3 12493 _003107_hash NULL
|
|
+_003108_hash tomoyo_write_log2 2 11732 _003108_hash NULL
|
|
+_003109_hash l2cap_sock_alloc_skb_cb 2 27671 _003109_hash NULL
|
|
+_003110_hash tcp_sacktag_walk 5-6 26339 _003110_hash NULL
|
|
+_003112_hash tcp_write_xmit 2 39755 _003112_hash NULL
|
|
+_003113_hash ab8500_address_write 3 4099 _003113_hash NULL
|
|
+_003114_hash ab8500_bank_write 3 51960 _003114_hash NULL
|
|
+_003115_hash ab8500_val_write 3 16473 _003115_hash NULL
|
|
+_003116_hash allocate_probes 1 40204 _003116_hash NULL
|
|
+_003117_hash alloc_ftrace_hash 1 57431 _003117_hash &_002532_hash
|
|
+_003118_hash __alloc_preds 2 9492 _003118_hash NULL
|
|
+_003119_hash __alloc_pred_stack 2 26687 _003119_hash NULL
|
|
+_003120_hash alloc_sched_domains 1 47756 _003120_hash NULL
|
|
+_003121_hash alloc_trace_probe 6 12323 _003121_hash NULL
|
|
+_003122_hash blk_dropped_read 3 4168 _003122_hash NULL
|
|
+_003123_hash blk_msg_write 3 13655 _003123_hash NULL
|
|
+_003124_hash cyttsp_probe 4 1940 _003124_hash NULL
|
|
+_003125_hash dccpprobe_read 3 52549 _003125_hash NULL
|
|
+_003126_hash event_enable_read 3 7074 _003126_hash NULL
|
|
+_003127_hash event_enable_write 3 45238 _003127_hash NULL
|
|
+_003128_hash event_filter_read 3 23494 _003128_hash NULL
|
|
+_003129_hash event_filter_write 3 56609 _003129_hash NULL
|
|
+_003130_hash event_id_read 3 64288 _003130_hash &_001240_hash
|
|
+_003131_hash ftrace_pid_write 3 39710 _003131_hash NULL
|
|
+_003132_hash ftrace_profile_read 3 21327 _003132_hash NULL
|
|
+_003133_hash ftrace_profile_write 3 53327 _003133_hash NULL
|
|
+_003134_hash hsc_msg_alloc 1 60990 _003134_hash NULL
|
|
+_003135_hash hsc_write 3 55875 _003135_hash NULL
|
|
+_003136_hash hsi_alloc_controller 1 41802 _003136_hash NULL
|
|
+_003137_hash hsi_register_board_info 2 13820 _003137_hash NULL
|
|
+_003138_hash ivtvfb_write 3 40023 _003138_hash NULL
|
|
+_003139_hash probes_write 3 29711 _003139_hash NULL
|
|
+_003140_hash rb_simple_read 3 45972 _003140_hash NULL
|
|
+_003141_hash rb_simple_write 3 20890 _003141_hash NULL
|
|
+_003142_hash show_header 3 4722 _003142_hash &_000736_hash
|
|
+_003143_hash stack_max_size_read 3 1445 _003143_hash NULL
|
|
+_003144_hash stack_max_size_write 3 36068 _003144_hash NULL
|
|
+_003145_hash subsystem_filter_read 3 62310 _003145_hash NULL
|
|
+_003146_hash subsystem_filter_write 3 13022 _003146_hash NULL
|
|
+_003147_hash system_enable_read 3 25815 _003147_hash NULL
|
|
+_003148_hash system_enable_write 3 61396 _003148_hash NULL
|
|
+_003149_hash trace_options_core_read 3 47390 _003149_hash NULL
|
|
+_003150_hash trace_options_core_write 3 61551 _003150_hash NULL
|
|
+_003151_hash trace_options_read 3 11419 _003151_hash NULL
|
|
+_003152_hash trace_options_write 3 48275 _003152_hash NULL
|
|
+_003153_hash trace_parser_get_init 2 31379 _003153_hash NULL
|
|
+_003154_hash trace_seq_to_user 3 65398 _003154_hash NULL
|
|
+_003155_hash tracing_buffers_read 3 11124 _003155_hash NULL
|
|
+_003156_hash tracing_clock_write 3 27961 _003156_hash NULL
|
|
+_003157_hash tracing_cpumask_read 3 7010 _003157_hash NULL
|
|
+_003158_hash tracing_ctrl_read 3 46922 _003158_hash NULL
|
|
+_003159_hash tracing_ctrl_write 3 42324 _003159_hash &_001726_hash
|
|
+_003160_hash tracing_entries_read 3 8345 _003160_hash NULL
|
|
+_003161_hash tracing_entries_write 3 60563 _003161_hash NULL
|
|
+_003162_hash tracing_max_lat_read 3 8890 _003162_hash NULL
|
|
+_003163_hash tracing_max_lat_write 3 8728 _003163_hash NULL
|
|
+_003164_hash tracing_read_dyn_info 3 45468 _003164_hash NULL
|
|
+_003165_hash tracing_readme_read 3 16493 _003165_hash NULL
|
|
+_003166_hash tracing_saved_cmdlines_read 3 21434 _003166_hash NULL
|
|
+_003167_hash tracing_set_trace_read 3 44122 _003167_hash NULL
|
|
+_003168_hash tracing_set_trace_write 3 57096 _003168_hash NULL
|
|
+_003169_hash tracing_stats_read 3 34537 _003169_hash NULL
|
|
+_003170_hash tracing_total_entries_read 3 62817 _003170_hash NULL
|
|
+_003171_hash tracing_trace_options_write 3 153 _003171_hash NULL
|
|
+_003172_hash ttm_put_pages 2 9179 _003172_hash NULL
|
|
+_003173_hash u_memcpya 2-3 30139 _003173_hash NULL
|
|
+_003174_hash alloc_and_copy_ftrace_hash 1 29368 _003174_hash NULL
|
|
+_003175_hash ath6kl_sdio_alloc_prep_scat_req 2 51986 _003175_hash NULL
|
|
+_003176_hash ath6kl_usb_submit_ctrl_in 6 32880 _003176_hash &_000778_hash
|
|
+_003177_hash ath6kl_usb_submit_ctrl_out 6 9978 _003177_hash NULL
|
|
+_003178_hash brcmf_usbdev_qinit 2 19090 _003178_hash &_001533_hash
|
|
+_003179_hash brcmf_usb_dl_cmd 4 53130 _003179_hash NULL
|
|
+_003180_hash create_trace_probe 1 20175 _003180_hash NULL
|
|
+_003181_hash da9052_group_write 3 4534 _003181_hash NULL
|
|
+_003182_hash mmio_read 4 40348 _003182_hash NULL
|
|
+_003183_hash ptp_filter_init 2 36780 _003183_hash NULL
|
|
+_003184_hash read_file_dfs 3 43145 _003184_hash NULL
|
|
+_003185_hash tracing_read_pipe 3 35312 _003185_hash NULL
|
|
+_003186_hash vivi_read 3 23073 _003186_hash NULL
|
|
+_003187_hash arcfb_write 3 8702 _003187_hash NULL
|
|
+_003188_hash beacon_interval_write 3 17952 _003188_hash NULL
|
|
+_003189_hash brcmf_usb_attach 1-2 44656 _003189_hash NULL
|
|
+_003191_hash broadsheetfb_write 3 39976 _003191_hash NULL
|
|
+_003192_hash broadsheet_spiflash_rewrite_sector 2 54864 _003192_hash NULL
|
|
+_003193_hash dtim_interval_write 3 30489 _003193_hash NULL
|
|
+_003194_hash dynamic_ps_timeout_write 3 37713 _003194_hash NULL
|
|
+_003195_hash f_audio_buffer_alloc 1 41110 _003195_hash NULL
|
|
+_003196_hash fb_sys_read 3 13778 _003196_hash NULL
|
|
+_003197_hash fb_sys_write 3 33130 _003197_hash NULL
|
|
+_003198_hash forced_ps_write 3 37209 _003198_hash NULL
|
|
+_003199_hash gpio_power_write 3 1991 _003199_hash NULL
|
|
+_003200_hash hecubafb_write 3 26942 _003200_hash NULL
|
|
+_003201_hash metronomefb_write 3 8823 _003201_hash NULL
|
|
+_003202_hash odev_update 2 50169 _003202_hash NULL
|
|
+_003203_hash oz_add_farewell 5 20652 _003203_hash NULL
|
|
+_003204_hash oz_cdev_read 3 20659 _003204_hash NULL
|
|
+_003205_hash oz_cdev_write 3 33852 _003205_hash NULL
|
|
+_003206_hash oz_ep_alloc 2 5587 _003206_hash NULL
|
|
+_003207_hash pmcraid_copy_sglist 3 38431 _003207_hash NULL
|
|
+_003208_hash rx_streaming_always_write 3 32357 _003208_hash NULL
|
|
+_003209_hash rx_streaming_interval_write 3 50120 _003209_hash NULL
|
|
+_003210_hash split_scan_timeout_write 3 52128 _003210_hash NULL
|
|
+_003211_hash suspend_dtim_interval_write 3 48854 _003211_hash NULL
|
|
+_003212_hash ufx_alloc_urb_list 3 10349 _003212_hash NULL
|
|
+_003213_hash viafb_dfph_proc_write 3 49288 _003213_hash NULL
|
|
+_003214_hash viafb_dfpl_proc_write 3 627 _003214_hash NULL
|
|
+_003215_hash viafb_dvp0_proc_write 3 23023 _003215_hash NULL
|
|
+_003216_hash viafb_dvp1_proc_write 3 48864 _003216_hash NULL
|
|
+_003217_hash viafb_vt1636_proc_write 3 16018 _003217_hash NULL
|
|
+_003218_hash wl1271_rx_handle_data 3 56360 _003218_hash NULL
|
|
+_003219_hash wl12xx_cmd_build_probe_req 6-8 3098 _003219_hash NULL
|
|
+_003220_hash picolcd_fb_write 3 2318 _003220_hash NULL
|
|
+_003221_hash dlfb_ops_write 3 64150 _003221_hash NULL
|
|
+_003222_hash ufx_ops_write 3 54848 _003222_hash NULL
|
|
+_003223_hash viafb_iga1_odev_proc_write 3 36241 _003223_hash NULL
|
|
+_003224_hash viafb_iga2_odev_proc_write 3 2363 _003224_hash NULL
|
|
+_003225_hash xenfb_write 3 43412 _003225_hash NULL
|
|
diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
|
|
new file mode 100644
|
|
index 0000000..5af42b5
|
|
--- /dev/null
|
|
+++ b/tools/gcc/size_overflow_plugin.c
|
|
@@ -0,0 +1,1558 @@
|
|
+/*
|
|
+ * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
|
|
+ * Licensed under the GPL v2, or (at your option) v3
|
|
+ *
|
|
+ * Homepage:
|
|
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
|
|
+ *
|
|
+ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
|
|
+ * with double integer precision (DImode/TImode for 32/64 bit integer types).
|
|
+ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
|
|
+ *
|
|
+ * Usage:
|
|
+ * $ gcc -I`gcc -print-file-name=plugin`/include/c-family -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -ggdb -Wall -W -Wno-missing-field-initializers -o size_overflow_plugin.so size_overflow_plugin.c
|
|
+ * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
|
|
+ */
|
|
+
|
|
+#include "gcc-plugin.h"
|
|
+#include "config.h"
|
|
+#include "system.h"
|
|
+#include "coretypes.h"
|
|
+#include "tree.h"
|
|
+#include "tree-pass.h"
|
|
+#include "intl.h"
|
|
+#include "plugin-version.h"
|
|
+#include "tm.h"
|
|
+#include "toplev.h"
|
|
+#include "function.h"
|
|
+#include "tree-flow.h"
|
|
+#include "plugin.h"
|
|
+#include "gimple.h"
|
|
+#include "c-common.h"
|
|
+#include "diagnostic.h"
|
|
+#include "cfgloop.h"
|
|
+
|
|
+struct size_overflow_hash {
|
|
+ struct size_overflow_hash *next;
|
|
+ const char *name;
|
|
+ unsigned int param;
|
|
+};
|
|
+
|
|
+#include "size_overflow_hash.h"
|
|
+
|
|
+#define __unused __attribute__((__unused__))
|
|
+#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
|
|
+#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
|
|
+#define BEFORE_STMT true
|
|
+#define AFTER_STMT false
|
|
+#define CREATE_NEW_VAR NULL_TREE
|
|
+#define CODES_LIMIT 32
|
|
+#define MAX_PARAM 10
|
|
+#define MY_STMT GF_PLF_1
|
|
+#define NO_CAST_CHECK GF_PLF_2
|
|
+
|
|
+#if BUILDING_GCC_VERSION == 4005
|
|
+#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
|
|
+#endif
|
|
+
|
|
+int plugin_is_GPL_compatible;
|
|
+void debug_gimple_stmt(gimple gs);
|
|
+
|
|
+static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var);
|
|
+static tree report_size_overflow_decl;
|
|
+static tree const_char_ptr_type_node;
|
|
+static unsigned int handle_function(void);
|
|
+static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool *potentionally_overflowed, bool before);
|
|
+static tree get_size_overflow_type(gimple stmt, tree node);
|
|
+
|
|
+static struct plugin_info size_overflow_plugin_info = {
|
|
+ .version = "20120811beta",
|
|
+ .help = "no-size-overflow\tturn off size overflow checking\n",
|
|
+};
|
|
+
|
|
+static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
|
|
+{
|
|
+ unsigned int arg_count;
|
|
+
|
|
+ if (TREE_CODE(*node) == FUNCTION_DECL)
|
|
+ arg_count = type_num_arguments(TREE_TYPE(*node));
|
|
+ else if (TREE_CODE(*node) == FUNCTION_TYPE || TREE_CODE(*node) == METHOD_TYPE)
|
|
+ arg_count = type_num_arguments(*node);
|
|
+ else {
|
|
+ *no_add_attrs = true;
|
|
+ error("%qE attribute only applies to functions", name);
|
|
+ return NULL_TREE;
|
|
+ }
|
|
+
|
|
+ for (; args; args = TREE_CHAIN(args)) {
|
|
+ tree position = TREE_VALUE(args);
|
|
+ if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) {
|
|
+ error("handle_size_overflow_attribute: overflow parameter outside range.");
|
|
+ *no_add_attrs = true;
|
|
+ }
|
|
+ }
|
|
+ return NULL_TREE;
|
|
+}
|
|
+
|
|
+static struct attribute_spec size_overflow_attr = {
|
|
+ .name = "size_overflow",
|
|
+ .min_length = 1,
|
|
+ .max_length = -1,
|
|
+ .decl_required = true,
|
|
+ .type_required = false,
|
|
+ .function_type_required = false,
|
|
+ .handler = handle_size_overflow_attribute,
|
|
+#if BUILDING_GCC_VERSION >= 4007
|
|
+ .affects_type_identity = false
|
|
+#endif
|
|
+};
|
|
+
|
|
+static void register_attributes(void __unused *event_data, void __unused *data)
|
|
+{
|
|
+ register_attribute(&size_overflow_attr);
|
|
+}
|
|
+
|
|
+// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
|
|
+static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
|
|
+{
|
|
+#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
|
|
+#define cwmixa( in ) { cwfold( in, m, k, h ); }
|
|
+#define cwmixb( in ) { cwfold( in, n, h, k ); }
|
|
+
|
|
+ const unsigned int m = 0x57559429;
|
|
+ const unsigned int n = 0x5052acdb;
|
|
+ const unsigned int *key4 = (const unsigned int *)key;
|
|
+ unsigned int h = len;
|
|
+ unsigned int k = len + seed + n;
|
|
+ unsigned long long p;
|
|
+
|
|
+ while (len >= 8) {
|
|
+ cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
|
|
+ len -= 8;
|
|
+ }
|
|
+ if (len >= 4) {
|
|
+ cwmixb(key4[0]) key4 += 1;
|
|
+ len -= 4;
|
|
+ }
|
|
+ if (len)
|
|
+ cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
|
|
+ cwmixb(h ^ (k + n));
|
|
+ return k ^ h;
|
|
+
|
|
+#undef cwfold
|
|
+#undef cwmixa
|
|
+#undef cwmixb
|
|
+}
|
|
+
|
|
+static inline unsigned int get_hash_num(const char *fndecl, const char *tree_codes, unsigned int len, unsigned int seed)
|
|
+{
|
|
+ unsigned int fn = CrapWow(fndecl, strlen(fndecl), seed) & 0xffff;
|
|
+ unsigned int codes = CrapWow(tree_codes, len, seed) & 0xffff;
|
|
+ return fn ^ codes;
|
|
+}
|
|
+
|
|
+static inline tree get_original_function_decl(tree fndecl)
|
|
+{
|
|
+ if (DECL_ABSTRACT_ORIGIN(fndecl))
|
|
+ return DECL_ABSTRACT_ORIGIN(fndecl);
|
|
+ return fndecl;
|
|
+}
|
|
+
|
|
+static inline gimple get_def_stmt(tree node)
|
|
+{
|
|
+ gcc_assert(node != NULL_TREE);
|
|
+ gcc_assert(TREE_CODE(node) == SSA_NAME);
|
|
+ return SSA_NAME_DEF_STMT(node);
|
|
+}
|
|
+
|
|
+static unsigned char get_tree_code(tree type)
|
|
+{
|
|
+ switch (TREE_CODE(type)) {
|
|
+ case ARRAY_TYPE:
|
|
+ return 0;
|
|
+ case BOOLEAN_TYPE:
|
|
+ return 1;
|
|
+ case ENUMERAL_TYPE:
|
|
+ return 2;
|
|
+ case FUNCTION_TYPE:
|
|
+ return 3;
|
|
+ case INTEGER_TYPE:
|
|
+ return 4;
|
|
+ case POINTER_TYPE:
|
|
+ return 5;
|
|
+ case RECORD_TYPE:
|
|
+ return 6;
|
|
+ case UNION_TYPE:
|
|
+ return 7;
|
|
+ case VOID_TYPE:
|
|
+ return 8;
|
|
+ case REAL_TYPE:
|
|
+ return 9;
|
|
+ case VECTOR_TYPE:
|
|
+ return 10;
|
|
+ case REFERENCE_TYPE:
|
|
+ return 11;
|
|
+ default:
|
|
+ debug_tree(type);
|
|
+ gcc_unreachable();
|
|
+ }
|
|
+}
|
|
+
|
|
+static size_t add_type_codes(tree type, unsigned char *tree_codes, size_t len)
|
|
+{
|
|
+ gcc_assert(type != NULL_TREE);
|
|
+
|
|
+ while (type && len < CODES_LIMIT) {
|
|
+ tree_codes[len] = get_tree_code(type);
|
|
+ len++;
|
|
+ type = TREE_TYPE(type);
|
|
+ }
|
|
+ return len;
|
|
+}
|
|
+
|
|
+static unsigned int get_function_decl(tree fndecl, unsigned char *tree_codes)
|
|
+{
|
|
+ tree arg, result, type = TREE_TYPE(fndecl);
|
|
+ enum tree_code code = TREE_CODE(type);
|
|
+ size_t len = 0;
|
|
+
|
|
+ gcc_assert(code == FUNCTION_TYPE);
|
|
+
|
|
+ arg = TYPE_ARG_TYPES(type);
|
|
+ // skip builtins __builtin_constant_p
|
|
+ if (!arg && DECL_BUILT_IN(fndecl))
|
|
+ return 0;
|
|
+ gcc_assert(arg != NULL_TREE);
|
|
+
|
|
+ if (TREE_CODE_CLASS(code) == tcc_type)
|
|
+ result = type;
|
|
+ else
|
|
+ result = DECL_RESULT(fndecl);
|
|
+
|
|
+ gcc_assert(result != NULL_TREE);
|
|
+ len = add_type_codes(TREE_TYPE(result), tree_codes, len);
|
|
+
|
|
+ while (arg && len < CODES_LIMIT) {
|
|
+ len = add_type_codes(TREE_VALUE(arg), tree_codes, len);
|
|
+ arg = TREE_CHAIN(arg);
|
|
+ }
|
|
+
|
|
+ gcc_assert(len != 0);
|
|
+ return len;
|
|
+}
|
|
+
|
|
+static struct size_overflow_hash *get_function_hash(tree fndecl)
|
|
+{
|
|
+ unsigned int hash;
|
|
+ struct size_overflow_hash *entry;
|
|
+ unsigned char tree_codes[CODES_LIMIT];
|
|
+ size_t len;
|
|
+ const char *func_name = NAME(fndecl);
|
|
+
|
|
+ len = get_function_decl(fndecl, tree_codes);
|
|
+ if (len == 0)
|
|
+ return NULL;
|
|
+
|
|
+ hash = get_hash_num(func_name, (const char*) tree_codes, len, 0);
|
|
+
|
|
+ entry = size_overflow_hash[hash];
|
|
+ while (entry) {
|
|
+ if (!strcmp(entry->name, func_name))
|
|
+ return entry;
|
|
+ entry = entry->next;
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static void check_arg_type(tree var)
|
|
+{
|
|
+ tree type = TREE_TYPE(var);
|
|
+ enum tree_code code = TREE_CODE(type);
|
|
+
|
|
+ gcc_assert(code == INTEGER_TYPE || code == ENUMERAL_TYPE ||
|
|
+ (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == VOID_TYPE) ||
|
|
+ (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE));
|
|
+}
|
|
+
|
|
+static int find_arg_number(tree arg, tree func)
|
|
+{
|
|
+ tree var;
|
|
+ bool match = false;
|
|
+ unsigned int argnum = 1;
|
|
+
|
|
+ if (TREE_CODE(arg) == SSA_NAME)
|
|
+ arg = SSA_NAME_VAR(arg);
|
|
+
|
|
+ for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) {
|
|
+ if (strcmp(NAME(arg), NAME(var))) {
|
|
+ argnum++;
|
|
+ continue;
|
|
+ }
|
|
+ check_arg_type(var);
|
|
+
|
|
+ match = true;
|
|
+ break;
|
|
+ }
|
|
+ if (!match) {
|
|
+ warning(0, "find_arg_number: cannot find the %s argument in %s", NAME(arg), NAME(func));
|
|
+ return 0;
|
|
+ }
|
|
+ return argnum;
|
|
+}
|
|
+
|
|
+static void print_missing_msg(tree func, unsigned int argnum)
|
|
+{
|
|
+ unsigned int new_hash;
|
|
+ size_t len;
|
|
+ unsigned char tree_codes[CODES_LIMIT];
|
|
+ location_t loc = DECL_SOURCE_LOCATION(func);
|
|
+ const char *curfunc = NAME(func);
|
|
+
|
|
+ len = get_function_decl(func, tree_codes);
|
|
+ new_hash = get_hash_num(curfunc, (const char *) tree_codes, len, 0);
|
|
+ inform(loc, "Function %s is missing from the size_overflow hash table +%s+%d+%u+", curfunc, curfunc, argnum, new_hash);
|
|
+}
|
|
+
|
|
+static void check_missing_attribute(tree arg)
|
|
+{
|
|
+ tree type, func = get_original_function_decl(current_function_decl);
|
|
+ unsigned int argnum;
|
|
+ struct size_overflow_hash *hash;
|
|
+
|
|
+ gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
|
|
+
|
|
+ type = TREE_TYPE(arg);
|
|
+
|
|
+ if (TREE_CODE(type) == POINTER_TYPE)
|
|
+ return;
|
|
+
|
|
+ if (lookup_attribute("size_overflow", DECL_ATTRIBUTES(func)))
|
|
+ return;
|
|
+
|
|
+ argnum = find_arg_number(arg, func);
|
|
+ if (argnum == 0)
|
|
+ return;
|
|
+
|
|
+ hash = get_function_hash(func);
|
|
+ if (!hash || !(hash->param & (1U << argnum)))
|
|
+ print_missing_msg(func, argnum);
|
|
+}
|
|
+
|
|
+static tree create_new_var(tree type)
|
|
+{
|
|
+ tree new_var = create_tmp_var(type, "cicus");
|
|
+
|
|
+ add_referenced_var(new_var);
|
|
+ mark_sym_for_renaming(new_var);
|
|
+ return new_var;
|
|
+}
|
|
+
|
|
+static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
|
|
+{
|
|
+ gimple assign;
|
|
+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
|
|
+ tree type = TREE_TYPE(rhs1);
|
|
+ tree lhs = create_new_var(type);
|
|
+
|
|
+ assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
|
|
+ gimple_set_lhs(assign, make_ssa_name(lhs, assign));
|
|
+
|
|
+ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
|
|
+ update_stmt(assign);
|
|
+ gimple_set_plf(assign, MY_STMT, true);
|
|
+ return assign;
|
|
+}
|
|
+
|
|
+static bool is_bool(tree node)
|
|
+{
|
|
+ tree type;
|
|
+
|
|
+ if (node == NULL_TREE)
|
|
+ return false;
|
|
+
|
|
+ type = TREE_TYPE(node);
|
|
+ if (!INTEGRAL_TYPE_P(type))
|
|
+ return false;
|
|
+ if (TREE_CODE(type) == BOOLEAN_TYPE)
|
|
+ return true;
|
|
+ if (TYPE_PRECISION(type) == 1)
|
|
+ return true;
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static tree cast_a_tree(tree type, tree var)
|
|
+{
|
|
+ gcc_assert(type != NULL_TREE);
|
|
+ gcc_assert(var != NULL_TREE);
|
|
+ gcc_assert(fold_convertible_p(type, var));
|
|
+
|
|
+ return fold_convert(type, var);
|
|
+}
|
|
+
|
|
+static gimple build_cast_stmt(tree type, tree var, tree new_var, gimple_stmt_iterator *gsi, bool before)
|
|
+{
|
|
+ gimple assign;
|
|
+ location_t loc;
|
|
+
|
|
+ gcc_assert(type != NULL_TREE && var != NULL_TREE);
|
|
+ if (gsi_end_p(*gsi) && before == BEFORE_STMT)
|
|
+ gcc_unreachable();
|
|
+
|
|
+ if (new_var == CREATE_NEW_VAR)
|
|
+ new_var = create_new_var(type);
|
|
+
|
|
+ assign = gimple_build_assign(new_var, cast_a_tree(type, var));
|
|
+
|
|
+ if (!gsi_end_p(*gsi)) {
|
|
+ loc = gimple_location(gsi_stmt(*gsi));
|
|
+ gimple_set_location(assign, loc);
|
|
+ }
|
|
+
|
|
+ gimple_set_lhs(assign, make_ssa_name(new_var, assign));
|
|
+
|
|
+ if (before)
|
|
+ gsi_insert_before(gsi, assign, GSI_NEW_STMT);
|
|
+ else
|
|
+ gsi_insert_after(gsi, assign, GSI_NEW_STMT);
|
|
+ update_stmt(assign);
|
|
+ gimple_set_plf(assign, MY_STMT, true);
|
|
+
|
|
+ return assign;
|
|
+}
|
|
+
|
|
+static tree cast_to_new_size_overflow_type(gimple stmt, tree new_rhs1, tree size_overflow_type, bool before)
|
|
+{
|
|
+ gimple assign;
|
|
+ gimple_stmt_iterator gsi;
|
|
+
|
|
+ if (new_rhs1 == NULL_TREE)
|
|
+ return NULL_TREE;
|
|
+
|
|
+ if (!useless_type_conversion_p(TREE_TYPE(new_rhs1), size_overflow_type)) {
|
|
+ gsi = gsi_for_stmt(stmt);
|
|
+ assign = build_cast_stmt(size_overflow_type, new_rhs1, CREATE_NEW_VAR, &gsi, before);
|
|
+ return gimple_get_lhs(assign);
|
|
+ }
|
|
+ return new_rhs1;
|
|
+}
|
|
+
|
|
+static tree create_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, bool before)
|
|
+{
|
|
+ tree oldstmt_rhs1, size_overflow_type, lhs;
|
|
+ enum tree_code code;
|
|
+ gimple stmt;
|
|
+ gimple_stmt_iterator gsi;
|
|
+
|
|
+ if (!*potentionally_overflowed)
|
|
+ return NULL_TREE;
|
|
+
|
|
+ if (rhs1 == NULL_TREE) {
|
|
+ debug_gimple_stmt(oldstmt);
|
|
+ error("create_assign: rhs1 is NULL_TREE");
|
|
+ gcc_unreachable();
|
|
+ }
|
|
+
|
|
+ if (gimple_code(oldstmt) == GIMPLE_ASM)
|
|
+ lhs = rhs1;
|
|
+ else
|
|
+ lhs = gimple_get_lhs(oldstmt);
|
|
+
|
|
+ oldstmt_rhs1 = gimple_assign_rhs1(oldstmt);
|
|
+ code = TREE_CODE(oldstmt_rhs1);
|
|
+ if (code == PARM_DECL || (code == SSA_NAME && gimple_code(get_def_stmt(oldstmt_rhs1)) == GIMPLE_NOP))
|
|
+ check_missing_attribute(oldstmt_rhs1);
|
|
+
|
|
+ gsi = gsi_for_stmt(oldstmt);
|
|
+ pointer_set_insert(visited, oldstmt);
|
|
+ if (lookup_stmt_eh_lp(oldstmt) != 0) {
|
|
+ basic_block next_bb, cur_bb;
|
|
+ edge e;
|
|
+
|
|
+ gcc_assert(before == false);
|
|
+ gcc_assert(stmt_can_throw_internal(oldstmt));
|
|
+ gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
|
|
+ gcc_assert(!gsi_end_p(gsi));
|
|
+
|
|
+ cur_bb = gimple_bb(oldstmt);
|
|
+ next_bb = cur_bb->next_bb;
|
|
+ e = find_edge(cur_bb, next_bb);
|
|
+ gcc_assert(e != NULL);
|
|
+ gcc_assert(e->flags & EDGE_FALLTHRU);
|
|
+
|
|
+ gsi = gsi_after_labels(next_bb);
|
|
+ gcc_assert(!gsi_end_p(gsi));
|
|
+
|
|
+ before = true;
|
|
+ oldstmt = gsi_stmt(gsi);
|
|
+ pointer_set_insert(visited, oldstmt);
|
|
+ }
|
|
+
|
|
+ size_overflow_type = get_size_overflow_type(oldstmt, lhs);
|
|
+
|
|
+ stmt = build_cast_stmt(size_overflow_type, rhs1, CREATE_NEW_VAR, &gsi, before);
|
|
+ gimple_set_plf(stmt, MY_STMT, true);
|
|
+ return gimple_get_lhs(stmt);
|
|
+}
|
|
+
|
|
+static tree dup_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree size_overflow_type, tree rhs1, tree rhs2, tree __unused rhs3)
|
|
+{
|
|
+ tree new_var, lhs = gimple_get_lhs(oldstmt);
|
|
+ gimple stmt;
|
|
+ gimple_stmt_iterator gsi;
|
|
+
|
|
+ if (!*potentionally_overflowed)
|
|
+ return NULL_TREE;
|
|
+
|
|
+ if (gimple_plf(oldstmt, MY_STMT))
|
|
+ return lhs;
|
|
+
|
|
+ if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
|
|
+ rhs1 = gimple_assign_rhs1(oldstmt);
|
|
+ rhs1 = create_assign(visited, potentionally_overflowed, oldstmt, rhs1, BEFORE_STMT);
|
|
+ }
|
|
+ if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
|
|
+ rhs2 = gimple_assign_rhs2(oldstmt);
|
|
+ rhs2 = create_assign(visited, potentionally_overflowed, oldstmt, rhs2, BEFORE_STMT);
|
|
+ }
|
|
+
|
|
+ stmt = gimple_copy(oldstmt);
|
|
+ gimple_set_location(stmt, gimple_location(oldstmt));
|
|
+ gimple_set_plf(stmt, MY_STMT, true);
|
|
+
|
|
+ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
|
|
+ gimple_assign_set_rhs_code(stmt, MULT_EXPR);
|
|
+
|
|
+ if (is_bool(lhs))
|
|
+ new_var = SSA_NAME_VAR(lhs);
|
|
+ else
|
|
+ new_var = create_new_var(size_overflow_type);
|
|
+ new_var = make_ssa_name(new_var, stmt);
|
|
+ gimple_set_lhs(stmt, new_var);
|
|
+
|
|
+ if (rhs1 != NULL_TREE) {
|
|
+ if (!gimple_assign_cast_p(oldstmt))
|
|
+ rhs1 = cast_a_tree(size_overflow_type, rhs1);
|
|
+ gimple_assign_set_rhs1(stmt, rhs1);
|
|
+ }
|
|
+
|
|
+ if (rhs2 != NULL_TREE)
|
|
+ gimple_assign_set_rhs2(stmt, rhs2);
|
|
+#if BUILDING_GCC_VERSION >= 4007
|
|
+ if (rhs3 != NULL_TREE)
|
|
+ gimple_assign_set_rhs3(stmt, rhs3);
|
|
+#endif
|
|
+ gimple_set_vuse(stmt, gimple_vuse(oldstmt));
|
|
+ gimple_set_vdef(stmt, gimple_vdef(oldstmt));
|
|
+
|
|
+ gsi = gsi_for_stmt(oldstmt);
|
|
+ gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
|
|
+ update_stmt(stmt);
|
|
+ pointer_set_insert(visited, oldstmt);
|
|
+ return gimple_get_lhs(stmt);
|
|
+}
|
|
+
|
|
+static gimple overflow_create_phi_node(gimple oldstmt, tree var)
|
|
+{
|
|
+ basic_block bb;
|
|
+ gimple phi;
|
|
+ gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
|
|
+
|
|
+ bb = gsi_bb(gsi);
|
|
+
|
|
+ phi = create_phi_node(var, bb);
|
|
+ gsi = gsi_last(phi_nodes(bb));
|
|
+ gsi_remove(&gsi, false);
|
|
+
|
|
+ gsi = gsi_for_stmt(oldstmt);
|
|
+ gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
|
|
+ gimple_set_bb(phi, bb);
|
|
+ gimple_set_plf(phi, MY_STMT, true);
|
|
+ return phi;
|
|
+}
|
|
+
|
|
+static basic_block create_a_first_bb(void)
|
|
+{
|
|
+ basic_block first_bb;
|
|
+
|
|
+ first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
|
|
+ if (dom_info_available_p(CDI_DOMINATORS))
|
|
+ set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
|
|
+ return first_bb;
|
|
+}
|
|
+
|
|
+static tree cast_old_phi_arg(gimple oldstmt, tree size_overflow_type, tree arg, tree new_var, unsigned int i)
|
|
+{
|
|
+ basic_block bb;
|
|
+ gimple newstmt;
|
|
+ gimple_stmt_iterator gsi;
|
|
+ bool before = BEFORE_STMT;
|
|
+
|
|
+ if (TREE_CODE(arg) == SSA_NAME && gimple_code(get_def_stmt(arg)) != GIMPLE_NOP) {
|
|
+ gsi = gsi_for_stmt(get_def_stmt(arg));
|
|
+ newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, AFTER_STMT);
|
|
+ return gimple_get_lhs(newstmt);
|
|
+ }
|
|
+
|
|
+ bb = gimple_phi_arg_edge(oldstmt, i)->src;
|
|
+ gsi = gsi_after_labels(bb);
|
|
+ if (bb->index == 0) {
|
|
+ bb = create_a_first_bb();
|
|
+ gsi = gsi_start_bb(bb);
|
|
+ }
|
|
+ if (gsi_end_p(gsi))
|
|
+ before = AFTER_STMT;
|
|
+ newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, before);
|
|
+ return gimple_get_lhs(newstmt);
|
|
+}
|
|
+
|
|
+static gimple handle_new_phi_arg(tree arg, tree new_var, tree new_rhs)
|
|
+{
|
|
+ gimple newstmt;
|
|
+ gimple_stmt_iterator gsi;
|
|
+ void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update);
|
|
+ gimple def_newstmt = get_def_stmt(new_rhs);
|
|
+
|
|
+ gsi_insert = gsi_insert_after;
|
|
+ gsi = gsi_for_stmt(def_newstmt);
|
|
+
|
|
+ switch (gimple_code(get_def_stmt(arg))) {
|
|
+ case GIMPLE_PHI:
|
|
+ newstmt = gimple_build_assign(new_var, new_rhs);
|
|
+ gsi = gsi_after_labels(gimple_bb(def_newstmt));
|
|
+ gsi_insert = gsi_insert_before;
|
|
+ break;
|
|
+ case GIMPLE_ASM:
|
|
+ case GIMPLE_CALL:
|
|
+ newstmt = gimple_build_assign(new_var, new_rhs);
|
|
+ break;
|
|
+ case GIMPLE_ASSIGN:
|
|
+ newstmt = gimple_build_assign(new_var, gimple_get_lhs(def_newstmt));
|
|
+ break;
|
|
+ default:
|
|
+ /* unknown gimple_code (handle_build_new_phi_arg) */
|
|
+ gcc_unreachable();
|
|
+ }
|
|
+
|
|
+ gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt));
|
|
+ gsi_insert(&gsi, newstmt, GSI_NEW_STMT);
|
|
+ gimple_set_plf(newstmt, MY_STMT, true);
|
|
+ update_stmt(newstmt);
|
|
+ return newstmt;
|
|
+}
|
|
+
|
|
+static tree build_new_phi_arg(struct pointer_set_t *visited, bool *potentionally_overflowed, tree size_overflow_type, tree arg, tree new_var)
|
|
+{
|
|
+ gimple newstmt;
|
|
+ tree new_rhs;
|
|
+
|
|
+ new_rhs = expand(visited, potentionally_overflowed, arg);
|
|
+ if (new_rhs == NULL_TREE)
|
|
+ return NULL_TREE;
|
|
+
|
|
+ new_rhs = cast_to_new_size_overflow_type(get_def_stmt(new_rhs), new_rhs, size_overflow_type, AFTER_STMT);
|
|
+
|
|
+ newstmt = handle_new_phi_arg(arg, new_var, new_rhs);
|
|
+ return gimple_get_lhs(newstmt);
|
|
+}
|
|
+
|
|
+static tree build_new_phi(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
|
|
+{
|
|
+ gimple phi, oldstmt = get_def_stmt(var);
|
|
+ tree new_var, size_overflow_type;
|
|
+ unsigned int i, n = gimple_phi_num_args(oldstmt);
|
|
+
|
|
+ size_overflow_type = get_size_overflow_type(oldstmt, var);
|
|
+
|
|
+ new_var = create_new_var(size_overflow_type);
|
|
+
|
|
+ pointer_set_insert(visited, oldstmt);
|
|
+ phi = overflow_create_phi_node(oldstmt, new_var);
|
|
+ for (i = 0; i < n; i++) {
|
|
+ tree arg, lhs;
|
|
+
|
|
+ arg = gimple_phi_arg_def(oldstmt, i);
|
|
+ if (is_gimple_constant(arg))
|
|
+ arg = cast_a_tree(size_overflow_type, arg);
|
|
+ lhs = build_new_phi_arg(visited, potentionally_overflowed, size_overflow_type, arg, new_var);
|
|
+ if (lhs == NULL_TREE)
|
|
+ lhs = cast_old_phi_arg(oldstmt, size_overflow_type, arg, new_var, i);
|
|
+ add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
|
|
+ }
|
|
+
|
|
+ update_stmt(phi);
|
|
+ return gimple_phi_result(phi);
|
|
+}
|
|
+
|
|
+static tree change_assign_rhs(gimple stmt, tree orig_rhs, tree new_rhs)
|
|
+{
|
|
+ gimple assign;
|
|
+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
|
|
+ tree origtype = TREE_TYPE(orig_rhs);
|
|
+
|
|
+ gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
|
|
+
|
|
+ assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
|
|
+ return gimple_get_lhs(assign);
|
|
+}
|
|
+
|
|
+static void change_rhs1(gimple stmt, tree new_rhs1)
|
|
+{
|
|
+ tree assign_rhs;
|
|
+ tree rhs = gimple_assign_rhs1(stmt);
|
|
+
|
|
+ assign_rhs = change_assign_rhs(stmt, rhs, new_rhs1);
|
|
+ gimple_assign_set_rhs1(stmt, assign_rhs);
|
|
+ update_stmt(stmt);
|
|
+}
|
|
+
|
|
+static bool check_mode_type(gimple stmt)
|
|
+{
|
|
+ tree lhs = gimple_get_lhs(stmt);
|
|
+ tree lhs_type = TREE_TYPE(lhs);
|
|
+ tree rhs_type = TREE_TYPE(gimple_assign_rhs1(stmt));
|
|
+ enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
|
|
+ enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
|
|
+
|
|
+ if (rhs_mode == lhs_mode && TYPE_UNSIGNED(rhs_type) == TYPE_UNSIGNED(lhs_type))
|
|
+ return false;
|
|
+
|
|
+ if (rhs_mode == SImode && lhs_mode == DImode && (TYPE_UNSIGNED(rhs_type) || !TYPE_UNSIGNED(lhs_type)))
|
|
+ return false;
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool check_undefined_integer_operation(gimple stmt)
|
|
+{
|
|
+ gimple def_stmt;
|
|
+ tree lhs = gimple_get_lhs(stmt);
|
|
+ tree rhs1 = gimple_assign_rhs1(stmt);
|
|
+ tree rhs1_type = TREE_TYPE(rhs1);
|
|
+ tree lhs_type = TREE_TYPE(lhs);
|
|
+
|
|
+ if (TYPE_MODE(rhs1_type) != TYPE_MODE(lhs_type) || TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
|
|
+ return false;
|
|
+
|
|
+ def_stmt = get_def_stmt(rhs1);
|
|
+ if (gimple_code(def_stmt) != GIMPLE_ASSIGN)
|
|
+ return false;
|
|
+
|
|
+ if (gimple_assign_rhs_code(def_stmt) != MINUS_EXPR)
|
|
+ return false;
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static tree handle_unary_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple stmt)
|
|
+{
|
|
+ tree size_overflow_type, lhs = gimple_get_lhs(stmt);
|
|
+ tree new_rhs1, rhs1 = gimple_assign_rhs1(stmt);
|
|
+ tree rhs1_type = TREE_TYPE(rhs1);
|
|
+ tree lhs_type = TREE_TYPE(lhs);
|
|
+
|
|
+ *potentionally_overflowed = true;
|
|
+
|
|
+ new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
|
|
+
|
|
+ if (new_rhs1 == NULL_TREE || TREE_CODE(rhs1_type) == POINTER_TYPE)
|
|
+ return create_assign(visited, potentionally_overflowed, stmt, lhs, AFTER_STMT);
|
|
+
|
|
+ if (gimple_plf(stmt, MY_STMT))
|
|
+ return lhs;
|
|
+
|
|
+ if (gimple_plf(stmt, NO_CAST_CHECK)) {
|
|
+ size_overflow_type = get_size_overflow_type(stmt, rhs1);
|
|
+ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
|
|
+ return dup_assign(visited, potentionally_overflowed, stmt, size_overflow_type, new_rhs1, NULL_TREE, NULL_TREE);
|
|
+ }
|
|
+
|
|
+ if (!gimple_assign_cast_p(stmt)) {
|
|
+ size_overflow_type = get_size_overflow_type(stmt, lhs);
|
|
+ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
|
|
+ return dup_assign(visited, potentionally_overflowed, stmt, size_overflow_type, new_rhs1, NULL_TREE, NULL_TREE);
|
|
+ }
|
|
+
|
|
+ if (check_undefined_integer_operation(stmt)) {
|
|
+ size_overflow_type = get_size_overflow_type(stmt, lhs);
|
|
+ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
|
|
+ return dup_assign(visited, potentionally_overflowed, stmt, size_overflow_type, new_rhs1, NULL_TREE, NULL_TREE);
|
|
+ }
|
|
+
|
|
+ size_overflow_type = get_size_overflow_type(stmt, rhs1);
|
|
+ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
|
|
+
|
|
+ change_rhs1(stmt, new_rhs1);
|
|
+ check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, potentionally_overflowed, BEFORE_STMT);
|
|
+
|
|
+ if (TYPE_UNSIGNED(rhs1_type) != TYPE_UNSIGNED(lhs_type))
|
|
+ return create_assign(visited, potentionally_overflowed, stmt, lhs, AFTER_STMT);
|
|
+
|
|
+ if (!check_mode_type(stmt))
|
|
+ return create_assign(visited, potentionally_overflowed, stmt, lhs, AFTER_STMT);
|
|
+
|
|
+ size_overflow_type = get_size_overflow_type(stmt, lhs);
|
|
+ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
|
|
+
|
|
+ check_size_overflow(stmt, size_overflow_type, new_rhs1, lhs, potentionally_overflowed, BEFORE_STMT);
|
|
+
|
|
+ return create_assign(visited, potentionally_overflowed, stmt, lhs, AFTER_STMT);
|
|
+}
|
|
+
|
|
+static tree handle_unary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree lhs)
|
|
+{
|
|
+ gimple def_stmt = get_def_stmt(lhs);
|
|
+ tree rhs1 = gimple_assign_rhs1(def_stmt);
|
|
+
|
|
+ if (is_gimple_constant(rhs1))
|
|
+ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
|
|
+
|
|
+ gcc_assert(TREE_CODE(rhs1) != COND_EXPR);
|
|
+ switch (TREE_CODE(rhs1)) {
|
|
+ case SSA_NAME:
|
|
+ return handle_unary_rhs(visited, potentionally_overflowed, def_stmt);
|
|
+ case ARRAY_REF:
|
|
+ case BIT_FIELD_REF:
|
|
+ case ADDR_EXPR:
|
|
+ case COMPONENT_REF:
|
|
+ case INDIRECT_REF:
|
|
+#if BUILDING_GCC_VERSION >= 4006
|
|
+ case MEM_REF:
|
|
+#endif
|
|
+ case PARM_DECL:
|
|
+ case TARGET_MEM_REF:
|
|
+ case VAR_DECL:
|
|
+ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
|
|
+
|
|
+ default:
|
|
+ debug_gimple_stmt(def_stmt);
|
|
+ debug_tree(rhs1);
|
|
+ gcc_unreachable();
|
|
+ }
|
|
+}
|
|
+
|
|
+static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
|
|
+{
|
|
+ gimple cond_stmt;
|
|
+ gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
|
|
+
|
|
+ cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
|
|
+ gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
|
|
+ update_stmt(cond_stmt);
|
|
+}
|
|
+
|
|
+static tree create_string_param(tree string)
|
|
+{
|
|
+ tree i_type, a_type;
|
|
+ int length = TREE_STRING_LENGTH(string);
|
|
+
|
|
+ gcc_assert(length > 0);
|
|
+
|
|
+ i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
|
|
+ a_type = build_array_type(char_type_node, i_type);
|
|
+
|
|
+ TREE_TYPE(string) = a_type;
|
|
+ TREE_CONSTANT(string) = 1;
|
|
+ TREE_READONLY(string) = 1;
|
|
+
|
|
+ return build1(ADDR_EXPR, ptr_type_node, string);
|
|
+}
|
|
+
|
|
+static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg, bool min)
|
|
+{
|
|
+ gimple func_stmt, def_stmt;
|
|
+ tree current_func, loc_file, loc_line, ssa_name;
|
|
+ expanded_location xloc;
|
|
+ char ssa_name_buf[100];
|
|
+ gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
|
|
+
|
|
+ def_stmt = get_def_stmt(arg);
|
|
+ xloc = expand_location(gimple_location(def_stmt));
|
|
+
|
|
+ if (!gimple_has_location(def_stmt)) {
|
|
+ xloc = expand_location(gimple_location(stmt));
|
|
+ if (!gimple_has_location(stmt))
|
|
+ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
|
|
+ }
|
|
+
|
|
+ loc_line = build_int_cstu(unsigned_type_node, xloc.line);
|
|
+
|
|
+ loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
|
|
+ loc_file = create_string_param(loc_file);
|
|
+
|
|
+ current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
|
|
+ current_func = create_string_param(current_func);
|
|
+
|
|
+ if (min)
|
|
+ snprintf(ssa_name_buf, 100, "%s_%u (min)\n", NAME(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg));
|
|
+ else
|
|
+ snprintf(ssa_name_buf, 100, "%s_%u (max)\n", NAME(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg));
|
|
+ ssa_name = build_string(100, ssa_name_buf);
|
|
+ ssa_name = create_string_param(ssa_name);
|
|
+
|
|
+ // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
|
|
+ func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
|
|
+
|
|
+ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
|
|
+}
|
|
+
|
|
+static void __unused print_the_code_insertions(gimple stmt)
|
|
+{
|
|
+ location_t loc = gimple_location(stmt);
|
|
+
|
|
+ inform(loc, "Integer size_overflow check applied here.");
|
|
+}
|
|
+
|
|
+static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
|
|
+{
|
|
+ basic_block cond_bb, join_bb, bb_true;
|
|
+ edge e;
|
|
+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
|
|
+
|
|
+ cond_bb = gimple_bb(stmt);
|
|
+ if (before)
|
|
+ gsi_prev(&gsi);
|
|
+ if (gsi_end_p(gsi))
|
|
+ e = split_block_after_labels(cond_bb);
|
|
+ else
|
|
+ e = split_block(cond_bb, gsi_stmt(gsi));
|
|
+ cond_bb = e->src;
|
|
+ join_bb = e->dest;
|
|
+ e->flags = EDGE_FALSE_VALUE;
|
|
+ e->probability = REG_BR_PROB_BASE;
|
|
+
|
|
+ bb_true = create_empty_bb(cond_bb);
|
|
+ make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
|
|
+ make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
|
|
+ make_edge(bb_true, join_bb, EDGE_FALLTHRU);
|
|
+
|
|
+ if (dom_info_available_p(CDI_DOMINATORS)) {
|
|
+ set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
|
|
+ set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
|
|
+ }
|
|
+
|
|
+ if (current_loops != NULL) {
|
|
+ gcc_assert(cond_bb->loop_father == join_bb->loop_father);
|
|
+ add_bb_to_loop(bb_true, cond_bb->loop_father);
|
|
+ }
|
|
+
|
|
+ insert_cond(cond_bb, arg, cond_code, type_value);
|
|
+ insert_cond_result(bb_true, stmt, arg, min);
|
|
+
|
|
+// print_the_code_insertions(stmt);
|
|
+}
|
|
+
|
|
+static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool *potentionally_overflowed, bool before)
|
|
+{
|
|
+ tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min, rhs_type = TREE_TYPE(rhs);
|
|
+ gcc_assert(rhs_type != NULL_TREE);
|
|
+ gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == BOOLEAN_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
|
|
+
|
|
+ if (!*potentionally_overflowed)
|
|
+ return;
|
|
+
|
|
+ type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
|
|
+ type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
|
|
+
|
|
+ gcc_assert(!TREE_OVERFLOW(type_max));
|
|
+
|
|
+ cast_rhs_type = TREE_TYPE(cast_rhs);
|
|
+ type_max_type = TREE_TYPE(type_max);
|
|
+ type_min_type = TREE_TYPE(type_min);
|
|
+ gcc_assert(useless_type_conversion_p(cast_rhs_type, type_max_type));
|
|
+ gcc_assert(useless_type_conversion_p(type_max_type, type_min_type));
|
|
+
|
|
+ insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max, before, false);
|
|
+ insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min, before, true);
|
|
+}
|
|
+
|
|
+static tree get_handle_const_assign_size_overflow_type(gimple def_stmt, tree var_rhs)
|
|
+{
|
|
+ gimple var_rhs_def_stmt;
|
|
+ tree lhs = gimple_get_lhs(def_stmt);
|
|
+ tree lhs_type = TREE_TYPE(lhs);
|
|
+ tree rhs1_type = TREE_TYPE(gimple_assign_rhs1(def_stmt));
|
|
+ tree rhs2_type = TREE_TYPE(gimple_assign_rhs2(def_stmt));
|
|
+
|
|
+ if (var_rhs == NULL_TREE)
|
|
+ return get_size_overflow_type(def_stmt, lhs);
|
|
+
|
|
+ var_rhs_def_stmt = get_def_stmt(var_rhs);
|
|
+
|
|
+ if (TREE_CODE_CLASS(gimple_assign_rhs_code(def_stmt)) == tcc_comparison)
|
|
+ return get_size_overflow_type(var_rhs_def_stmt, var_rhs);
|
|
+
|
|
+ if (gimple_assign_rhs_code(def_stmt) == LSHIFT_EXPR)
|
|
+ return get_size_overflow_type(var_rhs_def_stmt, var_rhs);
|
|
+
|
|
+ if (gimple_assign_rhs_code(def_stmt) == RSHIFT_EXPR)
|
|
+ return get_size_overflow_type(var_rhs_def_stmt, var_rhs);
|
|
+
|
|
+ if (!useless_type_conversion_p(lhs_type, rhs1_type) || !useless_type_conversion_p(rhs1_type, rhs2_type)) {
|
|
+ debug_gimple_stmt(def_stmt);
|
|
+ gcc_unreachable();
|
|
+ }
|
|
+
|
|
+ return get_size_overflow_type(def_stmt, lhs);
|
|
+}
|
|
+
|
|
+static tree handle_const_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple def_stmt, tree var_rhs, tree new_rhs1, tree new_rhs2)
|
|
+{
|
|
+ tree new_rhs, size_overflow_type, orig_rhs;
|
|
+ void (*gimple_assign_set_rhs)(gimple, tree);
|
|
+ tree rhs1 = gimple_assign_rhs1(def_stmt);
|
|
+ tree rhs2 = gimple_assign_rhs2(def_stmt);
|
|
+ tree lhs = gimple_get_lhs(def_stmt);
|
|
+
|
|
+ if (var_rhs == NULL_TREE)
|
|
+ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
|
|
+
|
|
+ if (new_rhs2 == NULL_TREE) {
|
|
+ size_overflow_type = get_handle_const_assign_size_overflow_type(def_stmt, new_rhs1);
|
|
+ new_rhs2 = cast_a_tree(size_overflow_type, rhs2);
|
|
+ orig_rhs = rhs1;
|
|
+ gimple_assign_set_rhs = &gimple_assign_set_rhs1;
|
|
+ } else {
|
|
+ size_overflow_type = get_handle_const_assign_size_overflow_type(def_stmt, new_rhs2);
|
|
+ new_rhs1 = cast_a_tree(size_overflow_type, rhs1);
|
|
+ orig_rhs = rhs2;
|
|
+ gimple_assign_set_rhs = &gimple_assign_set_rhs2;
|
|
+ }
|
|
+
|
|
+ var_rhs = cast_to_new_size_overflow_type(def_stmt, var_rhs, size_overflow_type, BEFORE_STMT);
|
|
+
|
|
+ if (gimple_assign_rhs_code(def_stmt) == MIN_EXPR)
|
|
+ return dup_assign(visited, potentionally_overflowed, def_stmt, size_overflow_type, new_rhs1, new_rhs2, NULL_TREE);
|
|
+
|
|
+ check_size_overflow(def_stmt, size_overflow_type, var_rhs, orig_rhs, potentionally_overflowed, BEFORE_STMT);
|
|
+
|
|
+ new_rhs = change_assign_rhs(def_stmt, orig_rhs, var_rhs);
|
|
+ gimple_assign_set_rhs(def_stmt, new_rhs);
|
|
+ update_stmt(def_stmt);
|
|
+
|
|
+ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
|
|
+}
|
|
+
|
|
+static tree get_cast_def_stmt_rhs(tree new_rhs)
|
|
+{
|
|
+ gimple def_stmt;
|
|
+
|
|
+ def_stmt = get_def_stmt(new_rhs);
|
|
+ // get_size_overflow_type
|
|
+ if (LONG_TYPE_SIZE != GET_MODE_BITSIZE(SImode))
|
|
+ gcc_assert(gimple_assign_cast_p(def_stmt));
|
|
+ return gimple_assign_rhs1(def_stmt);
|
|
+}
|
|
+
|
|
+static tree cast_to_int_TI_type_and_check(bool *potentionally_overflowed, gimple stmt, tree new_rhs)
|
|
+{
|
|
+ gimple_stmt_iterator gsi;
|
|
+ gimple cast_stmt, def_stmt;
|
|
+ enum machine_mode mode = TYPE_MODE(TREE_TYPE(new_rhs));
|
|
+
|
|
+ if (mode != TImode && mode != DImode) {
|
|
+ def_stmt = get_def_stmt(new_rhs);
|
|
+ gcc_assert(gimple_assign_cast_p(def_stmt));
|
|
+ new_rhs = gimple_assign_rhs1(def_stmt);
|
|
+ mode = TYPE_MODE(TREE_TYPE(new_rhs));
|
|
+ }
|
|
+
|
|
+ gcc_assert(mode == TImode || mode == DImode);
|
|
+
|
|
+ if (mode == TYPE_MODE(intTI_type_node) && useless_type_conversion_p(TREE_TYPE(new_rhs), intTI_type_node))
|
|
+ return new_rhs;
|
|
+
|
|
+ gsi = gsi_for_stmt(stmt);
|
|
+ cast_stmt = build_cast_stmt(intTI_type_node, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
|
|
+ new_rhs = gimple_get_lhs(cast_stmt);
|
|
+
|
|
+ if (mode == DImode)
|
|
+ return new_rhs;
|
|
+
|
|
+ check_size_overflow(stmt, intTI_type_node, new_rhs, new_rhs, potentionally_overflowed, BEFORE_STMT);
|
|
+
|
|
+ return new_rhs;
|
|
+}
|
|
+
|
|
+static bool is_an_integer_trunction(gimple stmt)
|
|
+{
|
|
+ gimple rhs1_def_stmt, rhs2_def_stmt;
|
|
+ tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1;
|
|
+ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode;
|
|
+ tree rhs1 = gimple_assign_rhs1(stmt);
|
|
+ tree rhs2 = gimple_assign_rhs2(stmt);
|
|
+ enum machine_mode rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1));
|
|
+ enum machine_mode rhs2_mode = TYPE_MODE(TREE_TYPE(rhs2));
|
|
+
|
|
+ if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
|
|
+ return false;
|
|
+
|
|
+ gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
|
|
+
|
|
+ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR || rhs1_mode != SImode || rhs2_mode != SImode)
|
|
+ return false;
|
|
+
|
|
+ rhs1_def_stmt = get_def_stmt(rhs1);
|
|
+ rhs2_def_stmt = get_def_stmt(rhs2);
|
|
+ if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
|
|
+ return false;
|
|
+
|
|
+ rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
|
|
+ rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
|
|
+ rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
|
|
+ rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
|
|
+ if (rhs1_def_stmt_rhs1_mode != DImode || rhs2_def_stmt_rhs1_mode != DImode)
|
|
+ return false;
|
|
+
|
|
+ gimple_set_plf(rhs1_def_stmt, NO_CAST_CHECK, true);
|
|
+ gimple_set_plf(rhs2_def_stmt, NO_CAST_CHECK, true);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static tree handle_integer_truncation(struct pointer_set_t *visited, bool *potentionally_overflowed, tree lhs)
|
|
+{
|
|
+ tree new_rhs1, new_rhs2, size_overflow_type;
|
|
+ tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
|
|
+ tree new_rhs1_def_stmt_rhs1_type, new_rhs2_def_stmt_rhs1_type;
|
|
+ gimple assign, stmt = get_def_stmt(lhs);
|
|
+ tree rhs1 = gimple_assign_rhs1(stmt);
|
|
+ tree rhs2 = gimple_assign_rhs2(stmt);
|
|
+
|
|
+ if (!is_an_integer_trunction(stmt))
|
|
+ return NULL_TREE;
|
|
+
|
|
+ new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
|
|
+ new_rhs2 = expand(visited, potentionally_overflowed, rhs2);
|
|
+
|
|
+ new_rhs1_def_stmt_rhs1 = get_cast_def_stmt_rhs(new_rhs1);
|
|
+ new_rhs2_def_stmt_rhs1 = get_cast_def_stmt_rhs(new_rhs2);
|
|
+
|
|
+ new_rhs1_def_stmt_rhs1_type = TREE_TYPE(new_rhs1_def_stmt_rhs1);
|
|
+ new_rhs2_def_stmt_rhs1_type = TREE_TYPE(new_rhs2_def_stmt_rhs1);
|
|
+
|
|
+ if (!useless_type_conversion_p(new_rhs1_def_stmt_rhs1_type, new_rhs2_def_stmt_rhs1_type)) {
|
|
+ new_rhs1_def_stmt_rhs1 = cast_to_int_TI_type_and_check(potentionally_overflowed, stmt, new_rhs1_def_stmt_rhs1);
|
|
+ new_rhs2_def_stmt_rhs1 = cast_to_int_TI_type_and_check(potentionally_overflowed, stmt, new_rhs2_def_stmt_rhs1);
|
|
+ }
|
|
+
|
|
+ assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
|
|
+ new_lhs = gimple_get_lhs(assign);
|
|
+ check_size_overflow(assign, TREE_TYPE(new_lhs), new_lhs, rhs1, potentionally_overflowed, AFTER_STMT);
|
|
+
|
|
+ size_overflow_type = get_size_overflow_type(stmt, lhs);
|
|
+ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
|
|
+ new_rhs2 = cast_to_new_size_overflow_type(stmt, new_rhs2, size_overflow_type, BEFORE_STMT);
|
|
+ return dup_assign(visited, potentionally_overflowed, stmt, size_overflow_type, new_rhs1, new_rhs2, NULL_TREE);
|
|
+}
|
|
+
|
|
+static tree handle_binary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree lhs)
|
|
+{
|
|
+ tree rhs1, rhs2, size_overflow_type, new_lhs;
|
|
+ gimple def_stmt = get_def_stmt(lhs);
|
|
+ tree new_rhs1 = NULL_TREE;
|
|
+ tree new_rhs2 = NULL_TREE;
|
|
+
|
|
+ rhs1 = gimple_assign_rhs1(def_stmt);
|
|
+ rhs2 = gimple_assign_rhs2(def_stmt);
|
|
+
|
|
+ /* no DImode/TImode division in the 32/64 bit kernel */
|
|
+ switch (gimple_assign_rhs_code(def_stmt)) {
|
|
+ case RDIV_EXPR:
|
|
+ case TRUNC_DIV_EXPR:
|
|
+ case CEIL_DIV_EXPR:
|
|
+ case FLOOR_DIV_EXPR:
|
|
+ case ROUND_DIV_EXPR:
|
|
+ case TRUNC_MOD_EXPR:
|
|
+ case CEIL_MOD_EXPR:
|
|
+ case FLOOR_MOD_EXPR:
|
|
+ case ROUND_MOD_EXPR:
|
|
+ case EXACT_DIV_EXPR:
|
|
+ case POINTER_PLUS_EXPR:
|
|
+ case BIT_AND_EXPR:
|
|
+ return create_assign(visited, potentionally_overflowed, def_stmt, lhs, AFTER_STMT);
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ *potentionally_overflowed = true;
|
|
+
|
|
+ new_lhs = handle_integer_truncation(visited, potentionally_overflowed, lhs);
|
|
+ if (new_lhs != NULL_TREE)
|
|
+ return new_lhs;
|
|
+
|
|
+ if (TREE_CODE(rhs1) == SSA_NAME)
|
|
+ new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
|
|
+ if (TREE_CODE(rhs2) == SSA_NAME)
|
|
+ new_rhs2 = expand(visited, potentionally_overflowed, rhs2);
|
|
+
|
|
+ if (is_gimple_constant(rhs2))
|
|
+ return handle_const_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs1, NULL_TREE);
|
|
+
|
|
+ if (is_gimple_constant(rhs1))
|
|
+ return handle_const_assign(visited, potentionally_overflowed, def_stmt, new_rhs2, NULL_TREE, new_rhs2);
|
|
+
|
|
+ size_overflow_type = get_size_overflow_type(def_stmt, lhs);
|
|
+
|
|
+ new_rhs1 = cast_to_new_size_overflow_type(def_stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
|
|
+ new_rhs2 = cast_to_new_size_overflow_type(def_stmt, new_rhs2, size_overflow_type, BEFORE_STMT);
|
|
+
|
|
+ return dup_assign(visited, potentionally_overflowed, def_stmt, size_overflow_type, new_rhs1, new_rhs2, NULL_TREE);
|
|
+}
|
|
+
|
|
+#if BUILDING_GCC_VERSION >= 4007
|
|
+static tree get_new_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree size_overflow_type, tree rhs)
|
|
+{
|
|
+ if (is_gimple_constant(rhs))
|
|
+ return cast_a_tree(size_overflow_type, rhs);
|
|
+ if (TREE_CODE(rhs) != SSA_NAME)
|
|
+ return NULL_TREE;
|
|
+ return expand(visited, potentionally_overflowed, rhs);
|
|
+}
|
|
+
|
|
+static tree handle_ternary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
|
|
+{
|
|
+ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
|
|
+ gimple def_stmt = get_def_stmt(var);
|
|
+
|
|
+ *potentionally_overflowed = true;
|
|
+
|
|
+ size_overflow_type = get_size_overflow_type(def_stmt, var);
|
|
+
|
|
+ rhs1 = gimple_assign_rhs1(def_stmt);
|
|
+ rhs2 = gimple_assign_rhs2(def_stmt);
|
|
+ rhs3 = gimple_assign_rhs3(def_stmt);
|
|
+ new_rhs1 = get_new_rhs(visited, potentionally_overflowed, size_overflow_type, rhs1);
|
|
+ new_rhs2 = get_new_rhs(visited, potentionally_overflowed, size_overflow_type, rhs2);
|
|
+ new_rhs3 = get_new_rhs(visited, potentionally_overflowed, size_overflow_type, rhs3);
|
|
+
|
|
+ new_rhs1 = cast_to_new_size_overflow_type(def_stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
|
|
+ new_rhs2 = cast_to_new_size_overflow_type(def_stmt, new_rhs2, size_overflow_type, BEFORE_STMT);
|
|
+ new_rhs3 = cast_to_new_size_overflow_type(def_stmt, new_rhs3, size_overflow_type, BEFORE_STMT);
|
|
+
|
|
+ return dup_assign(visited, potentionally_overflowed, def_stmt, size_overflow_type, new_rhs1, new_rhs2, new_rhs3);
|
|
+}
|
|
+#endif
|
|
+
|
|
+static tree get_size_overflow_type(gimple stmt, tree node)
|
|
+{
|
|
+ tree type;
|
|
+
|
|
+ gcc_assert(node != NULL_TREE);
|
|
+
|
|
+ type = TREE_TYPE(node);
|
|
+
|
|
+ if (gimple_plf(stmt, MY_STMT))
|
|
+ return TREE_TYPE(node);
|
|
+
|
|
+ switch (TYPE_MODE(type)) {
|
|
+ case QImode:
|
|
+ return (TYPE_UNSIGNED(type)) ? unsigned_intHI_type_node : intHI_type_node;
|
|
+ case HImode:
|
|
+ return (TYPE_UNSIGNED(type)) ? unsigned_intSI_type_node : intSI_type_node;
|
|
+ case SImode:
|
|
+ return (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node;
|
|
+ case DImode:
|
|
+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
|
|
+ return (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node;
|
|
+ return (TYPE_UNSIGNED(type)) ? unsigned_intTI_type_node : intTI_type_node;
|
|
+ default:
|
|
+ debug_tree(node);
|
|
+ error("get_size_overflow_type: unsupported gcc configuration.");
|
|
+ gcc_unreachable();
|
|
+ }
|
|
+}
|
|
+
|
|
+static tree expand_visited(gimple def_stmt)
|
|
+{
|
|
+ gimple next_stmt;
|
|
+ gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt);
|
|
+
|
|
+ gsi_next(&gsi);
|
|
+ next_stmt = gsi_stmt(gsi);
|
|
+
|
|
+ switch (gimple_code(next_stmt)) {
|
|
+ case GIMPLE_ASSIGN:
|
|
+ return gimple_get_lhs(next_stmt);
|
|
+ case GIMPLE_PHI:
|
|
+ return gimple_phi_result(next_stmt);
|
|
+ case GIMPLE_CALL:
|
|
+ return gimple_call_lhs(next_stmt);
|
|
+ default:
|
|
+ return NULL_TREE;
|
|
+ }
|
|
+}
|
|
+
|
|
+static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
|
|
+{
|
|
+ gimple def_stmt;
|
|
+ enum tree_code code = TREE_CODE(TREE_TYPE(var));
|
|
+
|
|
+ if (is_gimple_constant(var))
|
|
+ return NULL_TREE;
|
|
+
|
|
+ if (TREE_CODE(var) == ADDR_EXPR)
|
|
+ return NULL_TREE;
|
|
+
|
|
+ gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE || code == ENUMERAL_TYPE);
|
|
+
|
|
+ if (TREE_CODE(SSA_NAME_VAR(var)) == PARM_DECL)
|
|
+ check_missing_attribute(var);
|
|
+
|
|
+ def_stmt = get_def_stmt(var);
|
|
+
|
|
+ if (!def_stmt)
|
|
+ return NULL_TREE;
|
|
+
|
|
+ if (gimple_plf(def_stmt, MY_STMT))
|
|
+ return var;
|
|
+
|
|
+ if (pointer_set_contains(visited, def_stmt))
|
|
+ return expand_visited(def_stmt);
|
|
+
|
|
+ switch (gimple_code(def_stmt)) {
|
|
+ case GIMPLE_NOP:
|
|
+ check_missing_attribute(var);
|
|
+ return NULL_TREE;
|
|
+ case GIMPLE_PHI:
|
|
+ return build_new_phi(visited, potentionally_overflowed, var);
|
|
+ case GIMPLE_CALL:
|
|
+ case GIMPLE_ASM:
|
|
+ return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
|
|
+ case GIMPLE_ASSIGN:
|
|
+ switch (gimple_num_ops(def_stmt)) {
|
|
+ case 2:
|
|
+ return handle_unary_ops(visited, potentionally_overflowed, var);
|
|
+ case 3:
|
|
+ return handle_binary_ops(visited, potentionally_overflowed, var);
|
|
+#if BUILDING_GCC_VERSION >= 4007
|
|
+ case 4:
|
|
+ return handle_ternary_ops(visited, potentionally_overflowed, var);
|
|
+#endif
|
|
+ }
|
|
+ default:
|
|
+ debug_gimple_stmt(def_stmt);
|
|
+ error("expand: unknown gimple code");
|
|
+ gcc_unreachable();
|
|
+ }
|
|
+}
|
|
+
|
|
+static void change_function_arg(gimple stmt, tree origarg, unsigned int argnum, tree newarg)
|
|
+{
|
|
+ gimple assign;
|
|
+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
|
|
+ tree origtype = TREE_TYPE(origarg);
|
|
+
|
|
+ gcc_assert(gimple_code(stmt) == GIMPLE_CALL);
|
|
+
|
|
+ assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, &gsi, BEFORE_STMT);
|
|
+
|
|
+ gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign));
|
|
+ update_stmt(stmt);
|
|
+}
|
|
+
|
|
+static tree get_function_arg(unsigned int argnum, gimple stmt, tree fndecl)
|
|
+{
|
|
+ const char *origid;
|
|
+ tree arg, origarg;
|
|
+
|
|
+ if (!DECL_ABSTRACT_ORIGIN(fndecl)) {
|
|
+ gcc_assert(gimple_call_num_args(stmt) > argnum);
|
|
+ return gimple_call_arg(stmt, argnum);
|
|
+ }
|
|
+
|
|
+ origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl));
|
|
+ while (origarg && argnum) {
|
|
+ argnum--;
|
|
+ origarg = TREE_CHAIN(origarg);
|
|
+ }
|
|
+
|
|
+ gcc_assert(argnum == 0);
|
|
+
|
|
+ gcc_assert(origarg != NULL_TREE);
|
|
+ origid = NAME(origarg);
|
|
+ for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) {
|
|
+ if (!strcmp(origid, NAME(arg)))
|
|
+ return arg;
|
|
+ }
|
|
+ return NULL_TREE;
|
|
+}
|
|
+
|
|
+static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum)
|
|
+{
|
|
+ struct pointer_set_t *visited;
|
|
+ tree arg, newarg;
|
|
+ bool potentionally_overflowed;
|
|
+
|
|
+ arg = get_function_arg(argnum, stmt, fndecl);
|
|
+ if (arg == NULL_TREE)
|
|
+ return;
|
|
+
|
|
+ if (is_gimple_constant(arg))
|
|
+ return;
|
|
+ if (TREE_CODE(arg) != SSA_NAME)
|
|
+ return;
|
|
+
|
|
+ check_arg_type(arg);
|
|
+
|
|
+ visited = pointer_set_create();
|
|
+ potentionally_overflowed = false;
|
|
+ newarg = expand(visited, &potentionally_overflowed, arg);
|
|
+ pointer_set_destroy(visited);
|
|
+
|
|
+ if (newarg == NULL_TREE || !potentionally_overflowed)
|
|
+ return;
|
|
+
|
|
+ change_function_arg(stmt, arg, argnum, newarg);
|
|
+
|
|
+ check_size_overflow(stmt, TREE_TYPE(newarg), newarg, arg, &potentionally_overflowed, BEFORE_STMT);
|
|
+}
|
|
+
|
|
+static void handle_function_by_attribute(gimple stmt, tree attr, tree fndecl)
|
|
+{
|
|
+ tree p = TREE_VALUE(attr);
|
|
+ do {
|
|
+ handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1);
|
|
+ p = TREE_CHAIN(p);
|
|
+ } while (p);
|
|
+}
|
|
+
|
|
+static void handle_function_by_hash(gimple stmt, tree fndecl)
|
|
+{
|
|
+ tree orig_fndecl;
|
|
+ unsigned int num;
|
|
+ struct size_overflow_hash *hash;
|
|
+
|
|
+ orig_fndecl = get_original_function_decl(fndecl);
|
|
+ hash = get_function_hash(orig_fndecl);
|
|
+ if (!hash)
|
|
+ return;
|
|
+
|
|
+ for (num = 1; num <= MAX_PARAM; num++)
|
|
+ if (hash->param & (1U << num))
|
|
+ handle_function_arg(stmt, fndecl, num - 1);
|
|
+}
|
|
+
|
|
+static void set_plf_false(void)
|
|
+{
|
|
+ basic_block bb;
|
|
+
|
|
+ FOR_ALL_BB(bb) {
|
|
+ gimple_stmt_iterator si;
|
|
+
|
|
+ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
|
|
+ gimple_set_plf(gsi_stmt(si), MY_STMT, false);
|
|
+ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
|
|
+ gimple_set_plf(gsi_stmt(si), MY_STMT, false);
|
|
+ }
|
|
+}
|
|
+
|
|
+static unsigned int handle_function(void)
|
|
+{
|
|
+ basic_block next, bb = ENTRY_BLOCK_PTR->next_bb;
|
|
+
|
|
+ set_plf_false();
|
|
+
|
|
+ do {
|
|
+ gimple_stmt_iterator gsi;
|
|
+ next = bb->next_bb;
|
|
+
|
|
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
|
|
+ tree fndecl, attr;
|
|
+ gimple stmt = gsi_stmt(gsi);
|
|
+
|
|
+ if (!(is_gimple_call(stmt)))
|
|
+ continue;
|
|
+ fndecl = gimple_call_fndecl(stmt);
|
|
+ if (fndecl == NULL_TREE)
|
|
+ continue;
|
|
+ if (gimple_call_num_args(stmt) == 0)
|
|
+ continue;
|
|
+ attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
|
|
+ if (!attr || !TREE_VALUE(attr))
|
|
+ handle_function_by_hash(stmt, fndecl);
|
|
+ else
|
|
+ handle_function_by_attribute(stmt, attr, fndecl);
|
|
+ gsi = gsi_for_stmt(stmt);
|
|
+ next = gimple_bb(stmt)->next_bb;
|
|
+ }
|
|
+ bb = next;
|
|
+ } while (bb);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct gimple_opt_pass size_overflow_pass = {
|
|
+ .pass = {
|
|
+ .type = GIMPLE_PASS,
|
|
+ .name = "size_overflow",
|
|
+ .gate = NULL,
|
|
+ .execute = handle_function,
|
|
+ .sub = NULL,
|
|
+ .next = NULL,
|
|
+ .static_pass_number = 0,
|
|
+ .tv_id = TV_NONE,
|
|
+ .properties_required = PROP_cfg | PROP_referenced_vars,
|
|
+ .properties_provided = 0,
|
|
+ .properties_destroyed = 0,
|
|
+ .todo_flags_start = 0,
|
|
+ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
|
|
+ }
|
|
+};
|
|
+
|
|
+static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
|
|
+{
|
|
+ tree fntype;
|
|
+
|
|
+ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
|
|
+
|
|
+ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
|
|
+ fntype = build_function_type_list(void_type_node,
|
|
+ const_char_ptr_type_node,
|
|
+ unsigned_type_node,
|
|
+ const_char_ptr_type_node,
|
|
+ const_char_ptr_type_node,
|
|
+ NULL_TREE);
|
|
+ report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
|
|
+
|
|
+ DECL_ASSEMBLER_NAME(report_size_overflow_decl);
|
|
+ TREE_PUBLIC(report_size_overflow_decl) = 1;
|
|
+ DECL_EXTERNAL(report_size_overflow_decl) = 1;
|
|
+ DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
|
|
+ TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
|
|
+}
|
|
+
|
|
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
|
|
+{
|
|
+ int i;
|
|
+ const char * const plugin_name = plugin_info->base_name;
|
|
+ const int argc = plugin_info->argc;
|
|
+ const struct plugin_argument * const argv = plugin_info->argv;
|
|
+ bool enable = true;
|
|
+
|
|
+ struct register_pass_info size_overflow_pass_info = {
|
|
+ .pass = &size_overflow_pass.pass,
|
|
+ .reference_pass_name = "ssa",
|
|
+ .ref_pass_instance_number = 1,
|
|
+ .pos_op = PASS_POS_INSERT_AFTER
|
|
+ };
|
|
+
|
|
+ if (!plugin_default_version_check(version, &gcc_version)) {
|
|
+ error(G_("incompatible gcc/plugin versions"));
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < argc; ++i) {
|
|
+ if (!strcmp(argv[i].key, "no-size-overflow")) {
|
|
+ enable = false;
|
|
+ continue;
|
|
+ }
|
|
+ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
|
|
+ }
|
|
+
|
|
+ register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
|
|
+ if (enable) {
|
|
+ register_callback("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
|
|
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
|
|
+ }
|
|
+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
|
|
new file mode 100644
|
|
index 0000000..38d2014
|
|
--- /dev/null
|
|
+++ b/tools/gcc/stackleak_plugin.c
|
|
@@ -0,0 +1,313 @@
|
|
+/*
|
|
+ * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
|
|
+ * Licensed under the GPL v2
|
|
+ *
|
|
+ * Note: the choice of the license means that the compilation process is
|
|
+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
|
|
+ * but for the kernel it doesn't matter since it doesn't link against
|
|
+ * any of the gcc libraries
|
|
+ *
|
|
+ * gcc plugin to help implement various PaX features
|
|
+ *
|
|
+ * - track lowest stack pointer
|
|
+ *
|
|
+ * TODO:
|
|
+ * - initialize all local variables
|
|
+ *
|
|
+ * BUGS:
|
|
+ * - none known
|
|
+ */
|
|
+#include "gcc-plugin.h"
|
|
+#include "config.h"
|
|
+#include "system.h"
|
|
+#include "coretypes.h"
|
|
+#include "tree.h"
|
|
+#include "tree-pass.h"
|
|
+#include "flags.h"
|
|
+#include "intl.h"
|
|
+#include "toplev.h"
|
|
+#include "plugin.h"
|
|
+//#include "expr.h" where are you...
|
|
+#include "diagnostic.h"
|
|
+#include "plugin-version.h"
|
|
+#include "tm.h"
|
|
+#include "function.h"
|
|
+#include "basic-block.h"
|
|
+#include "gimple.h"
|
|
+#include "rtl.h"
|
|
+#include "emit-rtl.h"
|
|
+
|
|
+extern void print_gimple_stmt(FILE *, gimple, int, int);
|
|
+
|
|
+int plugin_is_GPL_compatible;
|
|
+
|
|
+static int track_frame_size = -1;
|
|
+static const char track_function[] = "pax_track_stack";
|
|
+static const char check_function[] = "pax_check_alloca";
|
|
+static bool init_locals;
|
|
+
|
|
+static struct plugin_info stackleak_plugin_info = {
|
|
+ .version = "201203140940",
|
|
+ .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
|
|
+// "initialize-locals\t\tforcibly initialize all stack frames\n"
|
|
+};
|
|
+
|
|
+static bool gate_stackleak_track_stack(void);
|
|
+static unsigned int execute_stackleak_tree_instrument(void);
|
|
+static unsigned int execute_stackleak_final(void);
|
|
+
|
|
+static struct gimple_opt_pass stackleak_tree_instrument_pass = {
|
|
+ .pass = {
|
|
+ .type = GIMPLE_PASS,
|
|
+ .name = "stackleak_tree_instrument",
|
|
+ .gate = gate_stackleak_track_stack,
|
|
+ .execute = execute_stackleak_tree_instrument,
|
|
+ .sub = NULL,
|
|
+ .next = NULL,
|
|
+ .static_pass_number = 0,
|
|
+ .tv_id = TV_NONE,
|
|
+ .properties_required = PROP_gimple_leh | PROP_cfg,
|
|
+ .properties_provided = 0,
|
|
+ .properties_destroyed = 0,
|
|
+ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
|
|
+ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
|
|
+ }
|
|
+};
|
|
+
|
|
+static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
|
|
+ .pass = {
|
|
+ .type = RTL_PASS,
|
|
+ .name = "stackleak_final",
|
|
+ .gate = gate_stackleak_track_stack,
|
|
+ .execute = execute_stackleak_final,
|
|
+ .sub = NULL,
|
|
+ .next = NULL,
|
|
+ .static_pass_number = 0,
|
|
+ .tv_id = TV_NONE,
|
|
+ .properties_required = 0,
|
|
+ .properties_provided = 0,
|
|
+ .properties_destroyed = 0,
|
|
+ .todo_flags_start = 0,
|
|
+ .todo_flags_finish = TODO_dump_func
|
|
+ }
|
|
+};
|
|
+
|
|
+static bool gate_stackleak_track_stack(void)
|
|
+{
|
|
+ return track_frame_size >= 0;
|
|
+}
|
|
+
|
|
+static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
|
|
+{
|
|
+ gimple check_alloca;
|
|
+ tree fntype, fndecl, alloca_size;
|
|
+
|
|
+ fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
|
|
+ fndecl = build_fn_decl(check_function, fntype);
|
|
+ DECL_ASSEMBLER_NAME(fndecl); // for LTO
|
|
+
|
|
+ // insert call to void pax_check_alloca(unsigned long size)
|
|
+ alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
|
|
+ check_alloca = gimple_build_call(fndecl, 1, alloca_size);
|
|
+ gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
|
|
+}
|
|
+
|
|
+static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
|
|
+{
|
|
+ gimple track_stack;
|
|
+ tree fntype, fndecl;
|
|
+
|
|
+ fntype = build_function_type_list(void_type_node, NULL_TREE);
|
|
+ fndecl = build_fn_decl(track_function, fntype);
|
|
+ DECL_ASSEMBLER_NAME(fndecl); // for LTO
|
|
+
|
|
+ // insert call to void pax_track_stack(void)
|
|
+ track_stack = gimple_build_call(fndecl, 0);
|
|
+ gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
|
|
+}
|
|
+
|
|
+#if BUILDING_GCC_VERSION == 4005
|
|
+static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
|
|
+{
|
|
+ tree fndecl;
|
|
+
|
|
+ if (!is_gimple_call(stmt))
|
|
+ return false;
|
|
+ fndecl = gimple_call_fndecl(stmt);
|
|
+ if (!fndecl)
|
|
+ return false;
|
|
+ if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
|
|
+ return false;
|
|
+// print_node(stderr, "pax", fndecl, 4);
|
|
+ return DECL_FUNCTION_CODE(fndecl) == code;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static bool is_alloca(gimple stmt)
|
|
+{
|
|
+ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
|
|
+ return true;
|
|
+
|
|
+#if BUILDING_GCC_VERSION >= 4007
|
|
+ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
|
|
+ return true;
|
|
+#endif
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static unsigned int execute_stackleak_tree_instrument(void)
|
|
+{
|
|
+ basic_block bb, entry_bb;
|
|
+ bool prologue_instrumented = false, is_leaf = true;
|
|
+
|
|
+ entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
|
|
+
|
|
+ // 1. loop through BBs and GIMPLE statements
|
|
+ FOR_EACH_BB(bb) {
|
|
+ gimple_stmt_iterator gsi;
|
|
+
|
|
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
|
|
+ gimple stmt;
|
|
+
|
|
+ stmt = gsi_stmt(gsi);
|
|
+
|
|
+ if (is_gimple_call(stmt))
|
|
+ is_leaf = false;
|
|
+
|
|
+ // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
|
|
+ if (!is_alloca(stmt))
|
|
+ continue;
|
|
+
|
|
+ // 2. insert stack overflow check before each __builtin_alloca call
|
|
+ stackleak_check_alloca(&gsi);
|
|
+
|
|
+ // 3. insert track call after each __builtin_alloca call
|
|
+ stackleak_add_instrumentation(&gsi);
|
|
+ if (bb == entry_bb)
|
|
+ prologue_instrumented = true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ // special cases for some bad linux code: taking the address of static inline functions will materialize them
|
|
+ // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
|
|
+ // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
|
|
+ // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
|
|
+ if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
|
|
+ return 0;
|
|
+ if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
|
|
+ return 0;
|
|
+
|
|
+ // 4. insert track call at the beginning
|
|
+ if (!prologue_instrumented) {
|
|
+ gimple_stmt_iterator gsi;
|
|
+
|
|
+ bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
|
|
+ if (dom_info_available_p(CDI_DOMINATORS))
|
|
+ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
|
|
+ gsi = gsi_start_bb(bb);
|
|
+ stackleak_add_instrumentation(&gsi);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static unsigned int execute_stackleak_final(void)
|
|
+{
|
|
+ rtx insn;
|
|
+
|
|
+ if (cfun->calls_alloca)
|
|
+ return 0;
|
|
+
|
|
+ // keep calls only if function frame is big enough
|
|
+ if (get_frame_size() >= track_frame_size)
|
|
+ return 0;
|
|
+
|
|
+ // 1. find pax_track_stack calls
|
|
+ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
|
|
+ // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
|
|
+ rtx body;
|
|
+
|
|
+ if (!CALL_P(insn))
|
|
+ continue;
|
|
+ body = PATTERN(insn);
|
|
+ if (GET_CODE(body) != CALL)
|
|
+ continue;
|
|
+ body = XEXP(body, 0);
|
|
+ if (GET_CODE(body) != MEM)
|
|
+ continue;
|
|
+ body = XEXP(body, 0);
|
|
+ if (GET_CODE(body) != SYMBOL_REF)
|
|
+ continue;
|
|
+ if (strcmp(XSTR(body, 0), track_function))
|
|
+ continue;
|
|
+// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
|
|
+ // 2. delete call
|
|
+ insn = delete_insn_and_edges(insn);
|
|
+#if BUILDING_GCC_VERSION >= 4007
|
|
+ if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
|
|
+ insn = delete_insn_and_edges(insn);
|
|
+#endif
|
|
+ }
|
|
+
|
|
+// print_simple_rtl(stderr, get_insns());
|
|
+// print_rtl(stderr, get_insns());
|
|
+// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
|
|
+{
|
|
+ const char * const plugin_name = plugin_info->base_name;
|
|
+ const int argc = plugin_info->argc;
|
|
+ const struct plugin_argument * const argv = plugin_info->argv;
|
|
+ int i;
|
|
+ struct register_pass_info stackleak_tree_instrument_pass_info = {
|
|
+ .pass = &stackleak_tree_instrument_pass.pass,
|
|
+// .reference_pass_name = "tree_profile",
|
|
+ .reference_pass_name = "optimized",
|
|
+ .ref_pass_instance_number = 1,
|
|
+ .pos_op = PASS_POS_INSERT_BEFORE
|
|
+ };
|
|
+ struct register_pass_info stackleak_final_pass_info = {
|
|
+ .pass = &stackleak_final_rtl_opt_pass.pass,
|
|
+ .reference_pass_name = "final",
|
|
+ .ref_pass_instance_number = 1,
|
|
+ .pos_op = PASS_POS_INSERT_BEFORE
|
|
+ };
|
|
+
|
|
+ if (!plugin_default_version_check(version, &gcc_version)) {
|
|
+ error(G_("incompatible gcc/plugin versions"));
|
|
+ return 1;
|
|
+ }
|
|
+
|
|
+ register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
|
|
+
|
|
+ for (i = 0; i < argc; ++i) {
|
|
+ if (!strcmp(argv[i].key, "track-lowest-sp")) {
|
|
+ if (!argv[i].value) {
|
|
+ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
|
|
+ continue;
|
|
+ }
|
|
+ track_frame_size = atoi(argv[i].value);
|
|
+ if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
|
|
+ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
|
|
+ continue;
|
|
+ }
|
|
+ if (!strcmp(argv[i].key, "initialize-locals")) {
|
|
+ if (argv[i].value) {
|
|
+ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
|
|
+ continue;
|
|
+ }
|
|
+ init_locals = true;
|
|
+ continue;
|
|
+ }
|
|
+ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
|
|
+ }
|
|
+
|
|
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
|
|
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
|
|
index 6789d788..4afd019e 100644
|
|
--- a/tools/perf/util/include/asm/alternative-asm.h
|
|
+++ b/tools/perf/util/include/asm/alternative-asm.h
|
|
@@ -5,4 +5,7 @@
|
|
|
|
#define altinstruction_entry #
|
|
|
|
+ .macro pax_force_retaddr rip=0, reload=0
|
|
+ .endm
|
|
+
|
|
#endif
|
|
diff --git a/usr/gen_init_cpio.c.rej b/usr/gen_init_cpio.c.rej
|
|
new file mode 100644
|
|
index 0000000..3e1b7ce
|
|
--- /dev/null
|
|
+++ b/usr/gen_init_cpio.c.rej
|
|
@@ -0,0 +1,24 @@
|
|
+--- usr/gen_init_cpio.c 2011-07-22 04:17:23.000000000 +0200
|
|
++++ usr/gen_init_cpio.c 2012-05-21 12:10:12.504049050 +0200
|
|
+@@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name,
|
|
+ int retval;
|
|
+ int rc = -1;
|
|
+ int namesize;
|
|
+- int i;
|
|
++ unsigned int i;
|
|
+
|
|
+ mode |= S_IFREG;
|
|
+
|
|
+@@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_
|
|
+ *env_var = *expanded = '\0';
|
|
+ strncat(env_var, start + 2, end - start - 2);
|
|
+ strncat(expanded, new_location, start - new_location);
|
|
+- strncat(expanded, getenv(env_var), PATH_MAX);
|
|
+- strncat(expanded, end + 1, PATH_MAX);
|
|
++ strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
|
|
++ strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
|
|
+ strncpy(new_location, expanded, PATH_MAX);
|
|
++ new_location[PATH_MAX] = 0;
|
|
+ } else
|
|
+ break;
|
|
+ }
|
|
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
|
|
index f4732bd..87951f1 100644
|
|
--- a/virt/kvm/kvm_main.c
|
|
+++ b/virt/kvm/kvm_main.c
|
|
@@ -76,7 +76,7 @@ LIST_HEAD(vm_list);
|
|
|
|
static cpumask_var_t cpus_hardware_enabled;
|
|
static int kvm_usage_count = 0;
|
|
-static atomic_t hardware_enable_failed;
|
|
+static atomic_unchecked_t hardware_enable_failed;
|
|
|
|
struct kmem_cache *kvm_vcpu_cache;
|
|
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
|
|
@@ -2281,7 +2281,7 @@ static void hardware_enable_nolock(void *junk)
|
|
|
|
if (r) {
|
|
cpumask_clear_cpu(cpu, cpus_hardware_enabled);
|
|
- atomic_inc(&hardware_enable_failed);
|
|
+ atomic_inc_unchecked(&hardware_enable_failed);
|
|
printk(KERN_INFO "kvm: enabling virtualization on "
|
|
"CPU%d failed\n", cpu);
|
|
}
|
|
@@ -2335,10 +2335,10 @@ static int hardware_enable_all(void)
|
|
|
|
kvm_usage_count++;
|
|
if (kvm_usage_count == 1) {
|
|
- atomic_set(&hardware_enable_failed, 0);
|
|
+ atomic_set_unchecked(&hardware_enable_failed, 0);
|
|
on_each_cpu(hardware_enable_nolock, NULL, 1);
|
|
|
|
- if (atomic_read(&hardware_enable_failed)) {
|
|
+ if (atomic_read_unchecked(&hardware_enable_failed)) {
|
|
hardware_disable_all_nolock();
|
|
r = -EBUSY;
|
|
}
|
|
@@ -2701,7 +2701,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
|
|
kvm_arch_vcpu_put(vcpu);
|
|
}
|
|
|
|
-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
|
|
+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
|
|
struct module *module)
|
|
{
|
|
int r;
|
|
@@ -2764,7 +2764,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
|
|
if (!vcpu_align)
|
|
vcpu_align = __alignof__(struct kvm_vcpu);
|
|
kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
|
|
- 0, NULL);
|
|
+ SLAB_USERCOPY, NULL);
|
|
if (!kvm_vcpu_cache) {
|
|
r = -ENOMEM;
|
|
goto out_free_3;
|
|
@@ -2774,9 +2774,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
|
|
if (r)
|
|
goto out_free;
|
|
|
|
- kvm_chardev_ops.owner = module;
|
|
- kvm_vm_fops.owner = module;
|
|
- kvm_vcpu_fops.owner = module;
|
|
+ pax_open_kernel();
|
|
+ *(void **)&kvm_chardev_ops.owner = module;
|
|
+ *(void **)&kvm_vm_fops.owner = module;
|
|
+ *(void **)&kvm_vcpu_fops.owner = module;
|
|
+ pax_close_kernel();
|
|
|
|
r = misc_register(&kvm_dev);
|
|
if (r) {
|