From d0746d5d63f158ee191d9679263449a23dfc7ecd Mon Sep 17 00:00:00 2001
From: Tad <tad@spotco.us>
Date: Wed, 28 Oct 2015 10:26:52 -0400
Subject: [PATCH] Linux 3.4.105 -> 3.4.106

---
Documentation/lzo.txt | 164 ++++++++++++++++
Makefile | 2 +-
arch/m68k/mm/hwtest.c | 6 +
arch/mips/mm/tlbex.c | 5 +
arch/mips/oprofile/backtrace.c | 2 +-
arch/powerpc/kernel/entry_64.S | 6 +
arch/s390/kvm/interrupt.c | 1 +
arch/x86/include/asm/desc.h | 20 +-
arch/x86/include/asm/elf.h | 5 +-
arch/x86/include/asm/kvm_host.h | 17 +-
arch/x86/include/asm/page_32_types.h | 1 -
arch/x86/include/asm/page_64_types.h | 11 +-
arch/x86/include/asm/vmx.h | 2 +
arch/x86/kernel/apic/apic.c | 4 +-
arch/x86/kernel/cpu/common.c | 2 +
arch/x86/kernel/cpu/intel.c | 15 ++
arch/x86/kernel/dumpstack_64.c | 1 -
arch/x86/kernel/entry_64.S | 84 +++-----
arch/x86/kernel/kvm.c | 9 +-
arch/x86/kernel/kvmclock.c | 1 -
arch/x86/kernel/tls.c | 46 ++++-
arch/x86/kernel/traps.c | 69 +++++--
arch/x86/kernel/tsc.c | 5 +-
arch/x86/kvm/emulate.c | 251 +++++++++++++++++-------
arch/x86/kvm/i8254.c | 2 +
arch/x86/kvm/mmu.c | 2 +-
arch/x86/kvm/svm.c | 8 +-
arch/x86/kvm/vmx.c | 51 ++++-
arch/x86/kvm/x86.c | 38 +++-
arch/x86/kvm/x86.h | 20 +-
arch/xtensa/include/asm/unistd.h | 3 +-
block/blk-settings.c | 4 +-
block/scsi_ioctl.c | 3 +-
drivers/ata/ahci.c | 5 +
drivers/ata/libata-sff.c | 20 +-
drivers/ata/pata_serverworks.c | 13 +-
drivers/base/core.c | 4 +-
drivers/base/regmap/regmap-debugfs.c | 7 +-
drivers/base/regmap/regmap.c | 5 +
drivers/char/random.c | 10 +-
drivers/edac/mpc85xx_edac.c | 3 +-
drivers/firewire/core-cdev.c | 3 +-
drivers/gpu/drm/radeon/evergreen.c | 1 +
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 16 +-
drivers/hv/channel.c | 47 +++--
drivers/hv/connection.c | 17 +-
drivers/infiniband/ulp/srpt/ib_srpt.c | 8 +
drivers/input/mouse/alps.c | 11 +-
drivers/input/mouse/synaptics.c | 22 ++-
drivers/input/mouse/synaptics.h | 8 +-
drivers/input/serio/i8042-x86ia64io.h | 22 +++
drivers/md/dm-bufio.c | 11 +-
drivers/md/dm-log-userspace-transfer.c | 2 +-
drivers/md/dm-raid.c | 11 +-
drivers/net/can/dev.c | 2 +-
drivers/net/can/usb/esd_usb2.c | 1 +
drivers/net/macvtap.c | 2 +
drivers/net/ppp/ppp_generic.c | 2 +-
drivers/net/wireless/rt2x00/rt2800.h | 2 +-
drivers/net/wireless/rt2x00/rt2800usb.c | 1 +
drivers/net/wireless/rt2x00/rt2x00queue.c | 50 ++---
drivers/of/address.c | 19 +-
drivers/of/base.c | 88 +++------
drivers/of/selftest.c | 66 ++++++-
drivers/pci/hotplug/pciehp_core.c | 7 +
drivers/pci/pci-sysfs.c | 2 +-
drivers/pci/quirks.c | 20 ++
drivers/platform/x86/acer-wmi.c | 11 ++
drivers/platform/x86/samsung-laptop.c | 10 +
drivers/power/charger-manager.c | 5 +
drivers/scsi/scsi_error.c | 4 +-
drivers/spi/spi-dw-mid.c | 7 +-
drivers/spi/spi-pl022.c | 2 +-
drivers/staging/iio/impedance-analyzer/ad5933.c | 47 ++++-
drivers/staging/iio/meter/ade7758_ring.c | 2 +-
drivers/target/target_core_transport.c | 3 +-
drivers/tty/serial/8250/8250_pci.c | 21 ++
drivers/tty/serial/serial_core.c | 2 +-
drivers/tty/tty_io.c | 13 +-
drivers/tty/vt/consolemap.c | 4 +
drivers/usb/class/cdc-acm.c | 6 +-
drivers/usb/core/hcd.c | 2 +
drivers/usb/core/hub.c | 3 +
drivers/usb/core/quirks.c | 6 +
drivers/usb/dwc3/gadget.c | 3 +-
drivers/usb/serial/cp210x.c | 3 +
drivers/usb/serial/ftdi_sio.c | 5 +
drivers/usb/serial/ftdi_sio_ids.h | 23 ++-
drivers/usb/serial/kobil_sct.c | 4 +-
drivers/usb/serial/opticon.c | 2 +-
drivers/usb/serial/option.c | 10 +
drivers/usb/storage/transport.c | 26 +++
drivers/video/console/bitblit.c | 3 +-
drivers/video/console/fbcon_ccw.c | 3 +-
drivers/video/console/fbcon_cw.c | 3 +-
drivers/video/console/fbcon_ud.c | 3 +-
drivers/virtio/virtio_pci.c | 33 +++-
fs/btrfs/file-item.c | 2 +-
fs/buffer.c | 8 +
fs/ecryptfs/inode.c | 2 +-
fs/ext3/super.c | 7 -
fs/ext4/ext4.h | 1 +
fs/ext4/ialloc.c | 4 +
fs/ext4/inode.c | 32 ++-
fs/ext4/namei.c | 33 ++--
fs/ext4/resize.c | 2 +-
fs/ext4/super.c | 9 +-
fs/ext4/xattr.c | 32 ++-
fs/ioprio.c | 14 +-
fs/lockd/mon.c | 6 +
fs/nfs/inode.c | 2 +-
fs/nfs/nfs4proc.c | 26 ++-
fs/nfs/nfs4renewd.c | 12 +-
fs/nfs/nfs4state.c | 16 +-
fs/nfsd/nfs4proc.c | 3 +-
fs/notify/fanotify/fanotify_user.c | 2 +-
fs/super.c | 2 +
fs/ubifs/commit.c | 10 +-
fs/ubifs/log.c | 19 +-
fs/ubifs/master.c | 7 +-
fs/ubifs/super.c | 1 -
fs/ubifs/ubifs.h | 2 -
include/drm/drm_pciids.h | 1 -
include/linux/blkdev.h | 5 +-
include/linux/compiler-gcc.h | 3 +
include/linux/compiler-intel.h | 7 +
include/linux/khugepaged.h | 17 +-
include/linux/mm.h | 1 +
include/linux/of.h | 84 ++++++--
include/linux/oom.h | 4 +
include/linux/string.h | 4 +-
include/linux/usb/quirks.h | 3 +
kernel/audit_tree.c | 1 +
kernel/freezer.c | 3 +
kernel/futex.c | 24 +--
kernel/posix-timers.c | 1 +
kernel/power/hibernate.c | 8 +-
kernel/power/process.c | 40 +++-
lib/bitmap.c | 8 +-
lib/lzo/lzo1x_decompress_safe.c | 103 +++++-----
lib/string.c | 16 ++
mm/huge_memory.c | 11 +-
mm/memory.c | 4 +-
mm/mmap.c | 8 +-
mm/oom_kill.c | 17 ++
mm/page_alloc.c | 8 +
mm/page_cgroup.c | 1 +
mm/percpu.c | 2 -
mm/truncate.c | 61 +++++-
net/ceph/crypto.c | 169 ++++++++++++----
net/mac80211/iface.c | 7 +-
net/mac80211/rx.c | 14 +-
security/integrity/evm/evm_main.c | 9 +-
sound/core/pcm_compat.c | 2 +
sound/core/pcm_native.c | 2 +-
sound/pci/emu10k1/emu10k1_callback.c | 6 +-
sound/soc/codecs/sgtl5000.c | 3 +-
sound/soc/codecs/sgtl5000.h | 2 +-
sound/soc/sh/fsi.c | 3 +-
sound/usb/quirks-table.h | 30 +++
virt/kvm/iommu.c | 8 +-
virt/kvm/kvm_main.c | 4 +
162 files changed, 1923 insertions(+), 672 deletions(-)
create mode 100644 Documentation/lzo.txt

diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt
new file mode 100644
index 0000000..ea45dd3
--- /dev/null
+++ b/Documentation/lzo.txt
@@ -0,0 +1,164 @@
+
+LZO stream format as understood by Linux's LZO decompressor
+===========================================================
+
+Introduction
+
+ This is not a specification. No specification seems to be publicly available
+ for the LZO stream format. This document describes what input format the LZO
+ decompressor as implemented in the Linux kernel understands. The file subject
+ of this analysis is lib/lzo/lzo1x_decompress_safe.c. No analysis was made on
+ the compressor nor on any other implementations though it seems likely that
+ the format matches the standard one. The purpose of this document is to
+ better understand what the code does in order to propose more efficient fixes
+ for future bug reports.
+
+Description
+
+ The stream is composed of a series of instructions, operands, and data. The
+ instructions consist in a few bits representing an opcode, and bits forming
+ the operands for the instruction, whose size and position depend on the
+ opcode and on the number of literals copied by previous instruction. The
+ operands are used to indicate :
+
+ - a distance when copying data from the dictionary (past output buffer)
+ - a length (number of bytes to copy from dictionary)
+ - the number of literals to copy, which is retained in variable "state"
+ as a piece of information for next instructions.
+
+ Optionally depending on the opcode and operands, extra data may follow. These
+ extra data can be a complement for the operand (eg: a length or a distance
+ encoded on larger values), or a literal to be copied to the output buffer.
+
+ The first byte of the block follows a different encoding from other bytes, it
+ seems to be optimized for literal use only, since there is no dictionary yet
+ prior to that byte.
+
+ Lengths are always encoded on a variable size starting with a small number
+ of bits in the operand. If the number of bits isn't enough to represent the
+ length, up to 255 may be added in increments by consuming more bytes with a
+ rate of at most 255 per extra byte (thus the compression ratio cannot exceed
+ around 255:1). The variable length encoding using #bits is always the same :
+
+ length = byte & ((1 << #bits) - 1)
+ if (!length) {
+ length = ((1 << #bits) - 1)
+ length += 255*(number of zero bytes)
+ length += first-non-zero-byte
+ }
+ length += constant (generally 2 or 3)
+
+ For references to the dictionary, distances are relative to the output
+ pointer. Distances are encoded using very few bits belonging to certain
+ ranges, resulting in multiple copy instructions using different encodings.
+ Certain encodings involve one extra byte, others involve two extra bytes
+ forming a little-endian 16-bit quantity (marked LE16 below).
+
+ After any instruction except the large literal copy, 0, 1, 2 or 3 literals
+ are copied before starting the next instruction. The number of literals that
+ were copied may change the meaning and behaviour of the next instruction. In
+ practice, only one instruction needs to know whether 0, less than 4, or more
+ literals were copied. This is the information stored in the <state> variable
+ in this implementation. This number of immediate literals to be copied is
+ generally encoded in the last two bits of the instruction but may also be
+ taken from the last two bits of an extra operand (eg: distance).
+
+ End of stream is declared when a block copy of distance 0 is seen. Only one
+ instruction may encode this distance (0001HLLL), it takes one LE16 operand
+ for the distance, thus requiring 3 bytes.
+
+ IMPORTANT NOTE : in the code some length checks are missing because certain
+ instructions are called under the assumption that a certain number of bytes
+ follow because it has already been garanteed before parsing the instructions.
+ They just have to "refill" this credit if they consume extra bytes. This is
+ an implementation design choice independant on the algorithm or encoding.
+
+Byte sequences
+
+ First byte encoding :
+
+ 0..17 : follow regular instruction encoding, see below. It is worth
+ noting that codes 16 and 17 will represent a block copy from
+ the dictionary which is empty, and that they will always be
+ invalid at this place.
+
+ 18..21 : copy 0..3 literals
+ state = (byte - 17) = 0..3 [ copy <state> literals ]
+ skip byte
+
+ 22..255 : copy literal string
+ length = (byte - 17) = 4..238
+ state = 4 [ don't copy extra literals ]
+ skip byte
+
+ Instruction encoding :
+
+ 0 0 0 0 X X X X (0..15)
+ Depends on the number of literals copied by the last instruction.
+ If last instruction did not copy any literal (state == 0), this
+ encoding will be a copy of 4 or more literal, and must be interpreted
+ like this :
+
+ 0 0 0 0 L L L L (0..15) : copy long literal string
+ length = 3 + (L ?: 15 + (zero_bytes * 255) + non_zero_byte)
+ state = 4 (no extra literals are copied)
+
+ If last instruction used to copy between 1 to 3 literals (encoded in
+ the instruction's opcode or distance), the instruction is a copy of a
+ 2-byte block from the dictionary within a 1kB distance. It is worth
+ noting that this instruction provides little savings since it uses 2
+ bytes to encode a copy of 2 other bytes but it encodes the number of
+ following literals for free. It must be interpreted like this :
+
+ 0 0 0 0 D D S S (0..15) : copy 2 bytes from <= 1kB distance
+ length = 2
+ state = S (copy S literals after this block)
+ Always followed by exactly one byte : H H H H H H H H
+ distance = (H << 2) + D + 1
+
+ If last instruction used to copy 4 or more literals (as detected by
+ state == 4), the instruction becomes a copy of a 3-byte block from the
+ dictionary from a 2..3kB distance, and must be interpreted like this :
+
+ 0 0 0 0 D D S S (0..15) : copy 3 bytes from 2..3 kB distance
+ length = 3
+ state = S (copy S literals after this block)
+ Always followed by exactly one byte : H H H H H H H H
+ distance = (H << 2) + D + 2049
+
+ 0 0 0 1 H L L L (16..31)
+ Copy of a block within 16..48kB distance (preferably less than 10B)
+ length = 2 + (L ?: 7 + (zero_bytes * 255) + non_zero_byte)
+ Always followed by exactly one LE16 : D D D D D D D D : D D D D D D S S
+ distance = 16384 + (H << 14) + D
+ state = S (copy S literals after this block)
+ End of stream is reached if distance == 16384
+
+ 0 0 1 L L L L L (32..63)
+ Copy of small block within 16kB distance (preferably less than 34B)
+ length = 2 + (L ?: 31 + (zero_bytes * 255) + non_zero_byte)
+ Always followed by exactly one LE16 : D D D D D D D D : D D D D D D S S
+ distance = D + 1
+ state = S (copy S literals after this block)
+
+ 0 1 L D D D S S (64..127)
+ Copy 3-4 bytes from block within 2kB distance
+ state = S (copy S literals after this block)
+ length = 3 + L
+ Always followed by exactly one byte : H H H H H H H H
+ distance = (H << 3) + D + 1
+
+ 1 L L D D D S S (128..255)
+ Copy 5-8 bytes from block within 2kB distance
+ state = S (copy S literals after this block)
+ length = 5 + L
+ Always followed by exactly one byte : H H H H H H H H
+ distance = (H << 3) + D + 1
+
+Authors
+
+ This document was written by Willy Tarreau <w@1wt.eu> on 2014/07/19 during an
+ analysis of the decompression code available in Linux 3.16-rc5. The code is
+ tricky, it is possible that this document contains mistakes or that a few
+ corner cases were overlooked. In any case, please report any doubt, fix, or
+ proposed updates to the author(s) so that the document can be updated.
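
As an aside, the variable-length "length" encoding described in the document
above is small enough to sketch in C. The helper below is an illustration of
that encoding only; it is not taken from this patch or from
lib/lzo/lzo1x_decompress_safe.c, and its name, parameters and bounds handling
are assumptions made for the example: "opcode" is the instruction byte, "bits"
the number of length bits it carries, and "base" the constant added at the end
(generally 2 or 3).

#include <stddef.h>
#include <stdint.h>

/*
 * Illustration only: decode the variable-length "length" field described
 * in Documentation/lzo.txt above.  Returns 0 if the stream is truncated.
 */
static size_t lzo_decode_len(uint8_t opcode, const uint8_t **in,
			     const uint8_t *in_end,
			     unsigned int bits, size_t base)
{
	const uint8_t *ip = *in;
	size_t len = opcode & ((1u << bits) - 1);

	if (len == 0) {
		len = (1u << bits) - 1;
		while (ip < in_end && *ip == 0) {
			len += 255;		/* each zero byte adds 255 */
			ip++;
		}
		if (ip >= in_end)
			return 0;		/* ran out of input */
		len += *ip++;			/* first non-zero byte ends the run */
	}
	*in = ip;
	return len + base;
}

For example, the "0 0 1 L L L L L" opcode above would correspond to bits = 5
and base = 2, matching the rule length = 2 + (L ?: 31 + (zero_bytes * 255) +
non_zero_byte).
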
diff --git a/Makefile b/Makefile
index 2029ca8..e08bf16 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 105
+SUBLEVEL = 106
EXTRAVERSION =
NAME = Saber-toothed Squirrel

diff --git a/arch/m68k/mm/hwtest.c b/arch/m68k/mm/hwtest.c
index 2c7dde3..2a5259f 100644
--- a/arch/m68k/mm/hwtest.c
+++ b/arch/m68k/mm/hwtest.c
@@ -28,9 +28,11 @@
int hwreg_present( volatile void *regp )
{
int ret = 0;
+ unsigned long flags;
long save_sp, save_vbr;
long tmp_vectors[3];

+ local_irq_save(flags);
__asm__ __volatile__
( "movec %/vbr,%2\n\t"
"movel #Lberr1,%4@(8)\n\t"
@@ -46,6 +48,7 @@ int hwreg_present( volatile void *regp )
: "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
: "a" (regp), "a" (tmp_vectors)
);
+ local_irq_restore(flags);

return( ret );
}
@@ -58,9 +61,11 @@ EXPORT_SYMBOL(hwreg_present);
int hwreg_write( volatile void *regp, unsigned short val )
{
int ret;
+ unsigned long flags;
long save_sp, save_vbr;
long tmp_vectors[3];

+ local_irq_save(flags);
__asm__ __volatile__
( "movec %/vbr,%2\n\t"
"movel #Lberr2,%4@(8)\n\t"
@@ -78,6 +83,7 @@ int hwreg_write( volatile void *regp, unsigned short val )
: "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr)
: "a" (regp), "a" (tmp_vectors), "g" (val)
);
+ local_irq_restore(flags);

return( ret );
}
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 0bc485b..6d64efe 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1041,6 +1041,7 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
struct mips_huge_tlb_info {
int huge_pte;
int restore_scratch;
+ bool need_reload_pte;
};

static struct mips_huge_tlb_info __cpuinit
@@ -1055,6 +1056,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,

rv.huge_pte = scratch;
rv.restore_scratch = 0;
+ rv.need_reload_pte = false;

if (check_for_high_segbits) {
UASM_i_MFC0(p, tmp, C0_BADVADDR);
@@ -1247,6 +1249,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
} else {
htlb_info.huge_pte = K0;
htlb_info.restore_scratch = 0;
+ htlb_info.need_reload_pte = true;
vmalloc_mode = refill_noscratch;
/*
* create the plain linear handler
@@ -1283,6 +1286,8 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
}
#ifdef CONFIG_HUGETLB_PAGE
uasm_l_tlb_huge_update(&l, p);
+ if (htlb_info.need_reload_pte)
+ UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
build_huge_update_entries(&p, htlb_info.huge_pte, K1);
build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
htlb_info.restore_scratch);
diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
index 6854ed5..83a1dfd 100644
--- a/arch/mips/oprofile/backtrace.c
+++ b/arch/mips/oprofile/backtrace.c
@@ -92,7 +92,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame,
/* This marks the end of the previous function,
which means we overran. */
break;
- stack_size = (unsigned) stack_adjustment;
+ stack_size = (unsigned long) stack_adjustment;
} else if (is_ra_save_ins(&ip)) {
int ra_slot = ip.i_format.simmediate;
if (ra_slot < 0)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index e500969..c3fc39e 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -813,7 +813,13 @@ user_work:
b .ret_from_except_lite

1: bl .save_nvgprs
+ /*
+ * Use a non volatile GPR to save and restore our thread_info flags
+ * across the call to restore_interrupts.
+ */
+ mr r30,r4
bl .restore_interrupts
+ mr r4,r30
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_notify_resume
b .ret_from_except
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 10e13b3..df69bcb 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -43,6 +43,7 @@ static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
return 0;
if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
return 1;
+ return 0;
case KVM_S390_INT_EMERGENCY:
if (psw_extint_disabled(vcpu))
return 0;
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
|
||
|
index e95822d..fa9c8c7 100644
|
||
|
--- a/arch/x86/include/asm/desc.h
|
||
|
+++ b/arch/x86/include/asm/desc.h
|
||
|
@@ -250,7 +250,8 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
|
||
|
gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
|
||
|
}
|
||
|
|
||
|
-#define _LDT_empty(info) \
|
||
|
+/* This intentionally ignores lm, since 32-bit apps don't have that field. */
|
||
|
+#define LDT_empty(info) \
|
||
|
((info)->base_addr == 0 && \
|
||
|
(info)->limit == 0 && \
|
||
|
(info)->contents == 0 && \
|
||
|
@@ -260,11 +261,18 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
|
||
|
(info)->seg_not_present == 1 && \
|
||
|
(info)->useable == 0)
|
||
|
|
||
|
-#ifdef CONFIG_X86_64
|
||
|
-#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
|
||
|
-#else
|
||
|
-#define LDT_empty(info) (_LDT_empty(info))
|
||
|
-#endif
|
||
|
+/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
|
||
|
+static inline bool LDT_zero(const struct user_desc *info)
|
||
|
+{
|
||
|
+ return (info->base_addr == 0 &&
|
||
|
+ info->limit == 0 &&
|
||
|
+ info->contents == 0 &&
|
||
|
+ info->read_exec_only == 0 &&
|
||
|
+ info->seg_32bit == 0 &&
|
||
|
+ info->limit_in_pages == 0 &&
|
||
|
+ info->seg_not_present == 0 &&
|
||
|
+ info->useable == 0);
|
||
|
+}
|
||
|
|
||
|
static inline void clear_LDT(void)
|
||
|
{
|
||
|
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
|
||
|
index 5939f44..06ec1fe 100644
|
||
|
--- a/arch/x86/include/asm/elf.h
|
||
|
+++ b/arch/x86/include/asm/elf.h
|
||
|
@@ -155,8 +155,9 @@ do { \
|
||
|
#define elf_check_arch(x) \
|
||
|
((x)->e_machine == EM_X86_64)
|
||
|
|
||
|
-#define compat_elf_check_arch(x) \
|
||
|
- (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
|
||
|
+#define compat_elf_check_arch(x) \
|
||
|
+ (elf_check_arch_ia32(x) || \
|
||
|
+ (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
|
||
|
|
||
|
#if __USER32_DS != __USER_DS
|
||
|
# error "The following code assumes __USER32_DS == __USER_DS"
|
||
|
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
|
||
|
index 944471f..4f78757 100644
|
||
|
--- a/arch/x86/include/asm/kvm_host.h
|
||
|
+++ b/arch/x86/include/asm/kvm_host.h
|
||
|
@@ -453,6 +453,7 @@ struct kvm_vcpu_arch {
|
||
|
u64 mmio_gva;
|
||
|
unsigned access;
|
||
|
gfn_t mmio_gfn;
|
||
|
+ u64 mmio_gen;
|
||
|
|
||
|
struct kvm_pmu pmu;
|
||
|
|
||
|
@@ -881,6 +882,20 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
|
||
|
kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
|
||
|
}
|
||
|
|
||
|
+static inline u64 get_canonical(u64 la)
|
||
|
+{
|
||
|
+ return ((int64_t)la << 16) >> 16;
|
||
|
+}
|
||
|
+
|
||
|
+static inline bool is_noncanonical_address(u64 la)
|
||
|
+{
|
||
|
+#ifdef CONFIG_X86_64
|
||
|
+ return get_canonical(la) != la;
|
||
|
+#else
|
||
|
+ return false;
|
||
|
+#endif
|
||
|
+}
|
||
|
+
|
||
|
#define TSS_IOPB_BASE_OFFSET 0x66
|
||
|
#define TSS_BASE_SIZE 0x68
|
||
|
#define TSS_IOPB_SIZE (65536 / 8)
|
||
|
@@ -939,7 +954,7 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
|
||
|
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
|
||
|
|
||
|
void kvm_define_shared_msr(unsigned index, u32 msr);
|
||
|
-void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
|
||
|
+int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
|
||
|
|
||
|
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
|
||
|
|
||
|
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
|
||
|
index ade619f..88dae6b3 100644
|
||
|
--- a/arch/x86/include/asm/page_32_types.h
|
||
|
+++ b/arch/x86/include/asm/page_32_types.h
|
||
|
@@ -18,7 +18,6 @@
|
||
|
#define THREAD_ORDER 1
|
||
|
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
|
||
|
|
||
|
-#define STACKFAULT_STACK 0
|
||
|
#define DOUBLEFAULT_STACK 1
|
||
|
#define NMI_STACK 0
|
||
|
#define DEBUG_STACK 0
|
||
|
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
|
||
|
index 7639dbf..a9e9937 100644
|
||
|
--- a/arch/x86/include/asm/page_64_types.h
|
||
|
+++ b/arch/x86/include/asm/page_64_types.h
|
||
|
@@ -14,12 +14,11 @@
|
||
|
#define IRQ_STACK_ORDER 2
|
||
|
#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
|
||
|
|
||
|
-#define STACKFAULT_STACK 1
|
||
|
-#define DOUBLEFAULT_STACK 2
|
||
|
-#define NMI_STACK 3
|
||
|
-#define DEBUG_STACK 4
|
||
|
-#define MCE_STACK 5
|
||
|
-#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
|
||
|
+#define DOUBLEFAULT_STACK 1
|
||
|
+#define NMI_STACK 2
|
||
|
+#define DEBUG_STACK 3
|
||
|
+#define MCE_STACK 4
|
||
|
+#define N_EXCEPTION_STACKS 4 /* hw limit: 7 */
|
||
|
|
||
|
#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
|
||
|
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
|
||
|
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
|
||
|
index 31f180c2..504d1cf 100644
|
||
|
--- a/arch/x86/include/asm/vmx.h
|
||
|
+++ b/arch/x86/include/asm/vmx.h
|
||
|
@@ -279,6 +279,8 @@ enum vmcs_field {
|
||
|
#define EXIT_REASON_APIC_ACCESS 44
|
||
|
#define EXIT_REASON_EPT_VIOLATION 48
|
||
|
#define EXIT_REASON_EPT_MISCONFIG 49
|
||
|
+#define EXIT_REASON_INVEPT 50
|
||
|
+#define EXIT_REASON_INVVPID 53
|
||
|
#define EXIT_REASON_WBINVD 54
|
||
|
#define EXIT_REASON_XSETBV 55
|
||
|
|
||
|
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
|
||
|
index edc2448..cb5b54e 100644
|
||
|
--- a/arch/x86/kernel/apic/apic.c
|
||
|
+++ b/arch/x86/kernel/apic/apic.c
|
||
|
@@ -1229,7 +1229,7 @@ void __cpuinit setup_local_APIC(void)
|
||
|
unsigned int value, queued;
|
||
|
int i, j, acked = 0;
|
||
|
unsigned long long tsc = 0, ntsc;
|
||
|
- long long max_loops = cpu_khz;
|
||
|
+ long long max_loops = cpu_khz ? cpu_khz : 1000000;
|
||
|
|
||
|
if (cpu_has_tsc)
|
||
|
rdtscll(tsc);
|
||
|
@@ -1325,7 +1325,7 @@ void __cpuinit setup_local_APIC(void)
|
||
|
acked);
|
||
|
break;
|
||
|
}
|
||
|
- if (cpu_has_tsc) {
|
||
|
+ if (cpu_has_tsc && cpu_khz) {
|
||
|
rdtscll(ntsc);
|
||
|
max_loops = (cpu_khz << 10) - (ntsc - tsc);
|
||
|
} else
|
||
|
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
|
||
|
index cf79302..114db0f 100644
|
||
|
--- a/arch/x86/kernel/cpu/common.c
|
||
|
+++ b/arch/x86/kernel/cpu/common.c
|
||
|
@@ -142,6 +142,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
|
||
|
|
||
|
static int __init x86_xsave_setup(char *s)
|
||
|
{
|
||
|
+ if (strlen(s))
|
||
|
+ return 0;
|
||
|
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
|
||
|
setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
|
||
|
return 1;
|
||
|
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
|
||
|
index 3e6ff6c..e7a64dd 100644
|
||
|
--- a/arch/x86/kernel/cpu/intel.c
|
||
|
+++ b/arch/x86/kernel/cpu/intel.c
|
||
|
@@ -143,6 +143,21 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
|
||
|
setup_clear_cpu_cap(X86_FEATURE_ERMS);
|
||
|
}
|
||
|
}
|
||
|
+
|
||
|
+ /*
|
||
|
+ * Intel Quark Core DevMan_001.pdf section 6.4.11
|
||
|
+ * "The operating system also is required to invalidate (i.e., flush)
|
||
|
+ * the TLB when any changes are made to any of the page table entries.
|
||
|
+ * The operating system must reload CR3 to cause the TLB to be flushed"
|
||
|
+ *
|
||
|
+ * As a result cpu_has_pge() in arch/x86/include/asm/tlbflush.h should
|
||
|
+ * be false so that __flush_tlb_all() causes CR3 insted of CR4.PGE
|
||
|
+ * to be modified
|
||
|
+ */
|
||
|
+ if (c->x86 == 5 && c->x86_model == 9) {
|
||
|
+ pr_info("Disabling PGE capability bit\n");
|
||
|
+ setup_clear_cpu_cap(X86_FEATURE_PGE);
|
||
|
+ }
|
||
|
}
|
||
|
|
||
|
#ifdef CONFIG_X86_32
|
||
|
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
|
||
|
index 17107bd..e820606 100644
|
||
|
--- a/arch/x86/kernel/dumpstack_64.c
|
||
|
+++ b/arch/x86/kernel/dumpstack_64.c
|
||
|
@@ -24,7 +24,6 @@ static char x86_stack_ids[][8] = {
|
||
|
[ DEBUG_STACK-1 ] = "#DB",
|
||
|
[ NMI_STACK-1 ] = "NMI",
|
||
|
[ DOUBLEFAULT_STACK-1 ] = "#DF",
|
||
|
- [ STACKFAULT_STACK-1 ] = "#SS",
|
||
|
[ MCE_STACK-1 ] = "#MC",
|
||
|
#if DEBUG_STKSZ > EXCEPTION_STKSZ
|
||
|
[ N_EXCEPTION_STACKS ...
|
||
|
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
|
||
|
index 42b055e..45f9c70 100644
|
||
|
--- a/arch/x86/kernel/entry_64.S
|
||
|
+++ b/arch/x86/kernel/entry_64.S
|
||
|
@@ -912,13 +912,16 @@ ENTRY(native_iret)
|
||
|
jnz native_irq_return_ldt
|
||
|
#endif
|
||
|
|
||
|
+.global native_irq_return_iret
|
||
|
native_irq_return_iret:
|
||
|
+ /*
|
||
|
+ * This may fault. Non-paranoid faults on return to userspace are
|
||
|
+ * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
|
||
|
+ * Double-faults due to espfix64 are handled in do_double_fault.
|
||
|
+ * Other faults here are fatal.
|
||
|
+ */
|
||
|
iretq
|
||
|
|
||
|
- .section __ex_table,"a"
|
||
|
- .quad native_irq_return_iret, bad_iret
|
||
|
- .previous
|
||
|
-
|
||
|
#ifdef CONFIG_X86_ESPFIX64
|
||
|
native_irq_return_ldt:
|
||
|
pushq_cfi %rax
|
||
|
@@ -945,25 +948,6 @@ native_irq_return_ldt:
|
||
|
jmp native_irq_return_iret
|
||
|
#endif
|
||
|
|
||
|
- .section .fixup,"ax"
|
||
|
-bad_iret:
|
||
|
- /*
|
||
|
- * The iret traps when the %cs or %ss being restored is bogus.
|
||
|
- * We've lost the original trap vector and error code.
|
||
|
- * #GPF is the most likely one to get for an invalid selector.
|
||
|
- * So pretend we completed the iret and took the #GPF in user mode.
|
||
|
- *
|
||
|
- * We are now running with the kernel GS after exception recovery.
|
||
|
- * But error_entry expects us to have user GS to match the user %cs,
|
||
|
- * so swap back.
|
||
|
- */
|
||
|
- pushq $0
|
||
|
-
|
||
|
- SWAPGS
|
||
|
- jmp general_protection
|
||
|
-
|
||
|
- .previous
|
||
|
-
|
||
|
/* edi: workmask, edx: work */
|
||
|
retint_careful:
|
||
|
CFI_RESTORE_STATE
|
||
|
@@ -1011,37 +995,6 @@ ENTRY(retint_kernel)
|
||
|
CFI_ENDPROC
|
||
|
END(common_interrupt)
|
||
|
|
||
|
- /*
|
||
|
- * If IRET takes a fault on the espfix stack, then we
|
||
|
- * end up promoting it to a doublefault. In that case,
|
||
|
- * modify the stack to make it look like we just entered
|
||
|
- * the #GP handler from user space, similar to bad_iret.
|
||
|
- */
|
||
|
-#ifdef CONFIG_X86_ESPFIX64
|
||
|
- ALIGN
|
||
|
-__do_double_fault:
|
||
|
- XCPT_FRAME 1 RDI+8
|
||
|
- movq RSP(%rdi),%rax /* Trap on the espfix stack? */
|
||
|
- sarq $PGDIR_SHIFT,%rax
|
||
|
- cmpl $ESPFIX_PGD_ENTRY,%eax
|
||
|
- jne do_double_fault /* No, just deliver the fault */
|
||
|
- cmpl $__KERNEL_CS,CS(%rdi)
|
||
|
- jne do_double_fault
|
||
|
- movq RIP(%rdi),%rax
|
||
|
- cmpq $native_irq_return_iret,%rax
|
||
|
- jne do_double_fault /* This shouldn't happen... */
|
||
|
- movq PER_CPU_VAR(kernel_stack),%rax
|
||
|
- subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
|
||
|
- movq %rax,RSP(%rdi)
|
||
|
- movq $0,(%rax) /* Missing (lost) #GP error code */
|
||
|
- movq $general_protection,RIP(%rdi)
|
||
|
- retq
|
||
|
- CFI_ENDPROC
|
||
|
-END(__do_double_fault)
|
||
|
-#else
|
||
|
-# define __do_double_fault do_double_fault
|
||
|
-#endif
|
||
|
-
|
||
|
/*
|
||
|
* End of kprobes section
|
||
|
*/
|
||
|
@@ -1217,7 +1170,7 @@ zeroentry overflow do_overflow
|
||
|
zeroentry bounds do_bounds
|
||
|
zeroentry invalid_op do_invalid_op
|
||
|
zeroentry device_not_available do_device_not_available
|
||
|
-paranoiderrorentry double_fault __do_double_fault
|
||
|
+paranoiderrorentry double_fault do_double_fault
|
||
|
zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
|
||
|
errorentry invalid_TSS do_invalid_TSS
|
||
|
errorentry segment_not_present do_segment_not_present
|
||
|
@@ -1431,7 +1384,7 @@ apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
|
||
|
|
||
|
paranoidzeroentry_ist debug do_debug DEBUG_STACK
|
||
|
paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
|
||
|
-paranoiderrorentry stack_segment do_stack_segment
|
||
|
+errorentry stack_segment do_stack_segment
|
||
|
#ifdef CONFIG_XEN
|
||
|
zeroentry xen_debug do_debug
|
||
|
zeroentry xen_int3 do_int3
|
||
|
@@ -1541,16 +1494,15 @@ error_sti:
|
||
|
|
||
|
/*
|
||
|
* There are two places in the kernel that can potentially fault with
|
||
|
- * usergs. Handle them here. The exception handlers after iret run with
|
||
|
- * kernel gs again, so don't set the user space flag. B stepping K8s
|
||
|
- * sometimes report an truncated RIP for IRET exceptions returning to
|
||
|
- * compat mode. Check for these here too.
|
||
|
+ * usergs. Handle them here. B stepping K8s sometimes report a
|
||
|
+ * truncated RIP for IRET exceptions returning to compat mode. Check
|
||
|
+ * for these here too.
|
||
|
*/
|
||
|
error_kernelspace:
|
||
|
incl %ebx
|
||
|
leaq native_irq_return_iret(%rip),%rcx
|
||
|
cmpq %rcx,RIP+8(%rsp)
|
||
|
- je error_swapgs
|
||
|
+ je error_bad_iret
|
||
|
movl %ecx,%eax /* zero extend */
|
||
|
cmpq %rax,RIP+8(%rsp)
|
||
|
je bstep_iret
|
||
|
@@ -1561,7 +1513,15 @@ error_kernelspace:
|
||
|
bstep_iret:
|
||
|
/* Fix truncated RIP */
|
||
|
movq %rcx,RIP+8(%rsp)
|
||
|
- jmp error_swapgs
|
||
|
+ /* fall through */
|
||
|
+
|
||
|
+error_bad_iret:
|
||
|
+ SWAPGS
|
||
|
+ mov %rsp,%rdi
|
||
|
+ call fixup_bad_iret
|
||
|
+ mov %rax,%rsp
|
||
|
+ decl %ebx /* Return to usergs */
|
||
|
+ jmp error_sti
|
||
|
CFI_ENDPROC
|
||
|
END(error_entry)
|
||
|
|
||
|
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
|
||
|
index e554e5a..226f284 100644
|
||
|
--- a/arch/x86/kernel/kvm.c
|
||
|
+++ b/arch/x86/kernel/kvm.c
|
||
|
@@ -258,7 +258,14 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
|
||
|
static void __init paravirt_ops_setup(void)
|
||
|
{
|
||
|
pv_info.name = "KVM";
|
||
|
- pv_info.paravirt_enabled = 1;
|
||
|
+
|
||
|
+ /*
|
||
|
+ * KVM isn't paravirt in the sense of paravirt_enabled. A KVM
|
||
|
+ * guest kernel works like a bare metal kernel with additional
|
||
|
+ * features, and paravirt_enabled is about features that are
|
||
|
+ * missing.
|
||
|
+ */
|
||
|
+ pv_info.paravirt_enabled = 0;
|
||
|
|
||
|
if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
|
||
|
pv_cpu_ops.io_delay = kvm_io_delay;
|
||
|
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
|
||
|
index f8492da6..5e3f91b 100644
|
||
|
--- a/arch/x86/kernel/kvmclock.c
|
||
|
+++ b/arch/x86/kernel/kvmclock.c
|
||
|
@@ -212,7 +212,6 @@ void __init kvmclock_init(void)
|
||
|
#endif
|
||
|
kvm_get_preset_lpj();
|
||
|
clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
|
||
|
- pv_info.paravirt_enabled = 1;
|
||
|
pv_info.name = "KVM";
|
||
|
|
||
|
if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
|
||
|
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
|
||
|
index 9d9d2f9..9d25a6e 100644
|
||
|
--- a/arch/x86/kernel/tls.c
|
||
|
+++ b/arch/x86/kernel/tls.c
|
||
|
@@ -27,6 +27,42 @@ static int get_free_idx(void)
|
||
|
return -ESRCH;
|
||
|
}
|
||
|
|
||
|
+static bool tls_desc_okay(const struct user_desc *info)
|
||
|
+{
|
||
|
+ /*
|
||
|
+ * For historical reasons (i.e. no one ever documented how any
|
||
|
+ * of the segmentation APIs work), user programs can and do
|
||
|
+ * assume that a struct user_desc that's all zeros except for
|
||
|
+ * entry_number means "no segment at all". This never actually
|
||
|
+ * worked. In fact, up to Linux 3.19, a struct user_desc like
|
||
|
+ * this would create a 16-bit read-write segment with base and
|
||
|
+ * limit both equal to zero.
|
||
|
+ *
|
||
|
+ * That was close enough to "no segment at all" until we
|
||
|
+ * hardened this function to disallow 16-bit TLS segments. Fix
|
||
|
+ * it up by interpreting these zeroed segments the way that they
|
||
|
+ * were almost certainly intended to be interpreted.
|
||
|
+ *
|
||
|
+ * The correct way to ask for "no segment at all" is to specify
|
||
|
+ * a user_desc that satisfies LDT_empty. To keep everything
|
||
|
+ * working, we accept both.
|
||
|
+ *
|
||
|
+ * Note that there's a similar kludge in modify_ldt -- look at
|
||
|
+ * the distinction between modes 1 and 0x11.
|
||
|
+ */
|
||
|
+ if (LDT_empty(info) || LDT_zero(info))
|
||
|
+ return true;
|
||
|
+
|
||
|
+ /*
|
||
|
+ * espfix is required for 16-bit data segments, but espfix
|
||
|
+ * only works for LDT segments.
|
||
|
+ */
|
||
|
+ if (!info->seg_32bit)
|
||
|
+ return false;
|
||
|
+
|
||
|
+ return true;
|
||
|
+}
|
||
|
+
|
||
|
static void set_tls_desc(struct task_struct *p, int idx,
|
||
|
const struct user_desc *info, int n)
|
||
|
{
|
||
|
@@ -40,7 +76,7 @@ static void set_tls_desc(struct task_struct *p, int idx,
|
||
|
cpu = get_cpu();
|
||
|
|
||
|
while (n-- > 0) {
|
||
|
- if (LDT_empty(info))
|
||
|
+ if (LDT_empty(info) || LDT_zero(info))
|
||
|
desc->a = desc->b = 0;
|
||
|
else
|
||
|
fill_ldt(desc, info);
|
||
|
@@ -66,6 +102,9 @@ int do_set_thread_area(struct task_struct *p, int idx,
|
||
|
if (copy_from_user(&info, u_info, sizeof(info)))
|
||
|
return -EFAULT;
|
||
|
|
||
|
+ if (!tls_desc_okay(&info))
|
||
|
+ return -EINVAL;
|
||
|
+
|
||
|
if (idx == -1)
|
||
|
idx = info.entry_number;
|
||
|
|
||
|
@@ -196,6 +235,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
|
||
|
{
|
||
|
struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
|
||
|
const struct user_desc *info;
|
||
|
+ int i;
|
||
|
|
||
|
if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
|
||
|
(pos % sizeof(struct user_desc)) != 0 ||
|
||
|
@@ -209,6 +249,10 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
|
||
|
else
|
||
|
info = infobuf;
|
||
|
|
||
|
+ for (i = 0; i < count / sizeof(struct user_desc); i++)
|
||
|
+ if (!tls_desc_okay(info + i))
|
||
|
+ return -EINVAL;
|
||
|
+
|
||
|
set_tls_desc(target,
|
||
|
GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
|
||
|
info, count / sizeof(struct user_desc));
|
||
|
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
|
||
|
index ff9281f1..9bfe95f 100644
|
||
|
--- a/arch/x86/kernel/traps.c
|
||
|
+++ b/arch/x86/kernel/traps.c
|
||
|
@@ -213,29 +213,41 @@ DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
|
||
|
coprocessor_segment_overrun)
|
||
|
DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
|
||
|
DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
|
||
|
-#ifdef CONFIG_X86_32
|
||
|
DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
|
||
|
-#endif
|
||
|
DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
|
||
|
BUS_ADRALN, 0)
|
||
|
|
||
|
#ifdef CONFIG_X86_64
|
||
|
/* Runs on IST stack */
|
||
|
-dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
|
||
|
-{
|
||
|
- if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
|
||
|
- X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
|
||
|
- return;
|
||
|
- preempt_conditional_sti(regs);
|
||
|
- do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
|
||
|
- preempt_conditional_cli(regs);
|
||
|
-}
|
||
|
-
|
||
|
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
|
||
|
{
|
||
|
static const char str[] = "double fault";
|
||
|
struct task_struct *tsk = current;
|
||
|
|
||
|
+#ifdef CONFIG_X86_ESPFIX64
|
||
|
+ extern unsigned char native_irq_return_iret[];
|
||
|
+
|
||
|
+ /*
|
||
|
+ * If IRET takes a non-IST fault on the espfix64 stack, then we
|
||
|
+ * end up promoting it to a doublefault. In that case, modify
|
||
|
+ * the stack to make it look like we just entered the #GP
|
||
|
+ * handler from user space, similar to bad_iret.
|
||
|
+ */
|
||
|
+ if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
|
||
|
+ regs->cs == __KERNEL_CS &&
|
||
|
+ regs->ip == (unsigned long)native_irq_return_iret)
|
||
|
+ {
|
||
|
+ struct pt_regs *normal_regs = task_pt_regs(current);
|
||
|
+
|
||
|
+ /* Fake a #GP(0) from userspace. */
|
||
|
+ memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
|
||
|
+ normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */
|
||
|
+ regs->ip = (unsigned long)general_protection;
|
||
|
+ regs->sp = (unsigned long)&normal_regs->orig_ax;
|
||
|
+ return;
|
||
|
+ }
|
||
|
+#endif
|
||
|
+
|
||
|
/* Return not checked because double check cannot be ignored */
|
||
|
notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
|
||
|
|
||
|
@@ -332,7 +344,7 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
|
||
|
* for scheduling or signal handling. The actual stack switch is done in
|
||
|
* entry.S
|
||
|
*/
|
||
|
-asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
|
||
|
+asmlinkage notrace __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
|
||
|
{
|
||
|
struct pt_regs *regs = eregs;
|
||
|
/* Did already sync */
|
||
|
@@ -351,6 +363,35 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
|
||
|
*regs = *eregs;
|
||
|
return regs;
|
||
|
}
|
||
|
+
|
||
|
+struct bad_iret_stack {
|
||
|
+ void *error_entry_ret;
|
||
|
+ struct pt_regs regs;
|
||
|
+};
|
||
|
+
|
||
|
+asmlinkage notrace __kprobes
|
||
|
+struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
|
||
|
+{
|
||
|
+ /*
|
||
|
+ * This is called from entry_64.S early in handling a fault
|
||
|
+ * caused by a bad iret to user mode. To handle the fault
|
||
|
+ * correctly, we want move our stack frame to task_pt_regs
|
||
|
+ * and we want to pretend that the exception came from the
|
||
|
+ * iret target.
|
||
|
+ */
|
||
|
+ struct bad_iret_stack *new_stack =
|
||
|
+ container_of(task_pt_regs(current),
|
||
|
+ struct bad_iret_stack, regs);
|
||
|
+
|
||
|
+ /* Copy the IRET target to the new stack. */
|
||
|
+ memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
|
||
|
+
|
||
|
+ /* Copy the remainder of the stack from the current stack. */
|
||
|
+ memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
|
||
|
+
|
||
|
+ BUG_ON(!user_mode_vm(&new_stack->regs));
|
||
|
+ return new_stack;
|
||
|
+}
|
||
|
#endif
|
||
|
|
||
|
/*
|
||
|
@@ -694,7 +735,7 @@ void __init trap_init(void)
|
||
|
set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
|
||
|
set_intr_gate(X86_TRAP_TS, &invalid_TSS);
|
||
|
set_intr_gate(X86_TRAP_NP, &segment_not_present);
|
||
|
- set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
|
||
|
+ set_intr_gate(X86_TRAP_SS, stack_segment);
|
||
|
set_intr_gate(X86_TRAP_GP, &general_protection);
|
||
|
set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
|
||
|
set_intr_gate(X86_TRAP_MF, &coprocessor_error);
|
||
|
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
|
||
|
index fc0a147..8652aa4 100644
|
||
|
--- a/arch/x86/kernel/tsc.c
|
||
|
+++ b/arch/x86/kernel/tsc.c
|
||
|
@@ -959,14 +959,17 @@ void __init tsc_init(void)
|
||
|
|
||
|
x86_init.timers.tsc_pre_init();
|
||
|
|
||
|
- if (!cpu_has_tsc)
|
||
|
+ if (!cpu_has_tsc) {
|
||
|
+ setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
|
||
|
return;
|
||
|
+ }
|
||
|
|
||
|
tsc_khz = x86_platform.calibrate_tsc();
|
||
|
cpu_khz = tsc_khz;
|
||
|
|
||
|
if (!tsc_khz) {
|
||
|
mark_tsc_unstable("could not calculate TSC khz");
|
||
|
+ setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
|
||
|
return;
|
||
|
}
|
||
|
|
||
|
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
|
||
|
index 8375622..91e8680 100644
|
||
|
--- a/arch/x86/kvm/emulate.c
|
||
|
+++ b/arch/x86/kvm/emulate.c
|
||
|
@@ -459,11 +459,6 @@ register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, in
|
||
|
*reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
|
||
|
}
|
||
|
|
||
|
-static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
|
||
|
-{
|
||
|
- register_address_increment(ctxt, &ctxt->_eip, rel);
|
||
|
-}
|
||
|
-
|
||
|
static u32 desc_limit_scaled(struct desc_struct *desc)
|
||
|
{
|
||
|
u32 limit = get_desc_limit(desc);
|
||
|
@@ -537,6 +532,40 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
|
||
|
return emulate_exception(ctxt, NM_VECTOR, 0, false);
|
||
|
}
|
||
|
|
||
|
+static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
|
||
|
+ int cs_l)
|
||
|
+{
|
||
|
+ switch (ctxt->op_bytes) {
|
||
|
+ case 2:
|
||
|
+ ctxt->_eip = (u16)dst;
|
||
|
+ break;
|
||
|
+ case 4:
|
||
|
+ ctxt->_eip = (u32)dst;
|
||
|
+ break;
|
||
|
+#ifdef CONFIG_X86_64
|
||
|
+ case 8:
|
||
|
+ if ((cs_l && is_noncanonical_address(dst)) ||
|
||
|
+ (!cs_l && (dst >> 32) != 0))
|
||
|
+ return emulate_gp(ctxt, 0);
|
||
|
+ ctxt->_eip = dst;
|
||
|
+ break;
|
||
|
+#endif
|
||
|
+ default:
|
||
|
+ WARN(1, "unsupported eip assignment size\n");
|
||
|
+ }
|
||
|
+ return X86EMUL_CONTINUE;
|
||
|
+}
|
||
|
+
|
||
|
+static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
|
||
|
+{
|
||
|
+ return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
|
||
|
+}
|
||
|
+
|
||
|
+static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
|
||
|
+{
|
||
|
+ return assign_eip_near(ctxt, ctxt->_eip + rel);
|
||
|
+}
|
||
|
+
|
||
|
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
|
||
|
{
|
||
|
u16 selector;
|
||
|
@@ -1224,11 +1253,13 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
|
||
|
}
|
||
|
|
||
|
/* Does not support long mode */
|
||
|
-static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
|
||
|
- u16 selector, int seg)
|
||
|
+static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
|
||
|
+ u16 selector, int seg, u8 cpl,
|
||
|
+ bool in_task_switch,
|
||
|
+ struct desc_struct *desc)
|
||
|
{
|
||
|
struct desc_struct seg_desc;
|
||
|
- u8 dpl, rpl, cpl;
|
||
|
+ u8 dpl, rpl;
|
||
|
unsigned err_vec = GP_VECTOR;
|
||
|
u32 err_code = 0;
|
||
|
bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
|
||
|
@@ -1279,7 +1310,6 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
|
||
|
|
||
|
rpl = selector & 3;
|
||
|
dpl = seg_desc.dpl;
|
||
|
- cpl = ctxt->ops->cpl(ctxt);
|
||
|
|
||
|
switch (seg) {
|
||
|
case VCPU_SREG_SS:
|
||
|
@@ -1336,12 +1366,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
|
||
|
}
|
||
|
load:
|
||
|
ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
|
||
|
+ if (desc)
|
||
|
+ *desc = seg_desc;
|
||
|
return X86EMUL_CONTINUE;
|
||
|
exception:
|
||
|
emulate_exception(ctxt, err_vec, err_code, true);
|
||
|
return X86EMUL_PROPAGATE_FAULT;
|
||
|
}
|
||
|
|
||
|
+static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
|
||
|
+ u16 selector, int seg)
|
||
|
+{
|
||
|
+ u8 cpl = ctxt->ops->cpl(ctxt);
|
||
|
+ return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
|
||
|
+}
|
||
|
+
|
||
|
static void write_register_operand(struct operand *op)
|
||
|
{
|
||
|
/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
|
||
|
@@ -1681,17 +1720,31 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
|
||
|
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
|
||
|
{
|
||
|
int rc;
|
||
|
- unsigned short sel;
|
||
|
+ unsigned short sel, old_sel;
|
||
|
+ struct desc_struct old_desc, new_desc;
|
||
|
+ const struct x86_emulate_ops *ops = ctxt->ops;
|
||
|
+ u8 cpl = ctxt->ops->cpl(ctxt);
|
||
|
+
|
||
|
+ /* Assignment of RIP may only fail in 64-bit mode */
|
||
|
+ if (ctxt->mode == X86EMUL_MODE_PROT64)
|
||
|
+ ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
|
||
|
+ VCPU_SREG_CS);
|
||
|
|
||
|
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
|
||
|
|
||
|
- rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
|
||
|
+ rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
|
||
|
+ &new_desc);
|
||
|
if (rc != X86EMUL_CONTINUE)
|
||
|
return rc;
|
||
|
|
||
|
- ctxt->_eip = 0;
|
||
|
- memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
|
||
|
- return X86EMUL_CONTINUE;
|
||
|
+ rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
|
||
|
+ if (rc != X86EMUL_CONTINUE) {
|
||
|
+ WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
|
||
|
+ /* assigning eip failed; restore the old cs */
|
||
|
+ ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
|
||
|
+ return rc;
|
||
|
+ }
|
||
|
+ return rc;
|
||
|
}
|
||
|
|
||
|
static int em_grp2(struct x86_emulate_ctxt *ctxt)
|
||
|
@@ -1785,13 +1838,15 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
|
||
|
case 2: /* call near abs */ {
|
||
|
long int old_eip;
|
||
|
old_eip = ctxt->_eip;
|
||
|
- ctxt->_eip = ctxt->src.val;
|
||
|
+ rc = assign_eip_near(ctxt, ctxt->src.val);
|
||
|
+ if (rc != X86EMUL_CONTINUE)
|
||
|
+ break;
|
||
|
ctxt->src.val = old_eip;
|
||
|
rc = em_push(ctxt);
|
||
|
break;
|
||
|
}
|
||
|
case 4: /* jmp abs */
|
||
|
- ctxt->_eip = ctxt->src.val;
|
||
|
+ rc = assign_eip_near(ctxt, ctxt->src.val);
|
||
|
break;
|
||
|
case 5: /* jmp far */
|
||
|
rc = em_jmp_far(ctxt);
|
||
|
@@ -1823,26 +1878,43 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
|
||
|
|
||
|
static int em_ret(struct x86_emulate_ctxt *ctxt)
|
||
|
{
|
||
|
- ctxt->dst.type = OP_REG;
|
||
|
- ctxt->dst.addr.reg = &ctxt->_eip;
|
||
|
- ctxt->dst.bytes = ctxt->op_bytes;
|
||
|
- return em_pop(ctxt);
|
||
|
+ int rc;
|
||
|
+ unsigned long eip;
|
||
|
+
|
||
|
+ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
|
||
|
+ if (rc != X86EMUL_CONTINUE)
|
||
|
+ return rc;
|
||
|
+
|
||
|
+ return assign_eip_near(ctxt, eip);
|
||
|
}
|
||
|
|
||
|
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
|
||
|
{
|
||
|
int rc;
|
||
|
- unsigned long cs;
|
||
|
+ unsigned long eip, cs;
|
||
|
+ u16 old_cs;
|
||
|
+ struct desc_struct old_desc, new_desc;
|
||
|
+ const struct x86_emulate_ops *ops = ctxt->ops;
|
||
|
+
|
||
|
+ if (ctxt->mode == X86EMUL_MODE_PROT64)
|
||
|
+ ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
|
||
|
+ VCPU_SREG_CS);
|
||
|
|
||
|
- rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
|
||
|
+ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
|
||
|
if (rc != X86EMUL_CONTINUE)
|
||
|
return rc;
|
||
|
- if (ctxt->op_bytes == 4)
|
||
|
- ctxt->_eip = (u32)ctxt->_eip;
|
||
|
rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
|
||
|
if (rc != X86EMUL_CONTINUE)
|
||
|
return rc;
|
||
|
- rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
|
||
|
+ rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
|
||
|
+ &new_desc);
|
||
|
+ if (rc != X86EMUL_CONTINUE)
|
||
|
+ return rc;
|
||
|
+ rc = assign_eip_far(ctxt, eip, new_desc.l);
|
||
|
+ if (rc != X86EMUL_CONTINUE) {
|
||
|
+ WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
|
||
|
+ ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
|
||
|
+ }
|
||
|
return rc;
|
||
|
}
|
||
|
|
||
|
@@ -2091,7 +2163,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
|
||
|
{
|
||
|
struct x86_emulate_ops *ops = ctxt->ops;
|
||
|
struct desc_struct cs, ss;
|
||
|
- u64 msr_data;
|
||
|
+ u64 msr_data, rcx, rdx;
|
||
|
int usermode;
|
||
|
u16 cs_sel = 0, ss_sel = 0;
|
||
|
|
||
|
@@ -2107,6 +2179,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
|
||
|
else
|
||
|
usermode = X86EMUL_MODE_PROT32;
|
||
|
|
||
|
+ rcx = ctxt->regs[VCPU_REGS_RCX];
|
||
|
+ rdx = ctxt->regs[VCPU_REGS_RDX];
|
||
|
+
|
||
|
cs.dpl = 3;
|
||
|
ss.dpl = 3;
|
||
|
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
|
||
|
@@ -2124,6 +2199,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
|
||
|
ss_sel = cs_sel + 8;
|
||
|
cs.d = 0;
|
||
|
cs.l = 1;
|
||
|
+ if (is_noncanonical_address(rcx) ||
|
||
|
+ is_noncanonical_address(rdx))
|
||
|
+ return emulate_gp(ctxt, 0);
|
||
|
break;
|
||
|
}
|
||
|
cs_sel |= SELECTOR_RPL_MASK;
|
||
|
@@ -2132,8 +2210,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
|
||
|
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
|
||
|
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
|
||
|
|
||
|
- ctxt->_eip = ctxt->regs[VCPU_REGS_RDX];
|
||
|
- ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX];
|
||
|
+ ctxt->_eip = rdx;
|
||
|
+ ctxt->regs[VCPU_REGS_RSP] = rcx;
|
||
|
|
||
|
return X86EMUL_CONTINUE;
|
||
|
}
|
||
|
@@ -2222,6 +2300,7 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
|
||
|
struct tss_segment_16 *tss)
|
||
|
{
|
||
|
int ret;
|
||
|
+ u8 cpl;
|
||
|
|
||
|
ctxt->_eip = tss->ip;
|
||
|
ctxt->eflags = tss->flag | 2;
|
||
|
@@ -2244,23 +2323,30 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
|
||
|
set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
|
||
|
set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
|
||
|
|
||
|
+ cpl = tss->cs & 3;
|
||
|
+
|
||
|
/*
|
||
|
* Now load segment descriptors. If fault happenes at this stage
|
||
|
 	 * it is handled in a context of new task
 	 */
-	ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
+	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
+	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
+	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
+	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
+	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 
@@ -2339,6 +2425,7 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 				 struct tss_segment_32 *tss)
 {
 	int ret;
+	u8 cpl;
 
 	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
 		return emulate_gp(ctxt, 0);
@@ -2357,7 +2444,8 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 
 	/*
 	 * SDM says that segment selectors are loaded before segment
-	 * descriptors
+	 * descriptors. This is important because CPL checks will
+	 * use CS.RPL.
 	 */
 	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
@@ -2371,43 +2459,45 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 	 * If we're switching between Protected Mode and VM86, we need to make
 	 * sure to update the mode before loading the segment descriptors so
 	 * that the selectors are interpreted correctly.
-	 *
-	 * Need to get rflags to the vcpu struct immediately because it
-	 * influences the CPL which is checked at least when loading the segment
-	 * descriptors and when pushing an error code to the new kernel stack.
-	 *
-	 * TODO Introduce a separate ctxt->ops->set_cpl callback
 	 */
-	if (ctxt->eflags & X86_EFLAGS_VM)
+	if (ctxt->eflags & X86_EFLAGS_VM) {
 		ctxt->mode = X86EMUL_MODE_VM86;
-	else
+		cpl = 3;
+	} else {
 		ctxt->mode = X86EMUL_MODE_PROT32;
-
-	ctxt->ops->set_rflags(ctxt, ctxt->eflags);
+		cpl = tss->cs & 3;
+	}
 
 	/*
 	 * Now load segment descriptors. If fault happenes at this stage
 	 * it is handled in a context of new task
 	 */
-	ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
+	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
+					cpl, true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
+	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
+	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
+	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
+	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
+	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
+	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 
@@ -2629,10 +2719,13 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
 
 static int em_call(struct x86_emulate_ctxt *ctxt)
 {
+	int rc;
 	long rel = ctxt->src.val;
 
 	ctxt->src.val = (unsigned long)ctxt->_eip;
-	jmp_rel(ctxt, rel);
+	rc = jmp_rel(ctxt, rel);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
 	return em_push(ctxt);
 }
 
@@ -2641,34 +2734,50 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
 	u16 sel, old_cs;
 	ulong old_eip;
 	int rc;
+	struct desc_struct old_desc, new_desc;
+	const struct x86_emulate_ops *ops = ctxt->ops;
+	int cpl = ctxt->ops->cpl(ctxt);
 
-	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
 	old_eip = ctxt->_eip;
+	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
 
 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
-	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
+	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
+				       &new_desc);
+	if (rc != X86EMUL_CONTINUE)
 		return X86EMUL_CONTINUE;
 
-	ctxt->_eip = 0;
-	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
+	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
+	if (rc != X86EMUL_CONTINUE)
+		goto fail;
 
 	ctxt->src.val = old_cs;
 	rc = em_push(ctxt);
 	if (rc != X86EMUL_CONTINUE)
-		return rc;
+		goto fail;
 
 	ctxt->src.val = old_eip;
-	return em_push(ctxt);
+	rc = em_push(ctxt);
+	/* If we failed, we tainted the memory, but the very least we should
+	   restore cs */
+	if (rc != X86EMUL_CONTINUE)
+		goto fail;
+	return rc;
+fail:
+	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
+	return rc;
+
 }
 
 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
 {
 	int rc;
+	unsigned long eip;
 
-	ctxt->dst.type = OP_REG;
-	ctxt->dst.addr.reg = &ctxt->_eip;
-	ctxt->dst.bytes = ctxt->op_bytes;
-	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
+	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+	rc = assign_eip_near(ctxt, eip);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
@@ -2977,20 +3086,24 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt)
 
 static int em_loop(struct x86_emulate_ctxt *ctxt)
 {
+	int rc = X86EMUL_CONTINUE;
+
 	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
 	if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
 	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
-		jmp_rel(ctxt, ctxt->src.val);
+		rc = jmp_rel(ctxt, ctxt->src.val);
 
-	return X86EMUL_CONTINUE;
+	return rc;
 }
 
 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
 {
+	int rc = X86EMUL_CONTINUE;
+
 	if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
-		jmp_rel(ctxt, ctxt->src.val);
+		rc = jmp_rel(ctxt, ctxt->src.val);
 
-	return X86EMUL_CONTINUE;
+	return rc;
 }
 
 static int em_in(struct x86_emulate_ctxt *ctxt)
@@ -4168,7 +4281,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 		break;
 	case 0x70 ... 0x7f: /* jcc (short) */
 		if (test_cc(ctxt->b, ctxt->eflags))
-			jmp_rel(ctxt, ctxt->src.val);
+			rc = jmp_rel(ctxt, ctxt->src.val);
 		break;
 	case 0x8d: /* lea r16/r32, m */
 		ctxt->dst.val = ctxt->src.addr.mem.ea;
@@ -4207,7 +4320,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 		break;
 	case 0xe9: /* jmp rel */
 	case 0xeb: /* jmp rel short */
-		jmp_rel(ctxt, ctxt->src.val);
+		rc = jmp_rel(ctxt, ctxt->src.val);
 		ctxt->dst.type = OP_NONE; /* Disable writeback. */
 		break;
 	case 0xf4: /* hlt */
@@ -4310,7 +4423,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 		break;
 	case 0x80 ... 0x8f: /* jnz rel, etc*/
 		if (test_cc(ctxt->b, ctxt->eflags))
-			jmp_rel(ctxt, ctxt->src.val);
+			rc = jmp_rel(ctxt, ctxt->src.val);
 		break;
 	case 0x90 ... 0x9f: /* setcc r/m8 */
 		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index d68f99d..db336f9 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -263,8 +263,10 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
 		return;
 
 	timer = &pit->pit_state.pit_timer.timer;
+	mutex_lock(&pit->pit_state.lock);
 	if (hrtimer_cancel(timer))
 		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+	mutex_unlock(&pit->pit_state.lock);
 }
 
 static void destroy_pit_timer(struct kvm_pit *pit)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index fd6dec6..84f4bca 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2842,7 +2842,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return;
 
-	vcpu_clear_mmio_info(vcpu, ~0ul);
+	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
 	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
 	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b567285..86c74c0 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3212,7 +3212,7 @@ static int wrmsr_interception(struct vcpu_svm *svm)
 
 
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
-	if (svm_set_msr(&svm->vcpu, ecx, data)) {
+	if (kvm_set_msr(&svm->vcpu, ecx, data)) {
 		trace_kvm_msr_write_ex(ecx, data);
 		kvm_inject_gp(&svm->vcpu, 0);
 	} else {
@@ -3494,9 +3494,9 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 
 	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
 	    || !svm_exit_handlers[exit_code]) {
-		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
-		kvm_run->hw.hardware_exit_reason = exit_code;
-		return 0;
+		WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code);
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return 1;
 	}
 
 	return svm_exit_handlers[exit_code](svm);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 617b00b..2eb4e5a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -388,6 +388,7 @@ struct vcpu_vmx {
 		u16 fs_sel, gs_sel, ldt_sel;
 		int gs_ldt_reload_needed;
 		int fs_reload_needed;
+		unsigned long vmcs_host_cr4; /* May not match real cr4 */
 	} host_state;
 	struct {
 		int vm86_active;
@@ -2209,12 +2210,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 			break;
 		msr = find_msr_entry(vmx, msr_index);
 		if (msr) {
+			u64 old_msr_data = msr->data;
 			msr->data = data;
 			if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
 				preempt_disable();
-				kvm_set_shared_msr(msr->index, msr->data,
-						   msr->mask);
+				ret = kvm_set_shared_msr(msr->index, msr->data,
+							 msr->mask);
 				preempt_enable();
+				if (ret)
+					msr->data = old_msr_data;
 			}
 			break;
 		}
@@ -3622,16 +3626,21 @@ static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
  * Note that host-state that does change is set elsewhere. E.g., host-state
  * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
  */
-static void vmx_set_constant_host_state(void)
+static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 {
 	u32 low32, high32;
 	unsigned long tmpl;
 	struct desc_ptr dt;
+	unsigned long cr4;
 
 	vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */
-	vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
 	vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
 
+	/* Save the most likely value for this task's CR4 in the VMCS. */
+	cr4 = read_cr4();
+	vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
+	vmx->host_state.vmcs_host_cr4 = cr4;
+
 	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
 	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
 	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
@@ -3753,7 +3762,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 
 	vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
 	vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
-	vmx_set_constant_host_state();
+	vmx_set_constant_host_state(vmx);
 #ifdef CONFIG_X86_64
 	rdmsrl(MSR_FS_BASE, a);
 	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
@@ -4539,7 +4548,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
 	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
 		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
-	if (vmx_set_msr(vcpu, ecx, data) != 0) {
+	if (kvm_set_msr(vcpu, ecx, data) != 0) {
 		trace_kvm_msr_write_ex(ecx, data);
 		kvm_inject_gp(vcpu, 0);
 		return 1;
@@ -5557,6 +5566,18 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static int handle_invept(struct kvm_vcpu *vcpu)
+{
+	kvm_queue_exception(vcpu, UD_VECTOR);
+	return 1;
+}
+
+static int handle_invvpid(struct kvm_vcpu *vcpu)
+{
+	kvm_queue_exception(vcpu, UD_VECTOR);
+	return 1;
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -5599,6 +5620,8 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
 	[EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
 	[EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op,
 	[EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op,
+	[EXIT_REASON_INVEPT] = handle_invept,
+	[EXIT_REASON_INVVPID] = handle_invvpid,
 };
 
 static const int kvm_vmx_max_exit_handlers =
@@ -5783,6 +5806,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 	case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
 	case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
 	case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
+	case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
 		/*
 		 * VMX instructions trap unconditionally. This allows L1 to
 		 * emulate them for its L2 guest, i.e., allows 3-level nesting!
@@ -5912,10 +5936,10 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	    && kvm_vmx_exit_handlers[exit_reason])
 		return kvm_vmx_exit_handlers[exit_reason](vcpu);
 	else {
-		vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
-		vcpu->run->hw.hardware_exit_reason = exit_reason;
+		WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return 1;
 	}
-	return 0;
 }
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
@@ -6101,6 +6125,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	unsigned long cr4;
 
 	if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending) {
 		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -6131,6 +6156,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
 
+	cr4 = read_cr4();
+	if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
+		vmcs_writel(HOST_CR4, cr4);
+		vmx->host_state.vmcs_host_cr4 = cr4;
+	}
+
 	/* When single-stepping over STI and MOV SS, we must clear the
 	 * corresponding interruptibility bits in the guest state. Otherwise
 	 * vmentry fails as it then expects bit 14 (BS) in pending debug
@@ -6589,7 +6620,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	 * Other fields are different per CPU, and will be set later when
 	 * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
 	 */
-	vmx_set_constant_host_state();
+	vmx_set_constant_host_state(vmx);
 
 	/*
 	 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4b1be29..318a245 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -220,19 +220,24 @@ static void kvm_shared_msr_cpu_online(void)
 		shared_msr_update(i, shared_msrs_global.msrs[i]);
 }
 
-void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
+int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
 {
 	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
+	int err;
 
 	if (((value ^ smsr->values[slot].curr) & mask) == 0)
-		return;
+		return 0;
 	smsr->values[slot].curr = value;
-	wrmsrl(shared_msrs_global.msrs[slot], value);
+	err = checking_wrmsrl(shared_msrs_global.msrs[slot], value);
+	if (err)
+		return 1;
+
 	if (!smsr->registered) {
 		smsr->urn.on_user_return = kvm_on_user_return;
 		user_return_notifier_register(&smsr->urn);
 		smsr->registered = true;
 	}
+	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
 
@@ -858,7 +863,6 @@ void kvm_enable_efer_bits(u64 mask)
 }
 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
 
-
 /*
  * Writes msr value into into the appropriate "register".
  * Returns 0 on success, non-0 otherwise.
@@ -866,8 +870,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
  */
 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
+	switch (msr_index) {
+	case MSR_FS_BASE:
+	case MSR_GS_BASE:
+	case MSR_KERNEL_GS_BASE:
+	case MSR_CSTAR:
+	case MSR_LSTAR:
+		if (is_noncanonical_address(data))
+			return 1;
+		break;
+	case MSR_IA32_SYSENTER_EIP:
+	case MSR_IA32_SYSENTER_ESP:
+		/*
+		 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
+		 * non-canonical address is written on Intel but not on
+		 * AMD (which ignores the top 32-bits, because it does
+		 * not implement 64-bit SYSENTER).
+		 *
+		 * 64-bit code should hence be able to write a non-canonical
+		 * value on AMD. Making the address canonical ensures that
+		 * vmentry does not fail on Intel after writing a non-canonical
+		 * value, and that something deterministic happens if the guest
+		 * invokes 64-bit SYSENTER.
+		 */
+		data = get_canonical(data);
+	}
 	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
 }
+EXPORT_SYMBOL_GPL(kvm_set_msr);
 
 /*
  * Adapt set_msr() to msr_io()'s calling convention
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index cb80c29..1ce5611 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -78,15 +78,23 @@ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
 	vcpu->arch.mmio_gva = gva & PAGE_MASK;
 	vcpu->arch.access = access;
 	vcpu->arch.mmio_gfn = gfn;
+	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
+}
+
+static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
 }
 
 /*
- * Clear the mmio cache info for the given gva,
- * specially, if gva is ~0ul, we clear all mmio cache info.
+ * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
+ * clear all mmio cache info.
  */
+#define MMIO_GVA_ANY (~(gva_t)0)
+
 static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
+	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
 		return;
 
 	vcpu->arch.mmio_gva = 0;
@@ -94,7 +102,8 @@ static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
 
 static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
 {
-	if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
+	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
+	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
 		return true;
 
 	return false;
@@ -102,7 +111,8 @@ static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
 
 static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 {
-	if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
+	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
+	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
 		return true;
 
 	return false;
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
index 798ee6d..7ab1f52 100644
--- a/arch/xtensa/include/asm/unistd.h
+++ b/arch/xtensa/include/asm/unistd.h
@@ -394,7 +394,8 @@ __SYSCALL(174, sys_chroot, 1)
 #define __NR_pivot_root 175
 __SYSCALL(175, sys_pivot_root, 2)
 #define __NR_umount 176
-__SYSCALL(176, sys_umount, 2)
+__SYSCALL(176, sys_oldumount, 1)
+#define __ARCH_WANT_SYS_OLDUMOUNT
 #define __NR_swapoff 177
 __SYSCALL(177, sys_swapoff, 1)
 #define __NR_sync 178
diff --git a/block/blk-settings.c b/block/blk-settings.c
|
||
|
index 0be72a0..4aa6096 100644
|
||
|
--- a/block/blk-settings.c
|
||
|
+++ b/block/blk-settings.c
|
||
|
@@ -550,7 +550,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
|
||
|
bottom = max(b->physical_block_size, b->io_min) + alignment;
|
||
|
|
||
|
/* Verify that top and bottom intervals line up */
|
||
|
- if (max(top, bottom) & (min(top, bottom) - 1)) {
|
||
|
+ if (max(top, bottom) % min(top, bottom)) {
|
||
|
t->misaligned = 1;
|
||
|
ret = -1;
|
||
|
}
|
||
|
@@ -591,7 +591,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
|
||
|
|
||
|
/* Find lowest common alignment_offset */
|
||
|
t->alignment_offset = lcm(t->alignment_offset, alignment)
|
||
|
- & (max(t->physical_block_size, t->io_min) - 1);
|
||
|
+ % max(t->physical_block_size, t->io_min);
|
||
|
|
||
|
/* Verify that new alignment_offset is on a logical block boundary */
|
||
|
if (t->alignment_offset & (t->logical_block_size - 1)) {
|
||
|
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
|
||
|
index 9a87daa..f1c00c9 100644
|
||
|
--- a/block/scsi_ioctl.c
|
||
|
+++ b/block/scsi_ioctl.c
|
||
|
@@ -505,7 +505,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
|
||
|
|
||
|
if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
|
||
|
err = DRIVER_ERROR << 24;
|
||
|
- goto out;
|
||
|
+ goto error;
|
||
|
}
|
||
|
|
||
|
memset(sense, 0, sizeof(sense));
|
||
|
@@ -515,7 +515,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
|
||
|
|
||
|
blk_execute_rq(q, disk, rq, 0);
|
||
|
|
||
|
-out:
|
||
|
err = rq->errors & 0xff; /* only 8 bit SCSI status */
|
||
|
if (err) {
|
||
|
if (rq->sense_len && rq->sense) {
|
||
|
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
|
||
|
index d366a75..ca9a287 100644
|
||
|
--- a/drivers/ata/ahci.c
|
||
|
+++ b/drivers/ata/ahci.c
|
||
|
@@ -313,6 +313,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
|
||
|
{ PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
|
||
|
{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
|
||
|
{ PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
|
||
|
+ { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
|
||
|
+ { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
|
||
|
+ { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
|
||
|
+ { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
|
||
|
+ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
|
||
|
|
||
|
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
|
||
|
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
|
||
|
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
|
||
|
index d8af325..3723e5e 100644
|
||
|
--- a/drivers/ata/libata-sff.c
|
||
|
+++ b/drivers/ata/libata-sff.c
|
||
|
@@ -2008,13 +2008,15 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
|
||
|
|
||
|
DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
|
||
|
|
||
|
- /* software reset. causes dev0 to be selected */
|
||
|
- iowrite8(ap->ctl, ioaddr->ctl_addr);
|
||
|
- udelay(20); /* FIXME: flush */
|
||
|
- iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
|
||
|
- udelay(20); /* FIXME: flush */
|
||
|
- iowrite8(ap->ctl, ioaddr->ctl_addr);
|
||
|
- ap->last_ctl = ap->ctl;
|
||
|
+ if (ap->ioaddr.ctl_addr) {
|
||
|
+ /* software reset. causes dev0 to be selected */
|
||
|
+ iowrite8(ap->ctl, ioaddr->ctl_addr);
|
||
|
+ udelay(20); /* FIXME: flush */
|
||
|
+ iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
|
||
|
+ udelay(20); /* FIXME: flush */
|
||
|
+ iowrite8(ap->ctl, ioaddr->ctl_addr);
|
||
|
+ ap->last_ctl = ap->ctl;
|
||
|
+ }
|
||
|
|
||
|
/* wait the port to become ready */
|
||
|
return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
|
||
|
@@ -2215,10 +2217,6 @@ void ata_sff_error_handler(struct ata_port *ap)
|
||
|
|
||
|
spin_unlock_irqrestore(ap->lock, flags);
|
||
|
|
||
|
- /* ignore ata_sff_softreset if ctl isn't accessible */
|
||
|
- if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
|
||
|
- softreset = NULL;
|
||
|
-
|
||
|
/* ignore built-in hardresets if SCR access is not available */
|
||
|
if ((hardreset == sata_std_hardreset ||
|
||
|
hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
|
||
|
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
|
||
|
index 71eaf385..5929dde 100644
|
||
|
--- a/drivers/ata/pata_serverworks.c
|
||
|
+++ b/drivers/ata/pata_serverworks.c
|
||
|
@@ -252,12 +252,18 @@ static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev
|
||
|
pci_write_config_byte(pdev, 0x54, ultra_cfg);
|
||
|
}
|
||
|
|
||
|
-static struct scsi_host_template serverworks_sht = {
|
||
|
+static struct scsi_host_template serverworks_osb4_sht = {
|
||
|
+ ATA_BMDMA_SHT(DRV_NAME),
|
||
|
+ .sg_tablesize = LIBATA_DUMB_MAX_PRD,
|
||
|
+};
|
||
|
+
|
||
|
+static struct scsi_host_template serverworks_csb_sht = {
|
||
|
ATA_BMDMA_SHT(DRV_NAME),
|
||
|
};
|
||
|
|
||
|
static struct ata_port_operations serverworks_osb4_port_ops = {
|
||
|
.inherits = &ata_bmdma_port_ops,
|
||
|
+ .qc_prep = ata_bmdma_dumb_qc_prep,
|
||
|
.cable_detect = serverworks_cable_detect,
|
||
|
.mode_filter = serverworks_osb4_filter,
|
||
|
.set_piomode = serverworks_set_piomode,
|
||
|
@@ -266,6 +272,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
|
||
|
|
||
|
static struct ata_port_operations serverworks_csb_port_ops = {
|
||
|
.inherits = &serverworks_osb4_port_ops,
|
||
|
+ .qc_prep = ata_bmdma_qc_prep,
|
||
|
.mode_filter = serverworks_csb_filter,
|
||
|
};
|
||
|
|
||
|
@@ -405,6 +412,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
|
||
|
}
|
||
|
};
|
||
|
const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
|
||
|
+ struct scsi_host_template *sht = &serverworks_csb_sht;
|
||
|
int rc;
|
||
|
|
||
|
rc = pcim_enable_device(pdev);
|
||
|
@@ -418,6 +426,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
|
||
|
/* Select non UDMA capable OSB4 if we can't do fixups */
|
||
|
if (rc < 0)
|
||
|
ppi[0] = &info[1];
|
||
|
+ sht = &serverworks_osb4_sht;
|
||
|
}
|
||
|
/* setup CSB5/CSB6 : South Bridge and IDE option RAID */
|
||
|
else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
|
||
|
@@ -434,7 +443,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
|
||
|
ppi[1] = &ata_dummy_port_info;
|
||
|
}
|
||
|
|
||
|
- return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
|
||
|
+ return ata_pci_bmdma_init_one(pdev, ppi, sht, NULL, 0);
|
||
|
}
|
||
|
|
||
|
#ifdef CONFIG_PM
|
||
|
diff --git a/drivers/base/core.c b/drivers/base/core.c
|
||
|
index 936c3d3..bde1832 100644
|
||
|
--- a/drivers/base/core.c
|
||
|
+++ b/drivers/base/core.c
|
||
|
@@ -718,12 +718,12 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
|
||
|
return &dir->kobj;
|
||
|
}
|
||
|
|
||
|
+static DEFINE_MUTEX(gdp_mutex);
|
||
|
|
||
|
static struct kobject *get_device_parent(struct device *dev,
|
||
|
struct device *parent)
|
||
|
{
|
||
|
if (dev->class) {
|
||
|
- static DEFINE_MUTEX(gdp_mutex);
|
||
|
struct kobject *kobj = NULL;
|
||
|
struct kobject *parent_kobj;
|
||
|
struct kobject *k;
|
||
|
@@ -787,7 +787,9 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
|
||
|
glue_dir->kset != &dev->class->p->glue_dirs)
|
||
|
return;
|
||
|
|
||
|
+ mutex_lock(&gdp_mutex);
|
||
|
kobject_put(glue_dir);
|
||
|
+ mutex_unlock(&gdp_mutex);
|
||
|
}
|
||
|
|
||
|
static void cleanup_device_parent(struct device *dev)
|
||
|
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
|
||
|
index 8ab1eab..1db1289 100644
|
||
|
--- a/drivers/base/regmap/regmap-debugfs.c
|
||
|
+++ b/drivers/base/regmap/regmap-debugfs.c
|
||
|
@@ -244,7 +244,12 @@ static const struct file_operations regmap_access_fops = {
|
||
|
|
||
|
void regmap_debugfs_init(struct regmap *map)
|
||
|
{
|
||
|
- map->debugfs = debugfs_create_dir(dev_name(map->dev),
|
||
|
+ const char *devname = "dummy";
|
||
|
+
|
||
|
+ if (map->dev)
|
||
|
+ devname = dev_name(map->dev);
|
||
|
+
|
||
|
+ map->debugfs = debugfs_create_dir(devname,
|
||
|
regmap_debugfs_root);
|
||
|
if (!map->debugfs) {
|
||
|
dev_warn(map->dev, "Failed to create debugfs directory\n");
|
||
|
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
|
||
|
index e554542..8e81f85 100644
|
||
|
--- a/drivers/base/regmap/regmap.c
|
||
|
+++ b/drivers/base/regmap/regmap.c
|
||
|
@@ -600,6 +600,11 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
|
||
|
if (val_bytes == 1) {
|
||
|
wval = (void *)val;
|
||
|
} else {
|
||
|
+ if (!val_count) {
|
||
|
+ ret = -EINVAL;
|
||
|
+ goto out;
|
||
|
+ }
|
||
|
+
|
||
|
wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
|
||
|
if (!wval) {
|
||
|
ret = -ENOMEM;
|
||
|
diff --git a/drivers/char/random.c b/drivers/char/random.c
|
||
|
index 1052fc4..85172fa 100644
|
||
|
--- a/drivers/char/random.c
|
||
|
+++ b/drivers/char/random.c
|
||
|
@@ -932,8 +932,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
|
||
|
* pool while mixing, and hash one final time.
|
||
|
*/
|
||
|
sha_transform(hash.w, extract, workspace);
|
||
|
- memset(extract, 0, sizeof(extract));
|
||
|
- memset(workspace, 0, sizeof(workspace));
|
||
|
+ memzero_explicit(extract, sizeof(extract));
|
||
|
+ memzero_explicit(workspace, sizeof(workspace));
|
||
|
|
||
|
/*
|
||
|
* In case the hash function has some recognizable output
|
||
|
@@ -956,7 +956,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
|
||
|
}
|
||
|
|
||
|
memcpy(out, &hash, EXTRACT_SIZE);
|
||
|
- memset(&hash, 0, sizeof(hash));
|
||
|
+ memzero_explicit(&hash, sizeof(hash));
|
||
|
}
|
||
|
|
||
|
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
|
||
|
@@ -989,7 +989,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
|
||
|
}
|
||
|
|
||
|
/* Wipe data just returned from memory */
|
||
|
- memset(tmp, 0, sizeof(tmp));
|
||
|
+ memzero_explicit(tmp, sizeof(tmp));
|
||
|
|
||
|
return ret;
|
||
|
}
|
||
|
@@ -1027,7 +1027,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
|
||
|
}
|
||
|
|
||
|
/* Wipe data just returned from memory */
|
||
|
- memset(tmp, 0, sizeof(tmp));
|
||
|
+ memzero_explicit(tmp, sizeof(tmp));
|
||
|
|
||
|
return ret;
|
||
|
}
|
||
|
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
|
||
|
index 73464a6..0f0bf1a 100644
|
||
|
--- a/drivers/edac/mpc85xx_edac.c
|
||
|
+++ b/drivers/edac/mpc85xx_edac.c
|
||
|
@@ -577,7 +577,8 @@ static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
|
||
|
if (edac_op_state == EDAC_OPSTATE_INT) {
|
||
|
pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
|
||
|
res = devm_request_irq(&op->dev, pdata->irq,
|
||
|
- mpc85xx_l2_isr, IRQF_DISABLED,
|
||
|
+ mpc85xx_l2_isr,
|
||
|
+ IRQF_DISABLED | IRQF_SHARED,
|
||
|
"[EDAC] L2 err", edac_dev);
|
||
|
if (res < 0) {
|
||
|
printk(KERN_ERR
|
||
|
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
|
||
|
index b558810..b449572 100644
|
||
|
--- a/drivers/firewire/core-cdev.c
|
||
|
+++ b/drivers/firewire/core-cdev.c
|
||
|
@@ -1619,8 +1619,7 @@ static int dispatch_ioctl(struct client *client,
|
||
|
_IOC_SIZE(cmd) > sizeof(buffer))
|
||
|
return -ENOTTY;
|
||
|
|
||
|
- if (_IOC_DIR(cmd) == _IOC_READ)
|
||
|
- memset(&buffer, 0, _IOC_SIZE(cmd));
|
||
|
+ memset(&buffer, 0, sizeof(buffer));
|
||
|
|
||
|
if (_IOC_DIR(cmd) & _IOC_WRITE)
|
||
|
if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
|
||
|
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
|
||
|
index df62c39..01434ef 100644
|
||
|
--- a/drivers/gpu/drm/radeon/evergreen.c
|
||
|
+++ b/drivers/gpu/drm/radeon/evergreen.c
|
||
|
@@ -1176,6 +1176,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
|
||
|
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
|
||
|
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
|
||
|
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
|
||
|
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
|
||
|
}
|
||
|
} else {
|
||
|
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
|
||
|
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
|
||
|
index 00fb5aa..7ca1d47 100644
|
||
|
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
|
||
|
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
|
||
|
@@ -1915,6 +1915,14 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
|
||
|
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
|
||
|
};
|
||
|
int i;
|
||
|
+ u32 assumed_bpp = 2;
|
||
|
+
|
||
|
+ /*
|
||
|
+ * If using screen objects, then assume 32-bpp because that's what the
|
||
|
+ * SVGA device is assuming
|
||
|
+ */
|
||
|
+ if (dev_priv->sou_priv)
|
||
|
+ assumed_bpp = 4;
|
||
|
|
||
|
/* Add preferred mode */
|
||
|
{
|
||
|
@@ -1925,8 +1933,9 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
|
||
|
mode->vdisplay = du->pref_height;
|
||
|
vmw_guess_mode_timing(mode);
|
||
|
|
||
|
- if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
|
||
|
- mode->vdisplay)) {
|
||
|
+ if (vmw_kms_validate_mode_vram(dev_priv,
|
||
|
+ mode->hdisplay * assumed_bpp,
|
||
|
+ mode->vdisplay)) {
|
||
|
drm_mode_probed_add(connector, mode);
|
||
|
} else {
|
||
|
drm_mode_destroy(dev, mode);
|
||
|
@@ -1948,7 +1957,8 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
|
||
|
bmode->vdisplay > max_height)
|
||
|
continue;
|
||
|
|
||
|
- if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
|
||
|
+ if (!vmw_kms_validate_mode_vram(dev_priv,
|
||
|
+ bmode->hdisplay * assumed_bpp,
|
||
|
bmode->vdisplay))
|
||
|
continue;
|
||
|
|
||
|
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
|
||
|
index f4c3d28..3c8b2c4 100644
|
||
|
--- a/drivers/hv/channel.c
|
||
|
+++ b/drivers/hv/channel.c
|
||
|
@@ -207,8 +207,10 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
|
||
|
ret = vmbus_post_msg(open_msg,
|
||
|
sizeof(struct vmbus_channel_open_channel));
|
||
|
|
||
|
- if (ret != 0)
|
||
|
+ if (ret != 0) {
|
||
|
+ err = ret;
|
||
|
goto error1;
|
||
|
+ }
|
||
|
|
||
|
t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
|
||
|
if (t == 0) {
|
||
|
@@ -400,7 +402,6 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
|
||
|
u32 next_gpadl_handle;
|
||
|
unsigned long flags;
|
||
|
int ret = 0;
|
||
|
- int t;
|
||
|
|
||
|
next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
|
||
|
atomic_inc(&vmbus_connection.next_gpadl_handle);
|
||
|
@@ -447,9 +448,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
|
||
|
|
||
|
}
|
||
|
}
|
||
|
- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
|
||
|
- BUG_ON(t == 0);
|
||
|
-
|
||
|
+ wait_for_completion(&msginfo->waitevent);
|
||
|
|
||
|
/* At this point, we received the gpadl created msg */
|
||
|
*gpadl_handle = gpadlmsg->gpadl;
|
||
|
@@ -472,7 +471,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
|
||
|
struct vmbus_channel_gpadl_teardown *msg;
|
||
|
struct vmbus_channel_msginfo *info;
|
||
|
unsigned long flags;
|
||
|
- int ret, t;
|
||
|
+ int ret;
|
||
|
|
||
|
info = kmalloc(sizeof(*info) +
|
||
|
sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
|
||
|
@@ -494,11 +493,12 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
|
||
|
ret = vmbus_post_msg(msg,
|
||
|
sizeof(struct vmbus_channel_gpadl_teardown));
|
||
|
|
||
|
- BUG_ON(ret != 0);
|
||
|
- t = wait_for_completion_timeout(&info->waitevent, 5*HZ);
|
||
|
- BUG_ON(t == 0);
|
||
|
+ if (ret)
|
||
|
+ goto post_msg_err;
|
||
|
+
|
||
|
+ wait_for_completion(&info->waitevent);
|
||
|
|
||
|
- /* Received a torndown response */
|
||
|
+post_msg_err:
|
||
|
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
|
||
|
list_del(&info->msglistentry);
|
||
|
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
|
||
|
@@ -531,11 +531,28 @@ void vmbus_close(struct vmbus_channel *channel)
|
||
|
|
||
|
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));
|
||
|
|
||
|
- BUG_ON(ret != 0);
|
||
|
+ if (ret) {
|
||
|
+ pr_err("Close failed: close post msg return is %d\n", ret);
|
||
|
+ /*
|
||
|
+ * If we failed to post the close msg,
|
||
|
+ * it is perhaps better to leak memory.
|
||
|
+ */
|
||
|
+ return;
|
||
|
+ }
|
||
|
+
|
||
|
/* Tear down the gpadl for the channel's ring buffer */
|
||
|
- if (channel->ringbuffer_gpadlhandle)
|
||
|
- vmbus_teardown_gpadl(channel,
|
||
|
- channel->ringbuffer_gpadlhandle);
|
||
|
+ if (channel->ringbuffer_gpadlhandle) {
|
||
|
+ ret = vmbus_teardown_gpadl(channel,
|
||
|
+ channel->ringbuffer_gpadlhandle);
|
||
|
+ if (ret) {
|
||
|
+ pr_err("Close failed: teardown gpadl return %d\n", ret);
|
||
|
+ /*
|
||
|
+ * If we failed to teardown gpadl,
|
||
|
+ * it is perhaps better to leak memory.
|
||
|
+ */
|
||
|
+ return;
|
||
|
+ }
|
||
|
+ }
|
||
|
|
||
|
/* Cleanup the ring buffers for this channel */
|
||
|
hv_ringbuffer_cleanup(&channel->outbound);
|
||
|
@@ -543,8 +560,6 @@ void vmbus_close(struct vmbus_channel *channel)
|
||
|
|
||
|
free_pages((unsigned long)channel->ringbuffer_pages,
|
||
|
get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
|
||
|
-
|
||
|
-
|
||
|
}
|
||
|
EXPORT_SYMBOL_GPL(vmbus_close);
|
||
|
|
||
|
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
|
||
|
index 650c9f0..2d52a1b 100644
|
||
|
--- a/drivers/hv/connection.c
|
||
|
+++ b/drivers/hv/connection.c
|
||
|
@@ -294,10 +294,21 @@ int vmbus_post_msg(void *buffer, size_t buflen)
|
||
|
* insufficient resources. Retry the operation a couple of
|
||
|
* times before giving up.
|
||
|
*/
|
||
|
- while (retries < 3) {
|
||
|
- ret = hv_post_message(conn_id, 1, buffer, buflen);
|
||
|
- if (ret != HV_STATUS_INSUFFICIENT_BUFFERS)
|
||
|
+ while (retries < 10) {
|
||
|
+ ret = hv_post_message(conn_id, 1, buffer, buflen);
|
||
|
+
|
||
|
+ switch (ret) {
|
||
|
+ case HV_STATUS_INSUFFICIENT_BUFFERS:
|
||
|
+ ret = -ENOMEM;
|
||
|
+ case -ENOMEM:
|
||
|
+ break;
|
||
|
+ case HV_STATUS_SUCCESS:
|
||
|
return ret;
|
||
|
+ default:
|
||
|
+ pr_err("hv_post_msg() failed; error code:%d\n", ret);
|
||
|
+ return -EINVAL;
|
||
|
+ }
|
||
|
+
|
||
|
retries++;
|
||
|
msleep(100);
|
||
|
}
|
||
|
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
|
||
|
index 550c9ca..00aaaf38 100644
|
||
|
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
|
||
|
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
|
||
|
@@ -2146,6 +2146,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
|
||
|
if (!qp_init)
|
||
|
goto out;
|
||
|
|
||
|
+retry:
|
||
|
ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
|
||
|
ch->rq_size + srp_sq_size, 0);
|
||
|
if (IS_ERR(ch->cq)) {
|
||
|
@@ -2169,6 +2170,13 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
|
||
|
ch->qp = ib_create_qp(sdev->pd, qp_init);
|
||
|
if (IS_ERR(ch->qp)) {
|
||
|
ret = PTR_ERR(ch->qp);
|
||
|
+ if (ret == -ENOMEM) {
|
||
|
+ srp_sq_size /= 2;
|
||
|
+ if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
|
||
|
+ ib_destroy_cq(ch->cq);
|
||
|
+ goto retry;
|
||
|
+ }
|
||
|
+ }
|
||
|
printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
|
||
|
goto err_destroy_cq;
|
||
|
}
|
||
|
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
|
||
|
index 4c6a72d..9854a1f 100644
|
||
|
--- a/drivers/input/mouse/alps.c
|
||
|
+++ b/drivers/input/mouse/alps.c
|
||
|
@@ -787,7 +787,13 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
|
||
|
struct alps_data *priv = psmouse->private;
|
||
|
const struct alps_model_info *model = priv->i;
|
||
|
|
||
|
- if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */
|
||
|
+ /*
|
||
|
+ * Check if we are dealing with a bare PS/2 packet, presumably from
|
||
|
+ * a device connected to the external PS/2 port. Because bare PS/2
|
||
|
+ * protocol does not have enough constant bits to self-synchronize
|
||
|
+ * properly we only do this if the device is fully synchronized.
|
||
|
+ */
|
||
|
+ if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
|
||
|
if (psmouse->pktcnt == 3) {
|
||
|
alps_report_bare_ps2_packet(psmouse, psmouse->packet,
|
||
|
true);
|
||
|
@@ -1619,6 +1625,9 @@ int alps_init(struct psmouse *psmouse)
|
||
|
/* We are having trouble resyncing ALPS touchpads so disable it for now */
|
||
|
psmouse->resync_time = 0;
|
||
|
|
||
|
+ /* Allow 2 invalid packets without resetting device */
|
||
|
+ psmouse->resetafter = psmouse->pktsize * 2;
|
||
|
+
|
||
|
return 0;
|
||
|
|
||
|
init_fail:
|
||
|
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
|
||
|
index 32b1363..97e5f6f 100644
|
||
|
--- a/drivers/input/mouse/synaptics.c
|
||
|
+++ b/drivers/input/mouse/synaptics.c
|
||
|
@@ -506,6 +506,8 @@ static void synaptics_parse_agm(const unsigned char buf[],
|
||
|
priv->agm_pending = true;
|
||
|
}
|
||
|
|
||
|
+static bool is_forcepad;
|
||
|
+
|
||
|
static int synaptics_parse_hw_state(const unsigned char buf[],
|
||
|
struct synaptics_data *priv,
|
||
|
struct synaptics_hw_state *hw)
|
||
|
@@ -535,7 +537,7 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
|
||
|
hw->left = (buf[0] & 0x01) ? 1 : 0;
|
||
|
hw->right = (buf[0] & 0x02) ? 1 : 0;
|
||
|
|
||
|
- if (SYN_CAP_FORCEPAD(priv->ext_cap_0c)) {
|
||
|
+ if (is_forcepad) {
|
||
|
/*
|
||
|
* ForcePads, like Clickpads, use middle button
|
||
|
* bits to report primary button clicks.
|
||
|
@@ -1512,6 +1514,18 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
|
||
|
{ }
|
||
|
};
|
||
|
|
||
|
+static const struct dmi_system_id forcepad_dmi_table[] __initconst = {
|
||
|
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
|
||
|
+ {
|
||
|
+ .matches = {
|
||
|
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
|
||
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook Folio 1040 G1"),
|
||
|
+ },
|
||
|
+ },
|
||
|
+#endif
|
||
|
+ { }
|
||
|
+};
|
||
|
+
|
||
|
void __init synaptics_module_init(void)
|
||
|
{
|
||
|
const struct dmi_system_id *min_max_dmi;
|
||
|
@@ -1522,6 +1536,12 @@ void __init synaptics_module_init(void)
|
||
|
min_max_dmi = dmi_first_match(min_max_dmi_table);
|
||
|
if (min_max_dmi)
|
||
|
quirk_min_max = min_max_dmi->driver_data;
|
||
|
+
|
||
|
+ /*
|
||
|
+ * Unfortunately ForcePad capability is not exported over PS/2,
|
||
|
+ * so we have to resort to checking DMI.
|
||
|
+ */
|
||
|
+ is_forcepad = dmi_check_system(forcepad_dmi_table);
|
||
|
}
|
||
|
|
||
|
static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
|
||
|
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
|
||
|
index ac1b773..20d861b4 100644
|
||
|
--- a/drivers/input/mouse/synaptics.h
|
||
|
+++ b/drivers/input/mouse/synaptics.h
|
||
|
@@ -76,12 +76,9 @@
|
||
|
* for noise.
|
||
|
* 2 0x08 image sensor image sensor tracks 5 fingers, but only
|
||
|
* reports 2.
|
||
|
+ * 2 0x01 uniform clickpad whole clickpad moves instead of being
|
||
|
+ * hinged at the top.
|
||
|
* 2 0x20 report min query 0x0f gives min coord reported
|
||
|
- * 2 0x80 forcepad forcepad is a variant of clickpad that
|
||
|
- * does not have physical buttons but rather
|
||
|
- * uses pressure above certain threshold to
|
||
|
- * report primary clicks. Forcepads also have
|
||
|
- * clickpad bit set.
|
||
|
*/
|
||
|
#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
|
||
|
#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
|
||
|
@@ -90,7 +87,6 @@
|
||
|
#define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000)
|
||
|
#define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400)
|
||
|
#define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800)
|
||
|
-#define SYN_CAP_FORCEPAD(ex0c) ((ex0c) & 0x008000)
|
||
|
|
||
|
/* synaptics modes query bits */
|
||
|
#define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
|
||
|
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
|
||
|
index 1291673..ce715b1 100644
|
||
|
--- a/drivers/input/serio/i8042-x86ia64io.h
|
||
|
+++ b/drivers/input/serio/i8042-x86ia64io.h
|
||
|
@@ -101,6 +101,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
|
||
|
},
|
||
|
{
|
||
|
.matches = {
|
||
|
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
|
||
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
|
||
|
+ },
|
||
|
+ },
|
||
|
+ {
|
||
|
+ .matches = {
|
||
|
DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
|
||
|
DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
|
||
|
DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
|
||
|
@@ -609,6 +615,22 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
|
||
|
},
|
||
|
},
|
||
|
{
|
||
|
+ /* Fujitsu A544 laptop */
|
||
|
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
|
||
|
+ .matches = {
|
||
|
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
|
||
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
|
||
|
+ },
|
||
|
+ },
|
||
|
+ {
|
||
|
+ /* Fujitsu AH544 laptop */
|
||
|
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
|
||
|
+ .matches = {
|
||
|
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
|
||
|
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
|
||
|
+ },
|
||
|
+ },
|
||
|
+ {
|
||
|
/* Fujitsu U574 laptop */
|
||
|
/* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
|
||
|
.matches = {
|
||
|
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
|
||
|
index 6f99500..535b09c 100644
|
||
|
--- a/drivers/md/dm-bufio.c
|
||
|
+++ b/drivers/md/dm-bufio.c
|
||
|
@@ -467,6 +467,7 @@ static void __relink_lru(struct dm_buffer *b, int dirty)
|
||
|
b->list_mode = dirty;
|
||
|
list_del(&b->lru_list);
|
||
|
list_add(&b->lru_list, &c->lru[dirty]);
|
||
|
+ b->last_accessed = jiffies;
|
||
|
}
|
||
|
|
||
|
/*----------------------------------------------------------------
|
||
|
@@ -1378,9 +1379,9 @@ static void drop_buffers(struct dm_bufio_client *c)
|
||
|
|
||
|
/*
|
||
|
* Test if the buffer is unused and too old, and commit it.
|
||
|
- * At if noio is set, we must not do any I/O because we hold
|
||
|
- * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted to
|
||
|
- * different bufio client.
|
||
|
+ * And if GFP_NOFS is used, we must not do any I/O because we hold
|
||
|
+ * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
|
||
|
+ * rerouted to different bufio client.
|
||
|
*/
|
||
|
static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
|
||
|
unsigned long max_jiffies)
|
||
|
@@ -1388,7 +1389,7 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
|
||
|
if (jiffies - b->last_accessed < max_jiffies)
|
||
|
return 1;
|
||
|
|
||
|
- if (!(gfp & __GFP_IO)) {
|
||
|
+ if (!(gfp & __GFP_FS)) {
|
||
|
if (test_bit(B_READING, &b->state) ||
|
||
|
test_bit(B_WRITING, &b->state) ||
|
||
|
test_bit(B_DIRTY, &b->state))
|
||
|
@@ -1427,7 +1428,7 @@ static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
|
||
|
unsigned long r;
|
||
|
unsigned long nr_to_scan = sc->nr_to_scan;
|
||
|
|
||
|
- if (sc->gfp_mask & __GFP_IO)
|
||
|
+ if (sc->gfp_mask & __GFP_FS)
|
||
|
dm_bufio_lock(c);
|
||
|
else if (!dm_bufio_trylock(c))
|
||
|
return !nr_to_scan ? 0 : -1;
|
||
|
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
|
||
|
index 08d9a20..c69d0b7 100644
|
||
|
--- a/drivers/md/dm-log-userspace-transfer.c
|
||
|
+++ b/drivers/md/dm-log-userspace-transfer.c
|
||
|
@@ -272,7 +272,7 @@ int dm_ulog_tfr_init(void)
|
||
|
|
||
|
r = cn_add_callback(&ulog_cn_id, "dmlogusr", cn_ulog_callback);
|
||
|
if (r) {
|
||
|
- cn_del_callback(&ulog_cn_id);
|
||
|
+ kfree(prealloced_cn_msg);
|
||
|
return r;
|
||
|
}
|
||
|
|
||
|
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
|
||
|
index 68965e6..fb2335b 100644
|
||
|
--- a/drivers/md/dm-raid.c
|
||
|
+++ b/drivers/md/dm-raid.c
|
||
|
@@ -592,8 +592,7 @@ struct dm_raid_superblock {
|
||
|
__le32 layout;
|
||
|
__le32 stripe_sectors;
|
||
|
|
||
|
- __u8 pad[452]; /* Round struct to 512 bytes. */
|
||
|
- /* Always set to 0 when writing. */
|
||
|
+ /* Remainder of a logical block is zero-filled when writing (see super_sync()). */
|
||
|
} __packed;
|
||
|
|
||
|
static int read_disk_sb(struct md_rdev *rdev, int size)
|
||
|
@@ -628,7 +627,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
|
||
|
if ((r->raid_disk >= 0) && test_bit(Faulty, &r->flags))
|
||
|
failed_devices |= (1ULL << r->raid_disk);
|
||
|
|
||
|
- memset(sb, 0, sizeof(*sb));
|
||
|
+ memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
|
||
|
|
||
|
sb->magic = cpu_to_le32(DM_RAID_MAGIC);
|
||
|
sb->features = cpu_to_le32(0); /* No features yet */
|
||
|
@@ -663,7 +662,11 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
|
||
|
uint64_t events_sb, events_refsb;
|
||
|
|
||
|
rdev->sb_start = 0;
|
||
|
- rdev->sb_size = sizeof(*sb);
|
||
|
+ rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
|
||
|
+ if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
|
||
|
+ DMERR("superblock size of a logical block is no longer valid");
|
||
|
+ return -EINVAL;
|
||
|
+ }
|
||
|
|
||
|
ret = read_disk_sb(rdev, rdev->sb_size);
|
||
|
if (ret)
|
||
|
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
|
||
|
index c2cdefa..d4f7f95 100644
|
||
|
--- a/drivers/net/can/dev.c
|
||
|
+++ b/drivers/net/can/dev.c
|
||
|
@@ -359,7 +359,7 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx)
|
||
|
BUG_ON(idx >= priv->echo_skb_max);
|
||
|
|
||
|
if (priv->echo_skb[idx]) {
|
||
|
- kfree_skb(priv->echo_skb[idx]);
|
||
|
+ dev_kfree_skb_any(priv->echo_skb[idx]);
|
||
|
priv->echo_skb[idx] = NULL;
|
||
|
}
|
||
|
}
|
||
|
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
|
||
|
index 09b1da5..6c84234 100644
|
||
|
--- a/drivers/net/can/usb/esd_usb2.c
|
||
|
+++ b/drivers/net/can/usb/esd_usb2.c
|
||
|
@@ -1094,6 +1094,7 @@ static void esd_usb2_disconnect(struct usb_interface *intf)
|
||
|
}
|
||
|
}
|
||
|
unlink_all_urbs(dev);
|
||
|
+ kfree(dev);
|
||
|
}
|
||
|
}
|
||
|
|
||
|
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
|
||
|
index f5b9de4..f0e3764 100644
|
||
|
--- a/drivers/net/macvtap.c
|
||
|
+++ b/drivers/net/macvtap.c
|
||
|
@@ -634,6 +634,8 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
|
||
|
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
||
|
vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
|
||
|
vnet_hdr->csum_start = skb_checksum_start_offset(skb);
|
||
|
+ if (vlan_tx_tag_present(skb))
|
||
|
+ vnet_hdr->csum_start += VLAN_HLEN;
|
||
|
vnet_hdr->csum_offset = skb->csum_offset;
|
||
|
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
|
||
|
vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
|
||
|
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
|
||
|
index 21d7151..1207bb1 100644
|
||
|
--- a/drivers/net/ppp/ppp_generic.c
|
||
|
+++ b/drivers/net/ppp/ppp_generic.c
|
||
|
@@ -588,7 +588,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||
|
if (file == ppp->owner)
|
||
|
ppp_shutdown_interface(ppp);
|
||
|
}
|
||
|
- if (atomic_long_read(&file->f_count) <= 2) {
|
||
|
+ if (atomic_long_read(&file->f_count) < 2) {
|
||
|
ppp_release(NULL, file);
|
||
|
err = 0;
|
||
|
} else
|
||
|
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
|
||
|
index 063bfa8..9105493 100644
|
||
|
--- a/drivers/net/wireless/rt2x00/rt2800.h
|
||
|
+++ b/drivers/net/wireless/rt2x00/rt2800.h
|
||
|
@@ -1751,7 +1751,7 @@ struct mac_iveiv_entry {
|
||
|
* 2 - drop tx power by 12dBm,
|
||
|
* 3 - increase tx power by 6dBm
|
||
|
*/
|
||
|
-#define BBP1_TX_POWER_CTRL FIELD8(0x07)
|
||
|
+#define BBP1_TX_POWER_CTRL FIELD8(0x03)
|
||
|
#define BBP1_TX_ANTENNA FIELD8(0x18)
|
||
|
|
||
|
/*
|
||
|
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
|
||
|
index 664e93d..49baf0c 100644
|
||
|
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
|
||
|
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
|
||
|
@@ -1081,6 +1081,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
|
||
|
/* Ovislink */
|
||
|
{ USB_DEVICE(0x1b75, 0x3071) },
|
||
|
{ USB_DEVICE(0x1b75, 0x3072) },
|
||
|
+ { USB_DEVICE(0x1b75, 0xa200) },
|
||
|
/* Para */
|
||
|
{ USB_DEVICE(0x20b8, 0x8888) },
|
||
|
/* Pegatron */
|
||
|
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
|
||
|
index 4d792a2..c5bdbe9 100644
|
||
|
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
|
||
|
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
|
||
|
@@ -148,55 +148,29 @@ void rt2x00queue_align_frame(struct sk_buff *skb)
|
||
|
skb_trim(skb, frame_length);
|
||
|
}
|
||
|
|
||
|
-void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
|
||
|
+/*
|
||
|
+ * H/W needs L2 padding between the header and the paylod if header size
|
||
|
+ * is not 4 bytes aligned.
|
||
|
+ */
|
||
|
+void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
|
||
|
{
|
||
|
- unsigned int payload_length = skb->len - header_length;
|
||
|
- unsigned int header_align = ALIGN_SIZE(skb, 0);
|
||
|
- unsigned int payload_align = ALIGN_SIZE(skb, header_length);
|
||
|
- unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
|
||
|
+ unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
|
||
|
|
||
|
- /*
|
||
|
- * Adjust the header alignment if the payload needs to be moved more
|
||
|
- * than the header.
|
||
|
- */
|
||
|
- if (payload_align > header_align)
|
||
|
- header_align += 4;
|
||
|
-
|
||
|
- /* There is nothing to do if no alignment is needed */
|
||
|
- if (!header_align)
|
||
|
+ if (!l2pad)
|
||
|
return;
|
||
|
|
||
|
- /* Reserve the amount of space needed in front of the frame */
|
||
|
- skb_push(skb, header_align);
|
||
|
-
|
||
|
- /*
|
||
|
- * Move the header.
|
||
|
- */
|
||
|
- memmove(skb->data, skb->data + header_align, header_length);
|
||
|
-
|
||
|
- /* Move the payload, if present and if required */
|
||
|
- if (payload_length && payload_align)
|
||
|
- memmove(skb->data + header_length + l2pad,
|
||
|
- skb->data + header_length + l2pad + payload_align,
|
||
|
- payload_length);
|
||
|
-
|
||
|
- /* Trim the skb to the correct size */
|
||
|
- skb_trim(skb, header_length + l2pad + payload_length);
|
||
|
+ skb_push(skb, l2pad);
|
||
|
+ memmove(skb->data, skb->data + l2pad, hdr_len);
|
||
|
}
|
||
|
|
||
|
-void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
|
||
|
+void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
|
||
|
{
|
||
|
- /*
|
||
|
- * L2 padding is only present if the skb contains more than just the
|
||
|
- * IEEE 802.11 header.
|
||
|
- */
|
||
|
- unsigned int l2pad = (skb->len > header_length) ?
|
||
|
- L2PAD_SIZE(header_length) : 0;
|
||
|
+ unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
|
||
|
|
||
|
if (!l2pad)
|
||
|
return;
|
||
|
|
||
|
- memmove(skb->data + l2pad, skb->data, header_length);
|
||
|
+ memmove(skb->data + l2pad, skb->data, hdr_len);
|
||
|
skb_pull(skb, l2pad);
|
||
|
}
|
||
|
|
||
|
diff --git a/drivers/of/address.c b/drivers/of/address.c
|
||
|
index fcc680a..693cdaa 100644
|
||
|
--- a/drivers/of/address.c
|
||
|
+++ b/drivers/of/address.c
|
||
|
@@ -328,6 +328,21 @@ static struct of_bus *of_match_bus(struct device_node *np)
|
||
|
return NULL;
|
||
|
}
|
||
|
|
||
|
+static int of_empty_ranges_quirk(void)
|
||
|
+{
|
||
|
+ if (IS_ENABLED(CONFIG_PPC)) {
|
||
|
+ /* To save cycles, we cache the result */
|
||
|
+ static int quirk_state = -1;
|
||
|
+
|
||
|
+ if (quirk_state < 0)
|
||
|
+ quirk_state =
|
||
|
+ of_machine_is_compatible("Power Macintosh") ||
|
||
|
+ of_machine_is_compatible("MacRISC");
|
||
|
+ return quirk_state;
|
||
|
+ }
|
||
|
+ return false;
|
||
|
+}
|
||
|
+
|
||
|
static int of_translate_one(struct device_node *parent, struct of_bus *bus,
|
||
|
struct of_bus *pbus, u32 *addr,
|
||
|
int na, int ns, int pna, const char *rprop)
|
||
|
@@ -353,12 +368,10 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
|
||
|
* This code is only enabled on powerpc. --gcl
|
||
|
*/
|
||
|
ranges = of_get_property(parent, rprop, &rlen);
|
||
|
-#if !defined(CONFIG_PPC)
|
||
|
- if (ranges == NULL) {
|
||
|
+ if (ranges == NULL && !of_empty_ranges_quirk()) {
|
||
|
pr_err("OF: no ranges; cannot translate\n");
|
||
|
return 1;
|
||
|
}
|
||
|
-#endif /* !defined(CONFIG_PPC) */
|
||
|
if (ranges == NULL || rlen == 0) {
|
||
|
offset = of_read_number(addr, na);
|
||
|
memset(addr, 0, pna * 4);
|
||
|
diff --git a/drivers/of/base.c b/drivers/of/base.c
index a3cf2ee..c29a980 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -859,52 +859,6 @@ int of_property_read_string(struct device_node *np, const char *propname,
 EXPORT_SYMBOL_GPL(of_property_read_string);

 /**
- * of_property_read_string_index - Find and read a string from a multiple
- * strings property.
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @index: index of the string in the list of strings
- * @out_string: pointer to null terminated return string, modified only if
- * return value is 0.
- *
- * Search for a property in a device tree node and retrieve a null
- * terminated string value (pointer to data, not a copy) in the list of strings
- * contained in that property.
- * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
- * property does not have a value, and -EILSEQ if the string is not
- * null-terminated within the length of the property data.
- *
- * The out_string pointer is modified only if a valid string can be decoded.
- */
-int of_property_read_string_index(struct device_node *np, const char *propname,
- int index, const char **output)
-{
- struct property *prop = of_find_property(np, propname, NULL);
- int i = 0;
- size_t l = 0, total = 0;
- const char *p;
-
- if (!prop)
- return -EINVAL;
- if (!prop->value)
- return -ENODATA;
- if (strnlen(prop->value, prop->length) >= prop->length)
- return -EILSEQ;
-
- p = prop->value;
-
- for (i = 0; total < prop->length; total += l, p += l) {
- l = strlen(p) + 1;
- if (i++ == index) {
- *output = p;
- return 0;
- }
- }
- return -ENODATA;
-}
-EXPORT_SYMBOL_GPL(of_property_read_string_index);
-
-/**
 * of_property_match_string() - Find string in a list and return index
 * @np: pointer to node containing string list property
 * @propname: string list property name
@@ -930,7 +884,7 @@ int of_property_match_string(struct device_node *np, const char *propname,
 end = p + prop->length;

 for (i = 0; p < end; i++, p += l) {
- l = strlen(p) + 1;
+ l = strnlen(p, end - p) + 1;
 if (p + l > end)
 return -EILSEQ;
 pr_debug("comparing %s with %s\n", string, p);
@@ -942,39 +896,41 @@ int of_property_match_string(struct device_node *np, const char *propname,
 EXPORT_SYMBOL_GPL(of_property_match_string);

 /**
- * of_property_count_strings - Find and return the number of strings from a
- * multiple strings property.
+ * of_property_read_string_util() - Utility helper for parsing string properties
 * @np: device node from which the property value is to be read.
 * @propname: name of the property to be searched.
+ * @out_strs: output array of string pointers.
+ * @sz: number of array elements to read.
+ * @skip: Number of strings to skip over at beginning of list.
 *
- * Search for a property in a device tree node and retrieve the number of null
- * terminated string contain in it. Returns the number of strings on
- * success, -EINVAL if the property does not exist, -ENODATA if property
- * does not have a value, and -EILSEQ if the string is not null-terminated
- * within the length of the property data.
+ * Don't call this function directly. It is a utility helper for the
+ * of_property_read_string*() family of functions.
 */
-int of_property_count_strings(struct device_node *np, const char *propname)
+int of_property_read_string_helper(struct device_node *np, const char *propname,
+ const char **out_strs, size_t sz, int skip)
 {
 struct property *prop = of_find_property(np, propname, NULL);
- int i = 0;
- size_t l = 0, total = 0;
- const char *p;
+ int l = 0, i = 0;
+ const char *p, *end;

 if (!prop)
 return -EINVAL;
 if (!prop->value)
 return -ENODATA;
- if (strnlen(prop->value, prop->length) >= prop->length)
- return -EILSEQ;
-
 p = prop->value;
+ end = p + prop->length;

- for (i = 0; total < prop->length; total += l, p += l, i++)
- l = strlen(p) + 1;
-
- return i;
+ for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) {
+ l = strnlen(p, end - p) + 1;
+ if (p + l > end)
+ return -EILSEQ;
+ if (out_strs && i >= skip)
+ *out_strs++ = p;
+ }
+ i -= skip;
+ return i <= 0 ? -ENODATA : i;
 }
-EXPORT_SYMBOL_GPL(of_property_count_strings);
+EXPORT_SYMBOL_GPL(of_property_read_string_helper);

 /**
 * of_parse_phandle - Resolve a phandle property to a device_node pointer
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
index f24ffd7..5a0771c 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c
@@ -120,8 +120,9 @@ static void __init of_selftest_parse_phandle_with_args(void)
 pr_info("end - %s\n", passed_all ? "PASS" : "FAIL");
 }

-static void __init of_selftest_property_match_string(void)
+static void __init of_selftest_property_string(void)
 {
+ const char *strings[4];
 struct device_node *np;
 int rc;

@@ -139,13 +140,66 @@ static void __init of_selftest_property_match_string(void)
 rc = of_property_match_string(np, "phandle-list-names", "third");
 selftest(rc == 2, "third expected:0 got:%i\n", rc);
 rc = of_property_match_string(np, "phandle-list-names", "fourth");
- selftest(rc == -ENODATA, "unmatched string; rc=%i", rc);
+ selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
 rc = of_property_match_string(np, "missing-property", "blah");
- selftest(rc == -EINVAL, "missing property; rc=%i", rc);
+ selftest(rc == -EINVAL, "missing property; rc=%i\n", rc);
 rc = of_property_match_string(np, "empty-property", "blah");
- selftest(rc == -ENODATA, "empty property; rc=%i", rc);
+ selftest(rc == -ENODATA, "empty property; rc=%i\n", rc);
 rc = of_property_match_string(np, "unterminated-string", "blah");
- selftest(rc == -EILSEQ, "unterminated string; rc=%i", rc);
+ selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
+
+ /* of_property_count_strings() tests */
+ rc = of_property_count_strings(np, "string-property");
+ selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
+ rc = of_property_count_strings(np, "phandle-list-names");
+ selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
+ rc = of_property_count_strings(np, "unterminated-string");
+ selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
+ rc = of_property_count_strings(np, "unterminated-string-list");
+ selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
+
+ /* of_property_read_string_index() tests */
+ rc = of_property_read_string_index(np, "string-property", 0, strings);
+ selftest(rc == 0 && !strcmp(strings[0], "foobar"), "of_property_read_string_index() failure; rc=%i\n", rc);
+ strings[0] = NULL;
+ rc = of_property_read_string_index(np, "string-property", 1, strings);
+ selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
+ rc = of_property_read_string_index(np, "phandle-list-names", 0, strings);
+ selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
+ rc = of_property_read_string_index(np, "phandle-list-names", 1, strings);
+ selftest(rc == 0 && !strcmp(strings[0], "second"), "of_property_read_string_index() failure; rc=%i\n", rc);
+ rc = of_property_read_string_index(np, "phandle-list-names", 2, strings);
+ selftest(rc == 0 && !strcmp(strings[0], "third"), "of_property_read_string_index() failure; rc=%i\n", rc);
+ strings[0] = NULL;
+ rc = of_property_read_string_index(np, "phandle-list-names", 3, strings);
+ selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
+ strings[0] = NULL;
+ rc = of_property_read_string_index(np, "unterminated-string", 0, strings);
+ selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
+ rc = of_property_read_string_index(np, "unterminated-string-list", 0, strings);
+ selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
+ strings[0] = NULL;
+ rc = of_property_read_string_index(np, "unterminated-string-list", 2, strings); /* should fail */
+ selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
+ strings[1] = NULL;
+
+ /* of_property_read_string_array() tests */
+ rc = of_property_read_string_array(np, "string-property", strings, 4);
+ selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
+ rc = of_property_read_string_array(np, "phandle-list-names", strings, 4);
+ selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
+ rc = of_property_read_string_array(np, "unterminated-string", strings, 4);
+ selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
+ /* -- An incorrectly formed string should cause a failure */
+ rc = of_property_read_string_array(np, "unterminated-string-list", strings, 4);
+ selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
+ /* -- parsing the correctly formed strings should still work: */
+ strings[2] = NULL;
+ rc = of_property_read_string_array(np, "unterminated-string-list", strings, 2);
+ selftest(rc == 2 && strings[2] == NULL, "of_property_read_string_array() failure; rc=%i\n", rc);
+ strings[1] = NULL;
+ rc = of_property_read_string_array(np, "phandle-list-names", strings, 1);
+ selftest(rc == 1 && strings[1] == NULL, "Overwrote end of string array; rc=%i, str='%s'\n", rc, strings[1]);
 }

 static int __init of_selftest(void)
@@ -161,7 +215,7 @@ static int __init of_selftest(void)

 pr_info("start of selftest - you will see error messages\n");
 of_selftest_parse_phandle_with_args();
- of_selftest_property_match_string();
+ of_selftest_property_string();
 pr_info("end of selftest - %s\n", selftest_passed ? "PASS" : "FAIL");
 return 0;
 }
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 9e39df9..75dc402 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -237,6 +237,13 @@ static int pciehp_probe(struct pcie_device *dev)
 else if (pciehp_acpi_slot_detection_check(dev->port))
 goto err_out_none;

+ if (!dev->port->subordinate) {
+ /* Can happen if we run out of bus numbers during probe */
+ dev_err(&dev->device,
+ "Hotplug bridge without secondary bus, ignoring\n");
+ goto err_out_none;
+ }
+
 ctrl = pcie_init(dev);
 if (!ctrl) {
 dev_err(&dev->device, "Controller initialization failed\n");
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index a55e248..985ada7 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -173,7 +173,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 {
 struct pci_dev *pci_dev = to_pci_dev(dev);

- return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x\n",
+ return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
 pci_dev->vendor, pci_dev->device,
 pci_dev->subsystem_vendor, pci_dev->subsystem_device,
 (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 61bc33e..e587d00 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -28,6 +28,7 @@
 #include <linux/ioport.h>
 #include <linux/sched.h>
 #include <linux/ktime.h>
+#include <linux/mm.h>
 #include <asm/dma.h> /* isa_dma_bridge_buggy */
 #include "pci.h"

@@ -291,6 +292,25 @@ static void __devinit quirk_citrine(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);

+/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
+static void quirk_extend_bar_to_page(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
+ r->end = PAGE_SIZE - 1;
+ r->start = 0;
+ r->flags |= IORESOURCE_UNSET;
+ dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n",
+ i, r);
+ }
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
+
 /*
 * S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
 * If it's needed, re-allocate the region.
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c1a3fd8..4d04731 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -523,6 +523,17 @@ static const struct dmi_system_id video_vendor_dmi_table[] = {
 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"),
 },
 },
+ {
+ /*
+ * Note no video_set_backlight_video_vendor, we must use the
+ * acer interface, as there is no native backlight interface.
+ */
+ .ident = "Acer KAV80",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"),
+ },
+ },
 {}
 };

diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index de9f432..28c1bdb 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -1517,6 +1517,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
 },
 .driver_data = &samsung_broken_acpi_video,
 },
+ {
+ .callback = samsung_dmi_matched,
+ .ident = "NC210",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"),
+ DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"),
+ },
+ .driver_data = &samsung_broken_acpi_video,
+ },
 { },
 };
 MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 4c449b2..102267f 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -808,6 +808,11 @@ static int charger_manager_probe(struct platform_device *pdev)
 goto err_no_charger_stat;
 }

+ if (!desc->psy_fuel_gauge) {
+ dev_err(&pdev->dev, "No fuel gauge power supply defined\n");
+ return -EINVAL;
+ }
+
 /* Counting index only */
 while (desc->psy_charger_stat[i])
 i++;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index dcc39b6..185971c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1679,8 +1679,10 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
 * is no point trying to lock the door of an off-line device.
 */
 shost_for_each_device(sdev, shost) {
- if (scsi_device_online(sdev) && sdev->locked)
+ if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
 scsi_eh_lock_door(sdev);
+ sdev->was_reset = 0;
+ }
 }

 /*
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index b9f0192..efc494a 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -89,7 +89,10 @@ static int mid_spi_dma_init(struct dw_spi *dws)

 static void mid_spi_dma_exit(struct dw_spi *dws)
 {
+ dmaengine_terminate_all(dws->txchan);
 dma_release_channel(dws->txchan);
+
+ dmaengine_terminate_all(dws->rxchan);
 dma_release_channel(dws->rxchan);
 }

@@ -136,7 +139,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
 txconf.dst_addr = dws->dma_addr;
 txconf.dst_maxburst = LNW_DMA_MSIZE_16;
 txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ txconf.dst_addr_width = dws->dma_width;
 txconf.device_fc = false;

 txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
@@ -159,7 +162,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
 rxconf.src_addr = dws->dma_addr;
 rxconf.src_maxburst = LNW_DMA_MSIZE_16;
 rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ rxconf.src_addr_width = dws->dma_width;
 rxconf.device_fc = false;

 rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 469eb28..e3b845a 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1061,7 +1061,7 @@ static int configure_dma(struct pl022 *pl022)
 pl022->sgt_tx.nents, DMA_TO_DEVICE);
 err_tx_sgmap:
 dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
- pl022->sgt_tx.nents, DMA_FROM_DEVICE);
+ pl022->sgt_rx.nents, DMA_FROM_DEVICE);
 err_rx_sgmap:
 sg_free_table(&pl022->sgt_tx);
 err_alloc_tx_sg:
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index cd82b56..2db80b1 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -109,15 +109,44 @@ static struct ad5933_platform_data ad5933_default_pdata = {
 };

 static struct iio_chan_spec ad5933_channels[] = {
- IIO_CHAN(IIO_TEMP, 0, 1, 1, NULL, 0, 0, 0,
- 0, AD5933_REG_TEMP_DATA, IIO_ST('s', 14, 16, 0), 0),
- /* Ring Channels */
- IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "real_raw", 0, 0,
- IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
- AD5933_REG_REAL_DATA, 0, IIO_ST('s', 16, 16, 0), 0),
- IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "imag_raw", 0, 0,
- IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
- AD5933_REG_IMAG_DATA, 1, IIO_ST('s', 16, 16, 0), 0),
+ {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .processed_val = 1,
+ .channel = 0,
+ .address = AD5933_REG_TEMP_DATA,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 14,
+ .storagebits = 16,
+ },
+ }, { /* Ring Channels */
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .extend_name = "real",
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+ .address = AD5933_REG_REAL_DATA,
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ },
+ }, {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .extend_name = "imag",
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+ .address = AD5933_REG_IMAG_DATA,
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ },
+ },
 };

 static int ad5933_i2c_write(struct i2c_client *client,
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index c45b23bb..629a6ed 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -96,7 +96,7 @@ static int ade7758_ring_preenable(struct iio_dev *indio_dev)
 size_t d_size;
 unsigned channel;

- if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
+ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
 return -EINVAL;

 channel = find_first_bit(indio_dev->active_scan_mask,
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 712999c7..eff40fd 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -3289,8 +3289,7 @@ static void transport_complete_qf(struct se_cmd *cmd)

 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
 ret = cmd->se_tfo->queue_status(cmd);
- if (ret)
- goto out;
+ goto out;
 }

 switch (cmd->data_direction) {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
|
||
|
index d53f396..6f8f985 100644
|
||
|
--- a/drivers/tty/serial/8250/8250_pci.c
|
||
|
+++ b/drivers/tty/serial/8250/8250_pci.c
|
||
|
@@ -1164,6 +1164,7 @@ pci_xr17c154_setup(struct serial_private *priv,
|
||
|
#define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001
|
||
|
#define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d
|
||
|
#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
|
||
|
+#define PCI_DEVICE_ID_INTEL_QRK_UART 0x0936
|
||
|
|
||
|
/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
|
||
|
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
|
||
|
@@ -1686,6 +1687,13 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
|
||
|
.init = pci_eg20t_init,
|
||
|
.setup = pci_default_setup,
|
||
|
},
|
||
|
+ {
|
||
|
+ .vendor = PCI_VENDOR_ID_INTEL,
|
||
|
+ .device = PCI_DEVICE_ID_INTEL_QRK_UART,
|
||
|
+ .subvendor = PCI_ANY_ID,
|
||
|
+ .subdevice = PCI_ANY_ID,
|
||
|
+ .setup = pci_default_setup,
|
||
|
+ },
|
||
|
/*
|
||
|
* Cronyx Omega PCI (PLX-chip based)
|
||
|
*/
|
||
|
@@ -1894,6 +1902,7 @@ enum pci_board_num_t {
|
||
|
pbn_ADDIDATA_PCIe_4_3906250,
|
||
|
pbn_ADDIDATA_PCIe_8_3906250,
|
||
|
pbn_ce4100_1_115200,
|
||
|
+ pbn_qrk,
|
||
|
pbn_omegapci,
|
||
|
pbn_NETMOS9900_2s_115200,
|
||
|
pbn_brcm_trumanage,
|
||
|
@@ -2592,6 +2601,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
|
||
|
.base_baud = 921600,
|
||
|
.reg_shift = 2,
|
||
|
},
|
||
|
+ [pbn_qrk] = {
|
||
|
+ .flags = FL_BASE0,
|
||
|
+ .num_ports = 1,
|
||
|
+ .base_baud = 2764800,
|
||
|
+ .reg_shift = 2,
|
||
|
+ },
|
||
|
[pbn_omegapci] = {
|
||
|
.flags = FL_BASE0,
|
||
|
.num_ports = 8,
|
||
|
@@ -4164,6 +4179,12 @@ static struct pci_device_id serial_pci_tbl[] = {
|
||
|
pbn_ce4100_1_115200 },
|
||
|
|
||
|
/*
|
||
|
+ * Intel Quark x1000
|
||
|
+ */
|
||
|
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_UART,
|
||
|
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
|
||
|
+ pbn_qrk },
|
||
|
+ /*
|
||
|
* Cronyx Omega PCI
|
||
|
*/
|
||
|
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_CRONYX_OMEGA,
|
||
|
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
|
||
|
index 7ffdbee..fe090e2 100644
|
||
|
--- a/drivers/tty/serial/serial_core.c
|
||
|
+++ b/drivers/tty/serial/serial_core.c
|
||
|
@@ -358,7 +358,7 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
|
||
|
* The spd_hi, spd_vhi, spd_shi, spd_warp kludge...
|
||
|
* Die! Die! Die!
|
||
|
*/
|
||
|
- if (baud == 38400)
|
||
|
+ if (try == 0 && baud == 38400)
|
||
|
baud = altbaud;
|
||
|
|
||
|
/*
|
||
|
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
|
||
|
index 125bd3f..613a3df 100644
|
||
|
--- a/drivers/tty/tty_io.c
|
||
|
+++ b/drivers/tty/tty_io.c
|
||
|
@@ -1633,6 +1633,8 @@ int tty_release(struct inode *inode, struct file *filp)
|
||
|
int devpts;
|
||
|
int idx;
|
||
|
char buf[64];
|
||
|
+ long timeout = 0;
|
||
|
+ int once = 1;
|
||
|
|
||
|
if (tty_paranoia_check(tty, inode, __func__))
|
||
|
return 0;
|
||
|
@@ -1713,11 +1715,18 @@ int tty_release(struct inode *inode, struct file *filp)
|
||
|
if (!do_sleep)
|
||
|
break;
|
||
|
|
||
|
- printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
|
||
|
+ if (once) {
|
||
|
+ once = 0;
|
||
|
+ printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
|
||
|
__func__, tty_name(tty, buf));
|
||
|
+ }
|
||
|
tty_unlock();
|
||
|
mutex_unlock(&tty_mutex);
|
||
|
- schedule();
|
||
|
+ schedule_timeout_killable(timeout);
|
||
|
+ if (timeout < 120 * HZ)
|
||
|
+ timeout = 2 * timeout + 1;
|
||
|
+ else
|
||
|
+ timeout = MAX_SCHEDULE_TIMEOUT;
|
||
|
}
|
||
|
|
||
|
/*
|
||
|
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
|
||
|
index 8308fc7..87025d0 100644
|
||
|
--- a/drivers/tty/vt/consolemap.c
|
||
|
+++ b/drivers/tty/vt/consolemap.c
|
||
|
@@ -518,6 +518,10 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
|
||
|
|
||
|
/* Save original vc_unipagdir_loc in case we allocate a new one */
|
||
|
p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
|
||
|
+
|
||
|
+ if (!p)
|
||
|
+ return -EINVAL;
|
||
|
+
|
||
|
if (p->readonly) return -EIO;
|
||
|
|
||
|
if (!ct) return 0;
|
||
|
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
|
||
|
index 2f2540f..8f4a628 100644
|
||
|
--- a/drivers/usb/class/cdc-acm.c
|
||
|
+++ b/drivers/usb/class/cdc-acm.c
|
||
|
@@ -910,11 +910,12 @@ static void acm_tty_set_termios(struct tty_struct *tty,
|
||
|
/* FIXME: Needs to clear unsupported bits in the termios */
|
||
|
acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
|
||
|
|
||
|
- if (!newline.dwDTERate) {
|
||
|
+ if (C_BAUD(tty) == B0) {
|
||
|
newline.dwDTERate = acm->line.dwDTERate;
|
||
|
newctrl &= ~ACM_CTRL_DTR;
|
||
|
- } else
|
||
|
+ } else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) {
|
||
|
newctrl |= ACM_CTRL_DTR;
|
||
|
+ }
|
||
|
|
||
|
if (newctrl != acm->ctrlout)
|
||
|
acm_set_control(acm, acm->ctrlout = newctrl);
|
||
|
@@ -1601,6 +1602,7 @@ static const struct usb_device_id acm_ids[] = {
|
||
|
{ USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
|
||
|
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
|
||
|
},
|
||
|
+ { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
|
||
|
{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
|
||
|
},
|
||
|
/* Motorola H24 HSPA module: */
|
||
|
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
|
||
|
index f4ab5c9..da5b0f8 100644
|
||
|
--- a/drivers/usb/core/hcd.c
|
||
|
+++ b/drivers/usb/core/hcd.c
|
||
|
@@ -1896,6 +1896,8 @@ int usb_alloc_streams(struct usb_interface *interface,
|
||
|
return -EINVAL;
|
||
|
if (dev->speed != USB_SPEED_SUPER)
|
||
|
return -EINVAL;
|
||
|
+ if (dev->state < USB_STATE_CONFIGURED)
|
||
|
+ return -ENODEV;
|
||
|
|
||
|
/* Streams only apply to bulk endpoints. */
|
||
|
for (i = 0; i < num_eps; i++)
|
||
|
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
|
||
|
index 48aefff..05564d2 100644
|
||
|
--- a/drivers/usb/core/hub.c
|
||
|
+++ b/drivers/usb/core/hub.c
|
||
|
@@ -3477,6 +3477,9 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
|
||
|
struct usb_qualifier_descriptor *qual;
|
||
|
int status;
|
||
|
|
||
|
+ if (udev->quirks & USB_QUIRK_DEVICE_QUALIFIER)
|
||
|
+ return;
|
||
|
+
|
||
|
qual = kmalloc (sizeof *qual, GFP_KERNEL);
|
||
|
if (qual == NULL)
|
||
|
return;
|
||
|
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
|
||
|
index 5e8e69d..d482141 100644
|
||
|
--- a/drivers/usb/core/quirks.c
|
||
|
+++ b/drivers/usb/core/quirks.c
|
||
|
@@ -113,6 +113,12 @@ static const struct usb_device_id usb_quirk_list[] = {
|
||
|
{ USB_DEVICE(0x04e8, 0x6601), .driver_info =
|
||
|
USB_QUIRK_CONFIG_INTF_STRINGS },
|
||
|
|
||
|
+ { USB_DEVICE(0x04f3, 0x009b), .driver_info =
|
||
|
+ USB_QUIRK_DEVICE_QUALIFIER },
|
||
|
+
|
||
|
+ { USB_DEVICE(0x04f3, 0x016f), .driver_info =
|
||
|
+ USB_QUIRK_DEVICE_QUALIFIER },
|
||
|
+
|
||
|
/* Roland SC-8820 */
|
||
|
{ USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
|
||
|
|
||
|
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
|
||
|
index a860254..536ef99 100644
|
||
|
--- a/drivers/usb/dwc3/gadget.c
|
||
|
+++ b/drivers/usb/dwc3/gadget.c
|
||
|
@@ -638,12 +638,11 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
|
||
|
if (!usb_endpoint_xfer_isoc(desc))
|
||
|
return 0;
|
||
|
|
||
|
- memset(&trb_link, 0, sizeof(trb_link));
|
||
|
-
|
||
|
/* Link TRB for ISOC. The HWO bit is never reset */
|
||
|
trb_st_hw = &dep->trb_pool[0];
|
||
|
|
||
|
trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
|
||
|
+ memset(trb_link, 0, sizeof(*trb_link));
|
||
|
|
||
|
trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
|
||
|
trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
|
||
|
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
|
||
|
index ac33957..19074db 100644
|
||
|
--- a/drivers/usb/serial/cp210x.c
|
||
|
+++ b/drivers/usb/serial/cp210x.c
|
||
|
@@ -128,6 +128,7 @@ static const struct usb_device_id id_table[] = {
|
||
|
{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
|
||
|
{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
|
||
|
{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
|
||
|
+ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
|
||
|
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
|
||
|
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
|
||
|
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
|
||
|
@@ -160,7 +161,9 @@ static const struct usb_device_id id_table[] = {
|
||
|
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
|
||
|
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
|
||
|
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
|
||
|
+ { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
|
||
|
{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
|
||
|
+ { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */
|
||
|
{ USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
|
||
|
{ USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
|
||
|
{ USB_DEVICE(0x1FB9, 0x0100) }, /* Lake Shore Model 121 Current Source */
|
||
|
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
|
||
|
index 8425e9e..a89433b 100644
|
||
|
--- a/drivers/usb/serial/ftdi_sio.c
|
||
|
+++ b/drivers/usb/serial/ftdi_sio.c
|
||
|
@@ -156,6 +156,7 @@ static struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
|
||
|
* /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
|
||
|
*/
|
||
|
static struct usb_device_id id_table_combined [] = {
|
||
|
+ { USB_DEVICE(FTDI_VID, FTDI_BRICK_PID) },
|
||
|
{ USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) },
|
||
|
{ USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
|
||
|
{ USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
|
||
|
@@ -685,6 +686,10 @@ static struct usb_device_id id_table_combined [] = {
|
||
|
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
|
||
|
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
|
||
|
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
|
||
|
+ { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
|
||
|
+ { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
|
||
|
+ { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
|
||
|
+ { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
|
||
|
{ USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
|
||
|
{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
|
||
|
{ USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
|
||
|
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
|
||
|
index 7628b91..64ee791 100644
|
||
|
--- a/drivers/usb/serial/ftdi_sio_ids.h
|
||
|
+++ b/drivers/usb/serial/ftdi_sio_ids.h
|
||
|
@@ -30,6 +30,12 @@
|
||
|
|
||
|
/*** third-party PIDs (using FTDI_VID) ***/
|
||
|
|
||
|
+/*
|
||
|
+ * Certain versions of the official Windows FTDI driver reprogrammed
|
||
|
+ * counterfeit FTDI devices to PID 0. Support these devices anyway.
|
||
|
+ */
|
||
|
+#define FTDI_BRICK_PID 0x0000
|
||
|
+
|
||
|
#define FTDI_LUMEL_PD12_PID 0x6002
|
||
|
|
||
|
/*
|
||
|
@@ -142,12 +148,19 @@
|
||
|
/*
|
||
|
* Xsens Technologies BV products (http://www.xsens.com).
|
||
|
*/
|
||
|
-#define XSENS_CONVERTER_0_PID 0xD388
|
||
|
-#define XSENS_CONVERTER_1_PID 0xD389
|
||
|
+#define XSENS_VID 0x2639
|
||
|
+#define XSENS_AWINDA_STATION_PID 0x0101
|
||
|
+#define XSENS_AWINDA_DONGLE_PID 0x0102
|
||
|
+#define XSENS_MTW_PID 0x0200 /* Xsens MTw */
|
||
|
+#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
|
||
|
+
|
||
|
+/* Xsens devices using FTDI VID */
|
||
|
+#define XSENS_CONVERTER_0_PID 0xD388 /* Xsens USB converter */
|
||
|
+#define XSENS_CONVERTER_1_PID 0xD389 /* Xsens Wireless Receiver */
|
||
|
#define XSENS_CONVERTER_2_PID 0xD38A
|
||
|
-#define XSENS_CONVERTER_3_PID 0xD38B
|
||
|
-#define XSENS_CONVERTER_4_PID 0xD38C
|
||
|
-#define XSENS_CONVERTER_5_PID 0xD38D
|
||
|
+#define XSENS_CONVERTER_3_PID 0xD38B /* Xsens USB-serial converter */
|
||
|
+#define XSENS_CONVERTER_4_PID 0xD38C /* Xsens Wireless Receiver */
|
||
|
+#define XSENS_CONVERTER_5_PID 0xD38D /* Xsens Awinda Station */
|
||
|
#define XSENS_CONVERTER_6_PID 0xD38E
|
||
|
#define XSENS_CONVERTER_7_PID 0xD38F
|
||
|
|
||
|
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
|
||
|
index 4a9a75e..c3a53ac 100644
|
||
|
--- a/drivers/usb/serial/kobil_sct.c
|
||
|
+++ b/drivers/usb/serial/kobil_sct.c
|
||
|
@@ -447,7 +447,7 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
|
||
|
);
|
||
|
|
||
|
priv->cur_pos = priv->cur_pos + length;
|
||
|
- result = usb_submit_urb(port->write_urb, GFP_NOIO);
|
||
|
+ result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
|
||
|
dbg("%s - port %d Send write URB returns: %i",
|
||
|
__func__, port->number, result);
|
||
|
todo = priv->filled - priv->cur_pos;
|
||
|
@@ -463,7 +463,7 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
|
||
|
if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
|
||
|
priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) {
|
||
|
result = usb_submit_urb(port->interrupt_in_urb,
|
||
|
- GFP_NOIO);
|
||
|
+ GFP_ATOMIC);
|
||
|
dbg("%s - port %d Send read URB returns: %i",
|
||
|
__func__, port->number, result);
|
||
|
}
|
||
|
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
|
||
|
index 1f85006..58b7cec 100644
|
||
|
--- a/drivers/usb/serial/opticon.c
|
||
|
+++ b/drivers/usb/serial/opticon.c
|
||
|
@@ -293,7 +293,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
|
||
|
|
||
|
/* The conncected devices do not have a bulk write endpoint,
|
||
|
* to transmit data to de barcode device the control endpoint is used */
|
||
|
- dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
|
||
|
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
|
||
|
if (!dr) {
|
||
|
dev_err(&port->dev, "out of memory\n");
|
||
|
count = -ENOMEM;
|
||
|
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
|
||
|
index 703ebe7..d8232df 100644
|
||
|
--- a/drivers/usb/serial/option.c
|
||
|
+++ b/drivers/usb/serial/option.c
|
||
|
@@ -269,6 +269,7 @@ static void option_instat_callback(struct urb *urb);
|
||
|
#define TELIT_PRODUCT_DE910_DUAL 0x1010
|
||
|
#define TELIT_PRODUCT_UE910_V2 0x1012
|
||
|
#define TELIT_PRODUCT_LE920 0x1200
|
||
|
+#define TELIT_PRODUCT_LE910 0x1201
|
||
|
|
||
|
/* ZTE PRODUCTS */
|
||
|
#define ZTE_VENDOR_ID 0x19d2
|
||
|
@@ -362,6 +363,7 @@ static void option_instat_callback(struct urb *urb);
|
||
|
|
||
|
/* Haier products */
|
||
|
#define HAIER_VENDOR_ID 0x201e
|
||
|
+#define HAIER_PRODUCT_CE81B 0x10f8
|
||
|
#define HAIER_PRODUCT_CE100 0x2009
|
||
|
|
||
|
/* Cinterion (formerly Siemens) products */
|
||
|
@@ -589,6 +591,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
|
||
|
.reserved = BIT(3) | BIT(4),
|
||
|
};
|
||
|
|
||
|
+static const struct option_blacklist_info telit_le910_blacklist = {
|
||
|
+ .sendsetup = BIT(0),
|
||
|
+ .reserved = BIT(1) | BIT(2),
|
||
|
+};
|
||
|
+
|
||
|
static const struct option_blacklist_info telit_le920_blacklist = {
|
||
|
.sendsetup = BIT(0),
|
||
|
.reserved = BIT(1) | BIT(5),
|
||
|
@@ -1138,6 +1145,8 @@ static const struct usb_device_id option_ids[] = {
|
||
|
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
|
||
|
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
|
||
|
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
|
||
|
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
|
||
|
+ .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
|
||
|
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
|
||
|
.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
|
||
|
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
|
||
|
@@ -1614,6 +1623,7 @@ static const struct usb_device_id option_ids[] = {
|
||
|
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
|
||
|
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
|
||
|
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
|
||
|
+ { USB_DEVICE_AND_INTERFACE_INFO(HAIER_VENDOR_ID, HAIER_PRODUCT_CE81B, 0xff, 0xff, 0xff) },
|
||
|
/* Pirelli */
|
||
|
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1)},
|
||
|
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2)},
|
||
|
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
|
||
|
index c70109e..d8d26f4 100644
|
||
|
--- a/drivers/usb/storage/transport.c
|
||
|
+++ b/drivers/usb/storage/transport.c
|
||
|
@@ -1120,6 +1120,31 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
|
||
|
*/
|
||
|
if (result == USB_STOR_XFER_LONG)
|
||
|
fake_sense = 1;
|
||
|
+
|
||
|
+ /*
|
||
|
+ * Sometimes a device will mistakenly skip the data phase
|
||
|
+ * and go directly to the status phase without sending a
|
||
|
+ * zero-length packet. If we get a 13-byte response here,
|
||
|
+ * check whether it really is a CSW.
|
||
|
+ */
|
||
|
+ if (result == USB_STOR_XFER_SHORT &&
|
||
|
+ srb->sc_data_direction == DMA_FROM_DEVICE &&
|
||
|
+ transfer_length - scsi_get_resid(srb) ==
|
||
|
+ US_BULK_CS_WRAP_LEN) {
|
||
|
+ struct scatterlist *sg = NULL;
|
||
|
+ unsigned int offset = 0;
|
||
|
+
|
||
|
+ if (usb_stor_access_xfer_buf((unsigned char *) bcs,
|
||
|
+ US_BULK_CS_WRAP_LEN, srb, &sg,
|
||
|
+ &offset, FROM_XFER_BUF) ==
|
||
|
+ US_BULK_CS_WRAP_LEN &&
|
||
|
+ bcs->Signature ==
|
||
|
+ cpu_to_le32(US_BULK_CS_SIGN)) {
|
||
|
+ US_DEBUGP("Device skipped data phase\n");
|
||
|
+ scsi_set_resid(srb, transfer_length);
|
||
|
+ goto skipped_data_phase;
|
||
|
+ }
|
||
|
+ }
|
||
|
}
|
||
|
|
||
|
/* See flow chart on pg 15 of the Bulk Only Transport spec for
|
||
|
@@ -1155,6 +1180,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
|
||
|
if (result != USB_STOR_XFER_GOOD)
|
||
|
return USB_STOR_TRANSPORT_ERROR;
|
||
|
|
||
|
+ skipped_data_phase:
|
||
|
/* check bulk status */
|
||
|
residue = le32_to_cpu(bcs->Residue);
|
||
|
US_DEBUGP("Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n",
|
||
|
diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
|
||
|
index 28b1a83..6cbb206 100644
|
||
|
--- a/drivers/video/console/bitblit.c
|
||
|
+++ b/drivers/video/console/bitblit.c
|
||
|
@@ -205,7 +205,6 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
|
||
|
static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
|
||
|
int bottom_only)
|
||
|
{
|
||
|
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
|
||
|
unsigned int cw = vc->vc_font.width;
|
||
|
unsigned int ch = vc->vc_font.height;
|
||
|
unsigned int rw = info->var.xres - (vc->vc_cols*cw);
|
||
|
@@ -214,7 +213,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
|
||
|
unsigned int bs = info->var.yres - bh;
|
||
|
struct fb_fillrect region;
|
||
|
|
||
|
- region.color = attr_bgcol_ec(bgshift, vc, info);
|
||
|
+ region.color = 0;
|
||
|
region.rop = ROP_COPY;
|
||
|
|
||
|
if (rw && !bottom_only) {
|
||
|
diff --git a/drivers/video/console/fbcon_ccw.c b/drivers/video/console/fbcon_ccw.c
|
||
|
index 41b32ae..5a3cbf6 100644
|
||
|
--- a/drivers/video/console/fbcon_ccw.c
|
||
|
+++ b/drivers/video/console/fbcon_ccw.c
|
||
|
@@ -197,9 +197,8 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
|
||
|
unsigned int bh = info->var.xres - (vc->vc_rows*ch);
|
||
|
unsigned int bs = vc->vc_rows*ch;
|
||
|
struct fb_fillrect region;
|
||
|
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
|
||
|
|
||
|
- region.color = attr_bgcol_ec(bgshift,vc,info);
|
||
|
+ region.color = 0;
|
||
|
region.rop = ROP_COPY;
|
||
|
|
||
|
if (rw && !bottom_only) {
|
||
|
diff --git a/drivers/video/console/fbcon_cw.c b/drivers/video/console/fbcon_cw.c
|
||
|
index 6a73782..7d3fd9b 100644
|
||
|
--- a/drivers/video/console/fbcon_cw.c
|
||
|
+++ b/drivers/video/console/fbcon_cw.c
|
||
|
@@ -181,9 +181,8 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
|
||
|
unsigned int bh = info->var.xres - (vc->vc_rows*ch);
|
||
|
unsigned int rs = info->var.yres - rw;
|
||
|
struct fb_fillrect region;
|
||
|
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
|
||
|
|
||
|
- region.color = attr_bgcol_ec(bgshift,vc,info);
|
||
|
+ region.color = 0;
|
||
|
region.rop = ROP_COPY;
|
||
|
|
||
|
if (rw && !bottom_only) {
|
||
|
diff --git a/drivers/video/console/fbcon_ud.c b/drivers/video/console/fbcon_ud.c
|
||
|
index ff0872c..19e3714 100644
|
||
|
--- a/drivers/video/console/fbcon_ud.c
|
||
|
+++ b/drivers/video/console/fbcon_ud.c
|
||
|
@@ -227,9 +227,8 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
|
||
|
unsigned int rw = info->var.xres - (vc->vc_cols*cw);
|
||
|
unsigned int bh = info->var.yres - (vc->vc_rows*ch);
|
||
|
struct fb_fillrect region;
|
||
|
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
|
||
|
|
||
|
- region.color = attr_bgcol_ec(bgshift,vc,info);
|
||
|
+ region.color = 0;
|
||
|
region.rop = ROP_COPY;
|
||
|
|
||
|
if (rw && !bottom_only) {
|
||
|
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
|
||
|
index 2e03d41..a41f264 100644
|
||
|
--- a/drivers/virtio/virtio_pci.c
|
||
|
+++ b/drivers/virtio/virtio_pci.c
|
||
|
@@ -745,6 +745,7 @@ static int virtio_pci_restore(struct device *dev)
|
||
|
struct pci_dev *pci_dev = to_pci_dev(dev);
|
||
|
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
|
||
|
struct virtio_driver *drv;
|
||
|
+ unsigned status = 0;
|
||
|
int ret;
|
||
|
|
||
|
drv = container_of(vp_dev->vdev.dev.driver,
|
||
|
@@ -755,14 +756,40 @@ static int virtio_pci_restore(struct device *dev)
|
||
|
return ret;
|
||
|
|
||
|
pci_set_master(pci_dev);
|
||
|
+ /* We always start by resetting the device, in case a previous
|
||
|
+ * driver messed it up. */
|
||
|
+ vp_reset(&vp_dev->vdev);
|
||
|
+
|
||
|
+ /* Acknowledge that we've seen the device. */
|
||
|
+ status |= VIRTIO_CONFIG_S_ACKNOWLEDGE;
|
||
|
+ vp_set_status(&vp_dev->vdev, status);
|
||
|
+
|
||
|
+ /* Maybe driver failed before freeze.
|
||
|
+ * Restore the failed status, for debugging. */
|
||
|
+ status |= vp_dev->saved_status & VIRTIO_CONFIG_S_FAILED;
|
||
|
+ vp_set_status(&vp_dev->vdev, status);
|
||
|
+
|
||
|
+ if (!drv)
|
||
|
+ return 0;
|
||
|
+
|
||
|
+ /* We have a driver! */
|
||
|
+ status |= VIRTIO_CONFIG_S_DRIVER;
|
||
|
+ vp_set_status(&vp_dev->vdev, status);
|
||
|
+
|
||
|
vp_finalize_features(&vp_dev->vdev);
|
||
|
|
||
|
- if (drv && drv->restore)
|
||
|
+ if (drv->restore) {
|
||
|
ret = drv->restore(&vp_dev->vdev);
|
||
|
+ if (ret) {
|
||
|
+ status |= VIRTIO_CONFIG_S_FAILED;
|
||
|
+ vp_set_status(&vp_dev->vdev, status);
|
||
|
+ return ret;
|
||
|
+ }
|
||
|
+ }
|
||
|
|
||
|
/* Finally, tell the device we're all set */
|
||
|
- if (!ret)
|
||
|
- vp_set_status(&vp_dev->vdev, vp_dev->saved_status);
|
||
|
+ status |= VIRTIO_CONFIG_S_DRIVER_OK;
|
||
|
+ vp_set_status(&vp_dev->vdev, status);
|
||
|
|
||
|
return ret;
|
||
|
}
|
||
|
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
|
||
|
index 5d158d3..6eab2dd 100644
|
||
|
--- a/fs/btrfs/file-item.c
|
||
|
+++ b/fs/btrfs/file-item.c
|
||
|
@@ -393,7 +393,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
|
||
|
ret = 0;
|
||
|
fail:
|
||
|
while (ret < 0 && !list_empty(&tmplist)) {
|
||
|
- sums = list_entry(&tmplist, struct btrfs_ordered_sum, list);
|
||
|
+ sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
|
||
|
list_del(&sums->list);
|
||
|
kfree(sums);
|
||
|
}
|
||
|
diff --git a/fs/buffer.c b/fs/buffer.c
|
||
|
index 4279f0f..fb6ad35 100644
|
||
|
--- a/fs/buffer.c
|
||
|
+++ b/fs/buffer.c
|
||
|
@@ -2019,6 +2019,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
|
||
|
struct page *page, void *fsdata)
|
||
|
{
|
||
|
struct inode *inode = mapping->host;
|
||
|
+ loff_t old_size = inode->i_size;
|
||
|
int i_size_changed = 0;
|
||
|
|
||
|
copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
|
||
|
@@ -2038,6 +2039,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
|
||
|
unlock_page(page);
|
||
|
page_cache_release(page);
|
||
|
|
||
|
+ if (old_size < pos)
|
||
|
+ pagecache_isize_extended(inode, old_size, pos);
|
||
|
/*
|
||
|
* Don't mark the inode dirty under page lock. First, it unnecessarily
|
||
|
* makes the holding time of page lock longer. Second, it forces lock
|
||
|
@@ -2258,6 +2261,11 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
|
||
|
err = 0;
|
||
|
|
||
|
balance_dirty_pages_ratelimited(mapping);
|
||
|
+
|
||
|
+ if (unlikely(fatal_signal_pending(current))) {
|
||
|
+ err = -EINTR;
|
||
|
+ goto out;
|
||
|
+ }
|
||
|
}
|
||
|
|
||
|
/* page covers the boundary, find the boundary offset */
|
||
|
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
|
||
|
index 11030b2..b5b9b40 100644
|
||
|
--- a/fs/ecryptfs/inode.c
|
||
|
+++ b/fs/ecryptfs/inode.c
|
||
|
@@ -1093,7 +1093,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
|
||
|
}
|
||
|
|
||
|
rc = vfs_setxattr(lower_dentry, name, value, size, flags);
|
||
|
- if (!rc)
|
||
|
+ if (!rc && dentry->d_inode)
|
||
|
fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode);
|
||
|
out:
|
||
|
return rc;
|
||
|
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
|
||
|
index ef4c812..564f942 100644
|
||
|
--- a/fs/ext3/super.c
|
||
|
+++ b/fs/ext3/super.c
|
||
|
@@ -1292,13 +1292,6 @@ static int parse_options (char *options, struct super_block *sb,
|
||
|
"not specified.");
|
||
|
return 0;
|
||
|
}
|
||
|
- } else {
|
||
|
- if (sbi->s_jquota_fmt) {
|
||
|
- ext3_msg(sb, KERN_ERR, "error: journaled quota format "
|
||
|
- "specified with no journaling "
|
||
|
- "enabled.");
|
||
|
- return 0;
|
||
|
- }
|
||
|
}
|
||
|
#endif
|
||
|
return 1;
|
||
|
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
|
||
|
index f1551f0..78cbec5 100644
|
||
|
--- a/fs/ext4/ext4.h
|
||
|
+++ b/fs/ext4/ext4.h
|
||
|
@@ -1898,6 +1898,7 @@ int ext4_get_block(struct inode *inode, sector_t iblock,
|
||
|
struct buffer_head *bh_result, int create);
|
||
|
|
||
|
extern struct inode *ext4_iget(struct super_block *, unsigned long);
|
||
|
+extern struct inode *ext4_iget_normal(struct super_block *, unsigned long);
|
||
|
extern int ext4_write_inode(struct inode *, struct writeback_control *);
|
||
|
extern int ext4_setattr(struct dentry *, struct iattr *);
|
||
|
extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
|
||
|
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
|
||
|
index 3ad47ba..78ff109 100644
|
||
|
--- a/fs/ext4/ialloc.c
|
||
|
+++ b/fs/ext4/ialloc.c
|
||
|
@@ -725,6 +725,10 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode,
|
||
|
struct buffer_head *block_bitmap_bh;
|
||
|
|
||
|
block_bitmap_bh = ext4_read_block_bitmap(sb, group);
|
||
|
+ if (!block_bitmap_bh) {
|
||
|
+ err = -EIO;
|
||
|
+ goto out;
|
||
|
+ }
|
||
|
BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
|
||
|
err = ext4_journal_get_write_access(handle, block_bitmap_bh);
|
||
|
if (err) {
|
||
|
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
|
||
|
index bb5fd6b..c68d3e1 100644
|
||
|
--- a/fs/ext4/inode.c
|
||
|
+++ b/fs/ext4/inode.c
|
||
|
@@ -157,16 +157,14 @@ void ext4_evict_inode(struct inode *inode)
|
||
|
goto no_delete;
|
||
|
}
|
||
|
|
||
|
- if (!is_bad_inode(inode))
|
||
|
- dquot_initialize(inode);
|
||
|
+ if (is_bad_inode(inode))
|
||
|
+ goto no_delete;
|
||
|
+ dquot_initialize(inode);
|
||
|
|
||
|
if (ext4_should_order_data(inode))
|
||
|
ext4_begin_ordered_truncate(inode, 0);
|
||
|
truncate_inode_pages(&inode->i_data, 0);
|
||
|
|
||
|
- if (is_bad_inode(inode))
|
||
|
- goto no_delete;
|
||
|
-
|
||
|
handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
|
||
|
if (IS_ERR(handle)) {
|
||
|
ext4_std_error(inode->i_sb, PTR_ERR(handle));
|
||
|
@@ -2410,6 +2408,20 @@ static int ext4_nonda_switch(struct super_block *sb)
|
||
|
return 0;
|
||
|
}
|
||
|
|
||
|
+/* We always reserve for an inode update; the superblock could be there too */
|
||
|
+static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
|
||
|
+{
|
||
|
+ if (likely(EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
|
||
|
+ EXT4_FEATURE_RO_COMPAT_LARGE_FILE)))
|
||
|
+ return 1;
|
||
|
+
|
||
|
+ if (pos + len <= 0x7fffffffULL)
|
||
|
+ return 1;
|
||
|
+
|
||
|
+ /* We might need to update the superblock to set LARGE_FILE */
|
||
|
+ return 2;
|
||
|
+}
|
||
|
+
|
||
|
static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
|
||
|
loff_t pos, unsigned len, unsigned flags,
|
||
|
struct page **pagep, void **fsdata)
|
||
|
@@ -2436,7 +2448,8 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
|
||
|
* to journalling the i_disksize update if writes to the end
|
||
|
* of file which has an already mapped buffer.
|
||
|
*/
|
||
|
- handle = ext4_journal_start(inode, 1);
|
||
|
+ handle = ext4_journal_start(inode,
|
||
|
+ ext4_da_write_credits(inode, pos, len));
|
||
|
if (IS_ERR(handle)) {
|
||
|
ret = PTR_ERR(handle);
|
||
|
goto out;
|
||
|
@@ -3843,6 +3856,13 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
|
||
|
return ERR_PTR(ret);
|
||
|
}
|
||
|
|
||
|
+struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
|
||
|
+{
|
||
|
+ if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
|
||
|
+ return ERR_PTR(-EIO);
|
||
|
+ return ext4_iget(sb, ino);
|
||
|
+}
|
||
|
+
|
||
|
static int ext4_inode_blocks_set(handle_t *handle,
|
||
|
struct ext4_inode *raw_inode,
|
||
|
struct ext4_inode_info *ei)
|
||
|
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
|
||
|
index 1a3f19a..0103f53 100644
|
||
|
--- a/fs/ext4/namei.c
|
||
|
+++ b/fs/ext4/namei.c
|
||
|
@@ -1051,7 +1051,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
|
||
|
dentry->d_name.name);
|
||
|
return ERR_PTR(-EIO);
|
||
|
}
|
||
|
- inode = ext4_iget(dir->i_sb, ino);
|
||
|
+ inode = ext4_iget_normal(dir->i_sb, ino);
|
||
|
if (inode == ERR_PTR(-ESTALE)) {
|
||
|
EXT4_ERROR_INODE(dir,
|
||
|
"deleted inode referenced: %u",
|
||
|
@@ -1087,7 +1087,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
|
||
|
return ERR_PTR(-EIO);
|
||
|
}
|
||
|
|
||
|
- return d_obtain_alias(ext4_iget(child->d_inode->i_sb, ino));
|
||
|
+ return d_obtain_alias(ext4_iget_normal(child->d_inode->i_sb, ino));
|
||
|
}
|
||
|
|
||
|
#define S_SHIFT 12
|
||
|
@@ -1421,31 +1421,38 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
|
||
|
hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
|
||
|
hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
|
||
|
ext4fs_dirhash(name, namelen, &hinfo);
|
||
|
+ memset(frames, 0, sizeof(frames));
|
||
|
frame = frames;
|
||
|
frame->entries = entries;
|
||
|
frame->at = entries;
|
||
|
frame->bh = bh;
|
||
|
bh = bh2;
|
||
|
|
||
|
- ext4_handle_dirty_metadata(handle, dir, frame->bh);
|
||
|
- ext4_handle_dirty_metadata(handle, dir, bh);
|
||
|
+ retval = ext4_handle_dirty_metadata(handle, dir, frame->bh);
|
||
|
+ if (retval)
|
||
|
+ goto out_frames;
|
||
|
+ retval = ext4_handle_dirty_metadata(handle, dir, bh);
|
||
|
+ if (retval)
|
||
|
+ goto out_frames;
|
||
|
|
||
|
de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
|
||
|
if (!de) {
|
||
|
- /*
|
||
|
- * Even if the block split failed, we have to properly write
|
||
|
- * out all the changes we did so far. Otherwise we can end up
|
||
|
- * with corrupted filesystem.
|
||
|
- */
|
||
|
- ext4_mark_inode_dirty(handle, dir);
|
||
|
- dx_release(frames);
|
||
|
- return retval;
|
||
|
+ goto out_frames;
|
||
|
}
|
||
|
dx_release(frames);
|
||
|
|
||
|
retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
|
||
|
brelse(bh);
|
||
|
return retval;
|
||
|
+out_frames:
|
||
|
+ /*
|
||
|
+ * Even if the block split failed, we have to properly write
|
||
|
+ * out all the changes we did so far. Otherwise we can end up
|
||
|
+ * with corrupted filesystem.
|
||
|
+ */
|
||
|
+ ext4_mark_inode_dirty(handle, dir);
|
||
|
+ dx_release(frames);
|
||
|
+ return retval;
|
||
|
}
|
||
|
|
||
|
/*
|
||
|
@@ -1998,7 +2005,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
|
||
|
struct ext4_iloc iloc;
|
||
|
int err = 0, rc;
|
||
|
|
||
|
- if (!ext4_handle_valid(handle))
|
||
|
+ if (!ext4_handle_valid(handle) || is_bad_inode(inode))
|
||
|
return 0;
|
||
|
|
||
|
mutex_lock(&EXT4_SB(sb)->s_orphan_lock);
|
||
|
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
|
||
|
index 28cc353..511ce69 100644
|
||
|
--- a/fs/ext4/resize.c
|
||
|
+++ b/fs/ext4/resize.c
|
||
|
@@ -991,7 +991,7 @@ static void update_backups(struct super_block *sb,
|
||
|
(err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
|
||
|
break;
|
||
|
|
||
|
- bh = sb_getblk(sb, group * bpg + blk_off);
|
||
|
+ bh = sb_getblk(sb, ((ext4_fsblk_t)group) * bpg + blk_off);
|
||
|
if (!bh) {
|
||
|
err = -ENOMEM;
|
||
|
break;
|
||
|
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
|
||
|
index 230204c..c222d25 100644
|
||
|
--- a/fs/ext4/super.c
|
||
|
+++ b/fs/ext4/super.c
|
||
|
@@ -1042,7 +1042,7 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb,
|
||
|
* Currently we don't know the generation for parent directory, so
|
||
|
* a generation of 0 means "accept any"
|
||
|
*/
|
||
|
- inode = ext4_iget(sb, ino);
|
||
|
+ inode = ext4_iget_normal(sb, ino);
|
||
|
if (IS_ERR(inode))
|
||
|
return ERR_CAST(inode);
|
||
|
if (generation && inode->i_generation != generation) {
|
||
|
@@ -1674,13 +1674,6 @@ static int parse_options(char *options, struct super_block *sb,
|
||
|
"not specified");
|
||
|
return 0;
|
||
|
}
|
||
|
- } else {
|
||
|
- if (sbi->s_jquota_fmt) {
|
||
|
- ext4_msg(sb, KERN_ERR, "journaled quota format "
|
||
|
- "specified with no journaling "
|
||
|
- "enabled");
|
||
|
- return 0;
|
||
|
- }
|
||
|
}
|
||
|
#endif
|
||
|
if (test_opt(sb, DIOREAD_NOLOCK)) {
|
||
|
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 5743e9d..96455e6 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -144,14 +144,28 @@ ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
 }
 
 static int
-ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end)
+ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
+		       void *value_start)
 {
-	while (!IS_LAST_ENTRY(entry)) {
-		struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(entry);
+	struct ext4_xattr_entry *e = entry;
+
+	while (!IS_LAST_ENTRY(e)) {
+		struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
 		if ((void *)next >= end)
 			return -EIO;
-		entry = next;
+		e = next;
 	}
+
+	while (!IS_LAST_ENTRY(entry)) {
+		if (entry->e_value_size != 0 &&
+		    (value_start + le16_to_cpu(entry->e_value_offs) <
+		     (void *)e + sizeof(__u32) ||
+		     value_start + le16_to_cpu(entry->e_value_offs) +
+		     le32_to_cpu(entry->e_value_size) > end))
+			return -EIO;
+		entry = EXT4_XATTR_NEXT(entry);
+	}
+
 	return 0;
 }
 
@@ -161,7 +175,8 @@ ext4_xattr_check_block(struct buffer_head *bh)
 	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
 	    BHDR(bh)->h_blocks != cpu_to_le32(1))
 		return -EIO;
-	return ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
+	return ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
+				      bh->b_data);
 }
 
 static inline int
@@ -274,7 +289,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
 	header = IHDR(inode, raw_inode);
 	entry = IFIRST(header);
 	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
-	error = ext4_xattr_check_names(entry, end);
+	error = ext4_xattr_check_names(entry, end, entry);
 	if (error)
 		goto cleanup;
 	error = ext4_xattr_find_entry(&entry, name_index, name,
@@ -402,7 +417,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 	raw_inode = ext4_raw_inode(&iloc);
 	header = IHDR(inode, raw_inode);
 	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
-	error = ext4_xattr_check_names(IFIRST(header), end);
+	error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
 	if (error)
 		goto cleanup;
 	error = ext4_xattr_list_entries(dentry, IFIRST(header),
@@ -914,7 +929,8 @@ ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
 	is->s.here = is->s.first;
 	is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
 	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
-		error = ext4_xattr_check_names(IFIRST(header), is->s.end);
+		error = ext4_xattr_check_names(IFIRST(header), is->s.end,
+					       IFIRST(header));
 		if (error)
 			return error;
 		/* Find the named attribute. */
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 0f1b951..0dd6a2a 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -153,14 +153,16 @@ static int get_task_ioprio(struct task_struct *p)
 
 int ioprio_best(unsigned short aprio, unsigned short bprio)
 {
-	unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
-	unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
+	unsigned short aclass;
+	unsigned short bclass;
 
-	if (aclass == IOPRIO_CLASS_NONE)
-		aclass = IOPRIO_CLASS_BE;
-	if (bclass == IOPRIO_CLASS_NONE)
-		bclass = IOPRIO_CLASS_BE;
+	if (!ioprio_valid(aprio))
+		aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+	if (!ioprio_valid(bprio))
+		bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
 
+	aclass = IOPRIO_PRIO_CLASS(aprio);
+	bclass = IOPRIO_PRIO_CLASS(bprio);
 	if (aclass == bclass)
 		return min(aprio, bprio);
 	if (aclass > bclass)
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 606a8dd..0a68e0b 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -114,6 +114,12 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
 
 	msg.rpc_proc = &clnt->cl_procinfo[proc];
 	status = rpc_call_sync(clnt, &msg, 0);
+	if (status == -ECONNREFUSED) {
+		dprintk("lockd: NSM upcall RPC failed, status=%d, forcing rebind\n",
+				status);
+		rpc_force_rebind(clnt);
+		status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
+	}
 	if (status < 0)
 		dprintk("lockd: NSM upcall RPC failed, status=%d\n",
 				status);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 2faed2f..6064250 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -512,7 +512,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
 {
 	struct inode *inode = dentry->d_inode;
 	int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
-	int err;
+	int err = 0;
 
 	/* Flush out writes to the server in order to update c/mtime. */
 	if (S_ISREG(inode->i_mode)) {
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
|
||
|
index 720fa0b..58c9810 100644
|
||
|
--- a/fs/nfs/nfs4proc.c
|
||
|
+++ b/fs/nfs/nfs4proc.c
|
||
|
@@ -1740,6 +1740,28 @@ static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *sta
|
||
|
return ret;
|
||
|
}
|
||
|
|
||
|
+static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
|
||
|
+{
|
||
|
+ nfs_remove_bad_delegation(state->inode);
|
||
|
+ write_seqlock(&state->seqlock);
|
||
|
+ nfs4_stateid_copy(&state->stateid, &state->open_stateid);
|
||
|
+ write_sequnlock(&state->seqlock);
|
||
|
+ clear_bit(NFS_DELEGATED_STATE, &state->flags);
|
||
|
+}
|
||
|
+
|
||
|
+static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
|
||
|
+{
|
||
|
+ if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
|
||
|
+ nfs_finish_clear_delegation_stateid(state);
|
||
|
+}
|
||
|
+
|
||
|
+static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
|
||
|
+{
|
||
|
+ /* NFSv4.0 doesn't allow for delegation recovery on open expire */
|
||
|
+ nfs40_clear_delegation_stateid(state);
|
||
|
+ return nfs4_open_expired(sp, state);
|
||
|
+}
|
||
|
+
|
||
|
#if defined(CONFIG_NFS_V4_1)
|
||
|
static int nfs41_check_expired_stateid(struct nfs4_state *state, nfs4_stateid *stateid, unsigned int flags)
|
||
|
{
|
||
|
@@ -5796,7 +5818,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr
|
||
|
int ret = 0;
|
||
|
|
||
|
if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
|
||
|
- return 0;
|
||
|
+ return -EAGAIN;
|
||
|
task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_ops);
|
||
|
if (IS_ERR(task))
|
||
|
ret = PTR_ERR(task);
|
||
|
@@ -6547,7 +6569,7 @@ static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
|
||
|
static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
|
||
|
.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
|
||
|
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
|
||
|
- .recover_open = nfs4_open_expired,
|
||
|
+ .recover_open = nfs40_open_expired,
|
||
|
.recover_lock = nfs4_lock_expired,
|
||
|
.establish_clid = nfs4_init_clientid,
|
||
|
.get_clid_cred = nfs4_get_setclientid_cred,
|
||
|
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
|
||
|
index dc484c0..78071cf9 100644
|
||
|
--- a/fs/nfs/nfs4renewd.c
|
||
|
+++ b/fs/nfs/nfs4renewd.c
|
||
|
@@ -88,10 +88,18 @@ nfs4_renew_state(struct work_struct *work)
|
||
|
}
|
||
|
nfs_expire_all_delegations(clp);
|
||
|
} else {
|
||
|
+ int ret;
|
||
|
+
|
||
|
/* Queue an asynchronous RENEW. */
|
||
|
- ops->sched_state_renewal(clp, cred, renew_flags);
|
||
|
+ ret = ops->sched_state_renewal(clp, cred, renew_flags);
|
||
|
put_rpccred(cred);
|
||
|
- goto out_exp;
|
||
|
+ switch (ret) {
|
||
|
+ default:
|
||
|
+ goto out_exp;
|
||
|
+ case -EAGAIN:
|
||
|
+ case -ENOMEM:
|
||
|
+ break;
|
||
|
+ }
|
||
|
}
|
||
|
} else {
|
||
|
dprintk("%s: failed to call renewd. Reason: lease not expired \n",
|
||
|
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
|
||
|
index 461816b..c4600b5 100644
|
||
|
--- a/fs/nfs/nfs4state.c
|
||
|
+++ b/fs/nfs/nfs4state.c
|
||
|
@@ -1515,7 +1515,8 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
|
||
|
if (status < 0) {
|
||
|
set_bit(ops->owner_flag_bit, &sp->so_flags);
|
||
|
nfs4_put_state_owner(sp);
|
||
|
- return nfs4_recovery_handle_error(clp, status);
|
||
|
+ status = nfs4_recovery_handle_error(clp, status);
|
||
|
+ return (status != 0) ? status : -EAGAIN;
|
||
|
}
|
||
|
|
||
|
nfs4_put_state_owner(sp);
|
||
|
@@ -1524,7 +1525,7 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
|
||
|
spin_unlock(&clp->cl_lock);
|
||
|
}
|
||
|
rcu_read_unlock();
|
||
|
- return status;
|
||
|
+ return 0;
|
||
|
}
|
||
|
|
||
|
static int nfs4_check_lease(struct nfs_client *clp)
|
||
|
@@ -1796,23 +1797,18 @@ static void nfs4_state_manager(struct nfs_client *clp)
|
||
|
if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
|
||
|
status = nfs4_do_reclaim(clp,
|
||
|
clp->cl_mvops->reboot_recovery_ops);
|
||
|
- if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
|
||
|
- test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
|
||
|
- continue;
|
||
|
- nfs4_state_end_reclaim_reboot(clp);
|
||
|
- if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
|
||
|
+ if (status == -EAGAIN)
|
||
|
continue;
|
||
|
if (status < 0)
|
||
|
goto out_error;
|
||
|
+ nfs4_state_end_reclaim_reboot(clp);
|
||
|
}
|
||
|
|
||
|
/* Now recover expired state... */
|
||
|
if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
|
||
|
status = nfs4_do_reclaim(clp,
|
||
|
clp->cl_mvops->nograce_recovery_ops);
|
||
|
- if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
|
||
|
- test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
|
||
|
- test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
|
||
|
+ if (status == -EAGAIN)
|
||
|
continue;
|
||
|
if (status < 0)
|
||
|
goto out_error;
|
||
|
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
|
||
|
index 22beaff..b2ce878 100644
|
||
|
--- a/fs/nfsd/nfs4proc.c
|
||
|
+++ b/fs/nfsd/nfs4proc.c
|
||
|
@@ -1132,7 +1132,8 @@ static bool need_wrongsec_check(struct svc_rqst *rqstp)
|
||
|
*/
|
||
|
if (argp->opcnt == resp->opcnt)
|
||
|
return false;
|
||
|
-
|
||
|
+ if (next->opnum == OP_ILLEGAL)
|
||
|
+ return false;
|
||
|
nextd = OPDESC(next);
|
||
|
/*
|
||
|
* Rest of 2.6.3.1.1: certain operations will return WRONGSEC
|
||
|
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 48bc91d..97d91f0 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -67,7 +67,7 @@ static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
 
 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
 
-	client_fd = get_unused_fd();
+	client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
 	if (client_fd < 0)
 		return client_fd;
 
diff --git a/fs/super.c b/fs/super.c
index fc3c0b2..3064524 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -69,6 +69,8 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
 
 	total_objects = sb->s_nr_dentry_unused +
 			sb->s_nr_inodes_unused + fs_objects + 1;
+	if (!total_objects)
+		total_objects = 1;
 
 	if (sc->nr_to_scan) {
 		int dentries;
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
|
||
|
index fb3b5c8..b2ca12f 100644
|
||
|
--- a/fs/ubifs/commit.c
|
||
|
+++ b/fs/ubifs/commit.c
|
||
|
@@ -166,15 +166,10 @@ static int do_commit(struct ubifs_info *c)
|
||
|
err = ubifs_orphan_end_commit(c);
|
||
|
if (err)
|
||
|
goto out;
|
||
|
- old_ltail_lnum = c->ltail_lnum;
|
||
|
- err = ubifs_log_end_commit(c, new_ltail_lnum);
|
||
|
- if (err)
|
||
|
- goto out;
|
||
|
err = dbg_check_old_index(c, &zroot);
|
||
|
if (err)
|
||
|
goto out;
|
||
|
|
||
|
- mutex_lock(&c->mst_mutex);
|
||
|
c->mst_node->cmt_no = cpu_to_le64(c->cmt_no);
|
||
|
c->mst_node->log_lnum = cpu_to_le32(new_ltail_lnum);
|
||
|
c->mst_node->root_lnum = cpu_to_le32(zroot.lnum);
|
||
|
@@ -203,8 +198,9 @@ static int do_commit(struct ubifs_info *c)
|
||
|
c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
|
||
|
else
|
||
|
c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS);
|
||
|
- err = ubifs_write_master(c);
|
||
|
- mutex_unlock(&c->mst_mutex);
|
||
|
+
|
||
|
+ old_ltail_lnum = c->ltail_lnum;
|
||
|
+ err = ubifs_log_end_commit(c, new_ltail_lnum);
|
||
|
if (err)
|
||
|
goto out;
|
||
|
|
||
|
diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c
|
||
|
index f9fd068d..843beda 100644
|
||
|
--- a/fs/ubifs/log.c
|
||
|
+++ b/fs/ubifs/log.c
|
||
|
@@ -110,10 +110,14 @@ static inline long long empty_log_bytes(const struct ubifs_info *c)
|
||
|
h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
|
||
|
t = (long long)c->ltail_lnum * c->leb_size;
|
||
|
|
||
|
- if (h >= t)
|
||
|
+ if (h > t)
|
||
|
return c->log_bytes - h + t;
|
||
|
- else
|
||
|
+ else if (h != t)
|
||
|
return t - h;
|
||
|
+ else if (c->lhead_lnum != c->ltail_lnum)
|
||
|
+ return 0;
|
||
|
+ else
|
||
|
+ return c->log_bytes;
|
||
|
}
|
||
|
|
||
|
/**
|
||
|
@@ -453,9 +457,9 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
|
||
|
* @ltail_lnum: new log tail LEB number
|
||
|
*
|
||
|
* This function is called on when the commit operation was finished. It
|
||
|
- * moves log tail to new position and unmaps LEBs which contain obsolete data.
|
||
|
- * Returns zero in case of success and a negative error code in case of
|
||
|
- * failure.
|
||
|
+ * moves log tail to new position and updates the master node so that it stores
|
||
|
+ * the new log tail LEB number. Returns zero in case of success and a negative
|
||
|
+ * error code in case of failure.
|
||
|
*/
|
||
|
int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
|
||
|
{
|
||
|
@@ -483,7 +487,12 @@ int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
|
||
|
spin_unlock(&c->buds_lock);
|
||
|
|
||
|
err = dbg_check_bud_bytes(c);
|
||
|
+ if (err)
|
||
|
+ goto out;
|
||
|
|
||
|
+ err = ubifs_write_master(c);
|
||
|
+
|
||
|
+out:
|
||
|
mutex_unlock(&c->log_mutex);
|
||
|
return err;
|
||
|
}
|
||
|
diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c
|
||
|
index 278c238..bb9f481 100644
|
||
|
--- a/fs/ubifs/master.c
|
||
|
+++ b/fs/ubifs/master.c
|
||
|
@@ -352,10 +352,9 @@ int ubifs_read_master(struct ubifs_info *c)
|
||
|
* ubifs_write_master - write master node.
|
||
|
* @c: UBIFS file-system description object
|
||
|
*
|
||
|
- * This function writes the master node. The caller has to take the
|
||
|
- * @c->mst_mutex lock before calling this function. Returns zero in case of
|
||
|
- * success and a negative error code in case of failure. The master node is
|
||
|
- * written twice to enable recovery.
|
||
|
+ * This function writes the master node. Returns zero in case of success and a
|
||
|
+ * negative error code in case of failure. The master node is written twice to
|
||
|
+ * enable recovery.
|
||
|
*/
|
||
|
int ubifs_write_master(struct ubifs_info *c)
|
||
|
{
|
||
|
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
|
||
|
index d867bd9..129bb48 100644
|
||
|
--- a/fs/ubifs/super.c
|
||
|
+++ b/fs/ubifs/super.c
|
||
|
@@ -1984,7 +1984,6 @@ static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi)
|
||
|
mutex_init(&c->lp_mutex);
|
||
|
mutex_init(&c->tnc_mutex);
|
||
|
mutex_init(&c->log_mutex);
|
||
|
- mutex_init(&c->mst_mutex);
|
||
|
mutex_init(&c->umount_mutex);
|
||
|
mutex_init(&c->bu_mutex);
|
||
|
mutex_init(&c->write_reserve_mutex);
|
||
|
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
|
||
|
index 3f96261..cd62067 100644
|
||
|
--- a/fs/ubifs/ubifs.h
|
||
|
+++ b/fs/ubifs/ubifs.h
|
||
|
@@ -1041,7 +1041,6 @@ struct ubifs_debug_info;
|
||
|
*
|
||
|
* @mst_node: master node
|
||
|
* @mst_offs: offset of valid master node
|
||
|
- * @mst_mutex: protects the master node area, @mst_node, and @mst_offs
|
||
|
*
|
||
|
* @max_bu_buf_len: maximum bulk-read buffer length
|
||
|
* @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu
|
||
|
@@ -1281,7 +1280,6 @@ struct ubifs_info {
|
||
|
|
||
|
struct ubifs_mst_node *mst_node;
|
||
|
int mst_offs;
|
||
|
- struct mutex mst_mutex;
|
||
|
|
||
|
int max_bu_buf_len;
|
||
|
struct mutex bu_mutex;
|
||
|
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 757f980..53baa0d 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -56,7 +56,6 @@
 	{0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
-	{0x1002, 0x4C6E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
 	{0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
 	{0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
|
||
|
index 651a0fc..6619eb2 100644
|
||
|
--- a/include/linux/blkdev.h
|
||
|
+++ b/include/linux/blkdev.h
|
||
|
@@ -1105,10 +1105,9 @@ static inline int queue_alignment_offset(struct request_queue *q)
|
||
|
static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
|
||
|
{
|
||
|
unsigned int granularity = max(lim->physical_block_size, lim->io_min);
|
||
|
- unsigned int alignment = (sector << 9) & (granularity - 1);
|
||
|
+ unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
|
||
|
|
||
|
- return (granularity + lim->alignment_offset - alignment)
|
||
|
- & (granularity - 1);
|
||
|
+ return (granularity + lim->alignment_offset - alignment) % granularity;
|
||
|
}
|
||
|
|
||
|
static inline int bdev_alignment_offset(struct block_device *bdev)
|
||
|
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
|
||
|
index 7970e31..ea9cffe 100644
|
||
|
--- a/include/linux/compiler-gcc.h
|
||
|
+++ b/include/linux/compiler-gcc.h
|
||
|
@@ -37,6 +37,9 @@
|
||
|
__asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
|
||
|
(typeof(ptr)) (__ptr + (off)); })
|
||
|
|
||
|
+/* Make the optimizer believe the variable can be manipulated arbitrarily. */
|
||
|
+#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
|
||
|
+
|
||
|
#ifdef __CHECKER__
|
||
|
#define __must_be_array(arr) 0
|
||
|
#else
|
||
|
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
|
||
|
index cba9593..1a97cac 100644
|
||
|
--- a/include/linux/compiler-intel.h
|
||
|
+++ b/include/linux/compiler-intel.h
|
||
|
@@ -15,6 +15,7 @@
|
||
|
*/
|
||
|
#undef barrier
|
||
|
#undef RELOC_HIDE
|
||
|
+#undef OPTIMIZER_HIDE_VAR
|
||
|
|
||
|
#define barrier() __memory_barrier()
|
||
|
|
||
|
@@ -23,6 +24,12 @@
|
||
|
__ptr = (unsigned long) (ptr); \
|
||
|
(typeof(ptr)) (__ptr + (off)); })
|
||
|
|
||
|
+/* This should act as an optimization barrier on var.
|
||
|
+ * Given that this compiler does not have inline assembly, a compiler barrier
|
||
|
+ * is the best we can do.
|
||
|
+ */
|
||
|
+#define OPTIMIZER_HIDE_VAR(var) barrier()
|
||
|
+
|
||
|
/* Intel ECC compiler doesn't support __builtin_types_compatible_p() */
|
||
|
#define __must_be_array(a) 0
|
||
|
|
||
|
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
|
||
|
index 6b394f0..eeb3079 100644
|
||
|
--- a/include/linux/khugepaged.h
|
||
|
+++ b/include/linux/khugepaged.h
|
||
|
@@ -6,7 +6,8 @@
|
||
|
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
||
|
extern int __khugepaged_enter(struct mm_struct *mm);
|
||
|
extern void __khugepaged_exit(struct mm_struct *mm);
|
||
|
-extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma);
|
||
|
+extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
|
||
|
+ unsigned long vm_flags);
|
||
|
|
||
|
#define khugepaged_enabled() \
|
||
|
(transparent_hugepage_flags & \
|
||
|
@@ -35,13 +36,13 @@ static inline void khugepaged_exit(struct mm_struct *mm)
|
||
|
__khugepaged_exit(mm);
|
||
|
}
|
||
|
|
||
|
-static inline int khugepaged_enter(struct vm_area_struct *vma)
|
||
|
+static inline int khugepaged_enter(struct vm_area_struct *vma,
|
||
|
+ unsigned long vm_flags)
|
||
|
{
|
||
|
if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
|
||
|
if ((khugepaged_always() ||
|
||
|
- (khugepaged_req_madv() &&
|
||
|
- vma->vm_flags & VM_HUGEPAGE)) &&
|
||
|
- !(vma->vm_flags & VM_NOHUGEPAGE))
|
||
|
+ (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
|
||
|
+ !(vm_flags & VM_NOHUGEPAGE))
|
||
|
if (__khugepaged_enter(vma->vm_mm))
|
||
|
return -ENOMEM;
|
||
|
return 0;
|
||
|
@@ -54,11 +55,13 @@ static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
|
||
|
static inline void khugepaged_exit(struct mm_struct *mm)
|
||
|
{
|
||
|
}
|
||
|
-static inline int khugepaged_enter(struct vm_area_struct *vma)
|
||
|
+static inline int khugepaged_enter(struct vm_area_struct *vma,
|
||
|
+ unsigned long vm_flags)
|
||
|
{
|
||
|
return 0;
|
||
|
}
|
||
|
-static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
|
||
|
+static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
|
||
|
+ unsigned long vm_flags)
|
||
|
{
|
||
|
return 0;
|
||
|
}
|
||
|
diff --git a/include/linux/mm.h b/include/linux/mm.h
|
||
|
index 655a58ce..dcff232 100644
|
||
|
--- a/include/linux/mm.h
|
||
|
+++ b/include/linux/mm.h
|
||
|
@@ -984,6 +984,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
|
||
|
|
||
|
extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
|
||
|
extern void truncate_setsize(struct inode *inode, loff_t newsize);
|
||
|
+void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
|
||
|
extern int vmtruncate(struct inode *inode, loff_t offset);
|
||
|
extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
|
||
|
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
|
||
|
diff --git a/include/linux/of.h b/include/linux/of.h
|
||
|
index ba8f1f4..c648590 100644
|
||
|
--- a/include/linux/of.h
|
||
|
+++ b/include/linux/of.h
|
||
|
@@ -229,14 +229,12 @@ extern int of_property_read_u64(const struct device_node *np,
|
||
|
extern int of_property_read_string(struct device_node *np,
|
||
|
const char *propname,
|
||
|
const char **out_string);
|
||
|
-extern int of_property_read_string_index(struct device_node *np,
|
||
|
- const char *propname,
|
||
|
- int index, const char **output);
|
||
|
extern int of_property_match_string(struct device_node *np,
|
||
|
const char *propname,
|
||
|
const char *string);
|
||
|
-extern int of_property_count_strings(struct device_node *np,
|
||
|
- const char *propname);
|
||
|
+extern int of_property_read_string_helper(struct device_node *np,
|
||
|
+ const char *propname,
|
||
|
+ const char **out_strs, size_t sz, int index);
|
||
|
extern int of_device_is_compatible(const struct device_node *device,
|
||
|
const char *);
|
||
|
extern int of_device_is_available(const struct device_node *device);
|
||
|
@@ -339,15 +337,9 @@ static inline int of_property_read_string(struct device_node *np,
|
||
|
return -ENOSYS;
|
||
|
}
|
||
|
|
||
|
-static inline int of_property_read_string_index(struct device_node *np,
|
||
|
- const char *propname, int index,
|
||
|
- const char **out_string)
|
||
|
-{
|
||
|
- return -ENOSYS;
|
||
|
-}
|
||
|
-
|
||
|
-static inline int of_property_count_strings(struct device_node *np,
|
||
|
- const char *propname)
|
||
|
+static inline int of_property_read_string_helper(struct device_node *np,
|
||
|
+ const char *propname,
|
||
|
+ const char **out_strs, size_t sz, int index)
|
||
|
{
|
||
|
return -ENOSYS;
|
||
|
}
|
||
|
@@ -387,6 +379,70 @@ static inline int of_machine_is_compatible(const char *compat)
|
||
|
#endif /* CONFIG_OF */
|
||
|
|
||
|
/**
|
||
|
+ * of_property_read_string_array() - Read an array of strings from a multiple
|
||
|
+ * strings property.
|
||
|
+ * @np: device node from which the property value is to be read.
|
||
|
+ * @propname: name of the property to be searched.
|
||
|
+ * @out_strs: output array of string pointers.
|
||
|
+ * @sz: number of array elements to read.
|
||
|
+ *
|
||
|
+ * Search for a property in a device tree node and retrieve a list of
|
||
|
+ * terminated string values (pointer to data, not a copy) in that property.
|
||
|
+ *
|
||
|
+ * If @out_strs is NULL, the number of strings in the property is returned.
|
||
|
+ */
|
||
|
+static inline int of_property_read_string_array(struct device_node *np,
|
||
|
+ const char *propname, const char **out_strs,
|
||
|
+ size_t sz)
|
||
|
+{
|
||
|
+ return of_property_read_string_helper(np, propname, out_strs, sz, 0);
|
||
|
+}
|
||
|
+
|
||
|
+/**
|
||
|
+ * of_property_count_strings() - Find and return the number of strings from a
|
||
|
+ * multiple strings property.
|
||
|
+ * @np: device node from which the property value is to be read.
|
||
|
+ * @propname: name of the property to be searched.
|
||
|
+ *
|
||
|
+ * Search for a property in a device tree node and retrieve the number of null
|
||
|
+ * terminated string contain in it. Returns the number of strings on
|
||
|
+ * success, -EINVAL if the property does not exist, -ENODATA if property
|
||
|
+ * does not have a value, and -EILSEQ if the string is not null-terminated
|
||
|
+ * within the length of the property data.
|
||
|
+ */
|
||
|
+static inline int of_property_count_strings(struct device_node *np,
|
||
|
+ const char *propname)
|
||
|
+{
|
||
|
+ return of_property_read_string_helper(np, propname, NULL, 0, 0);
|
||
|
+}
|
||
|
+
|
||
|
+/**
|
||
|
+ * of_property_read_string_index() - Find and read a string from a multiple
|
||
|
+ * strings property.
|
||
|
+ * @np: device node from which the property value is to be read.
|
||
|
+ * @propname: name of the property to be searched.
|
||
|
+ * @index: index of the string in the list of strings
|
||
|
+ * @out_string: pointer to null terminated return string, modified only if
|
||
|
+ * return value is 0.
|
||
|
+ *
|
||
|
+ * Search for a property in a device tree node and retrieve a null
|
||
|
+ * terminated string value (pointer to data, not a copy) in the list of strings
|
||
|
+ * contained in that property.
|
||
|
+ * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
|
||
|
+ * property does not have a value, and -EILSEQ if the string is not
|
||
|
+ * null-terminated within the length of the property data.
|
||
|
+ *
|
||
|
+ * The out_string pointer is modified only if a valid string can be decoded.
|
||
|
+ */
|
||
|
+static inline int of_property_read_string_index(struct device_node *np,
|
||
|
+ const char *propname,
|
||
|
+ int index, const char **output)
|
||
|
+{
|
||
|
+ int rc = of_property_read_string_helper(np, propname, output, 1, index);
|
||
|
+ return rc < 0 ? rc : 0;
|
||
|
+}
|
||
|
+
|
||
|
+/**
|
||
|
* of_property_read_bool - Findfrom a property
|
||
|
* @np: device node from which the property value is to be read.
|
||
|
* @propname: name of the property to be searched.
|
||
|
diff --git a/include/linux/oom.h b/include/linux/oom.h
|
||
|
index f047a54..5ff36a2 100644
|
||
|
--- a/include/linux/oom.h
|
||
|
+++ b/include/linux/oom.h
|
||
|
@@ -45,6 +45,10 @@ extern int test_set_oom_score_adj(int new_val);
|
||
|
|
||
|
extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
|
||
|
const nodemask_t *nodemask, unsigned long totalpages);
|
||
|
+
|
||
|
+extern int oom_kills_count(void);
|
||
|
+extern void note_oom_kill(void);
|
||
|
+
|
||
|
extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
|
||
|
extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
|
||
|
|
||
|
diff --git a/include/linux/string.h b/include/linux/string.h
|
||
|
index e033564..3d9feb7 100644
|
||
|
--- a/include/linux/string.h
|
||
|
+++ b/include/linux/string.h
|
||
|
@@ -133,7 +133,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
|
||
|
#endif
|
||
|
|
||
|
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
|
||
|
- const void *from, size_t available);
|
||
|
+ const void *from, size_t available);
|
||
|
|
||
|
/**
|
||
|
* strstarts - does @str start with @prefix?
|
||
|
@@ -144,5 +144,7 @@ static inline bool strstarts(const char *str, const char *prefix)
|
||
|
{
|
||
|
return strncmp(str, prefix, strlen(prefix)) == 0;
|
||
|
}
|
||
|
+
|
||
|
+void memzero_explicit(void *s, size_t count);
|
||
|
#endif
|
||
|
#endif /* _LINUX_STRING_H_ */
|
||
|
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
|
||
|
index fb1ca8c..b1a7654 100644
|
||
|
--- a/include/linux/usb/quirks.h
|
||
|
+++ b/include/linux/usb/quirks.h
|
||
|
@@ -32,4 +32,7 @@
|
||
|
|
||
|
#define USB_QUIRK_OTG_PET 0x00000080
|
||
|
|
||
|
+/* device can't handle device_qualifier descriptor requests */
|
||
|
+#define USB_QUIRK_DEVICE_QUALIFIER 0x00000100
|
||
|
+
|
||
|
#endif /* __LINUX_USB_QUIRKS_H */
|
||
|
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 0caf1f8..8a14284 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -154,6 +154,7 @@ static struct audit_chunk *alloc_chunk(int count)
 		chunk->owners[i].index = i;
 	}
 	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
+	chunk->mark.mask = FS_IN_IGNORED;
 	return chunk;
 }
 
diff --git a/kernel/freezer.c b/kernel/freezer.c
index d1db423..df6fc77 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -36,6 +36,9 @@ bool freezing_slow_path(struct task_struct *p)
 	if (p->flags & PF_NOFREEZE)
 		return false;
 
+	if (test_thread_flag(TIF_MEMDIE))
+		return false;
+
 	if (pm_nosig_freezing || cgroup_freezing(p))
 		return true;
 
diff --git a/kernel/futex.c b/kernel/futex.c
|
||
|
index 3a24f26..1ae5049 100644
|
||
|
--- a/kernel/futex.c
|
||
|
+++ b/kernel/futex.c
|
||
|
@@ -213,6 +213,8 @@ static void drop_futex_key_refs(union futex_key *key)
|
||
|
case FUT_OFF_MMSHARED:
|
||
|
mmdrop(key->private.mm);
|
||
|
break;
|
||
|
+ default:
|
||
|
+ smp_mb(); /* explicit MB (B) */
|
||
|
}
|
||
|
}
|
||
|
|
||
|
@@ -485,8 +487,14 @@ static struct futex_pi_state * alloc_pi_state(void)
|
||
|
return pi_state;
|
||
|
}
|
||
|
|
||
|
+/*
|
||
|
+ * Must be called with the hb lock held.
|
||
|
+ */
|
||
|
static void free_pi_state(struct futex_pi_state *pi_state)
|
||
|
{
|
||
|
+ if (!pi_state)
|
||
|
+ return;
|
||
|
+
|
||
|
if (!atomic_dec_and_test(&pi_state->refcount))
|
||
|
return;
|
||
|
|
||
|
@@ -1399,15 +1407,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
|
||
|
}
|
||
|
|
||
|
retry:
|
||
|
- if (pi_state != NULL) {
|
||
|
- /*
|
||
|
- * We will have to lookup the pi_state again, so free this one
|
||
|
- * to keep the accounting correct.
|
||
|
- */
|
||
|
- free_pi_state(pi_state);
|
||
|
- pi_state = NULL;
|
||
|
- }
|
||
|
-
|
||
|
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
|
||
|
if (unlikely(ret != 0))
|
||
|
goto out;
|
||
|
@@ -1486,6 +1485,8 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
|
||
|
case 0:
|
||
|
break;
|
||
|
case -EFAULT:
|
||
|
+ free_pi_state(pi_state);
|
||
|
+ pi_state = NULL;
|
||
|
double_unlock_hb(hb1, hb2);
|
||
|
put_futex_key(&key2);
|
||
|
put_futex_key(&key1);
|
||
|
@@ -1495,6 +1496,8 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
|
||
|
goto out;
|
||
|
case -EAGAIN:
|
||
|
/* The owner was exiting, try again. */
|
||
|
+ free_pi_state(pi_state);
|
||
|
+ pi_state = NULL;
|
||
|
double_unlock_hb(hb1, hb2);
|
||
|
put_futex_key(&key2);
|
||
|
put_futex_key(&key1);
|
||
|
@@ -1571,6 +1574,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
|
||
|
}
|
||
|
|
||
|
out_unlock:
|
||
|
+ free_pi_state(pi_state);
|
||
|
double_unlock_hb(hb1, hb2);
|
||
|
|
||
|
/*
|
||
|
@@ -1587,8 +1591,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
|
||
|
out_put_key1:
|
||
|
put_futex_key(&key1);
|
||
|
out:
|
||
|
- if (pi_state != NULL)
|
||
|
- free_pi_state(pi_state);
|
||
|
return ret ? ret : task_count;
|
||
|
}
|
||
|
|
||
|
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index e885be1..02824a5 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -589,6 +589,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
 			goto out;
 		}
 	} else {
+		memset(&event.sigev_value, 0, sizeof(event.sigev_value));
 		event.sigev_notify = SIGEV_SIGNAL;
 		event.sigev_signo = SIGALRM;
 		event.sigev_value.sival_int = new_timer->it_id;
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
|
||
|
index 586521a..cacf697 100644
|
||
|
--- a/kernel/power/hibernate.c
|
||
|
+++ b/kernel/power/hibernate.c
|
||
|
@@ -488,8 +488,14 @@ int hibernation_restore(int platform_mode)
|
||
|
error = dpm_suspend_start(PMSG_QUIESCE);
|
||
|
if (!error) {
|
||
|
error = resume_target_kernel(platform_mode);
|
||
|
- dpm_resume_end(PMSG_RECOVER);
|
||
|
+ /*
|
||
|
+ * The above should either succeed and jump to the new kernel,
|
||
|
+ * or return with an error. Otherwise things are just
|
||
|
+ * undefined, so let's be paranoid.
|
||
|
+ */
|
||
|
+ BUG_ON(!error);
|
||
|
}
|
||
|
+ dpm_resume_end(PMSG_RECOVER);
|
||
|
pm_restore_gfp_mask();
|
||
|
ftrace_start();
|
||
|
resume_console();
|
||
|
diff --git a/kernel/power/process.c b/kernel/power/process.c
|
||
|
index 8908e6c..bf89a05 100644
|
||
|
--- a/kernel/power/process.c
|
||
|
+++ b/kernel/power/process.c
|
||
|
@@ -126,6 +126,28 @@ static int try_to_freeze_tasks(bool user_only)
|
||
|
return todo ? -EBUSY : 0;
|
||
|
}
|
||
|
|
||
|
+/*
|
||
|
+ * Returns true if all freezable tasks (except for current) are frozen already
|
||
|
+ */
|
||
|
+static bool check_frozen_processes(void)
|
||
|
+{
|
||
|
+ struct task_struct *g, *p;
|
||
|
+ bool ret = true;
|
||
|
+
|
||
|
+ read_lock(&tasklist_lock);
|
||
|
+ for_each_process_thread(g, p) {
|
||
|
+ if (p != current && !freezer_should_skip(p) &&
|
||
|
+ !frozen(p)) {
|
||
|
+ ret = false;
|
||
|
+ goto done;
|
||
|
+ }
|
||
|
+ }
|
||
|
+done:
|
||
|
+ read_unlock(&tasklist_lock);
|
||
|
+
|
||
|
+ return ret;
|
||
|
+}
|
||
|
+
|
||
|
/**
|
||
|
* freeze_processes - Signal user space processes to enter the refrigerator.
|
||
|
*
|
||
|
@@ -134,6 +156,7 @@ static int try_to_freeze_tasks(bool user_only)
|
||
|
int freeze_processes(void)
|
||
|
{
|
||
|
int error;
|
||
|
+ int oom_kills_saved;
|
||
|
|
||
|
error = __usermodehelper_disable(UMH_FREEZING);
|
||
|
if (error)
|
||
|
@@ -144,12 +167,27 @@ int freeze_processes(void)
|
||
|
|
||
|
printk("Freezing user space processes ... ");
|
||
|
pm_freezing = true;
|
||
|
+ oom_kills_saved = oom_kills_count();
|
||
|
error = try_to_freeze_tasks(true);
|
||
|
if (!error) {
|
||
|
- printk("done.");
|
||
|
__usermodehelper_set_disable_depth(UMH_DISABLED);
|
||
|
oom_killer_disable();
|
||
|
+
|
||
|
+ /*
|
||
|
+ * There might have been an OOM kill while we were
|
||
|
+ * freezing tasks and the killed task might be still
|
||
|
+ * on the way out so we have to double check for race.
|
||
|
+ */
|
||
|
+ if (oom_kills_count() != oom_kills_saved &&
|
||
|
+ !check_frozen_processes()) {
|
||
|
+ __usermodehelper_set_disable_depth(UMH_ENABLED);
|
||
|
+ printk("OOM in progress.");
|
||
|
+ error = -EBUSY;
|
||
|
+ goto done;
|
||
|
+ }
|
||
|
+ printk("done.");
|
||
|
}
|
||
|
+done:
|
||
|
printk("\n");
|
||
|
BUG_ON(in_atomic());
|
||
|
|
||
|
diff --git a/lib/bitmap.c b/lib/bitmap.c
|
||
|
index b121ae5..b46ce02 100644
|
||
|
--- a/lib/bitmap.c
|
||
|
+++ b/lib/bitmap.c
|
||
|
@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
|
||
|
lower = src[off + k];
|
||
|
if (left && off + k == lim - 1)
|
||
|
lower &= mask;
|
||
|
- dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
|
||
|
+ dst[k] = lower >> rem;
|
||
|
+ if (rem)
|
||
|
+ dst[k] |= upper << (BITS_PER_LONG - rem);
|
||
|
if (left && k == lim - 1)
|
||
|
dst[k] &= mask;
|
||
|
}
|
||
|
@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
|
||
|
upper = src[k];
|
||
|
if (left && k == lim - 1)
|
||
|
upper &= (1UL << left) - 1;
|
||
|
- dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
|
||
|
+ dst[k + off] = upper << rem;
|
||
|
+ if (rem)
|
||
|
+ dst[k + off] |= lower >> (BITS_PER_LONG - rem);
|
||
|
if (left && k + off == lim - 1)
|
||
|
dst[k + off] &= (1UL << left) - 1;
|
||
|
}
|
||
|
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
|
||
|
index a5f3d0e..0875a8e 100644
|
||
|
--- a/lib/lzo/lzo1x_decompress_safe.c
|
||
|
+++ b/lib/lzo/lzo1x_decompress_safe.c
|
||
|
@@ -19,31 +19,21 @@
|
||
|
#include <linux/lzo.h>
|
||
|
#include "lzodefs.h"
|
||
|
|
||
|
-#define HAVE_IP(t, x) \
|
||
|
- (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
|
||
|
- (((t + x) >= t) && ((t + x) >= x)))
|
||
|
+#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
|
||
|
+#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
|
||
|
+#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
|
||
|
+#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
|
||
|
+#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
|
||
|
|
||
|
-#define HAVE_OP(t, x) \
|
||
|
- (((size_t)(op_end - op) >= (size_t)(t + x)) && \
|
||
|
- (((t + x) >= t) && ((t + x) >= x)))
|
||
|
-
|
||
|
-#define NEED_IP(t, x) \
|
||
|
- do { \
|
||
|
- if (!HAVE_IP(t, x)) \
|
||
|
- goto input_overrun; \
|
||
|
- } while (0)
|
||
|
-
|
||
|
-#define NEED_OP(t, x) \
|
||
|
- do { \
|
||
|
- if (!HAVE_OP(t, x)) \
|
||
|
- goto output_overrun; \
|
||
|
- } while (0)
|
||
|
-
|
||
|
-#define TEST_LB(m_pos) \
|
||
|
- do { \
|
||
|
- if ((m_pos) < out) \
|
||
|
- goto lookbehind_overrun; \
|
||
|
- } while (0)
|
||
|
+/* This MAX_255_COUNT is the maximum number of times we can add 255 to a base
|
||
|
+ * count without overflowing an integer. The multiply will overflow when
|
||
|
+ * multiplying 255 by more than MAXINT/255. The sum will overflow earlier
|
||
|
+ * depending on the base count. Since the base count is taken from a u8
|
||
|
+ * and a few bits, it is safe to assume that it will always be lower than
|
||
|
+ * or equal to 2*255, thus we can always prevent any overflow by accepting
|
||
|
+ * two less 255 steps. See Documentation/lzo.txt for more information.
|
||
|
+ */
|
||
|
+#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)
|
||
|
|
||
|
int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
|
||
|
unsigned char *out, size_t *out_len)
|
||
|
@@ -75,17 +65,24 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
|
||
|
if (t < 16) {
|
||
|
if (likely(state == 0)) {
|
||
|
if (unlikely(t == 0)) {
|
||
|
+ size_t offset;
|
||
|
+ const unsigned char *ip_last = ip;
|
||
|
+
|
||
|
while (unlikely(*ip == 0)) {
|
||
|
- t += 255;
|
||
|
ip++;
|
||
|
- NEED_IP(1, 0);
|
||
|
+ NEED_IP(1);
|
||
|
}
|
||
|
- t += 15 + *ip++;
|
||
|
+ offset = ip - ip_last;
|
||
|
+ if (unlikely(offset > MAX_255_COUNT))
|
||
|
+ return LZO_E_ERROR;
|
||
|
+
|
||
|
+ offset = (offset << 8) - offset;
|
||
|
+ t += offset + 15 + *ip++;
|
||
|
}
|
||
|
t += 3;
|
||
|
copy_literal_run:
|
||
|
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
|
||
|
- if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
|
||
|
+ if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
|
||
|
const unsigned char *ie = ip + t;
|
||
|
unsigned char *oe = op + t;
|
||
|
do {
|
||
|
@@ -103,8 +100,8 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
|
||
|
} else
|
||
|
#endif
|
||
|
{
|
||
|
- NEED_OP(t, 0);
|
||
|
- NEED_IP(t, 3);
|
||
|
+ NEED_OP(t);
|
||
|
+ NEED_IP(t + 3);
|
||
|
do {
|
||
|
*op++ = *ip++;
|
||
|
} while (--t > 0);
|
||
|
@@ -117,7 +114,7 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
|
||
|
m_pos -= t >> 2;
|
||
|
m_pos -= *ip++ << 2;
|
||
|
TEST_LB(m_pos);
|
||
|
- NEED_OP(2, 0);
|
||
|
+ NEED_OP(2);
|
||
|
op[0] = m_pos[0];
|
||
|
op[1] = m_pos[1];
|
||
|
op += 2;
|
||
|
@@ -138,13 +135,20 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
|
||
|
} else if (t >= 32) {
|
||
|
t = (t & 31) + (3 - 1);
|
||
|
if (unlikely(t == 2)) {
|
||
|
+ size_t offset;
|
||
|
+ const unsigned char *ip_last = ip;
|
||
|
+
|
||
|
while (unlikely(*ip == 0)) {
|
||
|
- t += 255;
|
||
|
ip++;
|
||
|
- NEED_IP(1, 0);
|
||
|
+ NEED_IP(1);
|
||
|
}
|
||
|
- t += 31 + *ip++;
|
||
|
- NEED_IP(2, 0);
|
||
|
+ offset = ip - ip_last;
|
||
|
+ if (unlikely(offset > MAX_255_COUNT))
|
||
|
+ return LZO_E_ERROR;
|
||
|
+
|
||
|
+ offset = (offset << 8) - offset;
|
||
|
+ t += offset + 31 + *ip++;
|
||
|
+ NEED_IP(2);
|
||
|
}
|
||
|
m_pos = op - 1;
|
||
|
next = get_unaligned_le16(ip);
|
||
|
@@ -156,13 +160,20 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
|
||
|
m_pos -= (t & 8) << 11;
|
||
|
t = (t & 7) + (3 - 1);
|
||
|
if (unlikely(t == 2)) {
|
||
|
+ size_t offset;
|
||
|
+ const unsigned char *ip_last = ip;
|
||
|
+
|
||
|
while (unlikely(*ip == 0)) {
|
||
|
- t += 255;
|
||
|
ip++;
|
||
|
- NEED_IP(1, 0);
|
||
|
+ NEED_IP(1);
|
||
|
}
|
||
|
- t += 7 + *ip++;
|
||
|
- NEED_IP(2, 0);
|
||
|
+ offset = ip - ip_last;
|
||
|
+ if (unlikely(offset > MAX_255_COUNT))
|
||
|
+ return LZO_E_ERROR;
|
||
|
+
|
||
|
+ offset = (offset << 8) - offset;
|
||
|
+ t += offset + 7 + *ip++;
|
||
|
+ NEED_IP(2);
|
||
|
}
|
||
|
next = get_unaligned_le16(ip);
|
||
|
ip += 2;
|
||
|
@@ -176,7 +187,7 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
|
||
|
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
|
||
|
if (op - m_pos >= 8) {
|
||
|
unsigned char *oe = op + t;
|
||
|
- if (likely(HAVE_OP(t, 15))) {
|
||
|
+ if (likely(HAVE_OP(t + 15))) {
|
||
|
do {
|
||
|
COPY8(op, m_pos);
|
||
|
op += 8;
|
||
|
@@ -188,7 +199,7 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
|
||
|
# endif
|
||
|
} while (op < oe);
|
||
|
op = oe;
|
||
|
- if (HAVE_IP(6, 0)) {
|
||
|
+ if (HAVE_IP(6)) {
|
||
|
state = next;
|
||
|
COPY4(op, ip);
|
||
|
op += next;
|
||
|
@@ -196,7 +207,7 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
|
||
|
continue;
|
||
|
}
|
||
|
} else {
|
||
|
- NEED_OP(t, 0);
|
||
|
+ NEED_OP(t);
|
||
|
do {
|
||
|
*op++ = *m_pos++;
|
||
|
} while (op < oe);
|
||
|
@@ -205,7 +216,7 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
|
||
|
#endif
|
||
|
{
|
||
|
unsigned char *oe = op + t;
|
||
|
- NEED_OP(t, 0);
|
||
|
+ NEED_OP(t);
|
||
|
op[0] = m_pos[0];
|
||
|
op[1] = m_pos[1];
|
||
|
op += 2;
|
||
|
@@ -218,15 +229,15 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
|
||
|
state = next;
|
||
|
t = next;
|
||
|
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
|
||
|
- if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
|
||
|
+ if (likely(HAVE_IP(6) && HAVE_OP(4))) {
|
||
|
COPY4(op, ip);
|
||
|
op += t;
|
||
|
ip += t;
|
||
|
} else
|
||
|
#endif
|
||
|
{
|
||
|
- NEED_IP(t, 3);
|
||
|
- NEED_OP(t, 0);
|
||
|
+ NEED_IP(t + 3);
|
||
|
+ NEED_OP(t);
|
||
|
while (t > 0) {
|
||
|
*op++ = *ip++;
|
||
|
t--;
|
||
|
diff --git a/lib/string.c b/lib/string.c
|
||
|
index e5878de..43d0781 100644
|
||
|
--- a/lib/string.c
|
||
|
+++ b/lib/string.c
|
||
|
@@ -586,6 +586,22 @@ void *memset(void *s, int c, size_t count)
|
||
|
EXPORT_SYMBOL(memset);
|
||
|
#endif
|
||
|
|
||
|
+/**
|
||
|
+ * memzero_explicit - Fill a region of memory (e.g. sensitive
|
||
|
+ * keying data) with 0s.
|
||
|
+ * @s: Pointer to the start of the area.
|
||
|
+ * @count: The size of the area.
|
||
|
+ *
|
||
|
+ * memzero_explicit() doesn't need an arch-specific version as
|
||
|
+ * it just invokes the one of memset() implicitly.
|
||
|
+ */
|
||
|
+void memzero_explicit(void *s, size_t count)
|
||
|
+{
|
||
|
+ memset(s, 0, count);
|
||
|
+ OPTIMIZER_HIDE_VAR(s);
|
||
|
+}
|
||
|
+EXPORT_SYMBOL(memzero_explicit);
|
||
|
+
|
||
|
#ifndef __HAVE_ARCH_MEMCPY
|
||
|
/**
|
||
|
* memcpy - Copy one area of memory to another
|
||
|
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
|
||
|
index 3da5c0b..8978c1b 100644
|
||
|
--- a/mm/huge_memory.c
|
||
|
+++ b/mm/huge_memory.c
|
||
|
@@ -711,7 +711,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
||
|
if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
|
||
|
if (unlikely(anon_vma_prepare(vma)))
|
||
|
return VM_FAULT_OOM;
|
||
|
- if (unlikely(khugepaged_enter(vma)))
|
||
|
+ if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
|
||
|
return VM_FAULT_OOM;
|
||
|
page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
|
||
|
vma, haddr, numa_node_id(), 0);
|
||
|
@@ -1505,7 +1505,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
|
||
|
* register it here without waiting a page fault that
|
||
|
* may not happen any time soon.
|
||
|
*/
|
||
|
- if (unlikely(khugepaged_enter_vma_merge(vma)))
|
||
|
+ if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
|
||
|
return -ENOMEM;
|
||
|
break;
|
||
|
case MADV_NOHUGEPAGE:
|
||
|
@@ -1637,7 +1637,8 @@ int __khugepaged_enter(struct mm_struct *mm)
|
||
|
return 0;
|
||
|
}
|
||
|
|
||
|
-int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
|
||
|
+int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
|
||
|
+ unsigned long vm_flags)
|
||
|
{
|
||
|
unsigned long hstart, hend;
|
||
|
if (!vma->anon_vma)
|
||
|
@@ -1653,11 +1654,11 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
|
||
|
* If is_pfn_mapping() is true is_learn_pfn_mapping() must be
|
||
|
* true too, verify it here.
|
||
|
*/
|
||
|
- VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
|
||
|
+ VM_BUG_ON(is_linear_pfn_mapping(vma) || vm_flags & VM_NO_THP);
|
||
|
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
|
||
|
hend = vma->vm_end & HPAGE_PMD_MASK;
|
||
|
if (hstart < hend)
|
||
|
- return khugepaged_enter(vma);
|
||
|
+ return khugepaged_enter(vma, vm_flags);
|
||
|
return 0;
|
||
|
}
|
||
|
|
||
|
diff --git a/mm/memory.c b/mm/memory.c
|
||
|
index 6b0897e..6d5e635 100644
|
||
|
--- a/mm/memory.c
|
||
|
+++ b/mm/memory.c
|
||
|
@@ -1169,8 +1169,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
|
||
|
if (unlikely(page_mapcount(page) < 0))
|
||
|
print_bad_pte(vma, addr, ptent, page);
|
||
|
force_flush = !__tlb_remove_page(tlb, page);
|
||
|
- if (force_flush)
|
||
|
+ if (force_flush) {
|
||
|
+ addr += PAGE_SIZE;
|
||
|
break;
|
||
|
+ }
|
||
|
continue;
|
||
|
}
|
||
|
/*
|
||
|
diff --git a/mm/mmap.c b/mm/mmap.c
|
||
|
index e0e4ebf..eadef99 100644
|
||
|
--- a/mm/mmap.c
|
||
|
+++ b/mm/mmap.c
|
||
|
@@ -832,7 +832,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
|
||
|
end, prev->vm_pgoff, NULL);
|
||
|
if (err)
|
||
|
return NULL;
|
||
|
- khugepaged_enter_vma_merge(prev);
|
||
|
+ khugepaged_enter_vma_merge(prev, vm_flags);
|
||
|
return prev;
|
||
|
}
|
||
|
|
||
|
@@ -851,7 +851,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
|
||
|
next->vm_pgoff - pglen, NULL);
|
||
|
if (err)
|
||
|
return NULL;
|
||
|
- khugepaged_enter_vma_merge(area);
|
||
|
+ khugepaged_enter_vma_merge(area, vm_flags);
|
||
|
return area;
|
||
|
}
|
||
|
|
||
|
@@ -1827,7 +1827,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
|
||
|
}
|
||
|
}
|
||
|
vma_unlock_anon_vma(vma);
|
||
|
- khugepaged_enter_vma_merge(vma);
|
||
|
+ khugepaged_enter_vma_merge(vma, vma->vm_flags);
|
||
|
return error;
|
||
|
}
|
||
|
#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
|
||
|
@@ -1878,7 +1878,7 @@ int expand_downwards(struct vm_area_struct *vma,
|
||
|
}
|
||
|
}
|
||
|
vma_unlock_anon_vma(vma);
|
||
|
- khugepaged_enter_vma_merge(vma);
|
||
|
+ khugepaged_enter_vma_merge(vma, vma->vm_flags);
|
||
|
return error;
|
||
|
}
|
||
|
|
||
|
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
|
||
|
index ebda919..ab1171a 100644
|
||
|
--- a/mm/oom_kill.c
|
||
|
+++ b/mm/oom_kill.c
|
||
|
@@ -435,6 +435,23 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
|
||
|
dump_tasks(memcg, nodemask);
|
||
|
}
|
||
|
|
||
|
+/*
|
||
|
+ * Number of OOM killer invocations (including memcg OOM killer).
|
||
|
+ * Primarily used by PM freezer to check for potential races with
|
||
|
+ * OOM killed frozen task.
|
||
|
+ */
|
||
|
+static atomic_t oom_kills = ATOMIC_INIT(0);
|
||
|
+
|
||
|
+int oom_kills_count(void)
|
||
|
+{
|
||
|
+ return atomic_read(&oom_kills);
|
||
|
+}
|
||
|
+
|
||
|
+void note_oom_kill(void)
|
||
|
+{
|
||
|
+ atomic_inc(&oom_kills);
|
||
|
+}
|
||
|
+
|
||
|
#define K(x) ((x) << (PAGE_SHIFT-10))
|
||
|
static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
|
||
|
unsigned int points, unsigned long totalpages,
|
||
|
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
|
||
|
index b02c67f..1e3e4c0 100644
|
||
|
--- a/mm/page_alloc.c
|
||
|
+++ b/mm/page_alloc.c
|
||
|
@@ -2130,6 +2130,14 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
|
||
|
}
|
||
|
|
||
|
/*
|
||
|
+ * PM-freezer should be notified that there might be an OOM killer on
|
||
|
+ * its way to kill and wake somebody up. This is too early and we might
|
||
|
+ * end up not killing anything but false positives are acceptable.
|
||
|
+ * See freeze_processes.
|
||
|
+ */
|
||
|
+ note_oom_kill();
|
||
|
+
|
||
|
+ /*
|
||
|
* Go through the zonelist yet one more time, keep very high watermark
|
||
|
* here, this is only to catch a parallel oom killing, we must fail if
|
||
|
* we're still under heavy pressure.
|
||
|
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 1ccbd71..b7693fd 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -170,6 +170,7 @@ static void free_page_cgroup(void *addr)
 			sizeof(struct page_cgroup) * PAGES_PER_SECTION;
 
 		BUG_ON(PageReserved(page));
+		kmemleak_free(addr);
 		free_pages_exact(addr, table_size);
 	}
 }
diff --git a/mm/percpu.c b/mm/percpu.c
index 5f6042b..13b2eef 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1907,8 +1907,6 @@ void __init setup_per_cpu_areas(void)
 
 	if (pcpu_setup_first_chunk(ai, fc) < 0)
 		panic("Failed to initialize percpu areas.");
-
-	pcpu_free_alloc_info(ai);
 }
 
 #endif /* CONFIG_SMP */
diff --git a/mm/truncate.c b/mm/truncate.c
|
||
|
index f38055c..57625f7 100644
|
||
|
--- a/mm/truncate.c
|
||
|
+++ b/mm/truncate.c
|
||
|
@@ -20,6 +20,7 @@
|
||
|
#include <linux/buffer_head.h> /* grr. try_to_release_page,
|
||
|
do_invalidatepage */
|
||
|
#include <linux/cleancache.h>
|
||
|
+#include <linux/rmap.h>
|
||
|
#include "internal.h"
|
||
|
|
||
|
|
||
|
@@ -571,16 +572,70 @@ EXPORT_SYMBOL(truncate_pagecache);
|
||
|
*/
|
||
|
void truncate_setsize(struct inode *inode, loff_t newsize)
|
||
|
{
|
||
|
- loff_t oldsize;
|
||
|
-
|
||
|
- oldsize = inode->i_size;
|
||
|
+ loff_t oldsize = inode->i_size;
|
||
|
i_size_write(inode, newsize);
|
||
|
|
||
|
+ if (newsize > oldsize)
|
||
|
+ pagecache_isize_extended(inode, oldsize, newsize);
|
||
|
truncate_pagecache(inode, oldsize, newsize);
|
||
|
}
|
||
|
EXPORT_SYMBOL(truncate_setsize);
|
||
|
|
||
|
/**
|
||
|
+ * pagecache_isize_extended - update pagecache after extension of i_size
|
||
|
+ * @inode: inode for which i_size was extended
|
||
|
+ * @from: original inode size
|
||
|
+ * @to: new inode size
|
||
|
+ *
|
||
|
+ * Handle extension of inode size either caused by extending truncate or by
|
||
|
+ * write starting after current i_size. We mark the page straddling current
|
||
|
+ * i_size RO so that page_mkwrite() is called on the nearest write access to
|
||
|
+ * the page. This way filesystem can be sure that page_mkwrite() is called on
|
||
|
+ * the page before user writes to the page via mmap after the i_size has been
|
||
|
+ * changed.
|
||
|
+ *
|
||
|
+ * The function must be called after i_size is updated so that page fault
|
||
|
+ * coming after we unlock the page will already see the new i_size.
|
||
|
+ * The function must be called while we still hold i_mutex - this not only
|
||
|
+ * makes sure i_size is stable but also that userspace cannot observe new
|
||
|
+ * i_size value before we are prepared to store mmap writes at new inode size.
|
||
|
+ */
|
||
|
+void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
|
||
|
+{
|
||
|
+ int bsize = 1 << inode->i_blkbits;
|
||
|
+ loff_t rounded_from;
|
||
|
+ struct page *page;
|
||
|
+ pgoff_t index;
|
||
|
+
|
||
|
+ WARN_ON(to > inode->i_size);
|
||
|
+
|
||
|
+ if (from >= to || bsize == PAGE_CACHE_SIZE)
|
||
|
+ return;
|
||
|
+ /* Page straddling @from will not have any hole block created? */
|
||
|
+ rounded_from = round_up(from, bsize);
|
||
|
+ if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
|
||
|
+ return;
|
||
|
+
|
||
|
+ index = from >> PAGE_CACHE_SHIFT;
|
||
|
+ page = find_lock_page(inode->i_mapping, index);
|
||
|
+ /* Page not cached? Nothing to do */
|
||
|
+ if (!page)
|
||
|
+ return;
|
||
|
+ /*
|
||
|
+ * See clear_page_dirty_for_io() for details why set_page_dirty()
|
||
|
+ * is needed.
|
||
|
+ */
|
||
|
+ if (page_mkclean(page))
|
||
|
+ set_page_dirty(page);
|
||
|
+ unlock_page(page);
|
||
|
+ page_cache_release(page);
|
||
|
+}
|
||
|
+EXPORT_SYMBOL(pagecache_isize_extended);
|
||
|
+
|
||
|
+/**
|
||
|
+ * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
|
||
|
+ * @inode: inode
|
||
|
+ * @lstart: offset of beginning of hole
|
||
|
* vmtruncate - unmap mappings "freed" by truncate() syscall
|
||
|
* @inode: inode of the file used
|
||
|
* @newsize: file offset to start truncating
|
||
|
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 9da7fdd..3d1be99 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -89,11 +89,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)

 static const u8 *aes_iv = (u8 *)CEPH_AES_IV;

+/*
+ * Should be used for buffers allocated with ceph_kvmalloc().
+ * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
+ * in-buffer (msg front).
+ *
+ * Dispose of @sgt with teardown_sgtable().
+ *
+ * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
+ * in cases where a single sg is sufficient. No attempt to reduce the
+ * number of sgs by squeezing physically contiguous pages together is
+ * made though, for simplicity.
+ */
+static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
+ const void *buf, unsigned int buf_len)
+{
+ struct scatterlist *sg;
+ const bool is_vmalloc = is_vmalloc_addr(buf);
+ unsigned int off = offset_in_page(buf);
+ unsigned int chunk_cnt = 1;
+ unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
+ int i;
+ int ret;
+
+ if (buf_len == 0) {
+ memset(sgt, 0, sizeof(*sgt));
+ return -EINVAL;
+ }
+
+ if (is_vmalloc) {
+ chunk_cnt = chunk_len >> PAGE_SHIFT;
+ chunk_len = PAGE_SIZE;
+ }
+
+ if (chunk_cnt > 1) {
+ ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
+ if (ret)
+ return ret;
+ } else {
+ WARN_ON(chunk_cnt != 1);
+ sg_init_table(prealloc_sg, 1);
+ sgt->sgl = prealloc_sg;
+ sgt->nents = sgt->orig_nents = 1;
+ }
+
+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+ struct page *page;
+ unsigned int len = min(chunk_len - off, buf_len);
+
+ if (is_vmalloc)
+ page = vmalloc_to_page(buf);
+ else
+ page = virt_to_page(buf);
+
+ sg_set_page(sg, page, len, off);
+
+ off = 0;
+ buf += len;
+ buf_len -= len;
+ }
+ WARN_ON(buf_len != 0);
+
+ return 0;
+}
+
+static void teardown_sgtable(struct sg_table *sgt)
+{
+ if (sgt->orig_nents > 1)
+ sg_free_table(sgt);
+}
+
 static int ceph_aes_encrypt(const void *key, int key_len,
 void *dst, size_t *dst_len,
 const void *src, size_t src_len)
 {
- struct scatterlist sg_in[2], sg_out[1];
+ struct scatterlist sg_in[2], prealloc_sg;
+ struct sg_table sg_out;
 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
 struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
 int ret;
@@ -109,16 +180,18 @@ static int ceph_aes_encrypt(const void *key, int key_len,

 *dst_len = src_len + zero_padding;

- crypto_blkcipher_setkey((void *)tfm, key, key_len);
 sg_init_table(sg_in, 2);
 sg_set_buf(&sg_in[0], src, src_len);
 sg_set_buf(&sg_in[1], pad, zero_padding);
- sg_init_table(sg_out, 1);
- sg_set_buf(sg_out, dst, *dst_len);
+ ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
+ if (ret)
+ goto out_tfm;
+
+ crypto_blkcipher_setkey((void *)tfm, key, key_len);
 iv = crypto_blkcipher_crt(tfm)->iv;
 ivsize = crypto_blkcipher_ivsize(tfm);
-
 memcpy(iv, aes_iv, ivsize);
+
 /*
 print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
 key, key_len, 1);
@@ -127,16 +200,22 @@ static int ceph_aes_encrypt(const void *key, int key_len,
 print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
 pad, zero_padding, 1);
 */
- ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
+ ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
 src_len + zero_padding);
- crypto_free_blkcipher(tfm);
- if (ret < 0)
+ if (ret < 0) {
 pr_err("ceph_aes_crypt failed %d\n", ret);
+ goto out_sg;
+ }
 /*
 print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
 dst, *dst_len, 1);
 */
- return 0;
+
+out_sg:
+ teardown_sgtable(&sg_out);
+out_tfm:
+ crypto_free_blkcipher(tfm);
+ return ret;
 }

 static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
@@ -144,7 +223,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
 const void *src1, size_t src1_len,
 const void *src2, size_t src2_len)
 {
- struct scatterlist sg_in[3], sg_out[1];
+ struct scatterlist sg_in[3], prealloc_sg;
+ struct sg_table sg_out;
 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
 struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
 int ret;
@@ -160,17 +240,19 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,

 *dst_len = src1_len + src2_len + zero_padding;

- crypto_blkcipher_setkey((void *)tfm, key, key_len);
 sg_init_table(sg_in, 3);
 sg_set_buf(&sg_in[0], src1, src1_len);
 sg_set_buf(&sg_in[1], src2, src2_len);
 sg_set_buf(&sg_in[2], pad, zero_padding);
- sg_init_table(sg_out, 1);
- sg_set_buf(sg_out, dst, *dst_len);
+ ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
+ if (ret)
+ goto out_tfm;
+
+ crypto_blkcipher_setkey((void *)tfm, key, key_len);
 iv = crypto_blkcipher_crt(tfm)->iv;
 ivsize = crypto_blkcipher_ivsize(tfm);
-
 memcpy(iv, aes_iv, ivsize);
+
 /*
 print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
 key, key_len, 1);
@@ -181,23 +263,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
 print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
 pad, zero_padding, 1);
 */
- ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
+ ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
 src1_len + src2_len + zero_padding);
- crypto_free_blkcipher(tfm);
- if (ret < 0)
+ if (ret < 0) {
 pr_err("ceph_aes_crypt2 failed %d\n", ret);
+ goto out_sg;
+ }
 /*
 print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
 dst, *dst_len, 1);
 */
- return 0;
+
+out_sg:
+ teardown_sgtable(&sg_out);
+out_tfm:
+ crypto_free_blkcipher(tfm);
+ return ret;
 }

 static int ceph_aes_decrypt(const void *key, int key_len,
 void *dst, size_t *dst_len,
 const void *src, size_t src_len)
 {
- struct scatterlist sg_in[1], sg_out[2];
+ struct sg_table sg_in;
+ struct scatterlist sg_out[2], prealloc_sg;
 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
 struct blkcipher_desc desc = { .tfm = tfm };
 char pad[16];
@@ -209,16 +298,16 @@ static int ceph_aes_decrypt(const void *key, int key_len,
 if (IS_ERR(tfm))
 return PTR_ERR(tfm);

- crypto_blkcipher_setkey((void *)tfm, key, key_len);
- sg_init_table(sg_in, 1);
 sg_init_table(sg_out, 2);
- sg_set_buf(sg_in, src, src_len);
 sg_set_buf(&sg_out[0], dst, *dst_len);
 sg_set_buf(&sg_out[1], pad, sizeof(pad));
+ ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
+ if (ret)
+ goto out_tfm;

+ crypto_blkcipher_setkey((void *)tfm, key, key_len);
 iv = crypto_blkcipher_crt(tfm)->iv;
 ivsize = crypto_blkcipher_ivsize(tfm);
-
 memcpy(iv, aes_iv, ivsize);

 /*
@@ -227,12 +316,10 @@ static int ceph_aes_decrypt(const void *key, int key_len,
 print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
 src, src_len, 1);
 */
-
- ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
- crypto_free_blkcipher(tfm);
+ ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
 if (ret < 0) {
 pr_err("ceph_aes_decrypt failed %d\n", ret);
- return ret;
+ goto out_sg;
 }

 if (src_len <= *dst_len)
@@ -250,7 +337,12 @@ static int ceph_aes_decrypt(const void *key, int key_len,
 print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
 dst, *dst_len, 1);
 */
- return 0;
+
+out_sg:
+ teardown_sgtable(&sg_in);
+out_tfm:
+ crypto_free_blkcipher(tfm);
+ return ret;
 }

 static int ceph_aes_decrypt2(const void *key, int key_len,
@@ -258,7 +350,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
 void *dst2, size_t *dst2_len,
 const void *src, size_t src_len)
 {
- struct scatterlist sg_in[1], sg_out[3];
+ struct sg_table sg_in;
+ struct scatterlist sg_out[3], prealloc_sg;
 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
 struct blkcipher_desc desc = { .tfm = tfm };
 char pad[16];
@@ -270,17 +363,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
 if (IS_ERR(tfm))
 return PTR_ERR(tfm);

- sg_init_table(sg_in, 1);
- sg_set_buf(sg_in, src, src_len);
 sg_init_table(sg_out, 3);
 sg_set_buf(&sg_out[0], dst1, *dst1_len);
 sg_set_buf(&sg_out[1], dst2, *dst2_len);
 sg_set_buf(&sg_out[2], pad, sizeof(pad));
+ ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
+ if (ret)
+ goto out_tfm;

 crypto_blkcipher_setkey((void *)tfm, key, key_len);
 iv = crypto_blkcipher_crt(tfm)->iv;
 ivsize = crypto_blkcipher_ivsize(tfm);
-
 memcpy(iv, aes_iv, ivsize);

 /*
@@ -289,12 +382,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
 print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
 src, src_len, 1);
 */
-
- ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
- crypto_free_blkcipher(tfm);
+ ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
 if (ret < 0) {
 pr_err("ceph_aes_decrypt failed %d\n", ret);
- return ret;
+ goto out_sg;
 }

 if (src_len <= *dst1_len)
@@ -324,7 +415,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
 dst2, *dst2_len, 1);
 */

- return 0;
+out_sg:
+ teardown_sgtable(&sg_in);
+out_tfm:
+ crypto_free_blkcipher(tfm);
+ return ret;
 }


diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 95a04f0..9f32756 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -395,10 +395,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 u32 hw_reconf_flags = 0;
 int i;
 enum nl80211_channel_type orig_ct;
+ bool cancel_scan;

 clear_bit(SDATA_STATE_RUNNING, &sdata->state);

- if (local->scan_sdata == sdata)
+ cancel_scan = local->scan_sdata == sdata;
+ if (cancel_scan)
 ieee80211_scan_cancel(local);

 /*
@@ -562,6 +564,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,

 ieee80211_recalc_ps(local, -1);

+ if (cancel_scan)
+ flush_delayed_work(&local->scan_work);
+
 if (local->open_count == 0) {
 if (local->ops->napi_poll)
 napi_disable(&local->napi);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index f5ed863..32929b0 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1486,11 +1486,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
 sc = le16_to_cpu(hdr->seq_ctrl);
 frag = sc & IEEE80211_SCTL_FRAG;

- if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
- is_multicast_ether_addr(hdr->addr1))) {
- /* not fragmented */
+ if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
+ goto out;
+
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ rx->local->dot11MulticastReceivedFrameCount++;
 goto out;
 }
+
 I802_DEBUG_INC(rx->local->rx_handlers_fragments);

 if (skb_linearize(rx->skb))
@@ -1583,10 +1586,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
 out:
 if (rx->sta)
 rx->sta->rx_packets++;
- if (is_multicast_ether_addr(hdr->addr1))
- rx->local->dot11MulticastReceivedFrameCount++;
- else
- ieee80211_led_rx(rx->local);
+ ieee80211_led_rx(rx->local);
 return RX_CONTINUE;
 }

diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index c487715..d96b7f6 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -282,9 +282,12 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
 {
 const struct evm_ima_xattr_data *xattr_data = xattr_value;

- if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0)
- && (xattr_data->type == EVM_XATTR_HMAC))
- return -EPERM;
+ if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
+ if (!xattr_value_len)
+ return -EINVAL;
+ if (xattr_data->type != EVM_IMA_XATTR_DIGSIG)
+ return -EPERM;
+ }
 return evm_protect_xattr(dentry, xattr_name, xattr_value,
 xattr_value_len);
 }
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index e2c02dc..7076a8e 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -205,6 +205,8 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
 if (err < 0)
 return err;

+ if (clear_user(src, sizeof(*src)))
+ return -EFAULT;
 if (put_user(status.state, &src->state) ||
 compat_put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) ||
 compat_put_timespec(&status.tstamp, &src->tstamp) ||
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 50b16fb..3e90775 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3244,7 +3244,7 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {

 #ifndef ARCH_HAS_DMA_MMAP_COHERENT
 /* This should be defined / handled globally! */
-#ifdef CONFIG_ARM
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
 #define ARCH_HAS_DMA_MMAP_COHERENT
 #endif
 #endif
diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
index a0afa50..f35284b 100644
--- a/sound/pci/emu10k1/emu10k1_callback.c
+++ b/sound/pci/emu10k1/emu10k1_callback.c
@@ -85,6 +85,8 @@ snd_emu10k1_ops_setup(struct snd_emux *emux)
 * get more voice for pcm
 *
 * terminate most inactive voice and give it as a pcm voice.
+ *
+ * voice_lock is already held.
 */
 int
 snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
@@ -92,12 +94,10 @@ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
 struct snd_emux *emu;
 struct snd_emux_voice *vp;
 struct best_voice best[V_END];
- unsigned long flags;
 int i;

 emu = hw->synth;

- spin_lock_irqsave(&emu->voice_lock, flags);
 lookup_voices(emu, hw, best, 1); /* no OFF voices */
 for (i = 0; i < V_END; i++) {
 if (best[i].voice >= 0) {
@@ -113,11 +113,9 @@ snd_emu10k1_synth_get_voice(struct snd_emu10k1 *hw)
 vp->emu->num_voices--;
 vp->ch = -1;
 vp->state = SNDRV_EMUX_ST_OFF;
- spin_unlock_irqrestore(&emu->voice_lock, flags);
 return ch;
 }
 }
- spin_unlock_irqrestore(&emu->voice_lock, flags);

 /* not found */
 return -ENOMEM;
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index f0b8d8e..c40b7ca 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1313,8 +1313,7 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)

 /* enable small pop, introduce 400ms delay in turning off */
 snd_soc_update_bits(codec, SGTL5000_CHIP_REF_CTRL,
- SGTL5000_SMALL_POP,
- SGTL5000_SMALL_POP);
+ SGTL5000_SMALL_POP, 1);

 /* disable short cut detector */
 snd_soc_write(codec, SGTL5000_CHIP_SHORT_CTRL, 0);
diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
index d3a68bb..0bd6e1c 100644
--- a/sound/soc/codecs/sgtl5000.h
+++ b/sound/soc/codecs/sgtl5000.h
@@ -275,7 +275,7 @@
 #define SGTL5000_BIAS_CTRL_MASK 0x000e
 #define SGTL5000_BIAS_CTRL_SHIFT 1
 #define SGTL5000_BIAS_CTRL_WIDTH 3
-#define SGTL5000_SMALL_POP 0x0001
+#define SGTL5000_SMALL_POP 0

 /*
 * SGTL5000_CHIP_MIC_CTRL
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 91b7287..eb0599f 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1393,8 +1393,7 @@ static const struct snd_soc_dai_ops fsi_dai_ops = {
 static struct snd_pcm_hardware fsi_pcm_hardware = {
 .info = SNDRV_PCM_INFO_INTERLEAVED |
 SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_PAUSE,
+ SNDRV_PCM_INFO_MMAP_VALID,
 .formats = FSI_FMTS,
 .rates = FSI_RATES,
 .rate_min = 8000,
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index a5c758b..1afc47d 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -353,6 +353,36 @@ YAMAHA_DEVICE(0x105d, NULL),
 }
 }
 },
+{
+ USB_DEVICE(0x0499, 0x1509),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ /* .vendor_name = "Yamaha", */
+ /* .product_name = "Steinberg UR22", */
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 3,
+ .type = QUIRK_MIDI_YAMAHA
+ },
+ {
+ .ifnum = 4,
+ .type = QUIRK_IGNORE_INTERFACE
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
 YAMAHA_DEVICE(0x2000, "DGP-7"),
 YAMAHA_DEVICE(0x2001, "DGP-5"),
 YAMAHA_DEVICE(0x2002, NULL),
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 89e4321..88581d0 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -43,13 +43,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 gfn_t base_gfn, unsigned long npages);

 static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
- gfn_t gfn, unsigned long size)
+ gfn_t gfn, unsigned long npages)
 {
 gfn_t end_gfn;
 pfn_t pfn;

 pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
- end_gfn = gfn + (size >> PAGE_SHIFT);
+ end_gfn = gfn + npages;
 gfn += 1;

 if (is_error_pfn(pfn))
@@ -117,7 +117,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 * Pin all pages we are about to map in memory. This is
 * important because we unmap and unpin in 4kb steps later.
 */
- pfn = kvm_pin_pages(kvm, slot, gfn, page_size);
+ pfn = kvm_pin_pages(kvm, slot, gfn, page_size >> PAGE_SHIFT);
 if (is_error_pfn(pfn)) {
 gfn += 1;
 continue;
@@ -129,7 +129,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 if (r) {
 printk(KERN_ERR "kvm_iommu_map_address:"
 "iommu failed to map pfn=%llx\n", pfn);
- kvm_unpin_pages(kvm, pfn, page_size);
+ kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
 goto unmap_pages;
 }

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index bc5ed14..f4732bd 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -52,6 +52,7 @@

 #include <asm/processor.h>
 #include <asm/io.h>
+#include <asm/ioctl.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>

@@ -1744,6 +1745,9 @@ static long kvm_vcpu_ioctl(struct file *filp,
 if (vcpu->kvm->mm != current->mm)
 return -EIO;

+ if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
+ return -EINVAL;
+
 #if defined(CONFIG_S390) || defined(CONFIG_PPC)
 /*
 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,