diff --git a/Patches/LineageOS-19.1/android_frameworks_base/0014-Automatic_Reboot.patch b/Patches/LineageOS-19.1/android_frameworks_base/0014-Automatic_Reboot.patch index 5b8c3e8e..4e00deb2 100644 --- a/Patches/LineageOS-19.1/android_frameworks_base/0014-Automatic_Reboot.patch +++ b/Patches/LineageOS-19.1/android_frameworks_base/0014-Automatic_Reboot.patch @@ -57,7 +57,7 @@ index 5797aa01bfc0..cd3e8b391982 100644 diff --git a/packages/SystemUI/src/com/android/systemui/keyguard/KeyguardViewMediator.java b/packages/SystemUI/src/com/android/systemui/keyguard/KeyguardViewMediator.java -index 532f071132fc..bb78b6cd6eee 100644 +index 1eba4cc4d16a..d267e40cfce7 100644 --- a/packages/SystemUI/src/com/android/systemui/keyguard/KeyguardViewMediator.java +++ b/packages/SystemUI/src/com/android/systemui/keyguard/KeyguardViewMediator.java @@ -186,6 +186,8 @@ public class KeyguardViewMediator extends SystemUI implements Dumpable, @@ -143,7 +143,7 @@ index 532f071132fc..bb78b6cd6eee 100644 } } }; -@@ -2365,6 +2399,7 @@ public class KeyguardViewMediator extends SystemUI implements Dumpable, +@@ -2367,6 +2401,7 @@ public class KeyguardViewMediator extends SystemUI implements Dumpable, mHideAnimationRun = false; adjustStatusBarLocked(); sendUserPresentBroadcast(); diff --git a/Patches/LineageOS-19.1/android_kernel_oneplus_sdm845/4.9.282-qc.patch b/Patches/LineageOS-19.1/android_kernel_oneplus_sdm845/4.9.282-qc.patch new file mode 100644 index 00000000..f56356ed --- /dev/null +++ b/Patches/LineageOS-19.1/android_kernel_oneplus_sdm845/4.9.282-qc.patch @@ -0,0 +1,124841 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Tad +Date: Wed, 6 Apr 2022 20:15:35 -0400 +Subject: [PATCH] Merge remote-tracking branch 'qc/msm-4.9' into HEAD + +Signed-off-by: Tad +Change-Id: Ie4ee3fa1a2c9d22acdcb19f4d07e068ef4388513 +--- + Documentation/ABI/testing/sysfs-bus-iio | 3 +- + Documentation/DocBook/libata.tmpl | 2 +- + .../devicetree/bindings/arm/msm/msm.txt | 6 + + .../display/mediatek/mediatek,dpi.txt | 6 + + .../devicetree/bindings/input/hall_sensor.txt | 36 + + .../devicetree/bindings/input/misc/bma2x2.txt | 61 + + .../devicetree/bindings/input/misc/ltr553.txt | 67 + + .../devicetree/bindings/leds/leds-aw2013.txt | 75 + + .../devicetree/bindings/net/nfc/nxp-nci.txt | 2 +- + .../devicetree/bindings/net/nfc/pn544.txt | 2 +- + .../devicetree/bindings/pci/msm_ep_pcie.txt | 1 + + .../devicetree/bindings/sound/wm8994.txt | 18 +- + .../devicetree/bindings/spi/spi_qsd.txt | 8 +- + .../devicetree/bindings/vendor-prefixes.txt | 1 + + Documentation/filesystems/affs.txt | 16 +- + .../filesystems/mandatory-locking.txt | 10 + + Documentation/kernel-parameters.txt | 15 + + .../media/uapi/dvb/fe-get-property.rst | 7 +- + Documentation/networking/ip-sysctl.txt | 10 + + Documentation/sphinx/parse-headers.pl | 2 +- + Documentation/target/tcm_mod_builder.py | 2 +- + Documentation/trace/postprocess/decode_msr.py | 2 +- + .../trace-pagealloc-postprocess.pl | 2 +- + .../postprocess/trace-vmscan-postprocess.pl | 2 +- + Documentation/virtual/kvm/api.txt | 2 + + Makefile | 50 +- + arch/alpha/include/asm/io.h | 8 +- + arch/alpha/include/asm/uaccess.h | 67 +- + arch/alpha/kernel/smp.c | 2 +- + arch/alpha/lib/Makefile | 33 +- + arch/alpha/lib/clear_user.S | 66 +- + arch/alpha/lib/copy_user.S | 82 +- + arch/alpha/lib/ev6-clear_user.S | 84 +- + arch/alpha/lib/ev6-copy_user.S | 104 +- + arch/arc/Makefile | 1 + + arch/arc/include/asm/elf.h | 2 +- + arch/arc/include/asm/page.h | 1 + + arch/arc/kernel/entry.S | 4 +- + 
arch/arc/kernel/signal.c | 4 +- + arch/arc/kernel/stacktrace.c | 30 +- + arch/arc/kernel/vmlinux.lds.S | 2 + + arch/arc/plat-eznps/include/plat/ctop.h | 1 - + arch/arm/Kconfig | 9 +- + arch/arm/Makefile | 6 +- + arch/arm/boot/compressed/head.S | 4 +- + arch/arm/boot/dts/am335x-cm-t335.dts | 2 +- + arch/arm/boot/dts/am43x-epos-evm.dts | 2 +- + arch/arm/boot/dts/at91-sama5d3_xplained.dts | 7 + + arch/arm/boot/dts/at91-sama5d4_xplained.dts | 7 + + arch/arm/boot/dts/at91sam9rl.dtsi | 19 +- + arch/arm/boot/dts/bcm63138.dtsi | 2 +- + arch/arm/boot/dts/bcm7445-bcm97445svmb.dts | 4 +- + arch/arm/boot/dts/bcm7445.dtsi | 2 +- + arch/arm/boot/dts/bcm963138dvt.dts | 4 +- + arch/arm/boot/dts/exynos5250-smdk5250.dts | 2 +- + arch/arm/boot/dts/exynos5250-snow-common.dtsi | 2 +- + arch/arm/boot/dts/exynos5250-spring.dts | 2 +- + arch/arm/boot/dts/exynos5410-odroidxu.dts | 6 +- + arch/arm/boot/dts/exynos5410-pinctrl.dtsi | 28 + + arch/arm/boot/dts/exynos5410.dtsi | 4 + + arch/arm/boot/dts/exynos5420-arndale-octa.dts | 2 +- + arch/arm/boot/dts/exynos5422-odroidxu4.dts | 2 +- + .../boot/dts/exynos54xx-odroidxu-leds.dtsi | 4 +- + arch/arm/boot/dts/imx50-evk.dts | 2 +- + arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi | 5 +- + arch/arm/boot/dts/imx6qdl-udoo.dtsi | 2 +- + arch/arm/boot/dts/lpc32xx.dtsi | 3 - + arch/arm/boot/dts/omap3.dtsi | 3 + + arch/arm/boot/dts/omap4.dtsi | 5 + + arch/arm/boot/dts/omap443x.dtsi | 2 + + arch/arm/boot/dts/omap5.dtsi | 5 + + arch/arm/boot/dts/picoxcell-pc3x2.dtsi | 4 + + arch/arm/boot/dts/qcom/pmxpoorwills.dtsi | 40 +- + .../boot/dts/qcom/sa415m-ccard-pcie-ep.dts | 6 + + .../arm/boot/dts/qcom/sa415m-ccard-usb-ep.dts | 4 + + arch/arm/boot/dts/qcom/sa415m-ccard.dtsi | 2 - + arch/arm/boot/dts/qcom/sa415m-ttp-pcie-ep.dts | 8 +- + arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dtsi | 4 +- + arch/arm/boot/dts/qcom/sdxpoorwills-pm.dtsi | 3 +- + arch/arm/boot/dts/qcom/sdxpoorwills.dtsi | 6 +- + arch/arm/boot/dts/r8a7779-marzen.dts | 2 +- + arch/arm/boot/dts/r8a7779.dtsi | 1 + + arch/arm/boot/dts/rk3036-kylin.dts | 2 +- + arch/arm/boot/dts/rk3288.dtsi | 10 +- + arch/arm/boot/dts/s5pv210.dtsi | 127 +- + arch/arm/boot/dts/sama5d4.dtsi | 2 +- + arch/arm/boot/dts/socfpga.dtsi | 2 +- + arch/arm/boot/dts/socfpga_arria10.dtsi | 4 +- + arch/arm/boot/dts/ste-nomadik-stn8815.dtsi | 4 +- + arch/arm/boot/dts/stm32f429.dtsi | 2 +- + arch/arm/boot/dts/sun4i-a10.dtsi | 2 +- + arch/arm/boot/dts/versatile-ab.dts | 5 +- + arch/arm/boot/dts/versatile-pb.dts | 2 +- + arch/arm/configs/msm8909-perf_defconfig | 39 +- + arch/arm/configs/msm8909_defconfig | 38 +- + arch/arm/configs/msm8937go-perf_defconfig | 75 +- + arch/arm/configs/msm8937go_defconfig | 13 +- + arch/arm/configs/sa415m-perf_defconfig | 1 - + arch/arm/configs/sa415m_defconfig | 1 - + arch/arm/configs/sdm670-perf_defconfig | 6 + + .../configs/vendor/msm8909go-perf_defconfig | 555 +++++++ + arch/arm/configs/vendor/msm8909go_defconfig | 596 +++++++ + arch/arm/include/asm/percpu.h | 2 + + arch/arm/include/asm/unified.h | 77 +- + arch/arm/kernel/head.S | 6 +- + arch/arm/kernel/hw_breakpoint.c | 123 +- + arch/arm/kernel/ptrace.c | 4 +- + arch/arm/kernel/setup.c | 16 +- + arch/arm/kernel/signal.c | 14 +- + arch/arm/kernel/stacktrace.c | 24 + + arch/arm/kvm/mmu.c | 8 +- + arch/arm/mach-at91/pm.c | 11 +- + arch/arm/mach-footbridge/dc21285.c | 12 +- + arch/arm/mach-imx/pm-imx5.c | 6 +- + arch/arm/mach-imx/pm-imx6.c | 10 +- + arch/arm/mach-imx/suspend-imx53.S | 4 +- + arch/arm/mach-imx/suspend-imx6.S | 1 + + arch/arm/mach-integrator/Kconfig | 7 +- + 
arch/arm/mach-keystone/keystone.c | 4 +- + arch/arm/mach-omap2/board-n8x0.c | 2 +- + arch/arm/mach-omap2/omap_device.c | 8 +- + arch/arm/mach-socfpga/pm.c | 8 +- + arch/arm/mach-tegra/tegra.c | 4 +- + arch/arm/mm/cache-l2x0.c | 16 +- + arch/arm/mm/proc-macros.S | 3 +- + arch/arm/plat-samsung/Kconfig | 1 + + arch/arm/probes/kprobes/core.c | 6 + + arch/arm/probes/kprobes/test-thumb.c | 10 +- + arch/arm/probes/uprobes/core.c | 4 +- + arch/arm/xen/p2m.c | 33 +- + arch/arm64/Kconfig.platforms | 1 + + arch/arm64/boot/dts/arm/juno-base.dtsi | 6 +- + .../boot/dts/exynos/exynos7-espresso.dts | 3 +- + arch/arm64/boot/dts/exynos/exynos7.dtsi | 4 +- + .../arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 1 + + arch/arm64/boot/dts/mediatek/mt8173.dtsi | 2 +- + arch/arm64/boot/dts/nvidia/tegra210.dtsi | 1 + + arch/arm64/boot/dts/qcom/Makefile | 10 +- + .../batterydata-qrd-skue-4v35-2000mah.dtsi | 121 ++ + ...si-panel-arglass-boe-dual-1080p-video.dtsi | 99 ++ + .../qcom/dsi-panel-hx8379c-fwvga-video.dtsi | 90 + + ...en3-batterydata-goertek-merlin-230mah.dtsi | 80 + + arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi | 3 +- + .../arm64/boot/dts/qcom/msm8905-qrd-skub.dtsi | 18 +- + .../boot/dts/qcom/msm8909-1gb-qrd-skue.dts | 20 + + .../dts/qcom/msm8909-camera-sensor-skue.dtsi | 129 ++ + .../boot/dts/qcom/msm8909-coresight.dtsi | 30 +- + .../boot/dts/qcom/msm8909-mdss-panels.dtsi | 1 + + arch/arm64/boot/dts/qcom/msm8909-pm8909.dtsi | 4 - + .../arm64/boot/dts/qcom/msm8909-qrd-skue.dtsi | 490 ++++++ + arch/arm64/boot/dts/qcom/msm8909-qrd.dtsi | 9 + + arch/arm64/boot/dts/qcom/msm8909-vidc.dtsi | 4 +- + arch/arm64/boot/dts/qcom/msm8909.dtsi | 10 + + arch/arm64/boot/dts/qcom/msm8916-pins.dtsi | 12 +- + arch/arm64/boot/dts/qcom/msm8916.dtsi | 8 +- + .../qcom/qcs605-pm660-pm8005-regulator.dtsi | 4 +- + arch/arm64/boot/dts/qcom/qcs605.dtsi | 17 + + arch/arm64/boot/dts/qcom/qm215.dtsi | 22 +- + arch/arm64/boot/dts/qcom/sdm429-bg-soc.dtsi | 2 + + arch/arm64/boot/dts/qcom/sdm429-spyro.dtsi | 1 + + .../qcom/sdm670-camera-sensor-arglass.dtsi | 431 +++++ + .../dts/qcom/sdm670-camera-sensor-svr.dtsi | 2 +- + arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi | 7 +- + .../boot/dts/qcom/sdm670-sde-display.dtsi | 45 +- + arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi | 7 +- + arch/arm64/boot/dts/qcom/sdm845-v2.dtsi | 3 +- + arch/arm64/boot/dts/qcom/sdm845.dtsi | 10 +- + .../boot/dts/qcom/sdw3100-apq8009w-alpha.dts | 4 +- + .../boot/dts/qcom/sdw3100-apq8009w-wtp.dts | 4 +- + .../dts/qcom/sdw3100-msm8909w-1gb-wtp.dts | 4 +- + .../boot/dts/qcom/sdw3100-msm8909w-wtp.dts | 4 +- + .../boot/dts/qcom/sxr1130-arglass-overlay.dts | 35 + + .../qcom/sxr1130-arglass-pinctrl-overlay.dtsi | 150 ++ + arch/arm64/boot/dts/qcom/sxr1130-arglass.dts | 72 + + arch/arm64/boot/dts/qcom/sxr1130-arglass.dtsi | 536 ++++++ + arch/arm64/boot/dts/qcom/sxr1130-svr.dtsi | 28 +- + arch/arm64/boot/dts/rockchip/rk3399.dtsi | 3 + + arch/arm64/boot/dts/xilinx/zynqmp.dtsi | 4 +- + arch/arm64/configs/cuttlefish_defconfig | 2 +- + arch/arm64/configs/sdm670-perf_defconfig | 3 +- + arch/arm64/configs/sdm670_defconfig | 1 - + arch/arm64/configs/sdm845-perf_defconfig | 1 - + arch/arm64/configs/sdm845_defconfig | 1 - + arch/arm64/include/asm/atomic_ll_sc.h | 109 +- + arch/arm64/include/asm/atomic_lse.h | 46 +- + arch/arm64/include/asm/checksum.h | 5 +- + arch/arm64/include/asm/cmpxchg.h | 116 +- + arch/arm64/include/asm/debug-monitors.h | 2 + + arch/arm64/include/asm/futex.h | 55 +- + arch/arm64/include/asm/kvm_arm.h | 3 +- + arch/arm64/include/asm/kvm_asm.h | 43 + + 
arch/arm64/include/asm/kvm_host.h | 8 +- + arch/arm64/include/asm/numa.h | 3 + + arch/arm64/include/asm/pgtable-prot.h | 2 +- + arch/arm64/include/asm/pgtable.h | 7 +- + arch/arm64/kernel/debug-monitors.c | 24 +- + arch/arm64/kernel/head.S | 1 + + arch/arm64/kernel/kgdb.c | 2 +- + arch/arm64/kernel/perf_event.c | 50 +- + arch/arm64/kernel/perf_regs.c | 25 +- + arch/arm64/kernel/psci.c | 5 +- + arch/arm64/kernel/ptrace.c | 4 +- + arch/arm64/kernel/traps.c | 8 - + arch/arm64/kernel/vdso/vdso.lds.S | 8 +- + arch/arm64/kernel/vmlinux.lds.S | 8 + + arch/arm64/kvm/hyp/entry.S | 26 +- + arch/arm64/kvm/hyp/hyp-entry.S | 63 +- + arch/arm64/kvm/hyp/switch.c | 41 +- + arch/arm64/kvm/sys_regs.c | 11 +- + arch/arm64/mm/numa.c | 6 +- + arch/h8300/kernel/asm-offsets.c | 3 + + arch/hexagon/kernel/vmlinux.lds.S | 7 +- + arch/ia64/include/asm/ptrace.h | 8 +- + arch/ia64/include/asm/syscall.h | 2 +- + arch/ia64/kernel/Makefile | 2 +- + arch/ia64/kernel/mca.c | 2 +- + arch/ia64/kernel/mca_drv.c | 2 +- + arch/ia64/kernel/ptrace.c | 24 +- + arch/ia64/mm/discontig.c | 6 +- + arch/ia64/scripts/unwcheck.py | 2 +- + arch/m68k/include/asm/m53xxacr.h | 6 +- + arch/m68k/include/asm/mac_via.h | 1 + + arch/m68k/mac/config.c | 21 +- + arch/m68k/mac/iop.c | 21 +- + arch/m68k/mac/via.c | 6 +- + arch/m68k/q40/config.c | 1 + + arch/mips/Kconfig | 1 + + arch/mips/Makefile | 15 +- + arch/mips/alchemy/board-xxs1500.c | 1 + + arch/mips/alchemy/common/clock.c | 9 +- + arch/mips/bcm47xx/Kconfig | 1 + + arch/mips/boot/compressed/Makefile | 6 +- + arch/mips/boot/compressed/decompress.c | 5 +- + arch/mips/include/asm/cpu-type.h | 1 + + arch/mips/include/asm/div64.h | 55 +- + arch/mips/include/asm/hugetlb.h | 8 +- + arch/mips/include/asm/kvm_host.h | 6 +- + arch/mips/kernel/genex.S | 6 +- + arch/mips/kernel/mips-cm.c | 6 +- + arch/mips/kernel/relocate.c | 10 +- + arch/mips/kernel/setup.c | 10 + + arch/mips/kernel/smp-bmips.c | 2 + + arch/mips/kernel/time.c | 65 + + arch/mips/kernel/topology.c | 2 +- + arch/mips/kernel/traps.c | 1 + + arch/mips/kernel/vmlinux.lds.S | 3 +- + arch/mips/lantiq/irq.c | 2 +- + arch/mips/lib/mips-atomic.c | 12 +- + arch/mips/mm/c-r4k.c | 6 +- + arch/mips/mm/tlb-r4k.c | 1 + + arch/mips/mti-malta/malta-platform.c | 3 +- + arch/mips/pci/pci-legacy.c | 9 +- + arch/mips/ralink/of.c | 2 + + arch/mips/sni/a20r.c | 9 +- + arch/mips/vdso/genvdso.c | 10 + + arch/mips/vdso/vdso.h | 2 +- + arch/openrisc/include/asm/barrier.h | 9 + + arch/openrisc/kernel/entry.S | 4 +- + arch/openrisc/kernel/setup.c | 2 + + arch/parisc/include/asm/atomic.h | 2 + + arch/parisc/include/asm/cmpxchg.h | 2 + + arch/parisc/lib/bitops.c | 12 + + arch/powerpc/Kconfig | 2 +- + arch/powerpc/Kconfig.debug | 1 + + arch/powerpc/boot/devtree.c | 59 +- + arch/powerpc/boot/dts/fsl/p1010si-post.dtsi | 8 + + arch/powerpc/boot/dts/fsl/p2041si-post.dtsi | 16 + + arch/powerpc/boot/ns16550.c | 9 +- + arch/powerpc/configs/pasemi_defconfig | 1 - + arch/powerpc/configs/ppc6xx_defconfig | 1 - + arch/powerpc/include/asm/barrier.h | 2 + + arch/powerpc/include/asm/book3s/32/pgtable.h | 4 +- + .../powerpc/include/asm/book3s/64/kup-radix.h | 23 + + arch/powerpc/include/asm/book3s/64/pgtable.h | 23 +- + arch/powerpc/include/asm/code-patching.h | 2 +- + arch/powerpc/include/asm/cpu_has_feature.h | 4 +- + arch/powerpc/include/asm/cputable.h | 5 - + arch/powerpc/include/asm/dcr-native.h | 8 +- + arch/powerpc/include/asm/exception-64s.h | 13 +- + arch/powerpc/include/asm/feature-fixups.h | 19 + + arch/powerpc/include/asm/futex.h | 4 + + arch/powerpc/include/asm/kprobes.h | 
3 +- + arch/powerpc/include/asm/kup.h | 40 + + arch/powerpc/include/asm/nohash/pgtable.h | 4 +- + arch/powerpc/include/asm/percpu.h | 4 +- + arch/powerpc/include/asm/ps3.h | 2 + + arch/powerpc/include/asm/reg.h | 2 +- + arch/powerpc/include/asm/security_features.h | 7 + + arch/powerpc/include/asm/setup.h | 4 + + arch/powerpc/include/asm/uaccess.h | 143 +- + arch/powerpc/include/uapi/asm/errno.h | 1 + + arch/powerpc/kernel/dma-iommu.c | 3 +- + arch/powerpc/kernel/eeh.c | 11 +- + arch/powerpc/kernel/exceptions-64s.S | 130 +- + arch/powerpc/kernel/head_8xx.S | 8 +- + arch/powerpc/kernel/iommu.c | 4 +- + arch/powerpc/kernel/machine_kexec.c | 8 +- + arch/powerpc/kernel/prom.c | 2 +- + arch/powerpc/kernel/setup_64.c | 120 ++ + arch/powerpc/kernel/sysfs.c | 42 +- + arch/powerpc/kernel/tau_6xx.c | 82 +- + arch/powerpc/kernel/vdso.c | 2 +- + arch/powerpc/kernel/vmlinux.lds.S | 14 + + arch/powerpc/kvm/book3s_rtas.c | 25 +- + arch/powerpc/lib/checksum_wrappers.c | 4 + + arch/powerpc/lib/feature-fixups.c | 119 ++ + arch/powerpc/lib/string.S | 4 +- + arch/powerpc/lib/string_64.S | 6 +- + arch/powerpc/mm/fault.c | 7 +- + arch/powerpc/perf/core-book3s.c | 27 +- + arch/powerpc/perf/hv-24x7.c | 10 - + arch/powerpc/perf/hv-gpci-requests.h | 6 +- + arch/powerpc/perf/isa207-common.c | 14 +- + arch/powerpc/platforms/52xx/lite5200_sleep.S | 2 +- + arch/powerpc/platforms/Kconfig | 9 +- + arch/powerpc/platforms/cell/Kconfig | 1 + + arch/powerpc/platforms/cell/spufs/file.c | 113 +- + arch/powerpc/platforms/powernv/opal-dump.c | 50 +- + arch/powerpc/platforms/powernv/opal-elog.c | 33 +- + arch/powerpc/platforms/powernv/setup.c | 15 + + arch/powerpc/platforms/powernv/smp.c | 2 +- + arch/powerpc/platforms/ps3/mm.c | 34 +- + arch/powerpc/platforms/pseries/dlpar.c | 7 +- + arch/powerpc/platforms/pseries/hotplug-cpu.c | 3 - + .../platforms/pseries/hotplug-memory.c | 2 +- + arch/powerpc/platforms/pseries/pci_dlpar.c | 4 +- + arch/powerpc/platforms/pseries/ras.c | 6 +- + arch/powerpc/platforms/pseries/rng.c | 1 + + arch/powerpc/platforms/pseries/setup.c | 8 + + arch/powerpc/platforms/pseries/suspend.c | 1 - + arch/powerpc/sysdev/mpic_msgr.c | 2 +- + arch/powerpc/sysdev/xics/icp-hv.c | 1 + + arch/powerpc/xmon/nonstdio.c | 2 +- + arch/s390/Kconfig | 2 +- + arch/s390/include/asm/ftrace.h | 1 + + arch/s390/include/asm/kvm_host.h | 8 +- + arch/s390/include/asm/percpu.h | 28 +- + arch/s390/include/asm/syscall.h | 12 +- + arch/s390/kernel/cpcmd.c | 6 +- + arch/s390/kernel/dis.c | 2 +- + arch/s390/kernel/early.c | 2 + + arch/s390/kernel/entry.S | 1 + + arch/s390/kernel/ftrace.c | 2 + + arch/s390/kernel/mcount.S | 4 +- + arch/s390/kernel/perf_cpum_sf.c | 2 +- + arch/s390/kernel/ptrace.c | 31 +- + arch/s390/kernel/setup.c | 8 +- + arch/s390/kernel/smp.c | 2 +- + arch/s390/mm/hugetlbpage.c | 2 +- + arch/s390/net/bpf_jit_comp.c | 2 +- + arch/sh/boards/mach-landisk/setup.c | 3 + + arch/sh/drivers/dma/Kconfig | 3 +- + arch/sh/kernel/entry-common.S | 6 +- + arch/sparc/Kconfig | 2 +- + arch/sparc/kernel/ptrace_32.c | 233 ++- + arch/sparc/kernel/ptrace_64.c | 17 +- + arch/sparc/kernel/smp_64.c | 65 +- + arch/sparc/lib/memset.S | 1 + + arch/um/drivers/chan_user.c | 3 +- + arch/um/drivers/slip_user.c | 3 +- + arch/um/drivers/xterm.c | 5 + + arch/um/kernel/dyn.lds.S | 6 + + arch/um/kernel/sigio.c | 6 +- + arch/um/kernel/uml.lds.S | 6 + + arch/x86/Kconfig | 1 + + arch/x86/Makefile | 6 +- + arch/x86/boot/Makefile | 2 +- + arch/x86/boot/compressed/Makefile | 2 + + arch/x86/boot/compressed/head_32.S | 5 +- + arch/x86/boot/compressed/head_64.S | 
1 + + arch/x86/configs/i386_defconfig | 2 +- + arch/x86/configs/x86_64_cuttlefish_defconfig | 4 +- + arch/x86/configs/x86_64_defconfig | 2 +- + arch/x86/entry/entry_64.S | 2 +- + arch/x86/events/amd/ibs.c | 53 +- + arch/x86/events/amd/iommu.c | 6 +- + arch/x86/events/intel/cstate.c | 6 +- + arch/x86/events/intel/ds.c | 2 +- + arch/x86/events/intel/pt.c | 4 +- + arch/x86/events/intel/rapl.c | 14 +- + arch/x86/events/intel/uncore.c | 4 +- + arch/x86/events/intel/uncore.h | 12 +- + arch/x86/events/perf_event.h | 3 +- + arch/x86/include/asm/apic.h | 10 - + arch/x86/include/asm/archrandom.h | 8 +- + arch/x86/include/asm/barrier.h | 18 + + arch/x86/include/asm/bitops.h | 29 +- + arch/x86/include/asm/cpufeatures.h | 2 +- + arch/x86/include/asm/fpu/internal.h | 49 +- + arch/x86/include/asm/insn.h | 15 + + arch/x86/include/asm/msr-index.h | 1 + + arch/x86/include/asm/nospec-branch.h | 5 +- + arch/x86/include/asm/percpu.h | 2 +- + arch/x86/include/asm/pkeys.h | 5 + + arch/x86/include/asm/processor.h | 9 - + arch/x86/include/asm/proto.h | 2 + + arch/x86/include/asm/svm.h | 2 + + arch/x86/include/asm/thread_info.h | 23 +- + arch/x86/include/asm/tlbflush.h | 11 +- + arch/x86/kernel/apic/apic.c | 4 + + arch/x86/kernel/apic/io_apic.c | 10 + + arch/x86/kernel/apic/x2apic_cluster.c | 6 +- + arch/x86/kernel/apic/x2apic_phys.c | 6 +- + arch/x86/kernel/cpu/bugs.c | 108 +- + arch/x86/kernel/cpu/common.c | 2 +- + arch/x86/kernel/cpu/microcode/intel.c | 48 +- + arch/x86/kernel/cpu/mtrr/generic.c | 6 +- + arch/x86/kernel/crash.c | 3 +- + arch/x86/kernel/fpu/regset.c | 2 +- + arch/x86/kernel/fpu/signal.c | 18 +- + arch/x86/kernel/fpu/xstate.c | 47 +- + arch/x86/kernel/i8259.c | 2 +- + arch/x86/kernel/kexec-bzimage64.c | 3 +- + arch/x86/kernel/kprobes/core.c | 17 +- + arch/x86/kernel/module.c | 1 + + arch/x86/kernel/process.c | 28 +- + arch/x86/kernel/process.h | 2 +- + arch/x86/kernel/reboot.c | 46 +- + arch/x86/kernel/signal.c | 24 +- + arch/x86/kernel/time.c | 4 - + arch/x86/kernel/uprobes.c | 10 +- + arch/x86/kernel/vmlinux.lds.S | 7 +- + arch/x86/kvm/cpuid.c | 8 +- + arch/x86/kvm/emulate.c | 10 +- + arch/x86/kvm/ioapic.c | 2 +- + arch/x86/kvm/ioapic.h | 4 +- + arch/x86/kvm/lapic.c | 2 +- + arch/x86/kvm/mmu.c | 14 +- + arch/x86/kvm/mmutrace.h | 2 +- + arch/x86/kvm/pmu_intel.c | 2 +- + arch/x86/kvm/svm.c | 16 +- + arch/x86/kvm/vmx.c | 3 +- + arch/x86/kvm/x86.c | 22 +- + arch/x86/lib/msr-smp.c | 4 +- + arch/x86/math-emu/wm_sqrt.S | 2 +- + arch/x86/mm/init.c | 2 - + arch/x86/mm/pgtable.c | 2 + + arch/x86/net/bpf_jit_comp.c | 11 +- + arch/x86/pci/fixup.c | 4 + + arch/x86/tools/chkobjdump.awk | 1 + + arch/x86/tools/relocs.c | 12 +- + arch/x86/xen/p2m.c | 59 +- + arch/x86/xen/spinlock.c | 12 +- + arch/xtensa/kernel/perf_event.c | 2 +- + arch/xtensa/kernel/setup.c | 3 +- + arch/xtensa/kernel/xtensa_ksyms.c | 4 +- + arch/xtensa/mm/cache.c | 14 + + arch/xtensa/platforms/iss/simdisk.c | 1 - + block/blk-mq.c | 8 +- + block/blk-settings.c | 12 + + block/genhd.c | 9 +- + build.config.aarch64 | 1 + + crypto/af_alg.c | 30 +- + crypto/algboss.c | 2 - + crypto/algif_aead.c | 13 +- + crypto/algif_hash.c | 9 +- + crypto/algif_skcipher.c | 9 +- + crypto/ecdh_helper.c | 3 + + crypto/shash.c | 18 +- + drivers/acpi/Makefile | 5 + + drivers/acpi/acpi_amba.c | 1 + + drivers/acpi/acpi_configfs.c | 7 +- + drivers/acpi/acpi_dbg.c | 3 + + drivers/acpi/acpi_extlog.c | 6 +- + drivers/acpi/acpi_pnp.c | 3 + + drivers/acpi/acpica/exprep.c | 4 - + drivers/acpi/acpica/utdelete.c | 6 +- + drivers/acpi/bus.c | 1 + + drivers/acpi/cppc_acpi.c | 4 +- 
+ drivers/acpi/custom_method.c | 4 +- + drivers/acpi/device_pm.c | 2 +- + drivers/acpi/device_sysfs.c | 22 +- + drivers/acpi/ec.c | 16 +- + drivers/acpi/evged.c | 22 +- + drivers/acpi/internal.h | 8 +- + drivers/acpi/nfit/core.c | 5 +- + drivers/acpi/processor_idle.c | 40 + + drivers/acpi/resource.c | 2 +- + drivers/acpi/scan.c | 122 +- + drivers/acpi/sysfs.c | 8 +- + drivers/acpi/thermal.c | 55 +- + drivers/acpi/video_detect.c | 9 + + drivers/amba/bus.c | 20 +- + drivers/android/binder.c | 17 +- + drivers/ata/acard-ahci.c | 6 +- + drivers/ata/ahci_brcm.c | 14 +- + drivers/ata/ahci_sunxi.c | 2 +- + drivers/ata/libahci.c | 6 +- + drivers/ata/libahci_platform.c | 4 +- + drivers/ata/libata-core.c | 25 +- + drivers/ata/libata-scsi.c | 17 +- + drivers/ata/libata-sff.c | 12 +- + drivers/ata/pata_arasan_cf.c | 15 +- + drivers/ata/pata_ep93xx.c | 2 +- + drivers/ata/pata_ixp4xx_cf.c | 6 +- + drivers/ata/pata_macio.c | 6 +- + drivers/ata/pata_octeon_cf.c | 5 +- + drivers/ata/pata_pxa.c | 8 +- + drivers/ata/pata_rb532_cf.c | 6 +- + drivers/ata/pdc_adma.c | 7 +- + drivers/ata/sata_fsl.c | 4 +- + drivers/ata/sata_highbank.c | 6 +- + drivers/ata/sata_inic162x.c | 4 +- + drivers/ata/sata_mv.c | 38 +- + drivers/ata/sata_nv.c | 18 +- + drivers/ata/sata_promise.c | 6 +- + drivers/ata/sata_qstor.c | 8 +- + drivers/ata/sata_rcar.c | 8 +- + drivers/ata/sata_sil.c | 8 +- + drivers/ata/sata_sil24.c | 6 +- + drivers/ata/sata_sx4.c | 6 +- + drivers/atm/atmtcp.c | 10 +- + drivers/atm/eni.c | 5 +- + drivers/atm/firestream.c | 1 + + drivers/atm/idt77105.c | 4 +- + drivers/atm/idt77252.c | 2 +- + drivers/atm/iphase.c | 2 +- + drivers/atm/lanai.c | 5 +- + drivers/atm/nicstar.c | 28 +- + drivers/atm/uPD98402.c | 2 +- + drivers/base/core.c | 15 +- + drivers/base/dd.c | 5 +- + drivers/base/platform.c | 2 + + drivers/base/power/main.c | 16 +- + drivers/base/regmap/regmap-debugfs.c | 6 + + drivers/base/regmap/regmap.c | 2 +- + drivers/block/Kconfig | 1 + + drivers/block/brd.c | 1 - + drivers/block/drbd/drbd_bitmap.c | 2 +- + drivers/block/loop.c | 369 +++-- + drivers/block/ps3disk.c | 1 - + drivers/block/rbd.c | 21 +- + drivers/block/rsxx/core.c | 9 +- + drivers/block/virtio_blk.c | 3 + + drivers/block/xen-blkback/blkback.c | 52 +- + drivers/block/xen-blkback/xenbus.c | 9 +- + drivers/block/zram/zram_drv.c | 2 +- + drivers/block/zram/zram_drv.h | 1 - + drivers/bluetooth/bluetooth-power.c | 9 + + drivers/bluetooth/btusb.c | 5 + + drivers/bus/mips_cdmm.c | 4 +- + drivers/bus/omap_l3_noc.c | 4 +- + drivers/bus/qcom-ebi2.c | 4 +- + drivers/cdrom/gdrom.c | 13 +- + drivers/char/adsprpc.c | 54 +- + drivers/char/adsprpc_shared.h | 2 +- + drivers/char/agp/Kconfig | 2 +- + drivers/char/agp/intel-gtt.c | 8 +- + drivers/char/diag/diag_masks.c | 5 +- + drivers/char/diag/diag_memorydevice.c | 22 +- + drivers/char/diag/diag_usb.c | 4 +- + drivers/char/diag/diagchar.h | 15 + + drivers/char/diag/diagchar_core.c | 155 +- + drivers/char/diag/diagfwd.c | 32 +- + drivers/char/diag/diagfwd.h | 5 +- + drivers/char/diag/diagfwd_cntl.c | 5 +- + drivers/char/diag/diagmem.c | 8 +- + drivers/char/hpet.c | 4 + + drivers/char/ipmi/ipmi_watchdog.c | 22 +- + drivers/char/pcmcia/cm4000_cs.c | 4 + + drivers/char/random.c | 2 +- + drivers/char/tlclk.c | 17 +- + drivers/char/tpm/tpm_ibmvtpm.c | 9 + + drivers/char/tpm/tpm_ibmvtpm.h | 1 + + drivers/char/ttyprintk.c | 11 + + drivers/char/virtio_console.c | 7 +- + drivers/clk/at91/clk-main.c | 11 +- + drivers/clk/bcm/clk-bcm2835.c | 14 +- + drivers/clk/clk-s2mps11.c | 1 + + drivers/clk/clk.c | 30 +- + 
drivers/clk/meson/clk-pll.c | 2 +- + drivers/clk/mvebu/armada-37xx-xtal.c | 4 +- + drivers/clk/qcom/gcc-msm8916.c | 8 +- + drivers/clk/rockchip/clk-rk3228.c | 2 +- + drivers/clk/samsung/clk-exynos4.c | 4 +- + drivers/clk/samsung/clk-exynos5433.c | 3 +- + drivers/clk/samsung/clk-exynos7.c | 7 +- + drivers/clk/sirf/clk-atlas6.c | 2 +- + drivers/clk/socfpga/clk-gate-a10.c | 1 + + drivers/clk/socfpga/clk-gate.c | 2 +- + drivers/clk/st/clk-flexgen.c | 1 + + drivers/clk/sunxi/clk-sunxi.c | 2 +- + drivers/clk/tegra/clk-id.h | 1 + + drivers/clk/tegra/clk-pll.c | 6 +- + drivers/clk/tegra/clk-tegra-periph.c | 2 +- + drivers/clk/ti/adpll.c | 11 +- + drivers/clk/ti/clockdomain.c | 2 + + drivers/clk/ti/composite.c | 1 + + drivers/clk/ti/fapll.c | 11 +- + drivers/clk/uniphier/clk-uniphier-mux.c | 4 +- + drivers/clocksource/arm_arch_timer.c | 23 +- + drivers/clocksource/cadence_ttc_timer.c | 18 +- + drivers/clocksource/dw_apb_timer_of.c | 6 +- + drivers/clocksource/h8300_timer8.c | 2 +- + drivers/clocksource/mxs_timer.c | 5 +- + drivers/cpufreq/acpi-cpufreq.c | 3 +- + drivers/cpufreq/highbank-cpufreq.c | 7 + + drivers/cpufreq/loongson1-cpufreq.c | 1 + + drivers/cpufreq/powernow-k8.c | 9 +- + drivers/cpufreq/powernv-cpufreq.c | 22 +- + drivers/cpufreq/scpi-cpufreq.c | 1 + + drivers/cpufreq/sti-cpufreq.c | 13 +- + drivers/cpuidle/sysfs.c | 6 +- + drivers/crypto/ccp/ccp-dev.h | 1 + + drivers/crypto/ccp/ccp-ops.c | 42 +- + drivers/crypto/ixp4xx_crypto.c | 4 +- + drivers/crypto/nx/nx-842-pseries.c | 9 +- + drivers/crypto/omap-aes.c | 3 +- + drivers/crypto/omap-sham.c | 67 +- + drivers/crypto/qat/qat_c3xxxvf/adf_drv.c | 4 +- + drivers/crypto/qat/qat_c62xvf/adf_drv.c | 4 +- + drivers/crypto/qat/qat_common/adf_isr.c | 29 +- + drivers/crypto/qat/qat_common/adf_transport.c | 1 + + drivers/crypto/qat/qat_common/adf_vf_isr.c | 17 +- + drivers/crypto/qat/qat_common/qat_algs.c | 10 +- + drivers/crypto/qat/qat_common/qat_hal.c | 6 +- + drivers/crypto/qat/qat_common/qat_uclo.c | 10 +- + drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 4 +- + drivers/crypto/talitos.c | 8 +- + drivers/crypto/ux500/hash/hash_core.c | 1 + + drivers/devfreq/tegra-devfreq.c | 4 +- + drivers/dma/at_hdmac.c | 2 + + drivers/dma/dma-jz4780.c | 7 +- + drivers/dma/dw/Kconfig | 2 + + drivers/dma/fsl-edma.c | 7 + + drivers/dma/fsldma.c | 6 + + drivers/dma/ioat/dma.c | 12 + + drivers/dma/ioat/dma.h | 2 - + drivers/dma/of-dma.c | 17 +- + drivers/dma/pl330.c | 10 +- + drivers/dma/qcom/Kconfig | 1 + + drivers/dma/qcom/hidma_mgmt.c | 14 + + drivers/dma/sh/usb-dmac.c | 2 +- + drivers/dma/ste_dma40.c | 3 + + drivers/dma/tegra20-apb-dma.c | 3 +- + drivers/dma/tegra210-adma.c | 5 +- + drivers/dma/xilinx/xilinx_dma.c | 12 +- + drivers/dma/xilinx/zynqmp_dma.c | 24 +- + drivers/edac/amd64_edac.c | 2 + + drivers/edac/edac_device_sysfs.c | 1 + + drivers/edac/edac_pci_sysfs.c | 2 +- + drivers/edac/i5100_edac.c | 11 +- + drivers/edac/ie31200_edac.c | 50 +- + drivers/extcon/extcon-adc-jack.c | 3 +- + drivers/extcon/extcon-arizona.c | 17 +- + drivers/extcon/extcon-max77693.c | 2 +- + drivers/extcon/extcon-max8997.c | 1 + + drivers/extcon/extcon-sm5502.c | 1 - + drivers/extcon/extcon.c | 1 + + drivers/firewire/nosy.c | 9 +- + drivers/firmware/Kconfig | 1 + + drivers/firmware/efi/Kconfig | 11 + + drivers/firmware/efi/cper.c | 4 +- + drivers/firmware/efi/efi.c | 2 +- + drivers/firmware/efi/efivars.c | 4 +- + drivers/firmware/efi/esrt.c | 2 +- + drivers/firmware/efi/memattr.c | 5 - + drivers/firmware/qemu_fw_cfg.c | 8 +- + drivers/gpio/gpio-pcf857x.c | 2 +- + 
drivers/gpio/gpio-tc3589x.c | 2 +- + drivers/gpio/gpio-zynq.c | 5 +- + drivers/gpio/gpiolib-of.c | 2 +- + .../gpu/drm/amd/amdgpu/amdgpu_connectors.c | 16 +- + drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 3 +- + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 +- + drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 6 +- + drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4 +- + drivers/gpu/drm/amd/amdgpu/atom.c | 4 +- + drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 27 +- + drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 20 +- + drivers/gpu/drm/drm_auth.c | 3 +- + drivers/gpu/drm/drm_debugfs.c | 8 +- + drivers/gpu/drm/drm_dp_aux_dev.c | 2 +- + drivers/gpu/drm/drm_dp_mst_topology.c | 58 +- + drivers/gpu/drm/drm_encoder_slave.c | 5 +- + drivers/gpu/drm/drm_gem.c | 10 +- + drivers/gpu/drm/drm_mipi_dsi.c | 6 +- + drivers/gpu/drm/gma500/cdv_intel_display.c | 2 + + drivers/gpu/drm/gma500/cdv_intel_dp.c | 4 +- + drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c | 22 +- + drivers/gpu/drm/gma500/psb_drv.c | 2 + + drivers/gpu/drm/gma500/psb_irq.c | 34 +- + drivers/gpu/drm/i915/i915_cmd_parser.c | 4 + + drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +- + drivers/gpu/drm/imx/imx-ldb.c | 17 +- + drivers/gpu/drm/imx/imx-tve.c | 20 +- + drivers/gpu/drm/mediatek/mtk_drm_drv.c | 7 +- + drivers/gpu/drm/msm/dsi-staging/dsi_panel.c | 2 +- + drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c | 2 +- + .../gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 10 +- + drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 3 +- + drivers/gpu/drm/msm/msm_fence.c | 2 +- + drivers/gpu/drm/msm/sde/sde_connector.c | 10 +- + drivers/gpu/drm/nouveau/nouveau_bo.c | 4 +- + drivers/gpu/drm/nouveau/nouveau_connector.c | 4 +- + drivers/gpu/drm/nouveau/nouveau_drm.c | 8 +- + drivers/gpu/drm/nouveau/nouveau_fbcon.c | 5 +- + drivers/gpu/drm/nouveau/nouveau_gem.c | 4 +- + .../gpu/drm/nouveau/nvkm/subdev/bios/shadow.c | 2 +- + .../gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c | 4 +- + .../drm/nouveau/nvkm/subdev/i2c/auxgm200.c | 12 +- + .../gpu/drm/omapdrm/dss/omapdss-boot-init.c | 4 +- + drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 1 + + drivers/gpu/drm/panel/panel-simple.c | 2 +- + drivers/gpu/drm/qxl/qxl_dumb.c | 2 + + drivers/gpu/drm/qxl/qxl_kms.c | 2 +- + drivers/gpu/drm/radeon/ci_dpm.c | 9 +- + drivers/gpu/drm/radeon/ni_dpm.c | 2 +- + drivers/gpu/drm/radeon/radeon_atombios.c | 6 +- + drivers/gpu/drm/radeon/radeon_connectors.c | 20 +- + drivers/gpu/drm/radeon/radeon_display.c | 4 +- + drivers/gpu/drm/radeon/radeon_drv.c | 4 +- + drivers/gpu/drm/radeon/radeon_kms.c | 5 +- + drivers/gpu/drm/radeon/radeon_uvd.c | 4 +- + drivers/gpu/drm/tegra/sor.c | 10 +- + drivers/gpu/drm/tilcdc/tilcdc_panel.c | 6 +- + drivers/gpu/drm/virtio/virtgpu_kms.c | 3 + + drivers/gpu/drm/virtio/virtgpu_vq.c | 10 +- + drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 8 +- + drivers/gpu/host1x/bus.c | 9 + + drivers/gpu/ipu-v3/ipu-image-convert.c | 58 +- + drivers/gpu/msm/a6xx_reg.h | 3 +- + drivers/gpu/msm/adreno.c | 10 + + drivers/gpu/msm/adreno.h | 1 + + drivers/gpu/msm/adreno_a6xx.c | 29 + + drivers/gpu/msm/adreno_a6xx_snapshot.c | 49 +- + drivers/gpu/msm/adreno_debugfs.c | 24 +- + drivers/gpu/msm/adreno_dispatch.c | 2 +- + drivers/gpu/msm/adreno_ioctl.c | 7 +- + drivers/gpu/msm/adreno_pm4types.h | 2 +- + drivers/gpu/msm/adreno_snapshot.c | 6 +- + drivers/gpu/msm/kgsl.c | 18 +- + drivers/gpu/msm/kgsl.h | 2 + + drivers/gpu/msm/kgsl_compat.c | 4 +- + drivers/gpu/msm/kgsl_debugfs.c | 2 +- + drivers/gpu/msm/kgsl_gmu.c | 15 +- + drivers/gpu/msm/kgsl_gmu.h | 2 +- + drivers/gpu/msm/kgsl_ioctl.c | 4 +- + drivers/gpu/msm/kgsl_iommu.c | 97 +- + 
drivers/gpu/msm/kgsl_mmu.c | 2 +- + drivers/gpu/msm/kgsl_trace.h | 10 +- + drivers/hid/hid-alps.c | 1 + + drivers/hid/hid-apple.c | 18 + + drivers/hid/hid-core.c | 21 +- + drivers/hid/hid-cypress.c | 44 +- + drivers/hid/hid-gt683r.c | 1 + + drivers/hid/hid-ids.h | 3 + + drivers/hid/hid-input.c | 4 + + drivers/hid/hid-magicmouse.c | 6 + + drivers/hid/hid-multitouch.c | 2 + + drivers/hid/hid-plantronics.c | 60 +- + drivers/hid/hid-roccat-kone.c | 23 +- + drivers/hid/hid-sensor-hub.c | 16 +- + drivers/hid/i2c-hid/i2c-hid-core.c | 26 +- + drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c | 16 + + drivers/hid/usbhid/hid-core.c | 2 +- + drivers/hid/usbhid/hid-pidff.c | 1 + + drivers/hid/usbhid/hiddev.c | 4 + + drivers/hsi/controllers/omap_ssi_core.c | 2 +- + drivers/hsi/hsi_core.c | 3 +- + drivers/hv/channel_mgmt.c | 31 +- + drivers/hwmon/acpi_power_meter.c | 4 +- + drivers/hwmon/applesmc.c | 31 +- + drivers/hwmon/emc2103.c | 2 +- + drivers/hwmon/lm80.c | 11 +- + drivers/hwmon/lm90.c | 42 +- + drivers/hwmon/max31722.c | 9 - + drivers/hwmon/max31790.c | 2 +- + drivers/hwmon/max6697.c | 7 +- + drivers/hwtracing/intel_th/gth.c | 4 +- + drivers/hwtracing/stm/heartbeat.c | 6 +- + drivers/i2c/algos/i2c-algo-pca.c | 38 +- + drivers/i2c/busses/i2c-brcmstb.c | 2 +- + drivers/i2c/busses/i2c-cadence.c | 14 +- + drivers/i2c/busses/i2c-cpm.c | 3 + + drivers/i2c/busses/i2c-eg20t.c | 1 + + drivers/i2c/busses/i2c-emev2.c | 5 +- + drivers/i2c/busses/i2c-i801.c | 6 +- + drivers/i2c/busses/i2c-imx.c | 86 +- + drivers/i2c/busses/i2c-jz4780.c | 5 +- + drivers/i2c/busses/i2c-mpc.c | 95 +- + drivers/i2c/busses/i2c-octeon-core.c | 2 +- + drivers/i2c/busses/i2c-piix4.c | 3 +- + drivers/i2c/busses/i2c-pxa.c | 13 +- + drivers/i2c/busses/i2c-qcom-geni.c | 13 +- + drivers/i2c/busses/i2c-qup.c | 3 +- + drivers/i2c/busses/i2c-rcar.c | 8 +- + drivers/i2c/busses/i2c-robotfuzz-osif.c | 4 +- + drivers/i2c/busses/i2c-s3c2410.c | 3 + + drivers/i2c/busses/i2c-sh7760.c | 5 +- + drivers/i2c/i2c-core.c | 5 +- + drivers/i2c/i2c-dev.c | 5 +- + drivers/i2c/muxes/i2c-mux-pca954x.c | 6 +- + drivers/ide/ide-cd.c | 8 +- + drivers/ide/ide-cd.h | 6 +- + drivers/iio/accel/bma180.c | 85 +- + drivers/iio/accel/bma220_spi.c | 10 +- + drivers/iio/accel/bmc150-accel-core.c | 15 +- + drivers/iio/accel/kxcjk-1013.c | 39 +- + drivers/iio/accel/kxsd9.c | 16 +- + drivers/iio/accel/mma7455_core.c | 16 +- + drivers/iio/accel/mma8452.c | 16 +- + drivers/iio/accel/stk8312.c | 12 +- + drivers/iio/accel/stk8ba50.c | 17 +- + drivers/iio/adc/ad7793.c | 1 + + drivers/iio/adc/ina2xx-adc.c | 11 +- + drivers/iio/adc/mcp3422.c | 16 +- + drivers/iio/adc/palmas_gpadc.c | 4 +- + drivers/iio/adc/rockchip_saradc.c | 2 +- + drivers/iio/adc/ti-adc081c.c | 11 +- + drivers/iio/adc/ti-adc12138.c | 13 +- + drivers/iio/adc/ti-ads1015.c | 22 +- + drivers/iio/adc/vf610_adc.c | 10 +- + drivers/iio/dac/ad5504.c | 4 +- + drivers/iio/dac/ad5592r-base.c | 4 +- + drivers/iio/gyro/bmg160_core.c | 10 +- + drivers/iio/gyro/itg3200_buffer.c | 15 +- + drivers/iio/health/afe4403.c | 13 +- + drivers/iio/health/afe4404.c | 8 +- + drivers/iio/humidity/am2315.c | 16 +- + drivers/iio/imu/adis16400_buffer.c | 5 +- + drivers/iio/imu/adis16400_core.c | 3 +- + drivers/iio/imu/adis_buffer.c | 8 +- + drivers/iio/imu/bmi160/bmi160_core.c | 12 +- + drivers/iio/industrialio-buffer.c | 6 +- + drivers/iio/light/hid-sensor-prox.c | 14 +- + drivers/iio/light/isl29125.c | 10 +- + drivers/iio/light/ltr501.c | 30 +- + drivers/iio/light/max44000.c | 12 +- + drivers/iio/light/si1145.c | 19 +- + drivers/iio/light/tcs3414.c | 
10 +- + drivers/iio/magnetometer/ak8974.c | 29 +- + drivers/iio/magnetometer/ak8975.c | 26 +- + drivers/iio/magnetometer/mag3110.c | 13 +- + drivers/iio/pressure/bmp280-core.c | 7 +- + drivers/iio/pressure/mpl3115.c | 9 +- + drivers/iio/pressure/ms5611_core.c | 11 +- + drivers/iio/pressure/zpa2326.c | 4 +- + .../iio/proximity/pulsedlight-lidar-lite-v2.c | 11 +- + drivers/infiniband/core/cm.c | 2 + + drivers/infiniband/core/cma.c | 3 +- + drivers/infiniband/core/cma_configfs.c | 13 + + drivers/infiniband/core/mad.c | 1 + + drivers/infiniband/core/ucma.c | 6 +- + drivers/infiniband/core/user_mad.c | 7 +- + drivers/infiniband/hw/cxgb4/cm.c | 9 +- + drivers/infiniband/hw/cxgb4/cq.c | 3 + + drivers/infiniband/hw/cxgb4/qp.c | 3 +- + drivers/infiniband/hw/hfi1/sdma.c | 9 +- + drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 1 - + drivers/infiniband/hw/i40iw/i40iw_cm.c | 2 +- + drivers/infiniband/hw/i40iw/i40iw_main.c | 5 - + drivers/infiniband/hw/i40iw/i40iw_pble.c | 6 +- + drivers/infiniband/hw/i40iw/i40iw_verbs.c | 36 +- + drivers/infiniband/hw/mlx4/cm.c | 3 + + drivers/infiniband/hw/mlx4/mad.c | 34 +- + drivers/infiniband/hw/mlx4/mlx4_ib.h | 2 + + drivers/infiniband/hw/mthca/mthca_cq.c | 12 +- + drivers/infiniband/hw/mthca/mthca_dev.h | 1 - + drivers/infiniband/hw/qedr/main.c | 2 +- + drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 3 + + drivers/infiniband/sw/rdmavt/vt.c | 4 +- + drivers/infiniband/sw/rxe/rxe.c | 5 +- + drivers/infiniband/sw/rxe/rxe_loc.h | 20 +- + drivers/infiniband/sw/rxe/rxe_mcast.c | 4 +- + drivers/infiniband/sw/rxe/rxe_mr.c | 3 +- + drivers/infiniband/sw/rxe/rxe_net.c | 53 +- + drivers/infiniband/sw/rxe/rxe_qp.c | 14 +- + drivers/infiniband/sw/rxe/rxe_recv.c | 11 +- + drivers/infiniband/sw/rxe/rxe_req.c | 7 +- + drivers/infiniband/sw/rxe/rxe_resp.c | 4 +- + drivers/infiniband/sw/rxe/rxe_verbs.c | 10 +- + drivers/infiniband/sw/rxe/rxe_verbs.h | 22 - + drivers/input/joydev.c | 9 +- + drivers/input/joystick/xpad.c | 20 +- + drivers/input/keyboard/cros_ec_keyb.c | 1 + + drivers/input/keyboard/ep93xx_keypad.c | 4 +- + drivers/input/keyboard/hil_kbd.c | 1 + + drivers/input/keyboard/matrix_keypad.c | 18 +- + drivers/input/keyboard/nspire-keypad.c | 56 +- + drivers/input/keyboard/omap4-keypad.c | 6 +- + drivers/input/keyboard/sunkbd.c | 41 +- + drivers/input/keyboard/twl4030_keypad.c | 8 +- + drivers/input/misc/adxl34x.c | 2 +- + drivers/input/misc/cm109.c | 7 +- + drivers/input/mouse/cyapa_gen6.c | 2 +- + drivers/input/mouse/psmouse-base.c | 2 +- + drivers/input/mouse/sentelic.c | 2 +- + drivers/input/serio/hil_mlc.c | 21 +- + drivers/input/serio/hp_sdc_mlc.c | 8 +- + drivers/input/serio/i8042-x86ia64io.h | 83 + + drivers/input/serio/i8042.c | 15 +- + drivers/input/serio/sun4i-ps2.c | 9 +- + drivers/input/touchscreen/ads7846.c | 8 +- + drivers/input/touchscreen/elo.c | 4 +- + drivers/input/touchscreen/goodix.c | 12 + + drivers/input/touchscreen/imx6ul_tsc.c | 27 +- + drivers/input/touchscreen/raydium_i2c_ts.c | 3 +- + drivers/input/touchscreen/silead.c | 44 +- + drivers/input/touchscreen/usbtouchscreen.c | 8 +- + drivers/iommu/amd_iommu.c | 12 +- + drivers/iommu/amd_iommu_types.h | 6 +- + drivers/iommu/dmar.c | 41 +- + drivers/iommu/exynos-iommu.c | 8 +- + drivers/iommu/intel-iommu.c | 6 + + drivers/iommu/intel_irq_remapping.c | 12 +- + drivers/iommu/iova.c | 4 +- + drivers/iommu/omap-iommu-debug.c | 3 + + drivers/ipack/carriers/tpci200.c | 33 +- + drivers/irqchip/irq-alpine-msi.c | 3 +- + drivers/irqchip/irq-gic.c | 13 +- + drivers/isdn/capi/kcapi.c | 4 +- + 
drivers/isdn/hardware/mISDN/hfcpci.c | 2 +- + drivers/isdn/hardware/mISDN/mISDNinfineon.c | 24 +- + drivers/isdn/hardware/mISDN/mISDNipac.c | 2 +- + drivers/isdn/hardware/mISDN/netjet.c | 1 - + drivers/isdn/mISDN/Kconfig | 1 + + drivers/leds/led-class.c | 1 + + drivers/leds/led-triggers.c | 10 +- + drivers/leds/leds-88pm860x.c | 14 +- + drivers/leds/leds-bcm6328.c | 2 +- + drivers/leds/leds-bcm6358.c | 2 +- + drivers/leds/leds-da903x.c | 14 +- + drivers/leds/leds-ktd2692.c | 27 +- + drivers/leds/leds-lm3533.c | 12 +- + drivers/leds/leds-lm355x.c | 7 +- + drivers/leds/leds-lp5523.c | 2 +- + drivers/leds/leds-wm831x-status.c | 14 +- + drivers/macintosh/windfarm_pm112.c | 22 +- + drivers/md/bcache/bcache.h | 1 + + drivers/md/bcache/bset.c | 2 +- + drivers/md/bcache/btree.c | 22 +- + drivers/md/bcache/journal.c | 4 +- + drivers/md/bcache/super.c | 12 +- + drivers/md/bitmap.c | 2 +- + drivers/md/dm-bow.c | 2 +- + drivers/md/dm-cache-metadata.c | 8 +- + drivers/md/dm-era-target.c | 93 +- + drivers/md/dm-ioctl.c | 3 +- + drivers/md/dm-req-crypt.c | 1 - + drivers/md/dm-rq.c | 2 + + drivers/md/dm-snap.c | 3 +- + drivers/md/dm-table.c | 125 +- + drivers/md/dm-thin-metadata.c | 8 +- + drivers/md/dm-verity-target.c | 9 + + drivers/md/md-cluster.c | 1 + + drivers/md/md.c | 53 +- + .../md/persistent-data/dm-btree-internal.h | 4 +- + drivers/md/persistent-data/dm-btree-remove.c | 3 +- + .../md/persistent-data/dm-space-map-common.c | 2 + + .../md/persistent-data/dm-space-map-common.h | 8 +- + .../md/persistent-data/dm-space-map-disk.c | 9 +- + .../persistent-data/dm-space-map-metadata.c | 9 +- + drivers/md/raid5.c | 7 +- + drivers/media/common/siano/smscoreapi.c | 22 +- + drivers/media/common/siano/smscoreapi.h | 4 +- + drivers/media/common/siano/smsdvb-main.c | 9 +- + drivers/media/dvb-core/dvb_frontend.c | 571 ++++--- + drivers/media/dvb-core/dvb_frontend.h | 13 - + drivers/media/dvb-core/dvb_net.c | 25 +- + drivers/media/dvb-core/dvbdev.c | 1 + + drivers/media/dvb-frontends/lg2160.c | 14 - + drivers/media/dvb-frontends/sp8870.c | 4 +- + drivers/media/dvb-frontends/stv0288.c | 7 - + drivers/media/dvb-frontends/stv6110.c | 9 - + drivers/media/dvb-frontends/tda10071.c | 9 +- + drivers/media/firewire/firedtv-fw.c | 4 + + drivers/media/i2c/adv7511-v4l2.c | 2 +- + drivers/media/i2c/adv7604.c | 2 +- + drivers/media/i2c/adv7842.c | 2 +- + drivers/media/i2c/m5mols/m5mols_core.c | 3 +- + drivers/media/i2c/s5c73m3/s5c73m3-core.c | 6 +- + drivers/media/i2c/s5c73m3/s5c73m3.h | 2 +- + drivers/media/i2c/s5k4ecgx.c | 10 +- + drivers/media/i2c/s5k5baf.c | 6 +- + drivers/media/i2c/s5k6aa.c | 10 +- + drivers/media/i2c/tc358743.c | 3 +- + drivers/media/pci/bt8xx/bt878.c | 3 + + drivers/media/pci/bt8xx/bttv-driver.c | 13 +- + drivers/media/pci/cobalt/cobalt-driver.c | 1 + + drivers/media/pci/cobalt/cobalt-driver.h | 7 +- + drivers/media/pci/cx23885/cx23888-ir.c | 5 +- + drivers/media/pci/cx25821/cx25821-core.c | 4 +- + .../media/pci/netup_unidvb/netup_unidvb_spi.c | 5 +- + drivers/media/pci/ngene/ngene-core.c | 2 +- + drivers/media/pci/ngene/ngene.h | 14 +- + drivers/media/pci/saa7134/saa7134-empress.c | 5 +- + drivers/media/pci/saa7134/saa7134-tvaudio.c | 3 +- + drivers/media/pci/saa7146/mxb.c | 19 +- + drivers/media/pci/saa7164/saa7164-encoder.c | 20 +- + drivers/media/pci/solo6x10/solo6x10-g723.c | 2 +- + drivers/media/pci/ttpci/av7110.c | 5 +- + drivers/media/pci/ttpci/budget-core.c | 11 +- + drivers/media/pci/tw5864/tw5864-video.c | 6 + + drivers/media/platform/davinci/vpif_capture.c | 2 - + 
drivers/media/platform/davinci/vpss.c | 20 +- + drivers/media/platform/exynos4-is/fimc-isp.c | 4 +- + drivers/media/platform/exynos4-is/fimc-lite.c | 2 +- + drivers/media/platform/exynos4-is/media-dev.c | 5 +- + drivers/media/platform/exynos4-is/mipi-csis.c | 4 +- + .../camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c | 193 ++- + .../camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h | 4 +- + .../cam_jpeg_dma_hw_info_ver_4_2_0.h | 53 + + .../jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c | 242 ++- + .../jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h | 41 +- + .../jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c | 17 +- + .../jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c | 3 +- + .../jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c | 4 +- + .../msm/camera/cam_req_mgr/cam_req_mgr_core.c | 2 +- + .../msm/camera/cam_req_mgr/cam_req_mgr_core.h | 2 +- + .../msm/camera_v2/sensor/csiphy/msm_csiphy.c | 74 +- + .../msm/camera_v2/sensor/csiphy/msm_csiphy.h | 8 +- + .../cam_sensor_module/cam_cci/cam_cci_core.c | 160 +- + .../cam_sensor_module/cam_cci/cam_cci_dev.h | 3 +- + .../cam_sensor_module/cam_cci/cam_cci_soc.c | 6 +- + .../media/platform/msm/vidc/msm_vidc_clocks.c | 17 +- + .../platform/msm/vidc_3x/msm_vidc_res_parse.c | 7 +- + .../platform/msm/vidc_3x/msm_vidc_resources.h | 3 +- + .../media/platform/msm/vidc_3x/venus_hfi.c | 19 +- + drivers/media/platform/omap3isp/isp.c | 6 +- + drivers/media/platform/omap3isp/isppreview.c | 4 +- + drivers/media/platform/pxa_camera.c | 3 + + drivers/media/platform/rcar-fcp.c | 9 +- + drivers/media/platform/s3c-camif/camif-core.c | 5 +- + drivers/media/platform/s5p-g2d/g2d.c | 3 + + drivers/media/platform/sti/bdisp/bdisp-v4l2.c | 3 +- + drivers/media/platform/sti/hva/hva-hw.c | 5 +- + drivers/media/platform/ti-vpe/cal.c | 6 +- + drivers/media/platform/ti-vpe/vpe.c | 2 + + drivers/media/platform/vivid/vivid-vid-out.c | 2 +- + drivers/media/platform/vsp1/vsp1_drv.c | 15 +- + drivers/media/rc/ati_remote.c | 4 + + drivers/media/rc/ite-cir.c | 8 +- + drivers/media/rc/sunxi-cir.c | 2 + + drivers/media/tuners/m88rs6000t.c | 6 +- + drivers/media/tuners/qm1d1c0042.c | 4 +- + drivers/media/tuners/si2157.c | 15 +- + drivers/media/usb/cpia2/cpia2.h | 1 + + drivers/media/usb/cpia2/cpia2_core.c | 12 + + drivers/media/usb/cpia2/cpia2_usb.c | 13 +- + drivers/media/usb/dvb-usb-v2/lmedm04.c | 2 +- + drivers/media/usb/dvb-usb-v2/rtl28xxu.c | 11 +- + drivers/media/usb/dvb-usb/cxusb.c | 2 +- + drivers/media/usb/dvb-usb/dibusb-mb.c | 2 +- + drivers/media/usb/dvb-usb/dtv5100.c | 7 +- + drivers/media/usb/dvb-usb/dvb-usb-init.c | 20 +- + drivers/media/usb/dvb-usb/dvb-usb.h | 3 +- + drivers/media/usb/dvb-usb/friio-fe.c | 24 - + drivers/media/usb/dvb-usb/gp8psk.c | 2 +- + drivers/media/usb/em28xx/em28xx-dvb.c | 1 + + drivers/media/usb/go7007/go7007-usb.c | 4 +- + drivers/media/usb/go7007/snd-go7007.c | 35 +- + drivers/media/usb/gspca/gspca.c | 3 + + drivers/media/usb/gspca/gspca.h | 1 + + drivers/media/usb/gspca/m5602/m5602_po1030.c | 10 +- + drivers/media/usb/gspca/sq905.c | 4 +- + drivers/media/usb/gspca/stv06xx/stv06xx.c | 9 + + drivers/media/usb/gspca/sunplus.c | 8 +- + drivers/media/usb/msi2500/msi2500.c | 2 +- + drivers/media/usb/pvrusb2/pvrusb2-hdw.c | 4 +- + drivers/media/usb/tm6000/tm6000-dvb.c | 4 + + drivers/media/usb/usbtv/usbtv-audio.c | 2 +- + drivers/media/usb/usbtv/usbtv-core.c | 3 +- + drivers/media/usb/uvc/uvc_driver.c | 7 +- + drivers/media/usb/uvc/uvc_v4l2.c | 30 + + drivers/media/usb/uvc/uvc_video.c | 27 + + drivers/media/usb/zr364xx/zr364xx.c | 1 + + drivers/media/v4l2-core/v4l2-fh.c | 1 + + drivers/media/v4l2-core/v4l2-ioctl.c | 19 +- + 
drivers/media/v4l2-core/videobuf2-core.c | 13 +- + drivers/memory/emif.c | 33 +- + drivers/memory/fsl-corenet-cf.c | 6 +- + drivers/memory/fsl_ifc.c | 8 +- + drivers/memory/omap-gpmc.c | 11 +- + drivers/memstick/core/memstick.c | 1 - + drivers/memstick/host/r592.c | 12 +- + drivers/message/fusion/mptscsih.c | 17 +- + drivers/mfd/arizona-core.c | 18 + + drivers/mfd/da9052-i2c.c | 1 + + drivers/mfd/dln2.c | 4 + + drivers/mfd/mfd-core.c | 10 + + drivers/mfd/rtsx_pcr.c | 4 +- + drivers/mfd/sm501.c | 8 +- + drivers/mfd/stmpe-i2c.c | 2 +- + drivers/mfd/wm831x-auxadc.c | 3 +- + drivers/mfd/wm8994-core.c | 1 + + drivers/misc/atmel-ssc.c | 24 +- + drivers/misc/cxl/sysfs.c | 2 +- + drivers/misc/eeprom/at25.c | 2 +- + drivers/misc/eeprom/eeprom_93xx46.c | 16 + + drivers/misc/ibmasm/module.c | 5 +- + drivers/misc/kgdbts.c | 27 +- + drivers/misc/lis3lv02d/lis3lv02d.c | 21 +- + drivers/misc/lis3lv02d/lis3lv02d.h | 1 + + drivers/misc/mei/bus.c | 3 +- + drivers/misc/mei/client.h | 4 +- + drivers/misc/mei/interrupt.c | 3 + + drivers/misc/mic/scif/scif_rma.c | 4 +- + drivers/misc/mic/vop/vop_main.c | 2 +- + drivers/misc/mic/vop/vop_vringh.c | 24 +- + drivers/misc/qcom-xr-smrtvwr-misc.c | 14 +- + drivers/misc/tusb1064.c | 13 +- + drivers/misc/tusb1064.h | 4 +- + drivers/misc/vmw_vmci/vmci_context.c | 2 +- + drivers/misc/vmw_vmci/vmci_doorbell.c | 2 +- + drivers/misc/vmw_vmci/vmci_guest.c | 2 +- + drivers/misc/vmw_vmci/vmci_queue_pair.c | 15 +- + drivers/mmc/core/core.c | 11 +- + drivers/mmc/core/host.c | 10 + + drivers/mmc/core/mmc.c | 14 +- + drivers/mmc/core/sd.c | 6 + + drivers/mmc/core/sdio_cis.c | 9 + + drivers/mmc/host/dw_mmc.c | 21 +- + drivers/mmc/host/mtk-sd.c | 18 +- + drivers/mmc/host/mxs-mmc.c | 2 +- + drivers/mmc/host/sdhci-esdhc-imx.c | 5 +- + drivers/mmc/host/sdhci.c | 4 + + drivers/mmc/host/sdhci.h | 1 + + drivers/mmc/host/usdhi6rol0.c | 5 +- + drivers/mmc/host/via-sdmmc.c | 6 + + drivers/mmc/host/vub300.c | 2 +- + drivers/mtd/chips/cfi_cmdset_0002.c | 1 - + drivers/mtd/cmdlinepart.c | 35 +- + drivers/mtd/devices/msm_qpic_nand.c | 84 +- + drivers/mtd/lpddr/lpddr2_nvm.c | 35 +- + drivers/mtd/mtdchar.c | 56 +- + drivers/mtd/mtdoops.c | 11 +- + drivers/mtd/nand/ams-delta.c | 2 +- + drivers/mtd/nand/atmel_nand.c | 2 +- + drivers/mtd/nand/au1550nd.c | 2 +- + drivers/mtd/nand/bcm47xxnflash/main.c | 2 +- + drivers/mtd/nand/bf5xx_nand.c | 2 +- + drivers/mtd/nand/brcmnand/brcmnand.c | 18 +- + drivers/mtd/nand/cafe_nand.c | 2 +- + drivers/mtd/nand/cmx270_nand.c | 2 +- + drivers/mtd/nand/cs553x_nand.c | 2 +- + drivers/mtd/nand/davinci_nand.c | 2 +- + drivers/mtd/nand/denali.c | 2 +- + drivers/mtd/nand/diskonchip.c | 9 +- + drivers/mtd/nand/docg4.c | 4 +- + drivers/mtd/nand/fsl_elbc_nand.c | 2 +- + drivers/mtd/nand/fsl_ifc_nand.c | 2 +- + drivers/mtd/nand/fsl_upm.c | 2 +- + drivers/mtd/nand/fsmc_nand.c | 2 +- + drivers/mtd/nand/gpio.c | 2 +- + drivers/mtd/nand/gpmi-nand/gpmi-nand.c | 4 +- + drivers/mtd/nand/hisi504_nand.c | 5 +- + drivers/mtd/nand/jz4740_nand.c | 4 +- + drivers/mtd/nand/jz4780_nand.c | 4 +- + drivers/mtd/nand/lpc32xx_mlc.c | 5 +- + drivers/mtd/nand/lpc32xx_slc.c | 5 +- + drivers/mtd/nand/mpc5121_nfc.c | 2 +- + drivers/mtd/nand/mtk_nand.c | 4 +- + drivers/mtd/nand/mxc_nand.c | 2 +- + drivers/mtd/nand/nand_base.c | 8 +- + drivers/mtd/nand/nandsim.c | 4 +- + drivers/mtd/nand/ndfc.c | 2 +- + drivers/mtd/nand/nuc900_nand.c | 2 +- + drivers/mtd/nand/omap2.c | 2 +- + drivers/mtd/nand/omap_elm.c | 1 + + drivers/mtd/nand/orion_nand.c | 5 +- + drivers/mtd/nand/pasemi_nand.c | 6 +- + 
drivers/mtd/nand/plat_nand.c | 4 +- + drivers/mtd/nand/pxa3xx_nand.c | 2 +- + drivers/mtd/nand/qcom_nandc.c | 4 +- + drivers/mtd/nand/r852.c | 4 +- + drivers/mtd/nand/s3c2410.c | 2 +- + drivers/mtd/nand/sh_flctl.c | 2 +- + drivers/mtd/nand/sharpsl.c | 4 +- + drivers/mtd/nand/socrates_nand.c | 5 +- + drivers/mtd/nand/sunxi_nand.c | 4 +- + drivers/mtd/nand/tmio_nand.c | 4 +- + drivers/mtd/nand/txx9ndfmc.c | 2 +- + drivers/mtd/nand/vf610_nfc.c | 2 +- + drivers/mtd/nand/xway_nand.c | 4 +- + drivers/mtd/spi-nor/cadence-quadspi.c | 2 +- + drivers/mtd/spi-nor/hisi-sfc.c | 4 +- + drivers/mtd/ubi/wl.c | 13 + + drivers/net/appletalk/cops.c | 4 +- + drivers/net/bonding/bond_main.c | 88 +- + drivers/net/bonding/bond_sysfs_slave.c | 18 +- + drivers/net/caif/caif_serial.c | 2 +- + drivers/net/can/c_can/c_can.c | 24 +- + drivers/net/can/c_can/c_can_pci.c | 3 +- + drivers/net/can/c_can/c_can_platform.c | 6 +- + drivers/net/can/dev.c | 21 +- + drivers/net/can/flexcan.c | 20 +- + drivers/net/can/m_can/m_can.c | 7 +- + drivers/net/can/softing/softing_main.c | 9 +- + drivers/net/can/usb/ems_usb.c | 17 +- + drivers/net/can/usb/esd_usb2.c | 20 +- + drivers/net/can/usb/gs_usb.c | 131 +- + drivers/net/can/usb/kvaser_usb.c | 6 +- + drivers/net/can/usb/peak_usb/pcan_usb_core.c | 57 +- + drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 48 +- + drivers/net/can/usb/usb_8dev.c | 15 +- + drivers/net/dsa/b53/b53_common.c | 24 +- + drivers/net/dsa/b53/b53_regs.h | 1 + + drivers/net/dsa/bcm_sf2.c | 21 +- + drivers/net/dsa/bcm_sf2_regs.h | 2 + + drivers/net/ethernet/aeroflex/greth.c | 3 +- + drivers/net/ethernet/allwinner/sun4i-emac.c | 11 +- + drivers/net/ethernet/amazon/ena/ena_com.c | 6 +- + drivers/net/ethernet/amazon/ena/ena_netdev.c | 17 +- + drivers/net/ethernet/amd/pcnet32.c | 5 +- + drivers/net/ethernet/arc/emac_mdio.c | 1 + + drivers/net/ethernet/atheros/alx/main.c | 18 +- + drivers/net/ethernet/broadcom/b44.c | 3 +- + drivers/net/ethernet/broadcom/bnx2.c | 2 +- + .../net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 3 +- + .../net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 4 +- + drivers/net/ethernet/broadcom/bnxt/bnxt.c | 19 +- + .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 31 +- + .../net/ethernet/broadcom/bnxt/bnxt_sriov.c | 2 +- + .../net/ethernet/broadcom/genet/bcmgenet.c | 25 +- + .../ethernet/broadcom/genet/bcmgenet_wol.c | 6 - + drivers/net/ethernet/broadcom/tg3.c | 21 +- + drivers/net/ethernet/cadence/macb.c | 5 +- + .../ethernet/cavium/liquidio/cn23xx_pf_regs.h | 2 +- + .../ethernet/cavium/liquidio/cn66xx_regs.h | 2 +- + .../ethernet/cavium/thunder/nicvf_queues.c | 2 +- + drivers/net/ethernet/chelsio/cxgb3/sge.c | 1 + + .../ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 2 +- + .../net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 18 +- + .../chelsio/cxgb4/cxgb4_tc_u32_parse.h | 122 +- + drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 8 +- + drivers/net/ethernet/cisco/enic/enic.h | 1 + + drivers/net/ethernet/cisco/enic/enic_api.c | 6 + + drivers/net/ethernet/cisco/enic/enic_main.c | 27 +- + drivers/net/ethernet/davicom/dm9000.c | 27 +- + drivers/net/ethernet/dec/tulip/de2104x.c | 2 +- + drivers/net/ethernet/dec/tulip/winbond-840.c | 7 +- + drivers/net/ethernet/ec_bhf.c | 4 +- + drivers/net/ethernet/emulex/benet/be_main.c | 1 + + drivers/net/ethernet/ethoc.c | 3 +- + drivers/net/ethernet/ezchip/nps_enet.c | 4 +- + drivers/net/ethernet/faraday/ftgmac100.c | 4 + + drivers/net/ethernet/freescale/fec_main.c | 6 +- + drivers/net/ethernet/freescale/fec_ptp.c | 11 + + drivers/net/ethernet/freescale/fman/fman.c | 3 +- + 
.../net/ethernet/freescale/fman/fman_dtsec.c | 4 +- + .../net/ethernet/freescale/fman/fman_mac.h | 2 +- + .../net/ethernet/freescale/fman/fman_memac.c | 3 +- + .../net/ethernet/freescale/fman/fman_port.c | 9 +- + .../net/ethernet/freescale/fman/fman_tgec.c | 2 +- + .../ethernet/freescale/fs_enet/mii-bitbang.c | 1 + + .../net/ethernet/freescale/fs_enet/mii-fec.c | 1 + + drivers/net/ethernet/freescale/gianfar.c | 24 +- + drivers/net/ethernet/freescale/ucc_geth.c | 2 +- + drivers/net/ethernet/freescale/ucc_geth.h | 9 +- + drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 5 + + drivers/net/ethernet/hisilicon/hns/hns_enet.c | 9 +- + .../net/ethernet/hisilicon/hns/hns_ethtool.c | 4 + + drivers/net/ethernet/ibm/ehea/ehea_main.c | 9 +- + drivers/net/ethernet/ibm/ibmveth.c | 13 +- + drivers/net/ethernet/ibm/ibmvnic.c | 41 +- + drivers/net/ethernet/intel/e100.c | 12 +- + drivers/net/ethernet/intel/e1000/e1000_main.c | 22 +- + drivers/net/ethernet/intel/e1000e/82571.c | 2 + + drivers/net/ethernet/intel/e1000e/e1000.h | 1 - + drivers/net/ethernet/intel/e1000e/ich8lan.c | 14 +- + drivers/net/ethernet/intel/e1000e/ich8lan.h | 3 + + drivers/net/ethernet/intel/e1000e/netdev.c | 33 +- + drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 1 + + .../net/ethernet/intel/i40e/i40e_adminq_cmd.h | 2 +- + drivers/net/ethernet/intel/i40e/i40e_common.c | 35 +- + drivers/net/ethernet/intel/i40e/i40e_main.c | 60 +- + drivers/net/ethernet/intel/i40e/i40e_txrx.c | 9 +- + .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 32 +- + .../net/ethernet/intel/i40evf/i40evf_main.c | 1 + + drivers/net/ethernet/intel/igb/igb_ethtool.c | 3 +- + drivers/net/ethernet/intel/igb/igb_main.c | 21 +- + drivers/net/ethernet/intel/ixgbe/ixgbe.h | 1 + + .../net/ethernet/intel/ixgbe/ixgbe_common.c | 2 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 11 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 27 + + drivers/net/ethernet/korina.c | 5 +- + drivers/net/ethernet/marvell/mvneta.c | 2 +- + drivers/net/ethernet/marvell/pxa168_eth.c | 2 +- + drivers/net/ethernet/marvell/sky2.c | 2 +- + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2 +- + drivers/net/ethernet/mediatek/mtk_eth_soc.h | 1 + + .../net/ethernet/mellanox/mlx4/en_ethtool.c | 6 +- + .../net/ethernet/mellanox/mlx4/en_netdev.c | 22 +- + drivers/net/ethernet/mellanox/mlx4/fw.c | 6 +- + drivers/net/ethernet/mellanox/mlx4/fw.h | 4 +- + drivers/net/ethernet/mellanox/mlx4/main.c | 3 + + drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 8 +- + drivers/net/ethernet/mellanox/mlx4/mr.c | 2 +- + drivers/net/ethernet/mellanox/mlx4/port.c | 107 +- + .../ethernet/mellanox/mlx4/resource_tracker.c | 1 + + .../net/ethernet/mellanox/mlx5/core/en_fs.c | 1 + + .../net/ethernet/mellanox/mlx5/core/en_rep.c | 2 +- + .../net/ethernet/mellanox/mlx5/core/fs_core.c | 10 +- + .../ethernet/mellanox/mlx5/core/pagealloc.c | 21 +- + drivers/net/ethernet/mellanox/mlxsw/core.c | 13 +- + drivers/net/ethernet/micrel/ks8842.c | 4 + + drivers/net/ethernet/moxa/moxart_ether.c | 4 +- + .../net/ethernet/myricom/myri10ge/myri10ge.c | 1 + + drivers/net/ethernet/natsemi/natsemi.c | 8 +- + .../net/ethernet/neterion/vxge/vxge-main.c | 6 +- + drivers/net/ethernet/nxp/lpc_eth.c | 3 +- + .../ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 29 +- + drivers/net/ethernet/pasemi/pasemi_mac.c | 8 +- + .../ethernet/qlogic/netxen/netxen_nic_main.c | 9 +- + drivers/net/ethernet/qlogic/qed/qed_cxt.c | 2 +- + drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 4 +- + drivers/net/ethernet/qlogic/qed/qed_int.c | 3 +- + drivers/net/ethernet/qlogic/qed/qed_vf.c | 23 +- + 
drivers/net/ethernet/qlogic/qla3xxx.c | 8 +- + .../ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 4 +- + .../ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 3 +- + .../ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 3 +- + .../net/ethernet/qlogic/qlcnic/qlcnic_main.c | 2 + + .../ethernet/qlogic/qlcnic/qlcnic_minidump.c | 3 + + drivers/net/ethernet/qualcomm/emac/emac-mac.c | 4 +- + drivers/net/ethernet/qualcomm/emac/emac.c | 20 +- + drivers/net/ethernet/realtek/r8169.c | 110 +- + drivers/net/ethernet/renesas/ravb_main.c | 36 +- + drivers/net/ethernet/renesas/sh_eth.c | 8 +- + drivers/net/ethernet/rocker/rocker_main.c | 4 +- + drivers/net/ethernet/sfc/ef10_sriov.c | 25 +- + drivers/net/ethernet/sis/sis900.c | 7 +- + drivers/net/ethernet/smsc/smc91x.c | 4 +- + .../ethernet/stmicro/stmmac/dwmac-ipq806x.c | 3 + + .../ethernet/stmicro/stmmac/dwmac-meson8b.c | 6 +- + .../net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 8 +- + .../net/ethernet/stmicro/stmmac/dwmac1000.h | 8 +- + .../ethernet/stmicro/stmmac/dwmac1000_core.c | 3 + + .../ethernet/stmicro/stmmac/stmmac_ethtool.c | 15 +- + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 13 +- + drivers/net/ethernet/sun/niu.c | 35 +- + drivers/net/ethernet/sun/sunvnet_common.c | 24 +- + drivers/net/ethernet/tehuti/tehuti.c | 1 + + drivers/net/ethernet/ti/davinci_emac.c | 4 +- + drivers/net/ethernet/ti/netcp_core.c | 2 +- + drivers/net/ethernet/ti/tlan.c | 3 +- + drivers/net/ethernet/toshiba/spider_net.c | 4 +- + drivers/net/ethernet/xilinx/ll_temac_main.c | 5 + + drivers/net/ethernet/xilinx/xilinx_emaclite.c | 5 +- + drivers/net/fddi/Kconfig | 15 +- + drivers/net/fddi/defxx.c | 47 +- + drivers/net/fjes/fjes_main.c | 4 + + drivers/net/geneve.c | 36 +- + drivers/net/gtp.c | 5 +- + drivers/net/hamradio/6pack.c | 6 + + drivers/net/hamradio/mkiss.c | 1 + + drivers/net/hamradio/yam.c | 1 + + drivers/net/hippi/rrunner.c | 2 +- + drivers/net/ieee802154/adf7242.c | 4 +- + drivers/net/ieee802154/atusb.c | 1 + + drivers/net/ipvlan/ipvlan_main.c | 25 +- + drivers/net/macsec.c | 4 +- + drivers/net/macvlan.c | 4 + + drivers/net/phy/Kconfig | 1 + + drivers/net/phy/dp83640.c | 4 + + drivers/net/phy/mdio-bcm-unimac.c | 2 + + drivers/net/phy/mdio-octeon.c | 2 - + drivers/net/phy/mdio-thunder.c | 1 - + drivers/net/phy/mdio_bus.c | 3 +- + drivers/net/phy/phy_device.c | 3 +- + drivers/net/ppp/ppp_generic.c | 21 +- + drivers/net/team/team.c | 3 +- + drivers/net/tun.c | 48 + + drivers/net/usb/asix_common.c | 2 +- + drivers/net/usb/ax88172a.c | 1 + + drivers/net/usb/ax88179_178a.c | 15 +- + drivers/net/usb/cdc-phonet.c | 2 + + drivers/net/usb/cdc_eem.c | 2 +- + drivers/net/usb/cdc_ncm.c | 25 +- + drivers/net/usb/dm9601.c | 4 + + drivers/net/usb/hso.c | 47 +- + drivers/net/usb/ipheth.c | 2 +- + drivers/net/usb/lan78xx.c | 113 +- + drivers/net/usb/pegasus.c | 14 +- + drivers/net/usb/qmi_wwan.c | 14 +- + drivers/net/usb/r8152.c | 5 +- + drivers/net/usb/rndis_host.c | 4 +- + drivers/net/usb/rtl8150.c | 16 +- + drivers/net/usb/smsc75xx.c | 12 +- + drivers/net/usb/smsc95xx.c | 11 +- + drivers/net/virtio_net.c | 12 +- + drivers/net/vmxnet3/vmxnet3_ethtool.c | 2 + + drivers/net/vxlan.c | 16 +- + drivers/net/wan/Kconfig | 1 + + drivers/net/wan/cosa.c | 1 + + drivers/net/wan/fsl_ucc_hdlc.c | 8 +- + drivers/net/wan/hdlc.c | 10 +- + drivers/net/wan/hdlc_cisco.c | 2 + + drivers/net/wan/hdlc_fr.c | 101 +- + drivers/net/wan/hdlc_ppp.c | 24 +- + drivers/net/wan/hdlc_raw_eth.c | 1 + + drivers/net/wan/lapbether.c | 55 +- + drivers/net/wan/lmc/lmc_main.c | 2 + + drivers/net/wan/x25_asy.c | 21 +- + 
drivers/net/wimax/i2400m/op-rfkill.c | 2 +- + drivers/net/wireless/ath/ar5523/ar5523.c | 2 + + drivers/net/wireless/ath/ath10k/htt_rx.c | 77 +- + drivers/net/wireless/ath/ath10k/hw.h | 2 +- + drivers/net/wireless/ath/ath10k/mac.c | 18 +- + drivers/net/wireless/ath/ath6kl/main.c | 3 + + drivers/net/wireless/ath/ath6kl/wmi.c | 5 + + drivers/net/wireless/ath/ath9k/ath9k.h | 3 +- + drivers/net/wireless/ath/ath9k/hif_usb.c | 81 +- + drivers/net/wireless/ath/ath9k/hif_usb.h | 6 + + drivers/net/wireless/ath/ath9k/htc_drv_init.c | 12 +- + drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 8 +- + drivers/net/wireless/ath/ath9k/htc_hst.c | 8 + + drivers/net/wireless/ath/ath9k/hw.c | 2 +- + drivers/net/wireless/ath/ath9k/main.c | 5 + + drivers/net/wireless/ath/ath9k/wmi.c | 6 +- + drivers/net/wireless/ath/ath9k/wmi.h | 3 +- + drivers/net/wireless/ath/ath9k/xmit.c | 6 + + drivers/net/wireless/ath/carl9170/Kconfig | 8 +- + drivers/net/wireless/ath/carl9170/fw.c | 4 +- + drivers/net/wireless/ath/carl9170/main.c | 21 +- + drivers/net/wireless/ath/wcn36xx/main.c | 2 +- + drivers/net/wireless/ath/wil6210/Kconfig | 1 + + drivers/net/wireless/broadcom/b43/main.c | 2 +- + drivers/net/wireless/broadcom/b43/phy_n.c | 2 +- + .../net/wireless/broadcom/b43legacy/main.c | 1 + + .../net/wireless/broadcom/b43legacy/xmit.c | 1 + + .../broadcom/brcm80211/brcmfmac/fwil_types.h | 2 +- + .../broadcom/brcm80211/brcmfmac/msgbuf.c | 2 + + .../broadcom/brcm80211/brcmsmac/mac80211_if.c | 8 +- + .../broadcom/brcm80211/brcmsmac/phy/phy_lcn.c | 4 +- + .../net/wireless/intel/ipw2x00/libipw_wx.c | 6 +- + drivers/net/wireless/intel/iwlegacy/common.c | 4 +- + .../wireless/intel/iwlwifi/mvm/debugfs-vif.c | 3 + + .../net/wireless/intel/iwlwifi/mvm/mac80211.c | 9 +- + drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 3 +- + .../net/wireless/intel/iwlwifi/pcie/trans.c | 38 +- + drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 12 +- + .../wireless/intersil/orinoco/orinoco_usb.c | 14 +- + drivers/net/wireless/intersil/p54/p54pci.c | 4 +- + drivers/net/wireless/marvell/libertas/mesh.c | 28 +- + .../net/wireless/marvell/mwifiex/cfg80211.c | 14 +- + drivers/net/wireless/marvell/mwifiex/fw.h | 2 +- + drivers/net/wireless/marvell/mwifiex/join.c | 2 + + drivers/net/wireless/marvell/mwifiex/scan.c | 2 +- + drivers/net/wireless/marvell/mwifiex/sdio.c | 2 + + .../wireless/marvell/mwifiex/sta_cmdresp.c | 24 +- + drivers/net/wireless/marvell/mwl8k.c | 1 + + drivers/net/wireless/mediatek/mt7601u/dma.c | 5 +- + .../net/wireless/mediatek/mt7601u/eeprom.c | 2 +- + .../wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 10 +- + drivers/net/wireless/realtek/rtlwifi/base.c | 19 +- + drivers/net/wireless/realtek/rtlwifi/usb.c | 13 +- + drivers/net/wireless/st/cw1200/cw1200_sdio.c | 1 + + drivers/net/wireless/st/cw1200/main.c | 2 + + drivers/net/wireless/ti/wl1251/cmd.c | 9 +- + drivers/net/wireless/ti/wl1251/event.c | 2 +- + drivers/net/wireless/ti/wl12xx/main.c | 10 +- + drivers/net/wireless/ti/wlcore/main.c | 15 +- + drivers/net/wireless/ti/wlcore/wlcore.h | 3 - + drivers/net/wireless/wl3501.h | 47 +- + drivers/net/wireless/wl3501_cs.c | 54 +- + drivers/net/xen-netback/common.h | 15 + + drivers/net/xen-netback/interface.c | 63 +- + drivers/net/xen-netback/netback.c | 21 +- + drivers/net/xen-netback/rx.c | 22 +- + drivers/net/xen-netback/xenbus.c | 14 +- + drivers/net/xen-netfront.c | 64 +- + drivers/nfc/nfcsim.c | 3 +- + drivers/nfc/pn533/pn533.c | 3 + + drivers/nfc/s3fwrn5/core.c | 1 + + drivers/nfc/s3fwrn5/firmware.c | 4 +- + drivers/nfc/s3fwrn5/i2c.c | 4 +- + 
drivers/nfc/st95hf/core.c | 2 +- + drivers/ntb/hw/amd/ntb_hw_amd.c | 1 + + drivers/nvdimm/dimm_devs.c | 18 +- + drivers/nvdimm/nd.h | 1 - + drivers/nvme/target/core.c | 26 +- + drivers/of/address.c | 4 +- + drivers/of/fdt.c | 8 +- + drivers/of/of_reserved_mem.c | 21 +- + drivers/parisc/sba_iommu.c | 2 +- + drivers/pci/access.c | 8 +- + drivers/pci/bus.c | 6 +- + drivers/pci/host/pci-aardvark.c | 4 - + drivers/pci/host/pci-xgene-msi.c | 10 +- + drivers/pci/host/pcie-rcar.c | 9 +- + drivers/pci/hotplug/acpiphp_glue.c | 15 +- + drivers/pci/hotplug/rpadlpar_sysfs.c | 14 +- + drivers/pci/msi.c | 119 +- + drivers/pci/pci-label.c | 2 +- + drivers/pci/pcie/aspm.c | 11 +- + drivers/pci/pcie/ptm.c | 22 +- + drivers/pci/probe.c | 25 +- + drivers/pci/quirks.c | 38 + + drivers/pci/slot.c | 12 +- + drivers/pci/syscall.c | 10 +- + drivers/pcmcia/i82092.c | 1 + + drivers/phy/phy-dm816x-usb.c | 17 +- + drivers/phy/phy-s5pv210-usb2.c | 4 + + drivers/phy/phy-twl4030-usb.c | 2 +- + drivers/phy/tegra/xusb.c | 1 + + drivers/pinctrl/aspeed/pinctrl-aspeed.c | 7 +- + drivers/pinctrl/devicetree.c | 26 +- + drivers/pinctrl/freescale/pinctrl-imx1-core.c | 1 - + drivers/pinctrl/intel/pinctrl-baytrail.c | 75 +- + drivers/pinctrl/intel/pinctrl-merrifield.c | 8 + + drivers/pinctrl/pinctrl-amd.c | 14 +- + drivers/pinctrl/pinctrl-amd.h | 2 +- + drivers/pinctrl/pinctrl-falcon.c | 14 +- + drivers/pinctrl/pinctrl-rockchip.c | 15 +- + drivers/pinctrl/pinctrl-single.c | 11 +- + drivers/pinctrl/qcom/pinctrl-msm.c | 4 +- + drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c | 6 +- + drivers/pinctrl/samsung/pinctrl-exynos.c | 9 + + drivers/platform/msm/ep_pcie/ep_pcie.c | 17 +- + drivers/platform/msm/ep_pcie/ep_pcie_com.h | 61 +- + drivers/platform/msm/ep_pcie/ep_pcie_core.c | 976 ++++++++++- + drivers/platform/msm/ep_pcie/ep_pcie_dbg.c | 4 +- + drivers/platform/msm/ep_pcie/ep_pcie_phy.c | 31 +- + drivers/platform/msm/ipa/ipa_v2/ipa_flt.c | 2 +- + drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 2 +- + drivers/platform/msm/ipa/ipa_v3/ipa_flt.c | 5 +- + drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c | 11 +- + drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 2 + + drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c | 11 +- + drivers/platform/msm/ipa/ipa_v3/ipa_rt.c | 5 +- + .../platform/msm/ipa/ipa_v3/ipahal/ipahal.c | 76 +- + .../platform/msm/ipa/ipa_v3/ipahal/ipahal.h | 8 +- + .../msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c | 15 +- + .../msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h | 5 +- + .../msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h | 3 +- + .../platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h | 3 +- + drivers/platform/msm/mhi_dev/mhi.c | 10 +- + drivers/platform/msm/mhi_dev/mhi_sm.c | 21 +- + drivers/platform/msm/qcom-geni-se.c | 13 +- + drivers/platform/msm/sps/bam.c | 16 +- + drivers/platform/x86/acer-wmi.c | 9 +- + drivers/platform/x86/hp_accel.c | 22 +- + drivers/platform/x86/intel-hid.c | 2 +- + drivers/platform/x86/intel-vbtn.c | 2 +- + drivers/platform/x86/intel_punit_ipc.c | 1 + + drivers/platform/x86/thinkpad_acpi.c | 37 +- + drivers/platform/x86/toshiba_acpi.c | 4 +- + drivers/power/reset/at91-sama5d2_shdwc.c | 2 +- + drivers/power/reset/gpio-poweroff.c | 1 + + drivers/power/reset/vexpress-poweroff.c | 1 + + drivers/power/supply/88pm860x_battery.c | 6 +- + drivers/power/supply/Kconfig | 5 +- + drivers/power/supply/ab8500_btemp.c | 1 + + drivers/power/supply/ab8500_charger.c | 19 +- + drivers/power/supply/ab8500_fg.c | 1 + + drivers/power/supply/charger-manager.c | 1 + + drivers/power/supply/generic-adc-battery.c | 2 +- + drivers/power/supply/lp8788-charger.c | 20 +- + 
drivers/power/supply/pm2301_charger.c | 2 +- + drivers/power/supply/qcom/qpnp-fg-gen3.c | 4 + + .../power/supply/qcom/qpnp-linear-charger.c | 11 +- + drivers/power/supply/qcom/qpnp-smb2.c | 11 + + drivers/power/supply/qcom/smb-lib.c | 12 +- + drivers/power/supply/qcom/smb-lib.h | 3 +- + drivers/power/supply/rt5033_battery.c | 7 + + drivers/power/supply/s3c_adc_battery.c | 2 +- + drivers/power/supply/smb347-charger.c | 1 + + drivers/power/supply/test_power.c | 6 + + drivers/power/supply/tps65090-charger.c | 2 +- + drivers/power/supply/tps65217_charger.c | 2 +- + drivers/powercap/powercap_sys.c | 4 +- + drivers/ps3/ps3stor_lib.c | 2 +- + drivers/pwm/pwm-bcm-iproc.c | 9 +- + drivers/pwm/pwm-rockchip.c | 1 - + drivers/pwm/pwm-spear.c | 4 - + drivers/pwm/pwm-tegra.c | 13 - + drivers/rapidio/Kconfig | 2 +- + drivers/rapidio/devices/rio_mport_cdev.c | 18 +- + drivers/rapidio/rio_cm.c | 17 +- + drivers/regulator/axp20x-regulator.c | 7 +- + drivers/regulator/core.c | 105 +- + drivers/regulator/da9052-regulator.c | 3 +- + drivers/regulator/ti-abb-regulator.c | 12 +- + drivers/remoteproc/qcom_q6v5_pil.c | 6 + + drivers/reset/core.c | 5 +- + drivers/reset/reset-ti-syscon.c | 4 +- + drivers/rtc/rtc-max77686.c | 4 +- + drivers/rtc/rtc-proc.c | 4 +- + drivers/rtc/rtc-rx8010.c | 24 +- + drivers/s390/block/dasd.c | 3 +- + drivers/s390/block/dasd_alias.c | 22 +- + drivers/s390/char/sclp_vt220.c | 4 +- + drivers/s390/cio/chp.c | 3 + + drivers/s390/cio/chsc.c | 2 - + drivers/s390/cio/css.c | 5 + + drivers/s390/cio/qdio.h | 1 - + drivers/s390/cio/qdio_setup.c | 1 - + drivers/s390/cio/qdio_thinint.c | 14 +- + drivers/s390/net/qeth_l2_main.c | 4 + + drivers/scsi/BusLogic.c | 6 +- + drivers/scsi/BusLogic.h | 2 +- + drivers/scsi/FlashPoint.c | 32 +- + drivers/scsi/aacraid/aachba.c | 8 +- + drivers/scsi/aic7xxx/aic7xxx_core.c | 2 +- + drivers/scsi/arm/acornscsi.c | 4 +- + drivers/scsi/arm/cumana_2.c | 2 +- + drivers/scsi/arm/eesox.c | 2 +- + drivers/scsi/arm/powertec.c | 2 +- + drivers/scsi/be2iscsi/be_main.c | 1 + + drivers/scsi/bnx2fc/Kconfig | 1 + + drivers/scsi/bnx2i/Kconfig | 1 + + drivers/scsi/csiostor/csio_hw.c | 2 +- + drivers/scsi/device_handler/scsi_dh_alua.c | 14 +- + drivers/scsi/device_handler/scsi_dh_rdac.c | 4 +- + drivers/scsi/fcoe/fcoe_ctlr.c | 2 +- + drivers/scsi/fnic/fnic_main.c | 1 + + drivers/scsi/gdth.h | 3 - + drivers/scsi/hosts.c | 34 +- + drivers/scsi/hpsa.c | 4 +- + drivers/scsi/ibmvscsi/ibmvfc.c | 5 +- + drivers/scsi/ibmvscsi/ibmvscsi.c | 2 + + drivers/scsi/iscsi_boot_sysfs.c | 2 +- + drivers/scsi/jazz_esp.c | 4 +- + drivers/scsi/libfc/fc_disc.c | 12 +- + drivers/scsi/libfc/fc_exch.c | 16 +- + drivers/scsi/libfc/fc_lport.c | 2 +- + drivers/scsi/libiscsi.c | 189 ++- + drivers/scsi/libsas/sas_ata.c | 6 +- + drivers/scsi/libsas/sas_port.c | 4 +- + drivers/scsi/lpfc/lpfc_ct.c | 137 +- + drivers/scsi/lpfc/lpfc_debugfs.c | 4 +- + drivers/scsi/lpfc/lpfc_els.c | 15 +- + drivers/scsi/lpfc/lpfc_hw.h | 36 +- + drivers/scsi/lpfc/lpfc_nportdisc.c | 2 - + drivers/scsi/lpfc/lpfc_sli.c | 5 +- + drivers/scsi/lpfc/lpfc_vport.c | 26 +- + drivers/scsi/megaraid/megaraid_mm.c | 21 +- + drivers/scsi/mesh.c | 8 +- + drivers/scsi/mpt3sas/mpt3sas_base.c | 4 +- + drivers/scsi/mpt3sas/mpt3sas_scsih.c | 4 +- + drivers/scsi/mvumi.c | 1 + + drivers/scsi/pm8001/pm8001_init.c | 3 +- + drivers/scsi/pm8001/pm8001_sas.c | 2 +- + drivers/scsi/qla2xxx/qla_attr.c | 8 +- + drivers/scsi/qla2xxx/qla_nx.c | 3 +- + drivers/scsi/qla2xxx/qla_target.c | 2 + + drivers/scsi/qla2xxx/qla_target.h | 2 +- + drivers/scsi/qla2xxx/qla_tmpl.c | 
9 +- + drivers/scsi/qla2xxx/qla_tmpl.h | 2 +- + drivers/scsi/qla2xxx/tcm_qla2xxx.c | 2 + + drivers/scsi/qla4xxx/ql4_os.c | 2 +- + drivers/scsi/scsi_debug.c | 6 + + drivers/scsi/scsi_devinfo.c | 5 +- + drivers/scsi/scsi_lib.c | 5 +- + drivers/scsi/scsi_scan.c | 10 +- + drivers/scsi/scsi_transport_iscsi.c | 142 +- + drivers/scsi/scsi_transport_spi.c | 2 +- + drivers/scsi/sni_53c710.c | 5 +- + drivers/scsi/sr.c | 8 +- + drivers/scsi/st.c | 2 +- + drivers/scsi/sun3x_esp.c | 4 +- + drivers/scsi/ufs/ufs_quirks.h | 1 + + drivers/scsi/ufs/ufshcd.c | 56 +- + drivers/scsi/vmw_pvscsi.c | 8 +- + drivers/soc/fsl/qbman/qman.c | 2 +- + drivers/soc/qcom/bgcom_interface.c | 6 +- + drivers/soc/qcom/bgcom_spi.c | 6 +- + drivers/soc/qcom/boot_marker.c | 54 +- + drivers/soc/qcom/icnss.c | 3 + + drivers/soc/qcom/ipc_router_mhi_dev_xprt.c | 4 +- + drivers/soc/qcom/ipc_router_mhi_xprt.c | 2 + + drivers/soc/qcom/memshare/msm_memshare.c | 168 +- + drivers/soc/qcom/peripheral-loader.c | 11 +- + drivers/soc/qcom/rpmh_master_stat.c | 7 +- + drivers/soc/qcom/smp2p.c | 5 +- + drivers/soc/tegra/fuse/speedo-tegra210.c | 2 +- + drivers/soc/ti/knav_dma.c | 13 +- + drivers/soc/ti/knav_qmss_queue.c | 4 +- + drivers/spi/Kconfig | 3 + + drivers/spi/spi-bcm-qspi.c | 42 +- + drivers/spi/spi-bcm2835.c | 26 +- + drivers/spi/spi-bcm2835aux.c | 21 +- + drivers/spi/spi-cadence.c | 6 +- + drivers/spi/spi-davinci.c | 2 +- + drivers/spi/spi-dln2.c | 2 +- + drivers/spi/spi-dw-mid.c | 16 +- + drivers/spi/spi-dw.c | 14 +- + drivers/spi/spi-fsl-dspi.c | 2 +- + drivers/spi/spi-img-spfi.c | 4 +- + drivers/spi/spi-loopback-test.c | 4 +- + drivers/spi/spi-mt65xx.c | 17 +- + drivers/spi/spi-omap-100k.c | 8 +- + drivers/spi/spi-pic32.c | 1 + + drivers/spi/spi-pxa2xx-pci.c | 27 +- + drivers/spi/spi-pxa2xx.c | 7 +- + drivers/spi/spi-rb4xx.c | 2 +- + drivers/spi/spi-s3c24xx-fiq.S | 9 +- + drivers/spi/spi-sc18is602.c | 13 +- + drivers/spi/spi-sh.c | 13 +- + drivers/spi/spi-st-ssc4.c | 5 +- + drivers/spi/spi-sun6i.c | 20 +- + drivers/spi/spi-tegra114.c | 2 + + drivers/spi/spi-tegra20-sflash.c | 1 + + drivers/spi/spi-tegra20-slink.c | 2 + + drivers/spi/spi-ti-qspi.c | 1 + + drivers/spi/spi-topcliff-pch.c | 4 +- + drivers/spi/spi.c | 73 +- + drivers/spi/spi_qsd.c | 204 ++- + drivers/spi/spi_qsd.h | 3 +- + drivers/spi/spidev.c | 24 +- + drivers/ssb/sdio.c | 1 - + drivers/staging/android/ion/ion.c | 43 +- + drivers/staging/android/ion/ion_heap.c | 4 +- + drivers/staging/android/vsoc.c | 3 +- + .../staging/comedi/drivers/addi_apci_1032.c | 24 +- + .../staging/comedi/drivers/addi_apci_1500.c | 52 +- + .../staging/comedi/drivers/addi_apci_1564.c | 20 +- + drivers/staging/comedi/drivers/adv_pci1710.c | 10 +- + drivers/staging/comedi/drivers/cb_pcidas.c | 3 +- + drivers/staging/comedi/drivers/cb_pcidas64.c | 2 +- + drivers/staging/comedi/drivers/das6402.c | 2 +- + drivers/staging/comedi/drivers/das800.c | 2 +- + drivers/staging/comedi/drivers/dmm32at.c | 2 +- + drivers/staging/comedi/drivers/me4000.c | 2 +- + drivers/staging/comedi/drivers/mf6x4.c | 3 +- + drivers/staging/comedi/drivers/ni_6527.c | 2 +- + drivers/staging/comedi/drivers/pcl711.c | 2 +- + drivers/staging/comedi/drivers/pcl818.c | 2 +- + drivers/staging/comedi/drivers/vmk80xx.c | 3 + + drivers/staging/emxx_udc/emxx_udc.c | 4 +- + drivers/staging/fsl-mc/bus/mc-io.c | 7 +- + drivers/staging/fwserial/fwserial.c | 2 + + drivers/staging/gdm724x/gdm_lte.c | 20 +- + drivers/staging/greybus/audio_codec.c | 2 + + drivers/staging/greybus/audio_topology.c | 29 +- + drivers/staging/greybus/sdio.c | 10 +- + 
drivers/staging/greybus/uart.c | 2 - + drivers/staging/iio/cdc/ad7746.c | 1 - + drivers/staging/iio/light/tsl2583.c | 9 + + drivers/staging/ks7010/ks_wlan_net.c | 6 +- + drivers/staging/media/cec/cec-api.c | 8 +- + drivers/staging/media/omap4iss/iss.c | 4 +- + drivers/staging/media/s5p-cec/s5p_cec.c | 2 +- + drivers/staging/most/aim-sound/sound.c | 2 + + drivers/staging/octeon/ethernet-mdio.c | 6 - + drivers/staging/octeon/ethernet-rx.c | 34 +- + drivers/staging/octeon/ethernet.c | 9 + + drivers/staging/rtl8188eu/core/rtw_ap.c | 5 + + .../staging/rtl8188eu/os_dep/ioctl_linux.c | 6 +- + drivers/staging/rtl8188eu/os_dep/usb_intf.c | 1 + + drivers/staging/rtl8192e/rtl8192e/rtl_wx.c | 7 +- + drivers/staging/rtl8192e/rtllib.h | 2 +- + drivers/staging/rtl8192e/rtllib_rx.c | 2 +- + drivers/staging/rtl8192u/r8192U_core.c | 4 +- + drivers/staging/rtl8192u/r8192U_wx.c | 6 +- + drivers/staging/rtl8712/rtl871x_cmd.c | 6 +- + drivers/staging/rtl8712/rtl871x_ioctl_linux.c | 2 +- + drivers/staging/sm750fb/sm750.c | 1 + + drivers/staging/speakup/speakup_dectlk.c | 2 +- + drivers/staging/wlan-ng/hfa384x_usb.c | 5 - + drivers/staging/wlan-ng/prism2usb.c | 9 +- + drivers/target/iscsi/iscsi_target.c | 34 +- + drivers/target/iscsi/iscsi_target_login.c | 6 +- + drivers/target/iscsi/iscsi_target_login.h | 3 +- + drivers/target/iscsi/iscsi_target_nego.c | 3 +- + drivers/target/target_core_pscsi.c | 11 +- + drivers/target/target_core_sbc.c | 35 +- + drivers/target/target_core_transport.c | 28 +- + drivers/target/target_core_user.c | 9 +- + drivers/target/target_core_xcopy.c | 220 ++- + drivers/target/target_core_xcopy.h | 1 + + drivers/tee/tee_shm.c | 5 +- + drivers/thermal/fair_share.c | 4 + + drivers/thermal/hisi_thermal.c | 2 +- + drivers/thermal/mtk_thermal.c | 6 +- + drivers/thermal/thermal_core.c | 2 +- + .../ti-soc-thermal/omap4-thermal-data.c | 23 +- + .../thermal/ti-soc-thermal/omap4xxx-bandgap.h | 10 +- + drivers/tty/hvc/hvcs.c | 14 +- + drivers/tty/ipwireless/network.c | 4 +- + drivers/tty/ipwireless/tty.c | 2 +- + drivers/tty/n_gsm.c | 26 +- + drivers/tty/nozomi.c | 9 +- + drivers/tty/pty.c | 2 +- + drivers/tty/serial/8250/8250_core.c | 13 +- + drivers/tty/serial/8250/8250_mtk.c | 20 +- + drivers/tty/serial/8250/8250_omap.c | 13 +- + drivers/tty/serial/8250/8250_pci.c | 11 + + drivers/tty/serial/8250/8250_port.c | 37 +- + drivers/tty/serial/8250/serial_cs.c | 13 +- + drivers/tty/serial/Kconfig | 1 + + drivers/tty/serial/amba-pl011.c | 17 +- + drivers/tty/serial/fsl_lpuart.c | 3 + + drivers/tty/serial/imx.c | 20 +- + drivers/tty/serial/max310x.c | 6 +- + drivers/tty/serial/msm_serial_hs.c | 4 +- + drivers/tty/serial/rp2.c | 52 +- + drivers/tty/serial/samsung.c | 16 +- + drivers/tty/serial/serial_core.c | 4 + + drivers/tty/serial/serial_txx9.c | 3 + + drivers/tty/serial/sh-sci.c | 8 + + drivers/tty/serial/stm32-usart.c | 12 +- + drivers/tty/tty_io.c | 57 +- + drivers/tty/vt/consolemap.c | 2 +- + drivers/tty/vt/keyboard.c | 39 +- + drivers/tty/vt/vt.c | 60 +- + drivers/tty/vt/vt_ioctl.c | 57 +- + drivers/uio/uio_pdrv_genirq.c | 2 +- + drivers/usb/c67x00/c67x00-sched.c | 2 +- + drivers/usb/chipidea/ci_hdrc_imx.c | 9 +- + drivers/usb/chipidea/core.c | 24 + + drivers/usb/class/cdc-acm.c | 62 +- + drivers/usb/class/cdc-wdm.c | 72 +- + drivers/usb/class/usblp.c | 50 +- + drivers/usb/class/usbtmc.c | 8 +- + drivers/usb/common/usb-otg-fsm.c | 6 +- + drivers/usb/core/config.c | 11 + + drivers/usb/core/devio.c | 14 +- + drivers/usb/core/hub.c | 71 +- + drivers/usb/core/hub.h | 6 +- + drivers/usb/core/message.c | 
91 +- + drivers/usb/core/quirks.c | 60 +- + drivers/usb/core/sysfs.c | 5 + + drivers/usb/core/urb.c | 119 +- + drivers/usb/core/usb.c | 83 + + drivers/usb/core/usb.h | 3 + + drivers/usb/dwc2/core.h | 2 + + drivers/usb/dwc2/core_intr.c | 7 +- + drivers/usb/dwc2/gadget.c | 17 +- + drivers/usb/dwc2/hcd.c | 15 +- + drivers/usb/dwc2/hcd_intr.c | 14 +- + drivers/usb/dwc2/platform.c | 18 +- + drivers/usb/dwc3/core.c | 2 +- + drivers/usb/dwc3/core.h | 4 +- + drivers/usb/dwc3/dwc3-msm.c | 53 +- + drivers/usb/dwc3/ep0.c | 3 + + drivers/usb/dwc3/gadget.c | 78 +- + drivers/usb/dwc3/ulpi.c | 20 +- + drivers/usb/gadget/Kconfig | 2 + + drivers/usb/gadget/composite.c | 86 +- + drivers/usb/gadget/config.c | 8 + + drivers/usb/gadget/configfs.c | 24 +- + drivers/usb/gadget/function/f_accessory.c | 156 +- + drivers/usb/gadget/function/f_acm.c | 2 +- + drivers/usb/gadget/function/f_cdev.c | 6 +- + drivers/usb/gadget/function/f_ecm.c | 2 +- + drivers/usb/gadget/function/f_eem.c | 49 +- + drivers/usb/gadget/function/f_fs.c | 14 +- + drivers/usb/gadget/function/f_gsi.c | 2 + + drivers/usb/gadget/function/f_hid.c | 8 +- + drivers/usb/gadget/function/f_ipc.c | 7 +- + drivers/usb/gadget/function/f_loopback.c | 2 +- + drivers/usb/gadget/function/f_midi.c | 16 +- + drivers/usb/gadget/function/f_ncm.c | 61 +- + drivers/usb/gadget/function/f_printer.c | 26 +- + drivers/usb/gadget/function/f_rndis.c | 6 +- + drivers/usb/gadget/function/f_serial.c | 2 +- + drivers/usb/gadget/function/f_sourcesink.c | 3 +- + drivers/usb/gadget/function/f_subset.c | 2 +- + drivers/usb/gadget/function/f_tcm.c | 10 +- + drivers/usb/gadget/function/f_uac1.c | 2 + + drivers/usb/gadget/function/f_uac2.c | 35 +- + drivers/usb/gadget/function/f_uvc.c | 7 +- + drivers/usb/gadget/function/u_ether.c | 2 +- + drivers/usb/gadget/function/uvc_video.c | 3 +- + drivers/usb/gadget/legacy/acm_ms.c | 4 +- + drivers/usb/gadget/legacy/ether.c | 4 +- + drivers/usb/gadget/legacy/hid.c | 4 +- + drivers/usb/gadget/legacy/inode.c | 3 + + drivers/usb/gadget/u_f.h | 38 +- + drivers/usb/gadget/udc/atmel_usba_udc.c | 2 +- + drivers/usb/gadget/udc/bdc/Kconfig | 2 +- + drivers/usb/gadget/udc/bdc/bdc_core.c | 4 + + drivers/usb/gadget/udc/bdc/bdc_ep.c | 16 +- + drivers/usb/gadget/udc/core.c | 13 +- + drivers/usb/gadget/udc/dummy_hcd.c | 25 +- + drivers/usb/gadget/udc/fotg210-udc.c | 26 +- + drivers/usb/gadget/udc/goku_udc.c | 2 +- + drivers/usb/gadget/udc/gr_udc.c | 7 +- + drivers/usb/gadget/udc/lpc32xx_udc.c | 11 +- + drivers/usb/gadget/udc/m66592-udc.c | 2 +- + drivers/usb/gadget/udc/mv_udc_core.c | 3 +- + drivers/usb/gadget/udc/net2280.c | 4 +- + drivers/usb/gadget/udc/pch_udc.c | 49 +- + drivers/usb/gadget/udc/r8a66597-udc.c | 2 + + drivers/usb/gadget/udc/s3c2410_udc.c | 4 - + drivers/usb/gadget/usbstring.c | 4 +- + drivers/usb/host/ehci-exynos.c | 5 +- + drivers/usb/host/ehci-hcd.c | 13 + + drivers/usb/host/ehci-hub.c | 4 +- + drivers/usb/host/ehci-mv.c | 8 +- + drivers/usb/host/ehci-mxc.c | 2 + + drivers/usb/host/ehci-omap.c | 1 + + drivers/usb/host/ehci-pci.c | 10 + + drivers/usb/host/fotg210-hcd.c | 4 +- + drivers/usb/host/fsl-mph-dr-of.c | 9 +- + drivers/usb/host/max3421-hcd.c | 44 +- + drivers/usb/host/ohci-exynos.c | 5 +- + drivers/usb/host/ohci-hcd.c | 18 +- + drivers/usb/host/ohci-sm501.c | 1 + + drivers/usb/host/oxu210hp-hcd.c | 4 +- + drivers/usb/host/sl811-hcd.c | 9 +- + drivers/usb/host/xhci-ext-caps.h | 5 +- + drivers/usb/host/xhci-hub.c | 26 +- + drivers/usb/host/xhci-mem.c | 9 + + drivers/usb/host/xhci-mtk-sch.c | 4 + + drivers/usb/host/xhci-mtk.c | 10 
+- + drivers/usb/host/xhci-pci.c | 3 + + drivers/usb/host/xhci-plat.c | 24 + + drivers/usb/host/xhci-ring.c | 33 +- + drivers/usb/host/xhci-tegra.c | 7 + + drivers/usb/host/xhci.c | 66 +- + drivers/usb/host/xhci.h | 2 +- + drivers/usb/misc/adutux.c | 1 + + drivers/usb/misc/lvstest.c | 2 +- + drivers/usb/misc/sisusbvga/Kconfig | 2 +- + drivers/usb/misc/sisusbvga/sisusb.c | 2 +- + drivers/usb/misc/trancevibrator.c | 4 +- + drivers/usb/misc/usbtest.c | 1 + + drivers/usb/misc/uss720.c | 1 + + drivers/usb/misc/yurex.c | 5 +- + drivers/usb/musb/musb_core.c | 31 +- + drivers/usb/pd/policy_engine.c | 53 +- + drivers/usb/renesas_usbhs/fifo.c | 9 + + drivers/usb/renesas_usbhs/pipe.c | 2 + + drivers/usb/serial/ch341.c | 7 +- + drivers/usb/serial/cp210x.c | 25 + + drivers/usb/serial/cyberjack.c | 7 +- + drivers/usb/serial/cypress_m8.c | 2 + + drivers/usb/serial/cypress_m8.h | 3 + + drivers/usb/serial/digi_acceleport.c | 45 +- + drivers/usb/serial/ftdi_sio.c | 48 +- + drivers/usb/serial/ftdi_sio_ids.h | 19 + + drivers/usb/serial/io_edgeport.c | 26 +- + drivers/usb/serial/iuu_phoenix.c | 42 +- + drivers/usb/serial/keyspan_pda.c | 65 +- + drivers/usb/serial/kl5kusb105.c | 10 +- + drivers/usb/serial/mos7720.c | 6 +- + drivers/usb/serial/mos7840.c | 4 +- + drivers/usb/serial/omninet.c | 2 + + drivers/usb/serial/option.c | 70 +- + drivers/usb/serial/pl2303.c | 2 + + drivers/usb/serial/pl2303.h | 2 + + drivers/usb/serial/qcserial.c | 1 + + drivers/usb/serial/quatech2.c | 6 +- + drivers/usb/serial/ti_usb_3410_5052.c | 3 + + drivers/usb/storage/uas.c | 14 +- + drivers/usb/storage/unusual_devs.h | 2 +- + drivers/usb/storage/unusual_uas.h | 28 + + drivers/usb/usbip/stub_dev.c | 49 +- + drivers/usb/usbip/usbip_common.h | 3 + + drivers/usb/usbip/usbip_event.c | 2 + + drivers/usb/usbip/vhci_hcd.c | 1 + + drivers/usb/usbip/vhci_sysfs.c | 63 +- + drivers/usb/usbip/vudc_dev.c | 1 + + drivers/usb/usbip/vudc_sysfs.c | 55 +- + drivers/vfio/pci/Kconfig | 1 + + drivers/vfio/pci/vfio_pci.c | 368 +++- + drivers/vfio/pci/vfio_pci_config.c | 67 +- + drivers/vfio/pci/vfio_pci_intrs.c | 18 +- + drivers/vfio/pci/vfio_pci_private.h | 16 + + drivers/vfio/pci/vfio_pci_rdwr.c | 29 +- + drivers/vfio/platform/vfio_platform_common.c | 2 +- + drivers/vfio/vfio_iommu_type1.c | 36 +- + drivers/vhost/net.c | 6 +- + drivers/vhost/vhost.c | 12 +- + drivers/vhost/vringh.c | 11 +- + drivers/video/backlight/lm3630a_bl.c | 12 +- + drivers/video/backlight/lp855x_bl.c | 20 +- + drivers/video/backlight/sky81452-backlight.c | 1 + + drivers/video/console/Kconfig | 25 - + drivers/video/console/bitblit.c | 15 +- + drivers/video/console/fbcon.c | 373 +---- + drivers/video/console/fbcon.h | 9 +- + drivers/video/console/fbcon_ccw.c | 15 +- + drivers/video/console/fbcon_cw.c | 15 +- + drivers/video/console/fbcon_rotate.c | 1 + + drivers/video/console/fbcon_ud.c | 15 +- + drivers/video/console/newport_con.c | 19 +- + drivers/video/console/tileblit.c | 3 +- + drivers/video/console/vgacon.c | 211 +-- + drivers/video/fbdev/Kconfig | 2 +- + drivers/video/fbdev/core/fbcmap.c | 8 +- + drivers/video/fbdev/core/fbmem.c | 4 + + drivers/video/fbdev/hgafb.c | 21 +- + drivers/video/fbdev/hyperv_fb.c | 5 +- + drivers/video/fbdev/imsttfb.c | 5 - + drivers/video/fbdev/msm/mdp3_ctrl.c | 3 + + drivers/video/fbdev/msm/mdss_spi_display.c | 1 - + drivers/video/fbdev/msm/mdss_spi_panel.c | 2 + + drivers/video/fbdev/msm/mdss_spi_panel.h | 2 +- + drivers/video/fbdev/neofb.c | 1 + + drivers/video/fbdev/omap2/omapfb/dss/dispc.c | 7 +- + drivers/video/fbdev/omap2/omapfb/dss/dsi.c | 7 
+- + drivers/video/fbdev/omap2/omapfb/dss/dss.c | 9 +- + drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c | 5 +- + drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c | 5 +- + drivers/video/fbdev/omap2/omapfb/dss/venc.c | 7 +- + drivers/video/fbdev/pvr2fb.c | 2 + + drivers/video/fbdev/pxafb.c | 4 +- + drivers/video/fbdev/sis/init.c | 11 +- + drivers/video/fbdev/sm712fb.c | 2 + + drivers/video/fbdev/vga16fb.c | 16 +- + drivers/video/fbdev/w100fb.c | 2 + + drivers/virt/fsl_hypervisor.c | 17 +- + drivers/virtio/virtio_ring.c | 9 +- + drivers/w1/masters/mxc_w1.c | 14 +- + drivers/w1/masters/omap_hdq.c | 10 +- + drivers/watchdog/aspeed_wdt.c | 2 +- + drivers/watchdog/da9062_wdt.c | 5 - + drivers/watchdog/f71808e_wdt.c | 13 +- + drivers/watchdog/lpc18xx_wdt.c | 2 +- + drivers/watchdog/qcom-wdt.c | 2 +- + drivers/watchdog/rdc321x_wdt.c | 5 +- + drivers/watchdog/sbc60xxwdt.c | 2 +- + drivers/watchdog/sc520_wdt.c | 2 +- + drivers/watchdog/w83877f_wdt.c | 2 +- + drivers/xen/balloon.c | 12 +- + drivers/xen/events/events_2l.c | 31 +- + drivers/xen/events/events_base.c | 608 ++++++- + drivers/xen/events/events_fifo.c | 87 +- + drivers/xen/events/events_internal.h | 42 +- + drivers/xen/evtchn.c | 7 +- + drivers/xen/gntdev.c | 33 +- + drivers/xen/preempt.c | 2 +- + drivers/xen/xen-pciback/pci_stub.c | 14 +- + drivers/xen/xen-pciback/pciback.h | 12 +- + drivers/xen/xen-pciback/pciback_ops.c | 48 +- + drivers/xen/xen-pciback/vpci.c | 14 +- + drivers/xen/xen-pciback/xenbus.c | 26 +- + drivers/xen/xen-scsiback.c | 27 +- + drivers/xen/xenbus/xenbus_client.c | 18 +- + drivers/xen/xenbus/xenbus_probe.c | 1 + + drivers/xen/xenbus/xenbus_probe.h | 2 + + drivers/xen/xenbus/xenbus_probe_backend.c | 7 + + drivers/xen/xenbus/xenbus_xs.c | 38 +- + fs/9p/v9fs.c | 5 +- + fs/9p/vfs_file.c | 4 +- + fs/affs/amigaffs.c | 63 +- + fs/affs/file.c | 26 +- + fs/binfmt_misc.c | 29 +- + fs/block_dev.c | 10 + + fs/btrfs/Kconfig | 2 + + fs/btrfs/backref.c | 1 + + fs/btrfs/compression.c | 2 +- + fs/btrfs/ctree.c | 23 +- + fs/btrfs/ctree.h | 2 + + fs/btrfs/delayed-inode.c | 8 + + fs/btrfs/disk-io.c | 14 +- + fs/btrfs/export.c | 8 +- + fs/btrfs/export.h | 5 + + fs/btrfs/extent-tree.c | 21 +- + fs/btrfs/extent_io.c | 54 +- + fs/btrfs/extent_io.h | 6 +- + fs/btrfs/file-item.c | 16 +- + fs/btrfs/file.c | 4 +- + fs/btrfs/free-space-cache.c | 12 +- + fs/btrfs/inode.c | 95 +- + fs/btrfs/ioctl.c | 48 +- + fs/btrfs/qgroup.c | 6 +- + fs/btrfs/raid56.c | 58 +- + fs/btrfs/reada.c | 2 + + fs/btrfs/relocation.c | 10 +- + fs/btrfs/scrub.c | 17 +- + fs/btrfs/send.c | 67 + + fs/btrfs/super.c | 18 +- + fs/btrfs/tests/btrfs-tests.c | 8 +- + fs/btrfs/tests/inode-tests.c | 1 + + fs/btrfs/transaction.c | 6 +- + fs/btrfs/tree-log.c | 41 +- + fs/btrfs/volumes.c | 25 +- + fs/buffer.c | 25 +- + fs/cachefiles/rdwr.c | 3 +- + fs/ceph/addr.c | 12 +- + fs/ceph/caps.c | 26 +- + fs/ceph/file.c | 1 + + fs/ceph/inode.c | 1 + + fs/ceph/mds_client.c | 14 +- + fs/cifs/asn1.c | 16 +- + fs/cifs/cifs_unicode.c | 8 +- + fs/cifs/cifsfs.c | 2 +- + fs/cifs/cifsglob.h | 9 +- + fs/cifs/connect.c | 5 + + fs/cifs/dir.c | 22 +- + fs/cifs/file.c | 22 +- + fs/cifs/inode.c | 9 + + fs/cifs/misc.c | 17 +- + fs/cifs/smb1ops.c | 8 +- + fs/cifs/smb2misc.c | 36 +- + fs/cifs/smb2ops.c | 58 +- + fs/cifs/smb2pdu.c | 2 + + fs/cifs/smb2pdu.h | 2 +- + fs/compat_ioctl.c | 17 - + fs/configfs/file.c | 16 +- + fs/crypto/fname.c | 9 +- + fs/direct-io.c | 5 +- + fs/dlm/config.c | 12 + + fs/dlm/debug_fs.c | 1 + + fs/dlm/dlm_internal.h | 1 - + fs/dlm/lockspace.c | 6 +- + fs/dlm/lowcomms.c | 2 +- + 
fs/ecryptfs/crypto.c | 6 +- + fs/ecryptfs/main.c | 6 + + fs/efivarfs/inode.c | 2 + + fs/efivarfs/super.c | 3 + + fs/eventpoll.c | 71 +- + fs/exec.c | 2 +- + fs/ext2/ialloc.c | 3 +- + fs/ext4/block_validity.c | 71 +- + fs/ext4/ext4.h | 9 +- + fs/ext4/ext4_extents.h | 9 +- + fs/ext4/extents.c | 64 +- + fs/ext4/extents_status.c | 4 +- + fs/ext4/fsync.c | 28 +- + fs/ext4/ialloc.c | 59 +- + fs/ext4/indirect.c | 6 +- + fs/ext4/inline.c | 1 + + fs/ext4/inode.c | 24 +- + fs/ext4/ioctl.c | 3 + + fs/ext4/mballoc.c | 5 +- + fs/ext4/namei.c | 79 +- + fs/ext4/super.c | 28 +- + fs/f2fs/checkpoint.c | 8 +- + fs/f2fs/file.c | 3 +- + fs/f2fs/inline.c | 3 +- + fs/fat/inode.c | 6 + + fs/fs-writeback.c | 120 +- + fs/fuse/cuse.c | 2 + + fs/fuse/dev.c | 40 +- + fs/fuse/file.c | 12 +- + fs/fuse/fuse_i.h | 2 +- + fs/gfs2/glock.c | 5 +- + fs/gfs2/lock_dlm.c | 8 +- + fs/gfs2/ops_fstype.c | 20 +- + fs/gfs2/rgrp.c | 9 +- + fs/gfs2/super.c | 10 +- + fs/hfs/bfind.c | 14 +- + fs/hfs/bnode.c | 25 +- + fs/hfs/btree.h | 7 + + fs/hfs/super.c | 10 +- + fs/hugetlbfs/inode.c | 7 +- + fs/isofs/dir.c | 1 + + fs/isofs/namei.c | 1 + + fs/jbd2/transaction.c | 26 + + fs/jffs2/compr_rtime.c | 3 + + fs/jffs2/dir.c | 6 +- + fs/jffs2/readinode.c | 16 + + fs/jffs2/scan.c | 2 +- + fs/jffs2/summary.c | 3 + + fs/jfs/inode.c | 3 +- + fs/jfs/jfs_dmap.c | 2 +- + fs/jfs/jfs_dmap.h | 2 +- + fs/jfs/jfs_filsys.h | 1 + + fs/jfs/jfs_logmgr.c | 1 + + fs/jfs/jfs_mount.c | 10 + + fs/libfs.c | 6 +- + fs/lockd/host.c | 20 +- + fs/minix/inode.c | 36 +- + fs/minix/itree_common.c | 8 +- + fs/namespace.c | 57 +- + fs/nfs/Kconfig | 2 +- + fs/nfs/client.c | 2 +- + fs/nfs/dir.c | 3 + + fs/nfs/filelayout/filelayout.c | 2 +- + fs/nfs/flexfilelayout/flexfilelayout.c | 13 +- + fs/nfs/inode.c | 10 +- + fs/nfs/internal.h | 12 +- + fs/nfs/namespace.c | 12 +- + fs/nfs/nfs3proc.c | 4 +- + fs/nfs/nfs3xdr.c | 3 +- + fs/nfs/nfs42proc.c | 21 +- + fs/nfs/nfs4file.c | 2 +- + fs/nfs/nfs4proc.c | 34 +- + fs/nfs/nfs4xdr.c | 6 +- + fs/nfs/pagelist.c | 12 +- + fs/nfs/pnfs.c | 17 +- + fs/nfs_common/grace.c | 6 +- + fs/nfsd/nfs3xdr.c | 7 +- + fs/nfsd/nfs4callback.c | 2 + + fs/nfsd/nfssvc.c | 3 +- + fs/nilfs2/segment.c | 2 + + fs/nilfs2/sysfs.c | 1 + + fs/ntfs/inode.c | 14 +- + fs/ocfs2/aops.c | 11 +- + fs/ocfs2/cluster/heartbeat.c | 8 +- + fs/ocfs2/file.c | 82 +- + fs/ocfs2/filecheck.c | 6 +- + fs/ocfs2/ocfs2.h | 4 +- + fs/ocfs2/ocfs2_fs.h | 4 +- + fs/ocfs2/stackglue.c | 8 +- + fs/ocfs2/suballoc.c | 13 +- + fs/ocfs2/super.c | 5 +- + fs/orangefs/super.c | 2 +- + fs/overlayfs/copy_up.c | 17 +- + fs/pipe.c | 19 +- + fs/proc/base.c | 15 +- + fs/proc/inode.c | 2 +- + fs/proc/self.c | 9 +- + fs/proc/task_mmu.c | 1 + + fs/proc/thread_self.c | 2 +- + fs/quota/quota_tree.c | 8 +- + fs/quota/quota_v2.c | 1 + + fs/reiserfs/inode.c | 9 +- + fs/reiserfs/journal.c | 14 + + fs/reiserfs/stree.c | 27 + + fs/reiserfs/super.c | 16 +- + fs/reiserfs/xattr.c | 7 + + fs/reiserfs/xattr.h | 2 +- + fs/romfs/storage.c | 4 +- + fs/select.c | 10 +- + fs/seq_file.c | 3 + + fs/squashfs/export.c | 45 +- + fs/squashfs/file.c | 6 +- + fs/squashfs/id.c | 42 +- + fs/squashfs/squashfs_fs.h | 1 + + fs/squashfs/squashfs_fs_sb.h | 1 + + fs/squashfs/super.c | 6 +- + fs/squashfs/xattr.h | 10 +- + fs/squashfs/xattr_id.c | 68 +- + fs/super.c | 33 +- + fs/ubifs/debug.c | 1 + + fs/ubifs/dir.c | 7 + + fs/ubifs/io.c | 29 +- + fs/udf/inode.c | 34 +- + fs/udf/namei.c | 4 + + fs/udf/super.c | 6 + + fs/ufs/super.c | 2 +- + fs/xattr.c | 84 +- + fs/xfs/libxfs/xfs_alloc.c | 16 + + fs/xfs/libxfs/xfs_attr_leaf.c | 13 +- + 
fs/xfs/libxfs/xfs_bmap.c | 4 + + fs/xfs/libxfs/xfs_dir2_node.c | 1 + + fs/xfs/libxfs/xfs_rmap.c | 2 +- + fs/xfs/xfs_icache.c | 58 +- + fs/xfs/xfs_iops.c | 12 +- + fs/xfs/xfs_log.c | 9 +- + fs/xfs/xfs_pnfs.c | 2 +- + fs/xfs/xfs_reflink.c | 21 +- + fs/xfs/xfs_rtalloc.c | 21 +- + fs/xfs/xfs_sysfs.h | 6 +- + fs/xfs/xfs_trans_dquot.c | 2 +- + gen_headers_arm.bp | 2 + + gen_headers_arm64.bp | 2 + + include/acpi/acpi_bus.h | 1 + + include/asm-generic/vmlinux.lds.h | 11 +- + include/crypto/if_alg.h | 4 +- + include/crypto/internal/hash.h | 8 +- + include/linux/acpi.h | 7 + + include/linux/backing-dev.h | 10 + + include/linux/bitops.h | 2 +- + include/linux/blkdev.h | 42 +- + include/linux/bvec.h | 9 +- + include/linux/can/skb.h | 28 +- + include/linux/cgroup-defs.h | 8 +- + include/linux/cgroup.h | 4 +- + include/linux/compat.h | 2 - + include/linux/compiler-gcc.h | 6 + + include/linux/console_struct.h | 1 + + include/linux/debugfs.h | 5 +- + include/linux/device-mapper.h | 2 - + include/linux/device.h | 1 + + include/linux/diagchar.h | 40 +- + include/linux/eeprom_93xx46.h | 2 + + include/linux/efi.h | 4 + + include/linux/elfcore.h | 22 + + include/linux/elfnote.h | 2 +- + include/linux/extcon/extcon-adc-jack.h | 2 +- + include/linux/font.h | 13 + + include/linux/fs.h | 8 +- + include/linux/ftrace.h | 4 +- + include/linux/futex.h | 44 +- + include/linux/genhd.h | 2 + + include/linux/hid.h | 47 +- + include/linux/hil_mlc.h | 2 +- + include/linux/hugetlb.h | 20 +- + include/linux/i2c-algo-pca.h | 15 + + include/linux/icmpv6.h | 48 +- + include/linux/ide.h | 1 - + include/linux/idr.h | 5 + + include/linux/if_macvlan.h | 3 +- + include/linux/if_vlan.h | 29 +- + include/linux/intel-iommu.h | 6 +- + include/linux/io-mapping.h | 5 +- + include/linux/ipv6.h | 2 +- + include/linux/kdev_t.h | 22 +- + include/linux/kgdb.h | 2 +- + include/linux/khugepaged.h | 5 + + include/linux/kprobes.h | 4 + + include/linux/kref.h | 2 + + include/linux/kvm_host.h | 11 +- + include/linux/libata.h | 17 +- + include/linux/log2.h | 2 +- + include/linux/memcontrol.h | 23 +- + include/linux/mfd/abx500/ux500_chargalg.h | 2 +- + include/linux/mfd/rt5033-private.h | 4 +- + include/linux/mmdebug.h | 21 +- + include/linux/mmzone.h | 3 +- + include/linux/msi.h | 2 +- + include/linux/msm-sps.h | 4 +- + include/linux/msm_ep_pcie.h | 34 +- + include/linux/mtd/map.h | 2 +- + include/linux/mtd/nand.h | 6 +- + include/linux/mtd/pfow.h | 2 +- + include/linux/netdevice.h | 4 +- + include/linux/netfilter/x_tables.h | 11 +- + include/linux/of.h | 1 + + include/linux/overflow.h | 1 + + include/linux/page_ext.h | 8 + + include/linux/pagemap.h | 13 +- + include/linux/perf_event.h | 2 +- + include/linux/prandom.h | 110 ++ + include/linux/qed/qed_chain.h | 26 +- + include/linux/random.h | 63 +- + include/linux/sched.h | 9 +- + include/linux/sched/sysctl.h | 1 + + include/linux/seq_buf.h | 2 +- + include/linux/seqlock.h | 11 +- + include/linux/skbuff.h | 16 +- + include/linux/spi/qcom-spi.h | 8 +- + include/linux/spi/spi.h | 5 + + include/linux/string.h | 34 + + include/linux/sunrpc/gss_api.h | 1 + + include/linux/sunrpc/svcauth_gss.h | 3 +- + include/linux/sunrpc/xdr.h | 3 +- + include/linux/sunrpc/xprt.h | 1 + + include/linux/swapops.h | 9 - + include/linux/tcp.h | 4 +- + include/linux/thread_info.h | 16 + + include/linux/time64.h | 4 + + include/linux/trace_seq.h | 4 +- + include/linux/tracepoint.h | 2 +- + include/linux/tty.h | 4 + + include/linux/tty_driver.h | 2 +- + include/linux/u64_stats_sync.h | 7 +- + include/linux/uaccess.h | 26 + + 
include/linux/usb.h | 37 + + include/linux/usb/composite.h | 3 + + include/linux/usb/otg-fsm.h | 1 + + include/linux/usb/quirks.h | 3 + + include/linux/usb/usbnet.h | 2 + + include/linux/virtio_vsock.h | 3 +- + include/linux/xattr.h | 2 + + include/linux/zsmalloc.h | 2 +- + include/net/addrconf.h | 1 + + include/net/af_unix.h | 1 + + include/net/bluetooth/hci_core.h | 32 +- + include/net/bluetooth/l2cap.h | 2 + + include/net/bonding.h | 8 + + include/net/caif/caif_dev.h | 2 +- + include/net/caif/cfcnfg.h | 2 +- + include/net/caif/cfserl.h | 1 + + include/net/cfg80211.h | 5 +- + include/net/dst.h | 10 +- + include/net/dst_metadata.h | 4 +- + include/net/genetlink.h | 8 - + include/net/icmp.h | 10 + + include/net/inet_connection_sock.h | 4 + + include/net/inet_ecn.h | 1 + + include/net/ip6_route.h | 2 +- + include/net/llc_pdu.h | 31 +- + include/net/ndisc.h | 2 +- + include/net/netfilter/nf_tables.h | 2 + + include/net/netns/ipv4.h | 1 + + include/net/nfc/nci_core.h | 1 + + include/net/red.h | 16 +- + include/net/rtnetlink.h | 2 + + include/net/sctp/constants.h | 12 +- + include/net/sock.h | 15 +- + include/net/tcp.h | 6 +- + include/net/xfrm.h | 49 +- + include/scsi/libfcoe.h | 2 +- + include/scsi/libiscsi.h | 3 + + include/scsi/scsi_common.h | 7 + + include/scsi/scsi_transport_iscsi.h | 2 + + include/soc/nps/common.h | 6 + + include/soc/qcom/boot_stats.h | 4 +- + include/sound/compress_driver.h | 10 +- + include/target/target_core_base.h | 4 + + include/trace/events/target.h | 12 +- + include/trace/events/writeback.h | 48 +- + include/uapi/linux/bcache.h | 2 +- + include/uapi/linux/btrfs_tree.h | 2 + + include/uapi/linux/const.h | 5 + + include/uapi/linux/cryptouser.h | 2 + + include/uapi/linux/dvb/frontend.h | 592 +++++-- + include/uapi/linux/ethtool.h | 2 +- + include/uapi/linux/kernel.h | 9 +- + include/uapi/linux/kvm.h | 2 + + include/uapi/linux/lightnvm.h | 2 +- + include/uapi/linux/loop.h | 31 +- + include/uapi/linux/mroute6.h | 2 +- + include/uapi/linux/msdos_fs.h | 2 + + include/uapi/linux/msm_ipa.h | 4 +- + include/uapi/linux/netfilter/nf_tables.h | 2 +- + .../uapi/linux/netfilter/nfnetlink_cthelper.h | 2 +- + include/uapi/linux/netfilter/x_tables.h | 2 +- + include/uapi/linux/netlink.h | 2 +- + include/uapi/linux/pr.h | 2 + + include/uapi/linux/qrtr.h | 1 + + include/uapi/linux/sysctl.h | 2 +- + include/uapi/linux/tty_flags.h | 4 +- + include/uapi/linux/usb/ch9.h | 3 + + include/xen/events.h | 29 +- + include/xen/grant_table.h | 1 + + include/xen/xenbus.h | 15 +- + init/Kconfig | 6 +- + init/main.c | 6 +- + init/version.c | 4 +- + kernel/Makefile | 4 - + kernel/audit.c | 48 +- + kernel/audit.h | 2 +- + kernel/audit_watch.c | 2 - + kernel/auditfilter.c | 13 +- + kernel/bpf/hashtab.c | 8 - + kernel/bpf/stackmap.c | 2 + + kernel/bpf/verifier.c | 7 +- + kernel/cgroup.c | 43 +- + kernel/cpu.c | 6 +- + kernel/cpu_pm.c | 4 +- + kernel/debug/debug_core.c | 27 +- + kernel/debug/kdb/kdb_io.c | 8 +- + kernel/debug/kdb/kdb_private.h | 2 +- + kernel/elfcore.c | 25 - + kernel/events/core.c | 64 +- + kernel/events/internal.h | 2 +- + kernel/events/uprobes.c | 2 +- + kernel/exit.c | 60 +- + kernel/fork.c | 50 +- + kernel/futex.c | 1476 +++++++++++++---- + kernel/futex_compat.c | 201 --- + kernel/gcov/gcc_4_7.c | 4 +- + kernel/irq/Kconfig | 1 + + kernel/irq/manage.c | 4 + + kernel/kexec_file.c | 4 +- + kernel/kmod.c | 9 + + kernel/kprobes.c | 48 +- + kernel/kthread.c | 80 +- + kernel/locking/lockdep_proc.c | 2 +- + kernel/locking/rtmutex-debug.c | 9 - + kernel/locking/rtmutex-debug.h | 3 - + 
kernel/locking/rtmutex.c | 213 ++- + kernel/locking/rtmutex.h | 2 - + kernel/locking/rtmutex_common.h | 17 +- + kernel/module.c | 27 +- + kernel/pid.c | 4 +- + kernel/power/hibernate.c | 11 - + kernel/printk/nmi.c | 16 +- + kernel/printk/printk.c | 3 + + kernel/ptrace.c | 18 +- + kernel/reboot.c | 28 +- + kernel/relay.c | 1 + + kernel/sched/core.c | 8 +- + kernel/sched/fair.c | 16 +- + kernel/sched/rt.c | 1 + + kernel/seccomp.c | 2 + + kernel/sys.c | 4 +- + kernel/sysctl.c | 2 +- + kernel/time/alarmtimer.c | 2 +- + kernel/time/hrtimer.c | 2 +- + kernel/time/posix-cpu-timers.c | 2 +- + kernel/time/timekeeping.c | 3 +- + kernel/time/timer.c | 5 +- + kernel/trace/blktrace.c | 13 + + kernel/trace/ftrace.c | 56 +- + kernel/trace/ring_buffer.c | 94 +- + kernel/trace/trace.c | 76 +- + kernel/trace/trace.h | 32 +- + kernel/trace/trace_clock.c | 44 +- + kernel/trace/trace_entries.h | 2 +- + kernel/trace/trace_events.c | 5 +- + kernel/trace/trace_events_hist.c | 6 +- + kernel/trace/trace_events_trigger.c | 21 +- + kernel/trace/trace_hwlat.c | 37 +- + kernel/trace/trace_selftest.c | 9 +- + kernel/tracepoint.c | 80 +- + kernel/workqueue.c | 47 +- + lib/crc32.c | 2 +- + lib/decompress_unlz4.c | 8 + + lib/dynamic_debug.c | 23 +- + lib/fonts/font_10x18.c | 9 +- + lib/fonts/font_6x10.c | 9 +- + lib/fonts/font_6x11.c | 9 +- + lib/fonts/font_7x14.c | 9 +- + lib/fonts/font_8x16.c | 9 +- + lib/fonts/font_8x8.c | 9 +- + lib/fonts/font_acorn_8x8.c | 9 +- + lib/fonts/font_mini_4x6.c | 8 +- + lib/fonts/font_pearl_8x8.c | 9 +- + lib/fonts/font_sun12x22.c | 9 +- + lib/fonts/font_sun8x16.c | 7 +- + lib/genalloc.c | 25 +- + lib/iov_iter.c | 2 +- + lib/kobject_uevent.c | 9 +- + lib/mpi/longlong.h | 2 +- + lib/random32.c | 462 ++++-- + lib/seq_buf.c | 8 +- + lib/stackdepot.c | 6 +- + lib/string.c | 113 +- + lib/swiotlb.c | 6 +- + lib/zlib_inflate/inffast.c | 91 +- + mm/backing-dev.c | 1 + + mm/filemap.c | 8 + + mm/huge_memory.c | 94 +- + mm/hugetlb.c | 138 +- + mm/khugepaged.c | 72 +- + mm/ksm.c | 1 + + mm/maccess.c | 167 +- + mm/memblock.c | 48 +- + mm/memcontrol.c | 4 +- + mm/memory-failure.c | 87 +- + mm/memory.c | 8 +- + mm/memory_hotplug.c | 10 +- + mm/mempolicy.c | 6 +- + mm/mmap.c | 3 + + mm/page-writeback.c | 4 +- + mm/page_alloc.c | 17 +- + mm/page_ext.c | 8 +- + mm/page_io.c | 11 +- + mm/pagewalk.c | 4 +- + mm/slab_common.c | 2 +- + mm/slob.c | 2 + + mm/slub.c | 32 +- + mm/swap_state.c | 3 +- + mm/swapfile.c | 2 +- + mm/vmstat.c | 3 + + mm/zsmalloc.c | 17 +- + net/802/garp.c | 14 + + net/802/mrp.c | 14 + + net/9p/trans_fd.c | 58 +- + net/Makefile | 2 +- + net/appletalk/ddp.c | 33 +- + net/atm/lec.c | 6 + + net/ax25/af_ax25.c | 10 +- + net/batman-adv/bat_iv_ogm.c | 4 +- + net/batman-adv/bridge_loop_avoidance.c | 47 +- + net/batman-adv/bridge_loop_avoidance.h | 4 +- + net/batman-adv/gateway_client.c | 6 +- + net/batman-adv/log.c | 1 + + net/batman-adv/routing.c | 4 + + net/batman-adv/soft-interface.c | 6 +- + net/batman-adv/translation-table.c | 2 + + net/bluetooth/6lowpan.c | 5 + + net/bluetooth/a2mp.c | 23 +- + net/bluetooth/amp.c | 3 + + net/bluetooth/cmtp/core.c | 5 + + net/bluetooth/hci_conn.c | 17 + + net/bluetooth/hci_core.c | 29 +- + net/bluetooth/hci_event.c | 127 +- + net/bluetooth/hci_request.c | 12 +- + net/bluetooth/hci_sock.c | 49 +- + net/bluetooth/hci_sysfs.c | 3 + + net/bluetooth/hidp/core.c | 2 +- + net/bluetooth/l2cap_core.c | 40 +- + net/bluetooth/l2cap_sock.c | 39 +- + net/bluetooth/mgmt.c | 13 +- + net/bluetooth/smp.c | 9 + + net/bridge/br_device.c | 1 + + net/bridge/br_if.c | 19 +- + 
net/bridge/br_netfilter_hooks.c | 7 +- + net/bridge/br_vlan.c | 4 +- + net/bridge/netfilter/ebt_limit.c | 1 + + net/caif/caif_dev.c | 13 +- + net/caif/caif_socket.c | 3 +- + net/caif/caif_usb.c | 14 +- + net/caif/cfcnfg.c | 16 +- + net/caif/cfserl.c | 5 + + net/can/bcm.c | 71 +- + net/can/gw.c | 3 + + net/can/raw.c | 82 +- + net/ceph/messenger.c | 5 + + net/compat.c | 3 +- + net/core/dev.c | 65 +- + net/core/devlink.c | 6 +- + net/core/fib_rules.c | 2 +- + net/core/filter.c | 11 +- + net/core/neighbour.c | 3 +- + net/core/net-sysfs.c | 2 +- + net/core/netpoll.c | 24 +- + net/core/pktgen.c | 2 +- + net/core/rtnetlink.c | 4 + + net/core/skbuff.c | 54 +- + net/core/sock.c | 25 +- + net/dcb/dcbnl.c | 2 + + net/dccp/dccp.h | 6 +- + net/dccp/ipv6.c | 5 + + net/decnet/af_decnet.c | 27 +- + net/hsr/hsr_device.c | 3 +- + net/hsr/hsr_framereg.c | 3 +- + net/ieee802154/nl-mac.c | 11 +- + net/ieee802154/nl-phy.c | 4 +- + net/ieee802154/nl802154.c | 61 +- + net/ieee802154/socket.c | 7 +- + net/ipc_router/Kconfig | 11 + + net/ipc_router/ipc_router_core.c | 8 +- + net/ipv4/cipso_ipv4.c | 1 + + net/ipv4/fib_frontend.c | 2 +- + net/ipv4/fib_semantics.c | 2 +- + net/ipv4/fib_trie.c | 2 +- + net/ipv4/icmp.c | 34 + + net/ipv4/igmp.c | 1 + + net/ipv4/inet_connection_sock.c | 37 +- + net/ipv4/inet_diag.c | 4 +- + net/ipv4/inet_hashtables.c | 1 + + net/ipv4/ip_gre.c | 2 + + net/ipv4/ip_output.c | 5 +- + net/ipv4/ip_tunnel.c | 24 +- + net/ipv4/ipconfig.c | 13 +- + net/ipv4/netfilter/arp_tables.c | 2 + + net/ipv4/netfilter/ip_tables.c | 23 +- + net/ipv4/netfilter/ipt_CLUSTERIP.c | 1 + + net/ipv4/netfilter/ipt_rpfilter.c | 2 +- + net/ipv4/ping.c | 15 +- + net/ipv4/route.c | 43 +- + net/ipv4/syncookies.c | 9 +- + net/ipv4/sysctl_net_ipv4.c | 33 +- + net/ipv4/tcp.c | 15 +- + net/ipv4/tcp_bbr.c | 4 +- + net/ipv4/tcp_cong.c | 2 +- + net/ipv4/tcp_cubic.c | 2 + + net/ipv4/tcp_input.c | 28 +- + net/ipv4/tcp_ipv4.c | 20 +- + net/ipv4/tcp_minisocks.c | 2 +- + net/ipv4/tcp_output.c | 43 +- + net/ipv4/udp.c | 12 +- + net/ipv4/udp_offload.c | 2 +- + net/ipv6/addrconf.c | 7 +- + net/ipv6/ah6.c | 3 +- + net/ipv6/anycast.c | 17 +- + net/ipv6/datagram.c | 21 +- + net/ipv6/icmp.c | 19 +- + net/ipv6/ip6_gre.c | 23 +- + net/ipv6/ip6_icmp.c | 46 +- + net/ipv6/ip6_input.c | 10 - + net/ipv6/ip6_tunnel.c | 13 +- + net/ipv6/ip6_vti.c | 3 +- + net/ipv6/ipv6_sockglue.c | 14 +- + net/ipv6/mcast.c | 4 +- + net/ipv6/netfilter/ip6_tables.c | 23 +- + net/ipv6/netfilter/ip6t_NPT.c | 2 + + net/ipv6/output_core.c | 28 +- + net/ipv6/route.c | 8 +- + net/ipv6/sit.c | 16 +- + net/ipv6/syncookies.c | 10 +- + net/ipv6/tcp_ipv6.c | 24 +- + net/ipv6/udp.c | 5 +- + net/ipv6/xfrm6_output.c | 2 +- + net/iucv/af_iucv.c | 7 +- + net/key/af_key.c | 13 +- + net/l2tp/l2tp_core.c | 48 +- + net/l2tp/l2tp_core.h | 3 - + net/lapb/lapb_out.c | 3 +- + net/llc/af_llc.c | 20 +- + net/llc/llc_s_ac.c | 2 +- + net/mac80211/cfg.c | 5 +- + net/mac80211/debugfs_sta.c | 1 + + net/mac80211/driver-ops.c | 5 +- + net/mac80211/ibss.c | 2 + + net/mac80211/ieee80211_i.h | 39 +- + net/mac80211/iface.c | 15 +- + net/mac80211/key.c | 8 + + net/mac80211/key.h | 2 + + net/mac80211/main.c | 20 +- + net/mac80211/mesh_hwmp.c | 2 +- + net/mac80211/mesh_pathtbl.c | 5 +- + net/mac80211/mlme.c | 5 + + net/mac80211/rate.c | 3 +- + net/mac80211/rc80211_minstrel.c | 27 +- + net/mac80211/rc80211_minstrel.h | 1 - + net/mac80211/rx.c | 186 ++- + net/mac80211/sta_info.c | 38 +- + net/mac80211/sta_info.h | 32 + + net/mac80211/tx.c | 47 +- + net/mac80211/vht.c | 8 +- + net/mac80211/wpa.c | 12 +- + 
net/mac802154/llsec.c | 2 +- + net/mac802154/tx.c | 8 +- + net/mpls/mpls_gso.c | 3 + + net/ncsi/ncsi-rsp.c | 2 +- + net/netfilter/ipset/ip_set_core.c | 2 + + net/netfilter/ipset/ip_set_hash_gen.h | 20 +- + net/netfilter/ipvs/ip_vs_ctl.c | 9 +- + net/netfilter/nf_conntrack_core.c | 10 +- + net/netfilter/nf_conntrack_netlink.c | 2 + + net/netfilter/nf_conntrack_standalone.c | 8 +- + net/netfilter/nf_synproxy_core.c | 5 + + net/netfilter/nf_tables_api.c | 3 +- + net/netfilter/nfnetlink_cthelper.c | 8 +- + net/netfilter/nft_dynset.c | 4 +- + net/netfilter/nft_exthdr.c | 3 + + net/netfilter/nft_nat.c | 8 +- + net/netfilter/nft_payload.c | 4 +- + net/netfilter/x_tables.c | 84 +- + net/netfilter/xt_CT.c | 3 + + net/netfilter/xt_IDLETIMER.c | 1 + + net/netfilter/xt_LED.c | 1 + + net/netfilter/xt_RATEEST.c | 4 + + net/netfilter/xt_TEE.c | 2 + + net/netfilter/xt_bpf.c | 2 + + net/netfilter/xt_cgroup.c | 1 + + net/netfilter/xt_connlimit.c | 1 + + net/netfilter/xt_hashlimit.c | 4 + + net/netfilter/xt_limit.c | 1 + + net/netfilter/xt_nfacct.c | 1 + + net/netfilter/xt_quota.c | 1 + + net/netfilter/xt_quota2.c | 25 +- + net/netfilter/xt_rateest.c | 1 + + net/netfilter/xt_recent.c | 12 +- + net/netfilter/xt_statistic.c | 1 + + net/netfilter/xt_string.c | 1 + + net/netlabel/netlabel_domainhash.c | 59 +- + net/netlabel/netlabel_mgmt.c | 19 +- + net/netlabel/netlabel_unlabeled.c | 17 +- + net/netlink/af_netlink.c | 6 +- + net/netlink/genetlink.c | 52 - + net/netrom/nr_timer.c | 20 +- + net/nfc/digital_dep.c | 2 + + net/nfc/llcp_sock.c | 16 + + net/nfc/nci/core.c | 1 + + net/nfc/nci/hci.c | 5 + + net/nfc/netlink.c | 3 +- + net/nfc/rawsock.c | 9 +- + net/openvswitch/actions.c | 8 +- + net/openvswitch/conntrack.c | 22 +- + net/packet/af_packet.c | 41 +- + net/qrtr/qrtr.c | 7 +- + net/rds/ib_frmr.c | 4 +- + net/rds/recv.c | 5 +- + net/rose/rose_loopback.c | 17 +- + net/rxrpc/call_accept.c | 7 + + net/rxrpc/conn_event.c | 6 +- + net/rxrpc/key.c | 22 +- + net/rxrpc/recvmsg.c | 2 +- + net/rxrpc/sendmsg.c | 2 +- + net/sched/cls_tcindex.c | 10 +- + net/sched/sch_api.c | 11 +- + net/sched/sch_choke.c | 7 +- + net/sched/sch_dsmark.c | 3 +- + net/sched/sch_generic.c | 1 + + net/sched/sch_gred.c | 2 +- + net/sched/sch_red.c | 7 +- + net/sched/sch_sfq.c | 2 +- + net/sched/sch_teql.c | 3 + + net/sctp/associola.c | 5 +- + net/sctp/auth.c | 1 + + net/sctp/bind_addr.c | 1 + + net/sctp/input.c | 6 +- + net/sctp/protocol.c | 4 +- + net/sctp/sm_make_chunk.c | 2 +- + net/sctp/sm_sideeffect.c | 4 +- + net/sctp/sm_statefuns.c | 3 +- + net/sctp/socket.c | 47 +- + net/sctp/transport.c | 2 +- + net/socket.c | 2 +- + net/sunrpc/addr.c | 6 +- + net/sunrpc/auth_gss/auth_gss.c | 30 +- + net/sunrpc/auth_gss/auth_gss_internal.h | 45 + + net/sunrpc/auth_gss/gss_krb5_mech.c | 31 +- + net/sunrpc/auth_gss/gss_mech_switch.c | 12 +- + net/sunrpc/auth_gss/svcauth_gss.c | 29 +- + net/sunrpc/clnt.c | 8 + + net/sunrpc/rpc_pipe.c | 1 + + net/sunrpc/rpcb_clnt.c | 4 +- + net/sunrpc/sched.c | 12 +- + net/sunrpc/svc.c | 6 +- + net/sunrpc/svc_xprt.c | 23 +- + net/sunrpc/xdr.c | 4 + + net/sunrpc/xprt.c | 65 +- + net/sunrpc/xprtrdma/module.c | 1 + + net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 7 +- + net/sunrpc/xprtrdma/transport.c | 1 + + net/sunrpc/xprtsock.c | 4 + + net/tipc/core.c | 5 + + net/tipc/link.c | 11 +- + net/tipc/msg.c | 14 +- + net/tipc/netlink_compat.c | 14 +- + net/tipc/socket.c | 12 +- + net/unix/Kconfig | 5 + + net/unix/Makefile | 2 + + net/unix/af_unix.c | 133 +- + net/unix/garbage.c | 68 +- + net/unix/scm.c | 161 ++ + net/unix/scm.h | 10 
+ + net/vmw_vsock/af_vsock.c | 16 +- + net/vmw_vsock/virtio_transport.c | 96 +- + net/vmw_vsock/virtio_transport_common.c | 8 +- + net/vmw_vsock/vmci_transport.c | 3 +- + net/wireless/nl80211.c | 16 +- + net/wireless/reg.c | 5 +- + net/wireless/scan.c | 4 +- + net/wireless/sme.c | 2 +- + net/wireless/util.c | 11 +- + net/wireless/wext-core.c | 5 +- + net/wireless/wext-spy.c | 14 +- + net/x25/af_x25.c | 11 +- + net/x25/x25_subr.c | 6 + + net/xfrm/Kconfig | 11 + + net/xfrm/Makefile | 1 + + net/xfrm/xfrm_compat.c | 629 +++++++ + net/xfrm/xfrm_input.c | 2 +- + net/xfrm/xfrm_state.c | 98 +- + net/xfrm/xfrm_user.c | 138 +- + samples/bpf/tracex1_kern.c | 4 +- + samples/kfifo/bytestream-example.c | 8 +- + samples/kfifo/inttype-example.c | 8 +- + samples/kfifo/record-example.c | 8 +- + samples/mic/mpssd/mpssd.c | 4 +- + scripts/Kbuild.include | 11 +- + scripts/Makefile | 9 +- + scripts/Makefile.build | 3 + + scripts/analyze_suspend.py | 2 +- + scripts/bloat-o-meter | 2 +- + scripts/bootgraph.pl | 2 +- + scripts/checkincludes.pl | 2 +- + scripts/checkpatch.pl | 6 +- + scripts/checkstack.pl | 2 +- + scripts/config | 2 +- + scripts/decode_stacktrace.sh | 4 +- + scripts/depmod.sh | 2 + + scripts/diffconfig | 2 +- + scripts/dtc/dt_to_config | 2 +- + scripts/extract_xc3028.pl | 2 +- + scripts/gdb/linux/dmesg.py | 22 +- + scripts/gdb/linux/proc.py | 2 +- + scripts/get_dvb_firmware | 2 +- + scripts/kconfig/nconf.c | 2 +- + scripts/markup_oops.pl | 2 +- + scripts/mkcompile_h | 14 +- + scripts/mksysmap | 2 +- + scripts/profile2linkerlist.pl | 2 +- + scripts/recordmcount.c | 2 +- + scripts/recordmcount.pl | 19 +- + scripts/setlocalversion | 21 +- + scripts/show_delta | 2 +- + scripts/stackdelta | 2 +- + scripts/tracing/draw_functrace.py | 8 +- + security/integrity/evm/evm_crypto.c | 2 +- + security/integrity/ima/ima.h | 7 +- + security/integrity/ima/ima_crypto.c | 2 + + security/integrity/ima/ima_policy.c | 3 +- + security/keys/trusted.c | 2 +- + security/lsm_audit.c | 7 +- + security/selinux/include/classmap.h | 3 +- + security/selinux/include/security.h | 1 + + security/selinux/nlmsgtab.c | 24 +- + security/selinux/selinuxfs.c | 1 + + security/selinux/ss/policydb.c | 4 + + security/selinux/ss/policydb.h | 2 + + security/selinux/ss/services.c | 6 + + security/smack/smackfs.c | 52 +- + sound/core/compress_offload.c | 4 + + sound/core/control.c | 2 +- + sound/core/info.c | 4 +- + sound/core/init.c | 2 - + sound/core/oss/mulaw.c | 4 +- + sound/core/oss/pcm_oss.c | 28 +- + sound/core/pcm_native.c | 5 + + sound/core/seq/oss/seq_oss.c | 11 +- + sound/core/seq/oss/seq_oss_synth.c | 3 +- + sound/core/seq/seq_ports.c | 39 +- + sound/core/seq/seq_queue.h | 8 +- + sound/core/timer.c | 3 +- + sound/drivers/aloop.c | 11 +- + sound/drivers/opl3/opl3_synth.c | 2 + + sound/firewire/Kconfig | 5 +- + sound/firewire/bebob/bebob.c | 5 +- + sound/firewire/bebob/bebob_hwdep.c | 3 +- + sound/firewire/digi00x/digi00x.c | 5 + + sound/firewire/oxfw/oxfw.c | 3 +- + sound/firewire/tascam/tascam.c | 30 +- + sound/hda/ext/hdac_ext_controller.c | 2 + + sound/hda/hdac_bus.c | 4 + + sound/hda/hdac_device.c | 2 + + sound/isa/cmi8330.c | 2 +- + sound/isa/es1688/es1688.c | 4 +- + sound/isa/sb/emu8000.c | 4 +- + sound/isa/sb/sb16_csp.c | 20 +- + sound/isa/sb/sb8.c | 4 - + sound/isa/wavefront/wavefront_synth.c | 8 +- + sound/pci/asihpi/hpioctl.c | 4 +- + sound/pci/ca0106/ca0106_main.c | 3 +- + sound/pci/cs46xx/cs46xx_lib.c | 2 +- + sound/pci/cs46xx/dsp_spos_scb_lib.c | 2 +- + sound/pci/ctxfi/cthw20k2.c | 2 +- + sound/pci/echoaudio/echoaudio.c | 
2 - + sound/pci/hda/hda_auto_parser.c | 6 + + sound/pci/hda/hda_bind.c | 4 + + sound/pci/hda/hda_codec.c | 2 +- + sound/pci/hda/hda_controller.c | 11 +- + sound/pci/hda/hda_generic.c | 40 +- + sound/pci/hda/hda_generic.h | 1 + + sound/pci/hda/hda_tegra.c | 3 + + sound/pci/hda/patch_ca0132.c | 16 +- + sound/pci/hda/patch_conexant.c | 1 + + sound/pci/hda/patch_hdmi.c | 108 +- + sound/pci/hda/patch_realtek.c | 48 +- + sound/pci/hda/patch_sigmatel.c | 2 +- + sound/pci/hda/patch_via.c | 1 + + sound/pci/ice1712/prodigy192.c | 2 +- + sound/pci/mixart/mixart_core.c | 5 +- + sound/pci/oxygen/xonar_dg.c | 2 +- + sound/pci/rme9652/hdsp.c | 3 +- + sound/pci/rme9652/hdspm.c | 3 +- + sound/pci/rme9652/rme9652.c | 3 +- + sound/ppc/powermac.c | 6 +- + sound/soc/codecs/cs35l33.c | 1 + + sound/soc/codecs/cs42l56.c | 3 +- + sound/soc/codecs/rt286.c | 23 +- + sound/soc/codecs/rt5640.c | 4 +- + sound/soc/codecs/rt5651.c | 4 +- + sound/soc/codecs/rt5659.c | 5 + + sound/soc/codecs/rt5670.h | 2 +- + sound/soc/codecs/sgtl5000.c | 2 +- + sound/soc/codecs/sti-sas.c | 1 + + sound/soc/codecs/wm_adsp.c | 5 +- + sound/soc/fsl/fsl_asrc_dma.c | 1 + + sound/soc/fsl/fsl_esai.c | 8 +- + sound/soc/intel/atom/sst-mfld-platform-pcm.c | 14 +- + sound/soc/intel/boards/haswell.c | 1 + + sound/soc/jz4740/jz4740-i2s.c | 4 + + sound/soc/kirkwood/kirkwood-dma.c | 2 +- + sound/soc/qcom/lpass-platform.c | 3 +- + sound/soc/soc-core.c | 2 +- + sound/soc/soc-dapm.c | 1 + + sound/soc/soc-pcm.c | 2 + + sound/soc/tegra/tegra30_ahub.c | 4 +- + sound/soc/tegra/tegra30_i2s.c | 4 +- + sound/soc/tegra/tegra_alc5632.c | 1 + + sound/soc/tegra/tegra_max98090.c | 1 + + sound/soc/tegra/tegra_rt5640.c | 1 + + sound/soc/tegra/tegra_rt5677.c | 1 + + sound/soc/tegra/tegra_sgtl5000.c | 1 + + sound/soc/tegra/tegra_wm8753.c | 1 + + sound/soc/tegra/tegra_wm8903.c | 1 + + sound/soc/tegra/tegra_wm9712.c | 1 + + sound/soc/tegra/trimslice.c | 1 + + sound/usb/card.c | 34 +- + sound/usb/card.h | 1 + + sound/usb/clock.c | 6 + + sound/usb/format.c | 4 + + sound/usb/line6/capture.c | 2 + + sound/usb/line6/driver.c | 4 + + sound/usb/line6/playback.c | 2 + + sound/usb/line6/pod.c | 5 - + sound/usb/line6/variax.c | 6 - + sound/usb/midi.c | 54 +- + sound/usb/mixer.c | 34 +- + sound/usb/mixer.h | 15 +- + sound/usb/mixer_quirks.c | 6 +- + sound/usb/mixer_scarlett.c | 6 +- + sound/usb/pcm.c | 59 +- + sound/usb/quirks-table.h | 124 ++ + sound/usb/quirks.c | 29 +- + sound/usb/stream.c | 7 +- + sound/usb/usbaudio.h | 4 +- + tools/arch/ia64/include/asm/barrier.h | 3 - + tools/gpio/gpio-hammer.c | 17 +- + tools/kvm/kvm_stat/kvm_stat | 2 +- + tools/lib/traceevent/event-parse.c | 1 + + tools/objtool/arch/x86/include/asm/insn.h | 15 + + tools/objtool/check.c | 8 +- + tools/objtool/elf.c | 7 +- + tools/perf/Documentation/perf-record.txt | 4 + + tools/perf/Documentation/perf-stat.txt | 4 + + tools/perf/builtin-lock.c | 2 +- + tools/perf/builtin-probe.c | 3 + + tools/perf/builtin-report.c | 3 +- + tools/perf/builtin-script.c | 24 +- + tools/perf/builtin-top.c | 4 +- + tools/perf/pmu-events/jevents.c | 2 +- + tools/perf/python/tracepoint.py | 2 +- + tools/perf/python/twatch.py | 2 +- + tools/perf/scripts/python/sched-migration.py | 2 +- + tools/perf/tests/attr.c | 4 +- + tools/perf/tests/bpf.c | 2 + + tools/perf/tests/pmu.c | 3 +- + tools/perf/tests/sample-parsing.c | 2 +- + tools/perf/util/annotate.c | 17 +- + tools/perf/util/auxtrace.c | 4 - + tools/perf/util/cgroup.c | 2 +- + tools/perf/util/dso.c | 16 + + tools/perf/util/dso.h | 1 + + tools/perf/util/dwarf-aux.c | 8 + + 
tools/perf/util/event.c | 9 +- + .../util/intel-pt-decoder/intel-pt-decoder.c | 3 + + tools/perf/util/intel-pt.c | 8 +- + tools/perf/util/lzma.c | 8 +- + tools/perf/util/map.c | 7 +- + tools/perf/util/parse-events.c | 4 +- + tools/perf/util/parse-regs-options.c | 2 +- + tools/perf/util/pmu.c | 13 +- + tools/perf/util/pmu.h | 1 + + tools/perf/util/probe-event.c | 21 +- + tools/perf/util/probe-file.c | 4 +- + tools/perf/util/probe-finder.c | 3 +- + tools/perf/util/session.c | 2 + + tools/perf/util/setup.py | 2 +- + tools/perf/util/sort.c | 2 +- + tools/perf/util/srcline.c | 16 +- + tools/perf/util/stat.c | 6 +- + tools/perf/util/symbol-elf.c | 7 + + tools/perf/util/symbol.c | 2 + + tools/perf/util/symbol_fprintf.c | 2 +- + tools/testing/ktest/compare-ktest-sample.pl | 2 +- + tools/testing/selftests/lib.mk | 4 + + .../networking/timestamping/timestamping.c | 10 +- + .../powerpc/benchmarks/context_switch.c | 21 +- + .../powerpc/pmu/ebb/back_to_back_ebbs_test.c | 2 - + .../selftests/powerpc/pmu/ebb/cycles_test.c | 2 - + .../powerpc/pmu/ebb/cycles_with_freeze_test.c | 2 - + .../powerpc/pmu/ebb/cycles_with_mmcr2_test.c | 2 - + tools/testing/selftests/powerpc/pmu/ebb/ebb.c | 2 - + .../pmu/ebb/ebb_on_willing_child_test.c | 2 - + .../powerpc/pmu/ebb/lost_exception_test.c | 1 - + .../powerpc/pmu/ebb/multi_counter_test.c | 7 - + .../powerpc/pmu/ebb/multi_ebb_procs_test.c | 2 - + .../powerpc/pmu/ebb/no_handler_test.c | 2 - + .../powerpc/pmu/ebb/pmae_handling_test.c | 2 - + .../powerpc/pmu/ebb/pmc56_overflow_test.c | 2 - + tools/testing/selftests/powerpc/utils.c | 37 +- + tools/testing/selftests/x86/protection_keys.c | 4 +- + tools/testing/selftests/x86/syscall_nt.c | 1 + + tools/usb/usbip/libsrc/usbip_host_common.c | 2 +- + virt/kvm/kvm_main.c | 25 +- + 3015 files changed, 35155 insertions(+), 14779 deletions(-) + create mode 100644 Documentation/devicetree/bindings/input/hall_sensor.txt + create mode 100644 Documentation/devicetree/bindings/input/misc/bma2x2.txt + create mode 100644 Documentation/devicetree/bindings/input/misc/ltr553.txt + create mode 100644 Documentation/devicetree/bindings/leds/leds-aw2013.txt + mode change 100755 => 100644 arch/arm/configs/msm8909-perf_defconfig + mode change 100755 => 100644 arch/arm/configs/msm8937go-perf_defconfig + create mode 100644 arch/arm/configs/vendor/msm8909go-perf_defconfig + create mode 100644 arch/arm/configs/vendor/msm8909go_defconfig + create mode 100644 arch/arm64/boot/dts/qcom/batterydata-qrd-skue-4v35-2000mah.dtsi + create mode 100644 arch/arm64/boot/dts/qcom/dsi-panel-arglass-boe-dual-1080p-video.dtsi + create mode 100644 arch/arm64/boot/dts/qcom/dsi-panel-hx8379c-fwvga-video.dtsi + create mode 100644 arch/arm64/boot/dts/qcom/fg-gen3-batterydata-goertek-merlin-230mah.dtsi + create mode 100644 arch/arm64/boot/dts/qcom/msm8909-1gb-qrd-skue.dts + create mode 100644 arch/arm64/boot/dts/qcom/msm8909-camera-sensor-skue.dtsi + create mode 100644 arch/arm64/boot/dts/qcom/msm8909-qrd-skue.dtsi + create mode 100644 arch/arm64/boot/dts/qcom/sdm670-camera-sensor-arglass.dtsi + create mode 100644 arch/arm64/boot/dts/qcom/sxr1130-arglass-overlay.dts + create mode 100644 arch/arm64/boot/dts/qcom/sxr1130-arglass-pinctrl-overlay.dtsi + create mode 100644 arch/arm64/boot/dts/qcom/sxr1130-arglass.dts + create mode 100644 arch/arm64/boot/dts/qcom/sxr1130-arglass.dtsi + create mode 100644 arch/openrisc/include/asm/barrier.h + create mode 100644 arch/powerpc/include/asm/book3s/64/kup-radix.h + create mode 100644 arch/powerpc/include/asm/kup.h + mode change 100644 => 
100755 drivers/irqchip/irq-gic.c + create mode 100644 drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_hw_info_ver_4_2_0.h + mode change 100644 => 100755 drivers/usb/host/xhci-plat.c + mode change 100644 => 100755 include/linux/overflow.h + create mode 100644 include/linux/prandom.h + delete mode 100644 kernel/elfcore.c + delete mode 100644 kernel/futex_compat.c + mode change 100644 => 100755 net/netlink/genetlink.c + create mode 100644 net/sunrpc/auth_gss/auth_gss_internal.h + create mode 100644 net/unix/scm.c + create mode 100644 net/unix/scm.h + create mode 100644 net/xfrm/xfrm_compat.c + +diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio +index 0406076e4405..743ffbcc6b5f 100644 +--- a/Documentation/ABI/testing/sysfs-bus-iio ++++ b/Documentation/ABI/testing/sysfs-bus-iio +@@ -1491,7 +1491,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_voc_raw + KernelVersion: 4.3 + Contact: linux-iio@vger.kernel.org + Description: +- Raw (unscaled no offset etc.) percentage reading of a substance. ++ Raw (unscaled no offset etc.) reading of a substance. Units ++ after application of scale and offset are percents. + + What: /sys/bus/iio/devices/iio:deviceX/in_resistance_raw + What: /sys/bus/iio/devices/iio:deviceX/in_resistanceX_raw +diff --git a/Documentation/DocBook/libata.tmpl b/Documentation/DocBook/libata.tmpl +index d7fcdc5a4379..9b55778ab024 100644 +--- a/Documentation/DocBook/libata.tmpl ++++ b/Documentation/DocBook/libata.tmpl +@@ -324,7 +324,7 @@ Many legacy IDE drivers use ata_bmdma_status() as the bmdma_status() hook. + + High-level taskfile hooks + +-void (*qc_prep) (struct ata_queued_cmd *qc); ++enum ata_completion_errors (*qc_prep) (struct ata_queued_cmd *qc); + int (*qc_issue) (struct ata_queued_cmd *qc); + + +diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt +index 36b7a575ba1b..67a2d31a6740 100644 +--- a/Documentation/devicetree/bindings/arm/msm/msm.txt ++++ b/Documentation/devicetree/bindings/arm/msm/msm.txt +@@ -113,6 +113,9 @@ SoCs: + - SMVWR1130 + compatible = "qcom,sxr1130-smrtvwr" + ++- ARGLASS1130 ++ compatible = "qcom,sxr1130-arglass" ++ + - MSM8952 + compatible = "qcom,msm8952" + +@@ -223,6 +226,9 @@ Generic board variants: + - SMVWR device: + compatible = "qcom,xr-smrtvwr" + ++- ARGlass device: ++ compatible = "qcom,xr-arglass" ++ + - HDK device: + compatible = "qcom,hdk" + +diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt +index b6a7e7397b8b..b944fe067188 100644 +--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt ++++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt +@@ -16,6 +16,9 @@ Required properties: + Documentation/devicetree/bindings/graph.txt. This port should be connected + to the input port of an attached HDMI or LVDS encoder chip. + ++Optional properties: ++- pinctrl-names: Contain "default" and "sleep". 
++ + Example: + + dpi0: dpi@1401d000 { +@@ -26,6 +29,9 @@ dpi0: dpi@1401d000 { + <&mmsys CLK_MM_DPI_ENGINE>, + <&apmixedsys CLK_APMIXED_TVDPLL>; + clock-names = "pixel", "engine", "pll"; ++ pinctrl-names = "default", "sleep"; ++ pinctrl-0 = <&dpi_pin_func>; ++ pinctrl-1 = <&dpi_pin_idle>; + + port { + dpi0_out: endpoint { +diff --git a/Documentation/devicetree/bindings/input/hall_sensor.txt b/Documentation/devicetree/bindings/input/hall_sensor.txt +new file mode 100644 +index 000000000000..e4401886a0b8 +--- /dev/null ++++ b/Documentation/devicetree/bindings/input/hall_sensor.txt +@@ -0,0 +1,36 @@ ++Hall sensor driver ++ ++Hall sensor is a switch. It senses magnetic field change and trigger interrupts to cpu. ++ ++Required properties: ++ ++ - compatible : Should be "hall-switch". ++ - pinctrl-names : Pinctrl configuration names of this sensor driver. ++ Should be "default". ++ - interrupt-parent : Parent of interrupt. ++ - interrupts : Hall Sensor interrupts to indicate new data ready or events. ++ - vddio-supply : Analog power supply needed to power device. ++ - linux,gpio-int : Irq gpio which is to provide interrupts to host. ++ - linux,min-uv : Device required minimum voltage. ++ - linux,max-uv : Device required max voltage. ++ ++Optional properties: ++ ++ - linux,wakeup : Boolean to select interrupt wakeup or not, if this property is defined ++ device will wakeup system. ++ ++Example: ++ &soc { ++ hall { ++ compatible = "hall-switch"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&hall_sensor_int_default>; ++ interrupt-parent = <&msm_gpio>; ++ interrupts = <36 0x2003>; ++ vddio-supply = <&pm8909_l6>; ++ linux,gpio-int = <&msm_gpio 36 0x1>; ++ linux,wakeup; ++ linux,min-uv = <1650000>; ++ linux,max-uv = <3300000>; ++ }; ++ }; +diff --git a/Documentation/devicetree/bindings/input/misc/bma2x2.txt b/Documentation/devicetree/bindings/input/misc/bma2x2.txt +new file mode 100644 +index 000000000000..40629abf8f9d +--- /dev/null ++++ b/Documentation/devicetree/bindings/input/misc/bma2x2.txt +@@ -0,0 +1,61 @@ ++BOSCH bma2x2 3-axis accelerometer sensor driver. ++ ++Required properties: ++ ++ - compatible : Should be "bosch,bma2x2". ++ - reg : i2c slave address of the device. ++ - pinctrl-names : Pinctrl configuration names of this sensor driver. ++ Should be "default". ++ - pinctrl-0 : The pinctrl node corresponding to "default", ++ should be <&bma2x2_int1_default &bma2x2_int2_default>. ++ - interrupt-parent : Parent of interrupt. ++ - interrupts : Accelerometer interrupts to indicate new data ready or events. ++ - vdd-supply : Analog power supply needed to power device. ++ - vio-supply : Digital IO power supply needed for IO and I2C. ++ - bosch,init-interval : Initial data polling interval in millisecond. ++ - bosch,place : The placing of the accelerometer on board. There are 8 ++ patterns of placing described as below: ++ 0: 1st pin is right down ++ 1: 1st pin is left down ++ 2: 1st pin is left top ++ 3: 1st pin is right top ++ 4: 1st pin is left down (from top view) ++ 5: 1st pin is left top (from top view) ++ 6: 1st pin is right top (from top view) ++ 7: 1st pin is right down (from top view) ++ ++Optional properties: ++ ++ - bosch,gpio-int1 : 1st irq gpio which is to provide interrupts ++ to host, interrupt events can be route to any of ++ these two irq pins according device configuration. ++ - bosch,gpio-int2 : 2nd irq gpio which is to provide interrupts ++ to host. ++ - bosch,use-interrupt : Use device interrupt to trigger data acquisition ++ instead of polling data. 
++ - bosch,use-int2 : Map interrupt signal to 2nd interrupt pin of device. ++ If this property is defined, data ready, fifo full and ++ other event will trigger level change on interrupt pin2 ++ instead of interrupt pin1. ++ - bosch,use-smd : enable signification motion detection(SMD). Enable this ++ will create a new sensor which triggers an event each time ++ significant motion is detected. ++ - bosch,use-hrtimer : Use hrtimer to report data. Hrtimer can report data more timely. ++ ++Example: ++&i2c_0 { /* BLSP1 QUP2 */ ++ bosch@18 { /* Accelerometer sensor */ ++ compatible = "bosch,bma2x2"; ++ reg = <0x18>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&bma2x2_int1_default &bma2x2_int2_default>; ++ interrupt-parent = <&msm_gpio>; ++ interrupts = <112 0x2002>; ++ vdd-supply = <&pm8916_l17>; ++ vio-supply = <&pm8916_l6>; ++ bosch,init-interval = <200>; ++ bosch,place = <1>; ++ bosch,gpio-int1 = <&msm_gpio 112 0x2002>; ++ bosch,gpio-int2 = <&msm_gpio 114 0x2002>; ++ }; ++}; +diff --git a/Documentation/devicetree/bindings/input/misc/ltr553.txt b/Documentation/devicetree/bindings/input/misc/ltr553.txt +new file mode 100644 +index 000000000000..a89cb00b990b +--- /dev/null ++++ b/Documentation/devicetree/bindings/input/misc/ltr553.txt +@@ -0,0 +1,67 @@ ++LTR-553ALS-01 optical sensor ++ ++LTR553 is an ambient light and proximity sensor. It connects to host SoC via ++I2C bus. ++ ++Abbreviation: ++ALS: ambient light sensor ++PS: proximity sensor ++ALPS: ambient light and proximity sensor ++ ++Required properties: ++ - compatible : Should be "liteon,ltr553". ++ - reg : i2c slave address of this device. ++ - vdd-supply : Analog power supply needed to power up this device. ++ - vio-supply : Digital IO power supply needed for IO and i2c. ++ - interrupt-parent : The interrupt controller this device is connected to. ++ - interrupts : L/P sample interrupt to indicate new data ready. ++ - pinctrl-names : pinctrl configuration names of this device. Should be ++ "default" and "sleep". ++ - pinctrl-0 : Should specify pin control groups used for this sensor. ++ - pinctrl-1 : Should specify pin control groups used for this sensor. ++ - liteon,irq-gpio : GPIO pin for interrupt of this device. ++ - liteon,als-ps-persist: The number of data out of range to trigger interrupt. ++ - liteon,ps-led : Proximity sensor LED settings. ++ - liteon,ps-pulses : Controls the number of LED pulses to be emitted. ++ - liteon,als-integration-time : ALS integration time configuration. The ++ maximum value is 7. ++ ++ Optional properties: ++ - liteon,ps-distance-table : Proximity sensor ADC value to transfer into ++ distance in centimeter. ++ - liteon,als-equation-0 : Equation to convert ALS data into lux for ++ light ratio below 0.45. ++ - liteon,als-equation-1 : Equation to convert ALS data into lux for ++ light ratio between 0.45 and 0.68. ++ - liteon,als-equation-2 : Equation to convert ALS data into lux for ++ light ratio between 0.68 and 0.99. ++ - liteon,als-equation-3 : Equation to convert ALS data into lux for ++ light ratio above 0.99. ++ - liteon,wakeup-threshold : Threshold in centimeter to wake up SoC in ++ suspend mode when proximity is enabled. ++ - liteon,als-sensitivity : Ambient light sensitivity in lux. This array ++ configures how much light intensity change will trigger a light sensor ++ event under different als gain configuration. The array size must be 8. 
++ ++ Example: ++ ++ i2c@78b5000 { /* BLSP1 QUP1 */ ++ liteon@23 { ++ compatible = "liteon,ltr553"; ++ reg = <0x23>; ++ vdd-supply = <&pm8916_l17>; ++ vio-supply = <&pm8916_l6>; ++ interrupt-parent = <&msm_gpio>; ++ interrupts = <113 0x2002>; ++ pinctrl-names = "default","sleep"; ++ pinctrl-0 = <<r553_default>; ++ pinctrl-1 = <<r553_sleep>; ++ liteon,irq-gpio = <&msm_gpio 113 0x2002>; ++ liteon,als-ps-persist = <0>; ++ liteon,ps-led = <0x7f>; ++ liteon,ps-pulses = <4>; ++ liteon,wakeup-threshold = <4>; ++ liteon,als-integration-time = <0>; ++ liteon,als-sensitivity = <6000 3200 1600 800 0 0 130 1>; ++ }; ++ } +diff --git a/Documentation/devicetree/bindings/leds/leds-aw2013.txt b/Documentation/devicetree/bindings/leds/leds-aw2013.txt +new file mode 100644 +index 000000000000..4b18135d6318 +--- /dev/null ++++ b/Documentation/devicetree/bindings/leds/leds-aw2013.txt +@@ -0,0 +1,75 @@ ++Binding for RGB LEDs connected to AW2013. ++ ++AWINIC AW2013 RGB LED driver is used to provide red/green/blue ++led blink or glowing to notify user for different system events, ++such as missed call, new sms, low battery. AW2013 RGB LED is ++connected through I2C. ++ ++Required properties: ++ - compatible : should be compatible = "awinic,aw2013" ++ - reg : i2c slave address of the device ++ - vdd-supply : Power supply needed to power up the device ++ - vcc-supply : Power source required to power up i2c bus ++ ++LED required sub-node properties: ++ - aw2013,name : name of the LED ++ - aw2013,id : id of the LED ++ - aw2013,max-brightness: max brightness set of the LED ++ - aw2013,max-current : max current set of the LED ++ - aw2013,rise-time-ms : the rise time when led in breathe mode ++ - aw2013,hold-time-ms : the hold time when led in breathe mode ++ - aw2013,fall-time-ms : the fall time when led in breathe mode ++ - aw2013,off-time-ms : the off time when led in breathe mode ++ The definition of each time described as ++ shown in figure: ++ ++ /-----------\ ++ / | \ ++ /| | |\ ++ / | | | \----------- ++ |hold_time_ms | | ++ | | | ++ rise_time_ms fall_time_ms | ++ off_time_ms ++ ++Example: ++ ++ aw2013@45 { ++ compatible = "awinic,aw2013"; ++ reg = <0x45>; ++ vdd-supply = <&pm8909_l17>; ++ vcc-supply = <&pm8909_l6>; ++ ++ aw2013,red { ++ aw2013,name = "red"; ++ aw2013,id = <0>; ++ aw2013,max-brightness = <255>; ++ aw2013,max-current = <1>; ++ aw2013,rise-time-ms = <2>; ++ aw2013,hold-time-ms = <1>; ++ aw2013,fall-time-ms = <2>; ++ aw2013,off-time-ms = <1>; ++ }; ++ ++ aw2013,green { ++ aw2013,name = "green"; ++ aw2013,id = <1>; ++ aw2013,max-brightness = <255>; ++ aw2013,max-current = <1>; ++ aw2013,rise-time-ms = <2>; ++ aw2013,hold-time-ms = <1>; ++ aw2013,fall-time-ms = <2>; ++ aw2013,off-time-ms = <1>; ++ }; ++ ++ aw2013,blue { ++ aw2013,name = "blue"; ++ aw2013,id = <2>; ++ aw2013,max-brightness = <255>; ++ aw2013,max-current = <1>; ++ aw2013,rise-time-ms = <2>; ++ aw2013,hold-time-ms = <1>; ++ aw2013,fall-time-ms = <2>; ++ aw2013,off-time-ms = <1>; ++ }; ++ }; +diff --git a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt +index 5b6cd9b3f628..0188bbd2e35f 100644 +--- a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt ++++ b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt +@@ -27,7 +27,7 @@ Example (for ARM-based BeagleBone with NPC100 NFC controller on I2C2): + clock-frequency = <100000>; + + interrupt-parent = <&gpio1>; +- interrupts = <29 GPIO_ACTIVE_HIGH>; ++ interrupts = <29 IRQ_TYPE_LEVEL_HIGH>; + + enable-gpios = <&gpio0 30 
GPIO_ACTIVE_HIGH>; + firmware-gpios = <&gpio0 31 GPIO_ACTIVE_HIGH>; +diff --git a/Documentation/devicetree/bindings/net/nfc/pn544.txt b/Documentation/devicetree/bindings/net/nfc/pn544.txt +index dab69f36167c..8541e8dafd55 100644 +--- a/Documentation/devicetree/bindings/net/nfc/pn544.txt ++++ b/Documentation/devicetree/bindings/net/nfc/pn544.txt +@@ -27,7 +27,7 @@ Example (for ARM-based BeagleBone with PN544 on I2C2): + clock-frequency = <400000>; + + interrupt-parent = <&gpio1>; +- interrupts = <17 GPIO_ACTIVE_HIGH>; ++ interrupts = <17 IRQ_TYPE_LEVEL_HIGH>; + + enable-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>; + firmware-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>; +diff --git a/Documentation/devicetree/bindings/pci/msm_ep_pcie.txt b/Documentation/devicetree/bindings/pci/msm_ep_pcie.txt +index 138fb7e8516b..4fb30ba56279 100644 +--- a/Documentation/devicetree/bindings/pci/msm_ep_pcie.txt ++++ b/Documentation/devicetree/bindings/pci/msm_ep_pcie.txt +@@ -67,6 +67,7 @@ Optional Properties: + - qcom,msm-bus,num-cases + - qcom,msm-bus,num-paths + - qcom,msm-bus,vectors-KBps ++ - qcom,pcie-m2-autonomous: Enable L1ss sleep/exit to support M2 autonomous mode. + + Example: + +diff --git a/Documentation/devicetree/bindings/sound/wm8994.txt b/Documentation/devicetree/bindings/sound/wm8994.txt +index 68c4e8d96bed..b309de00cd83 100644 +--- a/Documentation/devicetree/bindings/sound/wm8994.txt ++++ b/Documentation/devicetree/bindings/sound/wm8994.txt +@@ -14,9 +14,15 @@ Required properties: + - #gpio-cells : Must be 2. The first cell is the pin number and the + second cell is used to specify optional parameters (currently unused). + +- - AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, CPVDD-supply, +- SPKVDD1-supply, SPKVDD2-supply : power supplies for the device, as covered +- in Documentation/devicetree/bindings/regulator/regulator.txt ++ - power supplies for the device, as covered in ++ Documentation/devicetree/bindings/regulator/regulator.txt, depending ++ on compatible: ++ - for wlf,wm1811 and wlf,wm8958: ++ AVDD1-supply, AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, ++ DCVDD-supply, CPVDD-supply, SPKVDD1-supply, SPKVDD2-supply ++ - for wlf,wm8994: ++ AVDD1-supply, AVDD2-supply, DBVDD-supply, DCVDD-supply, CPVDD-supply, ++ SPKVDD1-supply, SPKVDD2-supply + + Optional properties: + +@@ -68,11 +74,11 @@ codec: wm8994@1a { + + lineout1-se; + ++ AVDD1-supply = <®ulator>; + AVDD2-supply = <®ulator>; + CPVDD-supply = <®ulator>; +- DBVDD1-supply = <®ulator>; +- DBVDD2-supply = <®ulator>; +- DBVDD3-supply = <®ulator>; ++ DBVDD-supply = <®ulator>; ++ DCVDD-supply = <®ulator>; + SPKVDD1-supply = <®ulator>; + SPKVDD2-supply = <®ulator>; + }; +diff --git a/Documentation/devicetree/bindings/spi/spi_qsd.txt b/Documentation/devicetree/bindings/spi/spi_qsd.txt +index 1edf0820398a..7c0b2a4df208 100644 +--- a/Documentation/devicetree/bindings/spi/spi_qsd.txt ++++ b/Documentation/devicetree/bindings/spi/spi_qsd.txt +@@ -36,7 +36,13 @@ Optional properties: + - qcom,rt-priority : whether spi message queue is set to run as a realtime task. + With this spi transaction message pump with high (realtime) priority to reduce + the transfer latency on the bus by minimising the delay between a transfer request +- - qcom,shared : whether this qup is shared with other ee's ++ - qcom,shared : whether this qup is shared with other ee's and client driver is not ++ in control of the spi driver get_sync/put_sync_suspend. Spi driver will ++ take care of resources management like clock, gpios and bam for EE switching. 
++ - qcom,shared_ee : whether this qup is used by other ee's and client driver is in ++ control of the spi driver get_sync/put_sync_suspend. Resources management ++ of clock, gpios and bam for EE switching will be taken care when client calls spi ++ get_sync/put_sync_suspend APIs. + + Optional properties which are required for support of BAM-mode: + - qcom,ver-reg-exists : Boolean. When present, allows driver to verify if HW +diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt +index 3b0b909c6fab..c00ba8c43126 100644 +--- a/Documentation/devicetree/bindings/vendor-prefixes.txt ++++ b/Documentation/devicetree/bindings/vendor-prefixes.txt +@@ -161,6 +161,7 @@ lenovo Lenovo Group Ltd. + lg LG Corporation + linaro Linaro Limited + linux Linux-specific binding ++liteon Lite-On Technology Corporation + lltc Linear Technology Corporation + lsi LSI Corp. (LSI Logic) + lt Lontium Semiconductor Corporation +diff --git a/Documentation/filesystems/affs.txt b/Documentation/filesystems/affs.txt +index 71b63c2b9841..a8f1a58e3692 100644 +--- a/Documentation/filesystems/affs.txt ++++ b/Documentation/filesystems/affs.txt +@@ -93,13 +93,15 @@ The Amiga protection flags RWEDRWEDHSPARWED are handled as follows: + + - R maps to r for user, group and others. On directories, R implies x. + +- - If both W and D are allowed, w will be set. ++ - W maps to w. + + - E maps to x. + +- - H and P are always retained and ignored under Linux. ++ - D is ignored. + +- - A is always reset when a file is written to. ++ - H, S and P are always retained and ignored under Linux. ++ ++ - A is cleared when a file is written to. + + User id and group id will be used unless set[gu]id are given as mount + options. Since most of the Amiga file systems are single user systems +@@ -111,11 +113,13 @@ Linux -> Amiga: + + The Linux rwxrwxrwx file mode is handled as follows: + +- - r permission will set R for user, group and others. ++ - r permission will allow R for user, group and others. ++ ++ - w permission will allow W for user, group and others. + +- - w permission will set W and D for user, group and others. ++ - x permission of the user will allow E for plain files. + +- - x permission of the user will set E for plain files. ++ - D will be allowed for user, group and others. + + - All other flags (suid, sgid, ...) are ignored and will + not be retained. +diff --git a/Documentation/filesystems/mandatory-locking.txt b/Documentation/filesystems/mandatory-locking.txt +index 0979d1d2ca8b..a251ca33164a 100644 +--- a/Documentation/filesystems/mandatory-locking.txt ++++ b/Documentation/filesystems/mandatory-locking.txt +@@ -169,3 +169,13 @@ havoc if they lock crucial files. The way around it is to change the file + permissions (remove the setgid bit) before trying to read or write to it. + Of course, that might be a bit tricky if the system is hung :-( + ++7. The "mand" mount option ++-------------------------- ++Mandatory locking is disabled on all filesystems by default, and must be ++administratively enabled by mounting with "-o mand". That mount option ++is only allowed if the mounting task has the CAP_SYS_ADMIN capability. ++ ++Since kernel v4.5, it is possible to disable mandatory locking ++altogether by setting CONFIG_MANDATORY_FILE_LOCKING to "n". A kernel ++with this disabled will reject attempts to mount filesystems with the ++"mand" mount option with the error status EPERM. 
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt +index a2bff3173088..a55b528cdf1e 100644 +--- a/Documentation/kernel-parameters.txt ++++ b/Documentation/kernel-parameters.txt +@@ -2549,6 +2549,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. + mds=off [X86] + tsx_async_abort=off [X86] + kvm.nx_huge_pages=off [X86] ++ no_entry_flush [PPC] ++ no_uaccess_flush [PPC] + + Exceptions: + This does not have any effect on +@@ -2855,6 +2857,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. + + noefi Disable EFI runtime services support. + ++ no_entry_flush [PPC] Don't flush the L1-D cache when entering the kernel. ++ + noexec [IA-64] + + noexec [X86] +@@ -2904,6 +2908,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted. + nospec_store_bypass_disable + [HW] Disable all mitigations for the Speculative Store Bypass vulnerability + ++ no_uaccess_flush ++ [PPC] Don't flush the L1-D cache after accessing user data. ++ + noxsave [BUGS=X86] Disables x86 extended register state save + and restore using xsave. The kernel will fallback to + enabling legacy floating-point and sse state. +@@ -5054,6 +5061,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted. + Disables the PV optimizations forcing the HVM guest to + run as generic HVM guest with no PV drivers. + ++ xen.event_eoi_delay= [XEN] ++ How long to delay EOI handling in case of event ++ storms (jiffies). Default is 10. ++ ++ xen.event_loop_timeout= [XEN] ++ After which time (jiffies) the event handling loop ++ should start to delay EOI handling. Default is 2. ++ + xirc2ps_cs= [NET,PCMCIA] + Format: + ,,,,,[,[,[,]]] +diff --git a/Documentation/media/uapi/dvb/fe-get-property.rst b/Documentation/media/uapi/dvb/fe-get-property.rst +index 015d4db597b5..c80c5fc6e916 100644 +--- a/Documentation/media/uapi/dvb/fe-get-property.rst ++++ b/Documentation/media/uapi/dvb/fe-get-property.rst +@@ -48,8 +48,11 @@ depends on the delivery system and on the device: + + - This call requires read/write access to the device. + +- - At return, the values are updated to reflect the actual parameters +- used. ++.. note:: ++ ++ At return, the values aren't updated to reflect the actual ++ parameters used. If the actual parameters are needed, an explicit ++ call to ``FE_GET_PROPERTY`` is needed. + + - ``FE_GET_PROPERTY:`` + +diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt +index 54de8ff14627..f29ca1c89375 100644 +--- a/Documentation/networking/ip-sysctl.txt ++++ b/Documentation/networking/ip-sysctl.txt +@@ -1752,6 +1752,16 @@ stable_secret - IPv6 address + + By default the stable secret is unset. + ++addr_gen_mode - INTEGER ++ Defines how link-local and autoconf addresses are generated. ++ ++ 0: generate address based on EUI64 (default) ++ 1: do no generate a link-local address, use EUI64 for addresses generated ++ from autoconf ++ 2: generate stable privacy addresses, using the secret from ++ stable_secret (RFC7217) ++ 3: generate stable privacy addresses, using a random secret if unset ++ + drop_unicast_in_l2_multicast - BOOLEAN + Drop any unicast IPv6 packets that are received in link-layer + multicast (or broadcast) frames. 
+diff --git a/Documentation/sphinx/parse-headers.pl b/Documentation/sphinx/parse-headers.pl +index db0186a7618f..299b0f82af27 100755 +--- a/Documentation/sphinx/parse-headers.pl ++++ b/Documentation/sphinx/parse-headers.pl +@@ -1,4 +1,4 @@ +-#!/usr/bin/perl ++#!/usr/bin/env perl + use strict; + use Text::Tabs; + +diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py +index 94bf6944bb1e..7e79ff6b09e0 100755 +--- a/Documentation/target/tcm_mod_builder.py ++++ b/Documentation/target/tcm_mod_builder.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/python ++#!/usr/bin/env python + # The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD + # + # Copyright (c) 2010 Rising Tide Systems +diff --git a/Documentation/trace/postprocess/decode_msr.py b/Documentation/trace/postprocess/decode_msr.py +index 0ab40e0db580..aa9cc7abd5c2 100644 +--- a/Documentation/trace/postprocess/decode_msr.py ++++ b/Documentation/trace/postprocess/decode_msr.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/python ++#!/usr/bin/env python + # add symbolic names to read_msr / write_msr in trace + # decode_msr msr-index.h < trace + import sys +diff --git a/Documentation/trace/postprocess/trace-pagealloc-postprocess.pl b/Documentation/trace/postprocess/trace-pagealloc-postprocess.pl +index 0a120aae33ce..b9b7d80c2f9d 100644 +--- a/Documentation/trace/postprocess/trace-pagealloc-postprocess.pl ++++ b/Documentation/trace/postprocess/trace-pagealloc-postprocess.pl +@@ -1,4 +1,4 @@ +-#!/usr/bin/perl ++#!/usr/bin/env perl + # This is a POC (proof of concept or piece of crap, take your pick) for reading the + # text representation of trace output related to page allocation. It makes an attempt + # to extract some high-level information on what is going on. The accuracy of the parser +diff --git a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl +index 8f961ef2b457..7749cdf372f7 100644 +--- a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl ++++ b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl +@@ -1,4 +1,4 @@ +-#!/usr/bin/perl ++#!/usr/bin/env perl + # This is a POC for reading the text representation of trace output related to + # page reclaim. It makes an attempt to extract some high-level information on + # what is going on. The accuracy of the parser may vary +diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt +index d1908e50b506..b8f5bf2a890a 100644 +--- a/Documentation/virtual/kvm/api.txt ++++ b/Documentation/virtual/kvm/api.txt +@@ -3534,9 +3534,11 @@ EOI was received. + #define KVM_EXIT_HYPERV_SYNIC 1 + #define KVM_EXIT_HYPERV_HCALL 2 + __u32 type; ++ __u32 pad1; + union { + struct { + __u32 msr; ++ __u32 pad2; + __u64 control; + __u64 evt_page; + __u64 msg_page; +diff --git a/Makefile b/Makefile +index 63779eb0dde7..2c6dddb6edf7 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 227 ++SUBLEVEL = 282 + EXTRAVERSION = + NAME = Roaring Lionus + +@@ -318,12 +318,8 @@ KBUILD_MODULES := + KBUILD_BUILTIN := 1 + + # If we have only "make modules", don't compile built-in objects. +-# When we're building modules with modversions, we need to consider +-# the built-in objects during the descend as well, in order to +-# make sure the checksums are up to date before we record them. 
+- + ifeq ($(MAKECMDGOALS),modules) +- KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1) ++ KBUILD_BUILTIN := + endif + + # If we have "make modules", compile modules +@@ -359,7 +355,7 @@ OBJDUMP = $(CROSS_COMPILE)objdump + AWK = awk + GENKSYMS = scripts/genksyms/genksyms + INSTALLKERNEL := installkernel +-DEPMOD = /sbin/depmod ++DEPMOD = depmod + PERL = perl + PYTHON = python + CHECK = sparse +@@ -801,12 +797,11 @@ KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare) + # See modpost pattern 2 + KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,) + KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior) +-else ++endif + + # These warnings generated too much noise in a regular build. + # Use make W=1 to enable them (see scripts/Makefile.extrawarn) + KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable) +-endif + + KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable) + ifdef CONFIG_FRAME_POINTER +@@ -845,6 +840,13 @@ ifdef CONFIG_FUNCTION_TRACER + ifndef CC_FLAGS_FTRACE + CC_FLAGS_FTRACE := -pg + endif ++ifdef CONFIG_FTRACE_MCOUNT_RECORD ++ # gcc 5 supports generating the mcount tables directly ++ ifeq ($(call cc-option-yn,-mrecord-mcount),y) ++ CC_FLAGS_FTRACE += -mrecord-mcount ++ export CC_USING_RECORD_MCOUNT := 1 ++ endif ++endif + export CC_FLAGS_FTRACE + ifdef CONFIG_HAVE_FENTRY + CC_USING_FENTRY := $(call cc-option, -mfentry -DCC_USING_FENTRY) +@@ -924,12 +926,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init) + # change __FILE__ to the relative path from the srctree + KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) + +-# ensure -fcf-protection is disabled when using retpoline as it is +-# incompatible with -mindirect-branch=thunk-extern +-ifdef CONFIG_RETPOLINE +-KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) +-endif +- + # use the deterministic mode of AR if available + KBUILD_ARFLAGS := $(call ar-option,D) + +@@ -1235,17 +1231,22 @@ endif + # needs to be updated, so this check is forced on all builds + + uts_len := 64 ++ifneq (,$(BUILD_NUMBER)) ++ UTS_RELEASE=$(KERNELRELEASE)-ab$(BUILD_NUMBER) ++else ++ UTS_RELEASE=$(KERNELRELEASE) ++endif + define filechk_utsrelease.h +- if [ `echo -n "$(KERNELRELEASE)" | wc -c ` -gt $(uts_len) ]; then \ +- echo '"$(KERNELRELEASE)" exceeds $(uts_len) characters' >&2; \ +- exit 1; \ +- fi; \ +- (echo \#define UTS_RELEASE \"$(KERNELRELEASE)\";) ++ if [ `echo -n "$(UTS_RELEASE)" | wc -c ` -gt $(uts_len) ]; then \ ++ echo '"$(UTS_RELEASE)" exceeds $(uts_len) characters' >&2; \ ++ exit 1; \ ++ fi; \ ++ (echo \#define UTS_RELEASE \"$(UTS_RELEASE)\";) + endef + + define filechk_version.h + (echo \#define LINUX_VERSION_CODE $(shell \ +- expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \ ++ expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 255); \ + echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';) + endef + +@@ -1339,6 +1340,13 @@ ifdef CONFIG_MODULES + + all: modules + ++# When we're building modules with modversions, we need to consider ++# the built-in objects during the descend as well, in order to ++# make sure the checksums are up to date before we record them. 
++ifdef CONFIG_MODVERSIONS ++ KBUILD_BUILTIN := 1 ++endif ++ + # Build modules + # + # A module can be listed more than once in obj-m resulting in +diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h +index ff4049155c84..355aec0867f4 100644 +--- a/arch/alpha/include/asm/io.h ++++ b/arch/alpha/include/asm/io.h +@@ -491,10 +491,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr) + } + #endif + +-#define ioread16be(p) be16_to_cpu(ioread16(p)) +-#define ioread32be(p) be32_to_cpu(ioread32(p)) +-#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p)) +-#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p)) ++#define ioread16be(p) swab16(ioread16(p)) ++#define ioread32be(p) swab32(ioread32(p)) ++#define iowrite16be(v,p) iowrite16(swab16(v), (p)) ++#define iowrite32be(v,p) iowrite32(swab32(v), (p)) + + #define inb_p inb + #define inw_p inw +diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h +index 94f587535dee..181254a20a2a 100644 +--- a/arch/alpha/include/asm/uaccess.h ++++ b/arch/alpha/include/asm/uaccess.h +@@ -341,45 +341,17 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ + * Complex access routines + */ + +-/* This little bit of silliness is to get the GP loaded for a function +- that ordinarily wouldn't. Otherwise we could have it done by the macro +- directly, which can be optimized the linker. */ +-#ifdef MODULE +-#define __module_address(sym) "r"(sym), +-#define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym +-#else +-#define __module_address(sym) +-#define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp" +-#endif +- +-extern void __copy_user(void); +- +-extern inline long +-__copy_tofrom_user_nocheck(void *to, const void *from, long len) +-{ +- register void * __cu_to __asm__("$6") = to; +- register const void * __cu_from __asm__("$7") = from; +- register long __cu_len __asm__("$0") = len; +- +- __asm__ __volatile__( +- __module_call(28, 3, __copy_user) +- : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to) +- : __module_address(__copy_user) +- "0" (__cu_len), "1" (__cu_from), "2" (__cu_to) +- : "$1", "$2", "$3", "$4", "$5", "$28", "memory"); +- +- return __cu_len; +-} ++extern long __copy_user(void *to, const void *from, long len); + +-#define __copy_to_user(to, from, n) \ +-({ \ +- __chk_user_ptr(to); \ +- __copy_tofrom_user_nocheck((__force void *)(to), (from), (n)); \ ++#define __copy_to_user(to, from, n) \ ++({ \ ++ __chk_user_ptr(to); \ ++ __copy_user((__force void *)(to), (from), (n)); \ + }) +-#define __copy_from_user(to, from, n) \ +-({ \ +- __chk_user_ptr(from); \ +- __copy_tofrom_user_nocheck((to), (__force void *)(from), (n)); \ ++#define __copy_from_user(to, from, n) \ ++({ \ ++ __chk_user_ptr(from); \ ++ __copy_user((to), (__force void *)(from), (n)); \ + }) + + #define __copy_to_user_inatomic __copy_to_user +@@ -389,7 +361,7 @@ extern inline long + copy_to_user(void __user *to, const void *from, long n) + { + if (likely(__access_ok((unsigned long)to, n, get_fs()))) +- n = __copy_tofrom_user_nocheck((__force void *)to, from, n); ++ n = __copy_user((__force void *)to, from, n); + return n; + } + +@@ -404,21 +376,7 @@ copy_from_user(void *to, const void __user *from, long n) + return res; + } + +-extern void __do_clear_user(void); +- +-extern inline long +-__clear_user(void __user *to, long len) +-{ +- register void __user * __cl_to __asm__("$6") = to; +- register long __cl_len __asm__("$0") = len; +- __asm__ __volatile__( +- __module_call(28, 2, __do_clear_user) +- : "=r"(__cl_len), 
"=r"(__cl_to) +- : __module_address(__do_clear_user) +- "0"(__cl_len), "1"(__cl_to) +- : "$1", "$2", "$3", "$4", "$5", "$28", "memory"); +- return __cl_len; +-} ++extern long __clear_user(void __user *to, long len); + + extern inline long + clear_user(void __user *to, long len) +@@ -428,9 +386,6 @@ clear_user(void __user *to, long len) + return len; + } + +-#undef __module_address +-#undef __module_call +- + #define user_addr_max() \ + (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) + +diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c +index 46bf263c3153..d2477a502ce7 100644 +--- a/arch/alpha/kernel/smp.c ++++ b/arch/alpha/kernel/smp.c +@@ -584,7 +584,7 @@ void + smp_send_stop(void) + { + cpumask_t to_whom; +- cpumask_copy(&to_whom, cpu_possible_mask); ++ cpumask_copy(&to_whom, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &to_whom); + #ifdef DEBUG_IPI_MSG + if (hard_smp_processor_id() != boot_cpu_id) +diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile +index 59660743237c..a80815960364 100644 +--- a/arch/alpha/lib/Makefile ++++ b/arch/alpha/lib/Makefile +@@ -20,12 +20,8 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \ + checksum.o \ + csum_partial_copy.o \ + $(ev67-y)strlen.o \ +- $(ev67-y)strcat.o \ +- strcpy.o \ +- $(ev67-y)strncat.o \ +- strncpy.o \ +- $(ev6-y)stxcpy.o \ +- $(ev6-y)stxncpy.o \ ++ stycpy.o \ ++ styncpy.o \ + $(ev67-y)strchr.o \ + $(ev67-y)strrchr.o \ + $(ev6-y)memchr.o \ +@@ -46,11 +42,20 @@ AFLAGS___remqu.o = -DREM + AFLAGS___divlu.o = -DDIV -DINTSIZE + AFLAGS___remlu.o = -DREM -DINTSIZE + +-$(obj)/__divqu.o: $(obj)/$(ev6-y)divide.S +- $(cmd_as_o_S) +-$(obj)/__remqu.o: $(obj)/$(ev6-y)divide.S +- $(cmd_as_o_S) +-$(obj)/__divlu.o: $(obj)/$(ev6-y)divide.S +- $(cmd_as_o_S) +-$(obj)/__remlu.o: $(obj)/$(ev6-y)divide.S +- $(cmd_as_o_S) ++$(addprefix $(obj)/,__divqu.o __remqu.o __divlu.o __remlu.o): \ ++ $(src)/$(ev6-y)divide.S FORCE ++ $(call if_changed_rule,as_o_S) ++ ++# There are direct branches between {str*cpy,str*cat} and stx*cpy. ++# Ensure the branches are within range by merging these objects. ++ ++LDFLAGS_stycpy.o := -r ++LDFLAGS_styncpy.o := -r ++ ++$(obj)/stycpy.o: $(obj)/strcpy.o $(obj)/$(ev67-y)strcat.o \ ++ $(obj)/$(ev6-y)stxcpy.o FORCE ++ $(call if_changed,ld) ++ ++$(obj)/styncpy.o: $(obj)/strncpy.o $(obj)/$(ev67-y)strncat.o \ ++ $(obj)/$(ev6-y)stxncpy.o FORCE ++ $(call if_changed,ld) +diff --git a/arch/alpha/lib/clear_user.S b/arch/alpha/lib/clear_user.S +index bf5b931866ba..006f469fef73 100644 +--- a/arch/alpha/lib/clear_user.S ++++ b/arch/alpha/lib/clear_user.S +@@ -8,21 +8,6 @@ + * right "bytes left to zero" value (and that it is updated only _after_ + * a successful copy). There is also some rather minor exception setup + * stuff. +- * +- * NOTE! This is not directly C-callable, because the calling semantics +- * are different: +- * +- * Inputs: +- * length in $0 +- * destination address in $6 +- * exception pointer in $7 +- * return address in $28 (exceptions expect it there) +- * +- * Outputs: +- * bytes left to copy in $0 +- * +- * Clobbers: +- * $1,$2,$3,$4,$5,$6 + */ + #include + +@@ -38,62 +23,63 @@ + .set noreorder + .align 4 + +- .globl __do_clear_user +- .ent __do_clear_user +- .frame $30, 0, $28 ++ .globl __clear_user ++ .ent __clear_user ++ .frame $30, 0, $26 + .prologue 0 + + $loop: + and $1, 3, $4 # e0 : + beq $4, 1f # .. e1 : + +-0: EX( stq_u $31, 0($6) ) # e0 : zero one word ++0: EX( stq_u $31, 0($16) ) # e0 : zero one word + subq $0, 8, $0 # .. e1 : + subq $4, 1, $4 # e0 : +- addq $6, 8, $6 # .. 
e1 : ++ addq $16, 8, $16 # .. e1 : + bne $4, 0b # e1 : + unop # : + + 1: bic $1, 3, $1 # e0 : + beq $1, $tail # .. e1 : + +-2: EX( stq_u $31, 0($6) ) # e0 : zero four words ++2: EX( stq_u $31, 0($16) ) # e0 : zero four words + subq $0, 8, $0 # .. e1 : +- EX( stq_u $31, 8($6) ) # e0 : ++ EX( stq_u $31, 8($16) ) # e0 : + subq $0, 8, $0 # .. e1 : +- EX( stq_u $31, 16($6) ) # e0 : ++ EX( stq_u $31, 16($16) ) # e0 : + subq $0, 8, $0 # .. e1 : +- EX( stq_u $31, 24($6) ) # e0 : ++ EX( stq_u $31, 24($16) ) # e0 : + subq $0, 8, $0 # .. e1 : + subq $1, 4, $1 # e0 : +- addq $6, 32, $6 # .. e1 : ++ addq $16, 32, $16 # .. e1 : + bne $1, 2b # e1 : + + $tail: + bne $2, 1f # e1 : is there a tail to do? +- ret $31, ($28), 1 # .. e1 : ++ ret $31, ($26), 1 # .. e1 : + +-1: EX( ldq_u $5, 0($6) ) # e0 : ++1: EX( ldq_u $5, 0($16) ) # e0 : + clr $0 # .. e1 : + nop # e1 : + mskqh $5, $0, $5 # e0 : +- EX( stq_u $5, 0($6) ) # e0 : +- ret $31, ($28), 1 # .. e1 : ++ EX( stq_u $5, 0($16) ) # e0 : ++ ret $31, ($26), 1 # .. e1 : + +-__do_clear_user: +- and $6, 7, $4 # e0 : find dest misalignment ++__clear_user: ++ and $17, $17, $0 ++ and $16, 7, $4 # e0 : find dest misalignment + beq $0, $zerolength # .. e1 : + addq $0, $4, $1 # e0 : bias counter + and $1, 7, $2 # e1 : number of bytes in tail + srl $1, 3, $1 # e0 : + beq $4, $loop # .. e1 : + +- EX( ldq_u $5, 0($6) ) # e0 : load dst word to mask back in ++ EX( ldq_u $5, 0($16) ) # e0 : load dst word to mask back in + beq $1, $oneword # .. e1 : sub-word store? + +- mskql $5, $6, $5 # e0 : take care of misaligned head +- addq $6, 8, $6 # .. e1 : +- EX( stq_u $5, -8($6) ) # e0 : ++ mskql $5, $16, $5 # e0 : take care of misaligned head ++ addq $16, 8, $16 # .. e1 : ++ EX( stq_u $5, -8($16) ) # e0 : + addq $0, $4, $0 # .. e1 : bytes left -= 8 - misalignment + subq $1, 1, $1 # e0 : + subq $0, 8, $0 # .. e1 : +@@ -101,15 +87,15 @@ __do_clear_user: + unop # : + + $oneword: +- mskql $5, $6, $4 # e0 : ++ mskql $5, $16, $4 # e0 : + mskqh $5, $2, $5 # e0 : + or $5, $4, $5 # e1 : +- EX( stq_u $5, 0($6) ) # e0 : ++ EX( stq_u $5, 0($16) ) # e0 : + clr $0 # .. e1 : + + $zerolength: + $exception: +- ret $31, ($28), 1 # .. e1 : ++ ret $31, ($26), 1 # .. e1 : + +- .end __do_clear_user +- EXPORT_SYMBOL(__do_clear_user) ++ .end __clear_user ++ EXPORT_SYMBOL(__clear_user) +diff --git a/arch/alpha/lib/copy_user.S b/arch/alpha/lib/copy_user.S +index 509f62b65311..159f1b7e6e49 100644 +--- a/arch/alpha/lib/copy_user.S ++++ b/arch/alpha/lib/copy_user.S +@@ -9,21 +9,6 @@ + * contains the right "bytes left to copy" value (and that it is updated + * only _after_ a successful copy). There is also some rather minor + * exception setup stuff.. +- * +- * NOTE! 
This is not directly C-callable, because the calling semantics are +- * different: +- * +- * Inputs: +- * length in $0 +- * destination address in $6 +- * source address in $7 +- * return address in $28 +- * +- * Outputs: +- * bytes left to copy in $0 +- * +- * Clobbers: +- * $1,$2,$3,$4,$5,$6,$7 + */ + + #include +@@ -49,58 +34,59 @@ + .ent __copy_user + __copy_user: + .prologue 0 +- and $6,7,$3 ++ and $18,$18,$0 ++ and $16,7,$3 + beq $0,$35 + beq $3,$36 + subq $3,8,$3 + .align 4 + $37: +- EXI( ldq_u $1,0($7) ) +- EXO( ldq_u $2,0($6) ) +- extbl $1,$7,$1 +- mskbl $2,$6,$2 +- insbl $1,$6,$1 ++ EXI( ldq_u $1,0($17) ) ++ EXO( ldq_u $2,0($16) ) ++ extbl $1,$17,$1 ++ mskbl $2,$16,$2 ++ insbl $1,$16,$1 + addq $3,1,$3 + bis $1,$2,$1 +- EXO( stq_u $1,0($6) ) ++ EXO( stq_u $1,0($16) ) + subq $0,1,$0 +- addq $6,1,$6 +- addq $7,1,$7 ++ addq $16,1,$16 ++ addq $17,1,$17 + beq $0,$41 + bne $3,$37 + $36: +- and $7,7,$1 ++ and $17,7,$1 + bic $0,7,$4 + beq $1,$43 + beq $4,$48 +- EXI( ldq_u $3,0($7) ) ++ EXI( ldq_u $3,0($17) ) + .align 4 + $50: +- EXI( ldq_u $2,8($7) ) ++ EXI( ldq_u $2,8($17) ) + subq $4,8,$4 +- extql $3,$7,$3 +- extqh $2,$7,$1 ++ extql $3,$17,$3 ++ extqh $2,$17,$1 + bis $3,$1,$1 +- EXO( stq $1,0($6) ) +- addq $7,8,$7 ++ EXO( stq $1,0($16) ) ++ addq $17,8,$17 + subq $0,8,$0 +- addq $6,8,$6 ++ addq $16,8,$16 + bis $2,$2,$3 + bne $4,$50 + $48: + beq $0,$41 + .align 4 + $57: +- EXI( ldq_u $1,0($7) ) +- EXO( ldq_u $2,0($6) ) +- extbl $1,$7,$1 +- mskbl $2,$6,$2 +- insbl $1,$6,$1 ++ EXI( ldq_u $1,0($17) ) ++ EXO( ldq_u $2,0($16) ) ++ extbl $1,$17,$1 ++ mskbl $2,$16,$2 ++ insbl $1,$16,$1 + bis $1,$2,$1 +- EXO( stq_u $1,0($6) ) ++ EXO( stq_u $1,0($16) ) + subq $0,1,$0 +- addq $6,1,$6 +- addq $7,1,$7 ++ addq $16,1,$16 ++ addq $17,1,$17 + bne $0,$57 + br $31,$41 + .align 4 +@@ -108,27 +94,27 @@ $43: + beq $4,$65 + .align 4 + $66: +- EXI( ldq $1,0($7) ) ++ EXI( ldq $1,0($17) ) + subq $4,8,$4 +- EXO( stq $1,0($6) ) +- addq $7,8,$7 ++ EXO( stq $1,0($16) ) ++ addq $17,8,$17 + subq $0,8,$0 +- addq $6,8,$6 ++ addq $16,8,$16 + bne $4,$66 + $65: + beq $0,$41 +- EXI( ldq $2,0($7) ) +- EXO( ldq $1,0($6) ) ++ EXI( ldq $2,0($17) ) ++ EXO( ldq $1,0($16) ) + mskql $2,$0,$2 + mskqh $1,$0,$1 + bis $2,$1,$2 +- EXO( stq $2,0($6) ) ++ EXO( stq $2,0($16) ) + bis $31,$31,$0 + $41: + $35: + $exitin: + $exitout: +- ret $31,($28),1 ++ ret $31,($26),1 + + .end __copy_user + EXPORT_SYMBOL(__copy_user) +diff --git a/arch/alpha/lib/ev6-clear_user.S b/arch/alpha/lib/ev6-clear_user.S +index 05bef6b50598..e179e4757ef8 100644 +--- a/arch/alpha/lib/ev6-clear_user.S ++++ b/arch/alpha/lib/ev6-clear_user.S +@@ -9,21 +9,6 @@ + * a successful copy). There is also some rather minor exception setup + * stuff. + * +- * NOTE! This is not directly C-callable, because the calling semantics +- * are different: +- * +- * Inputs: +- * length in $0 +- * destination address in $6 +- * exception pointer in $7 +- * return address in $28 (exceptions expect it there) +- * +- * Outputs: +- * bytes left to copy in $0 +- * +- * Clobbers: +- * $1,$2,$3,$4,$5,$6 +- * + * Much of the information about 21264 scheduling/coding comes from: + * Compiler Writer's Guide for the Alpha 21264 + * abbreviated as 'CWG' in other comments here +@@ -56,14 +41,15 @@ + .set noreorder + .align 4 + +- .globl __do_clear_user +- .ent __do_clear_user +- .frame $30, 0, $28 ++ .globl __clear_user ++ .ent __clear_user ++ .frame $30, 0, $26 + .prologue 0 + + # Pipeline info : Slotting & Comments +-__do_clear_user: +- and $6, 7, $4 # .. E .. .. 
: find dest head misalignment ++__clear_user: ++ and $17, $17, $0 ++ and $16, 7, $4 # .. E .. .. : find dest head misalignment + beq $0, $zerolength # U .. .. .. : U L U L + + addq $0, $4, $1 # .. .. .. E : bias counter +@@ -75,14 +61,14 @@ __do_clear_user: + + /* + * Head is not aligned. Write (8 - $4) bytes to head of destination +- * This means $6 is known to be misaligned ++ * This means $16 is known to be misaligned + */ +- EX( ldq_u $5, 0($6) ) # .. .. .. L : load dst word to mask back in ++ EX( ldq_u $5, 0($16) ) # .. .. .. L : load dst word to mask back in + beq $1, $onebyte # .. .. U .. : sub-word store? +- mskql $5, $6, $5 # .. U .. .. : take care of misaligned head +- addq $6, 8, $6 # E .. .. .. : L U U L ++ mskql $5, $16, $5 # .. U .. .. : take care of misaligned head ++ addq $16, 8, $16 # E .. .. .. : L U U L + +- EX( stq_u $5, -8($6) ) # .. .. .. L : ++ EX( stq_u $5, -8($16) ) # .. .. .. L : + subq $1, 1, $1 # .. .. E .. : + addq $0, $4, $0 # .. E .. .. : bytes left -= 8 - misalignment + subq $0, 8, $0 # E .. .. .. : U L U L +@@ -93,11 +79,11 @@ __do_clear_user: + * values upon initial entry to the loop + * $1 is number of quadwords to clear (zero is a valid value) + * $2 is number of trailing bytes (0..7) ($2 never used...) +- * $6 is known to be aligned 0mod8 ++ * $16 is known to be aligned 0mod8 + */ + $headalign: + subq $1, 16, $4 # .. .. .. E : If < 16, we can not use the huge loop +- and $6, 0x3f, $2 # .. .. E .. : Forward work for huge loop ++ and $16, 0x3f, $2 # .. .. E .. : Forward work for huge loop + subq $2, 0x40, $3 # .. E .. .. : bias counter (huge loop) + blt $4, $trailquad # U .. .. .. : U L U L + +@@ -114,21 +100,21 @@ $headalign: + beq $3, $bigalign # U .. .. .. : U L U L : Aligned 0mod64 + + $alignmod64: +- EX( stq_u $31, 0($6) ) # .. .. .. L ++ EX( stq_u $31, 0($16) ) # .. .. .. L + addq $3, 8, $3 # .. .. E .. + subq $0, 8, $0 # .. E .. .. + nop # E .. .. .. : U L U L + + nop # .. .. .. E + subq $1, 1, $1 # .. .. E .. +- addq $6, 8, $6 # .. E .. .. ++ addq $16, 8, $16 # .. E .. .. + blt $3, $alignmod64 # U .. .. .. : U L U L + + $bigalign: + /* + * $0 is the number of bytes left + * $1 is the number of quads left +- * $6 is aligned 0mod64 ++ * $16 is aligned 0mod64 + * we know that we'll be taking a minimum of one trip through + * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle + * We are _not_ going to update $0 after every single store. That +@@ -145,39 +131,39 @@ $bigalign: + nop # E : + nop # E : + nop # E : +- bis $6,$6,$3 # E : U L U L : Initial wh64 address is dest ++ bis $16,$16,$3 # E : U L U L : Initial wh64 address is dest + /* This might actually help for the current trip... */ + + $do_wh64: + wh64 ($3) # .. .. .. L1 : memory subsystem hint + subq $1, 16, $4 # .. .. E .. : Forward calculation - repeat the loop? +- EX( stq_u $31, 0($6) ) # .. L .. .. ++ EX( stq_u $31, 0($16) ) # .. L .. .. + subq $0, 8, $0 # E .. .. .. : U L U L + +- addq $6, 128, $3 # E : Target address of wh64 +- EX( stq_u $31, 8($6) ) # L : +- EX( stq_u $31, 16($6) ) # L : ++ addq $16, 128, $3 # E : Target address of wh64 ++ EX( stq_u $31, 8($16) ) # L : ++ EX( stq_u $31, 16($16) ) # L : + subq $0, 16, $0 # E : U L L U + + nop # E : +- EX( stq_u $31, 24($6) ) # L : +- EX( stq_u $31, 32($6) ) # L : ++ EX( stq_u $31, 24($16) ) # L : ++ EX( stq_u $31, 32($16) ) # L : + subq $0, 168, $5 # E : U L L U : two trips through the loop left? 
+ /* 168 = 192 - 24, since we've already completed some stores */ + + subq $0, 16, $0 # E : +- EX( stq_u $31, 40($6) ) # L : +- EX( stq_u $31, 48($6) ) # L : +- cmovlt $5, $6, $3 # E : U L L U : Latency 2, extra mapping cycle ++ EX( stq_u $31, 40($16) ) # L : ++ EX( stq_u $31, 48($16) ) # L : ++ cmovlt $5, $16, $3 # E : U L L U : Latency 2, extra mapping cycle + + subq $1, 8, $1 # E : + subq $0, 16, $0 # E : +- EX( stq_u $31, 56($6) ) # L : ++ EX( stq_u $31, 56($16) ) # L : + nop # E : U L U L + + nop # E : + subq $0, 8, $0 # E : +- addq $6, 64, $6 # E : ++ addq $16, 64, $16 # E : + bge $4, $do_wh64 # U : U L U L + + $trailquad: +@@ -190,14 +176,14 @@ $trailquad: + beq $1, $trailbytes # U .. .. .. : U L U L : Only 0..7 bytes to go + + $onequad: +- EX( stq_u $31, 0($6) ) # .. .. .. L ++ EX( stq_u $31, 0($16) ) # .. .. .. L + subq $1, 1, $1 # .. .. E .. + subq $0, 8, $0 # .. E .. .. + nop # E .. .. .. : U L U L + + nop # .. .. .. E + nop # .. .. E .. +- addq $6, 8, $6 # .. E .. .. ++ addq $16, 8, $16 # .. E .. .. + bgt $1, $onequad # U .. .. .. : U L U L + + # We have an unknown number of bytes left to go. +@@ -211,9 +197,9 @@ $trailbytes: + # so we will use $0 as the loop counter + # We know for a fact that $0 > 0 zero due to previous context + $onebyte: +- EX( stb $31, 0($6) ) # .. .. .. L ++ EX( stb $31, 0($16) ) # .. .. .. L + subq $0, 1, $0 # .. .. E .. : +- addq $6, 1, $6 # .. E .. .. : ++ addq $16, 1, $16 # .. E .. .. : + bgt $0, $onebyte # U .. .. .. : U L U L + + $zerolength: +@@ -221,6 +207,6 @@ $exception: # Destination for exception recovery(?) + nop # .. .. .. E : + nop # .. .. E .. : + nop # .. E .. .. : +- ret $31, ($28), 1 # L0 .. .. .. : L U L U +- .end __do_clear_user +- EXPORT_SYMBOL(__do_clear_user) ++ ret $31, ($26), 1 # L0 .. .. .. : L U L U ++ .end __clear_user ++ EXPORT_SYMBOL(__clear_user) +diff --git a/arch/alpha/lib/ev6-copy_user.S b/arch/alpha/lib/ev6-copy_user.S +index be720b518af9..35e6710d0700 100644 +--- a/arch/alpha/lib/ev6-copy_user.S ++++ b/arch/alpha/lib/ev6-copy_user.S +@@ -12,21 +12,6 @@ + * only _after_ a successful copy). There is also some rather minor + * exception setup stuff.. + * +- * NOTE! This is not directly C-callable, because the calling semantics are +- * different: +- * +- * Inputs: +- * length in $0 +- * destination address in $6 +- * source address in $7 +- * return address in $28 +- * +- * Outputs: +- * bytes left to copy in $0 +- * +- * Clobbers: +- * $1,$2,$3,$4,$5,$6,$7 +- * + * Much of the information about 21264 scheduling/coding comes from: + * Compiler Writer's Guide for the Alpha 21264 + * abbreviated as 'CWG' in other comments here +@@ -60,10 +45,11 @@ + # Pipeline info: Slotting & Comments + __copy_user: + .prologue 0 +- subq $0, 32, $1 # .. E .. .. : Is this going to be a small copy? ++ andq $18, $18, $0 ++ subq $18, 32, $1 # .. E .. .. : Is this going to be a small copy? + beq $0, $zerolength # U .. .. .. : U L U L + +- and $6,7,$3 # .. .. .. E : is leading dest misalignment ++ and $16,7,$3 # .. .. .. E : is leading dest misalignment + ble $1, $onebyteloop # .. .. U .. : 1st branch : small amount of data + beq $3, $destaligned # .. U .. .. : 2nd (one cycle fetcher stall) + subq $3, 8, $3 # E .. .. .. : L U U L : trip counter +@@ -73,17 +59,17 @@ __copy_user: + * We know we have at least one trip through this loop + */ + $aligndest: +- EXI( ldbu $1,0($7) ) # .. .. .. L : Keep loads separate from stores +- addq $6,1,$6 # .. .. E .. : Section 3.8 in the CWG ++ EXI( ldbu $1,0($17) ) # .. .. .. 
L : Keep loads separate from stores ++ addq $16,1,$16 # .. .. E .. : Section 3.8 in the CWG + addq $3,1,$3 # .. E .. .. : + nop # E .. .. .. : U L U L + + /* +- * the -1 is to compensate for the inc($6) done in a previous quadpack ++ * the -1 is to compensate for the inc($16) done in a previous quadpack + * which allows us zero dependencies within either quadpack in the loop + */ +- EXO( stb $1,-1($6) ) # .. .. .. L : +- addq $7,1,$7 # .. .. E .. : Section 3.8 in the CWG ++ EXO( stb $1,-1($16) ) # .. .. .. L : ++ addq $17,1,$17 # .. .. E .. : Section 3.8 in the CWG + subq $0,1,$0 # .. E .. .. : + bne $3, $aligndest # U .. .. .. : U L U L + +@@ -92,29 +78,29 @@ $aligndest: + * If we arrived via branch, we have a minimum of 32 bytes + */ + $destaligned: +- and $7,7,$1 # .. .. .. E : Check _current_ source alignment ++ and $17,7,$1 # .. .. .. E : Check _current_ source alignment + bic $0,7,$4 # .. .. E .. : number bytes as a quadword loop +- EXI( ldq_u $3,0($7) ) # .. L .. .. : Forward fetch for fallthrough code ++ EXI( ldq_u $3,0($17) ) # .. L .. .. : Forward fetch for fallthrough code + beq $1,$quadaligned # U .. .. .. : U L U L + + /* +- * In the worst case, we've just executed an ldq_u here from 0($7) ++ * In the worst case, we've just executed an ldq_u here from 0($17) + * and we'll repeat it once if we take the branch + */ + + /* Misaligned quadword loop - not unrolled. Leave it that way. */ + $misquad: +- EXI( ldq_u $2,8($7) ) # .. .. .. L : ++ EXI( ldq_u $2,8($17) ) # .. .. .. L : + subq $4,8,$4 # .. .. E .. : +- extql $3,$7,$3 # .. U .. .. : +- extqh $2,$7,$1 # U .. .. .. : U U L L ++ extql $3,$17,$3 # .. U .. .. : ++ extqh $2,$17,$1 # U .. .. .. : U U L L + + bis $3,$1,$1 # .. .. .. E : +- EXO( stq $1,0($6) ) # .. .. L .. : +- addq $7,8,$7 # .. E .. .. : ++ EXO( stq $1,0($16) ) # .. .. L .. : ++ addq $17,8,$17 # .. E .. .. : + subq $0,8,$0 # E .. .. .. : U L L U + +- addq $6,8,$6 # .. .. .. E : ++ addq $16,8,$16 # .. .. .. E : + bis $2,$2,$3 # .. .. E .. : + nop # .. E .. .. : + bne $4,$misquad # U .. .. .. : U L U L +@@ -125,8 +111,8 @@ $misquad: + beq $0,$zerolength # U .. .. .. : U L U L + + /* We know we have at least one trip through the byte loop */ +- EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad +- addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG) ++ EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad ++ addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG) + nop # .. E .. .. : + br $31, $dirtyentry # L0 .. .. .. : L U U L + /* Do the trailing byte loop load, then hop into the store part of the loop */ +@@ -136,8 +122,8 @@ $misquad: + * Based upon the usage context, it's worth the effort to unroll this loop + * $0 - number of bytes to be moved + * $4 - number of bytes to move as quadwords +- * $6 is current destination address +- * $7 is current source address ++ * $16 is current destination address ++ * $17 is current source address + */ + $quadaligned: + subq $4, 32, $2 # .. .. .. E : do not unroll for small stuff +@@ -155,29 +141,29 @@ $quadaligned: + * instruction memory hint instruction). + */ + $unroll4: +- EXI( ldq $1,0($7) ) # .. .. .. L +- EXI( ldq $2,8($7) ) # .. .. L .. ++ EXI( ldq $1,0($17) ) # .. .. .. L ++ EXI( ldq $2,8($17) ) # .. .. L .. + subq $4,32,$4 # .. E .. .. + nop # E .. .. .. : U U L L + +- addq $7,16,$7 # .. .. .. E +- EXO( stq $1,0($6) ) # .. .. L .. +- EXO( stq $2,8($6) ) # .. L .. .. ++ addq $17,16,$17 # .. .. .. E ++ EXO( stq $1,0($16) ) # .. .. L .. ++ EXO( stq $2,8($16) ) # .. L .. .. 
+ subq $0,16,$0 # E .. .. .. : U L L U + +- addq $6,16,$6 # .. .. .. E +- EXI( ldq $1,0($7) ) # .. .. L .. +- EXI( ldq $2,8($7) ) # .. L .. .. ++ addq $16,16,$16 # .. .. .. E ++ EXI( ldq $1,0($17) ) # .. .. L .. ++ EXI( ldq $2,8($17) ) # .. L .. .. + subq $4, 32, $3 # E .. .. .. : U U L L : is there enough for another trip? + +- EXO( stq $1,0($6) ) # .. .. .. L +- EXO( stq $2,8($6) ) # .. .. L .. ++ EXO( stq $1,0($16) ) # .. .. .. L ++ EXO( stq $2,8($16) ) # .. .. L .. + subq $0,16,$0 # .. E .. .. +- addq $7,16,$7 # E .. .. .. : U L L U ++ addq $17,16,$17 # E .. .. .. : U L L U + + nop # .. .. .. E + nop # .. .. E .. +- addq $6,16,$6 # .. E .. .. ++ addq $16,16,$16 # .. E .. .. + bgt $3,$unroll4 # U .. .. .. : U L U L + + nop +@@ -186,14 +172,14 @@ $unroll4: + beq $4, $noquads + + $onequad: +- EXI( ldq $1,0($7) ) ++ EXI( ldq $1,0($17) ) + subq $4,8,$4 +- addq $7,8,$7 ++ addq $17,8,$17 + nop + +- EXO( stq $1,0($6) ) ++ EXO( stq $1,0($16) ) + subq $0,8,$0 +- addq $6,8,$6 ++ addq $16,8,$16 + bne $4,$onequad + + $noquads: +@@ -207,23 +193,23 @@ $noquads: + * There's no point in doing a lot of complex alignment calculations to try to + * to quadword stuff for a small amount of data. + * $0 - remaining number of bytes left to copy +- * $6 - current dest addr +- * $7 - current source addr ++ * $16 - current dest addr ++ * $17 - current source addr + */ + + $onebyteloop: +- EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad +- addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG) ++ EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad ++ addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG) + nop # .. E .. .. : + nop # E .. .. .. : U L U L + + $dirtyentry: + /* +- * the -1 is to compensate for the inc($6) done in a previous quadpack ++ * the -1 is to compensate for the inc($16) done in a previous quadpack + * which allows us zero dependencies within either quadpack in the loop + */ +- EXO ( stb $2,-1($6) ) # .. .. .. L : +- addq $7,1,$7 # .. .. E .. : quadpack as the load ++ EXO ( stb $2,-1($16) ) # .. .. .. L : ++ addq $17,1,$17 # .. .. E .. : quadpack as the load + subq $0,1,$0 # .. E .. .. : change count _after_ copy + bgt $0,$onebyteloop # U .. .. .. : U L U L + +@@ -233,7 +219,7 @@ $exitout: # Destination for exception recovery(?) + nop # .. .. .. E + nop # .. .. E .. + nop # .. E .. .. +- ret $31,($28),1 # L0 .. .. .. : L U L U ++ ret $31,($26),1 # L0 .. .. .. 
: L U L U + + .end __copy_user + EXPORT_SYMBOL(__copy_user) +diff --git a/arch/arc/Makefile b/arch/arc/Makefile +index fd79faab7892..5dc2d73c6499 100644 +--- a/arch/arc/Makefile ++++ b/arch/arc/Makefile +@@ -108,6 +108,7 @@ bootpImage: vmlinux + + boot_targets += uImage uImage.bin uImage.gz + ++PHONY += $(boot_targets) + $(boot_targets): vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + +diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h +index aa2d6da9d187..12c74e826530 100644 +--- a/arch/arc/include/asm/elf.h ++++ b/arch/arc/include/asm/elf.h +@@ -26,7 +26,7 @@ + #define R_ARC_32_PCREL 0x31 + + /*to set parameters in the core dumps */ +-#define ELF_ARCH EM_ARCOMPACT ++#define ELF_ARCH EM_ARC_INUSE + #define ELF_CLASS ELFCLASS32 + + #ifdef CONFIG_CPU_BIG_ENDIAN +diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h +index ffb5f33475f1..f0f43eb709d2 100644 +--- a/arch/arc/include/asm/page.h ++++ b/arch/arc/include/asm/page.h +@@ -13,6 +13,7 @@ + #ifndef __ASSEMBLY__ + + #define clear_page(paddr) memset((paddr), 0, PAGE_SIZE) ++#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) + + struct vm_area_struct; +diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S +index 85d9ea4a0acc..37ad245cf989 100644 +--- a/arch/arc/kernel/entry.S ++++ b/arch/arc/kernel/entry.S +@@ -169,7 +169,7 @@ tracesys: + + ; Do the Sys Call as we normally would. + ; Validate the Sys Call number +- cmp r8, NR_syscalls ++ cmp r8, NR_syscalls - 1 + mov.hi r0, -ENOSYS + bhi tracesys_exit + +@@ -252,7 +252,7 @@ ENTRY(EV_Trap) + ;============ Normal syscall case + + ; syscall num shd not exceed the total system calls avail +- cmp r8, NR_syscalls ++ cmp r8, NR_syscalls - 1 + mov.hi r0, -ENOSYS + bhi .Lret_from_system_call + +diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c +index d347bbc086fe..16cdb471d3db 100644 +--- a/arch/arc/kernel/signal.c ++++ b/arch/arc/kernel/signal.c +@@ -97,7 +97,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs, + sizeof(sf->uc.uc_mcontext.regs.scratch)); + err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t)); + +- return err; ++ return err ? -EFAULT : 0; + } + + static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) +@@ -111,7 +111,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) + &(sf->uc.uc_mcontext.regs.scratch), + sizeof(sf->uc.uc_mcontext.regs.scratch)); + if (err) +- return err; ++ return -EFAULT; + + set_current_blocked(&set); + regs->bta = uregs.scratch.bta; +diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c +index b9192a653b7e..3ee19b1e79be 100644 +--- a/arch/arc/kernel/stacktrace.c ++++ b/arch/arc/kernel/stacktrace.c +@@ -39,15 +39,15 @@ + + #ifdef CONFIG_ARC_DW2_UNWIND + +-static void seed_unwind_frame_info(struct task_struct *tsk, +- struct pt_regs *regs, +- struct unwind_frame_info *frame_info) ++static int ++seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs, ++ struct unwind_frame_info *frame_info) + { + /* + * synchronous unwinding (e.g. 
dump_stack) + * - uses current values of SP and friends + */ +- if (tsk == NULL && regs == NULL) { ++ if (regs == NULL && (tsk == NULL || tsk == current)) { + unsigned long fp, sp, blink, ret; + frame_info->task = current; + +@@ -66,11 +66,15 @@ static void seed_unwind_frame_info(struct task_struct *tsk, + frame_info->call_frame = 0; + } else if (regs == NULL) { + /* +- * Asynchronous unwinding of sleeping task +- * - Gets SP etc from task's pt_regs (saved bottom of kernel +- * mode stack of task) ++ * Asynchronous unwinding of a likely sleeping task ++ * - first ensure it is actually sleeping ++ * - if so, it will be in __switch_to, kernel mode SP of task ++ * is safe-kept and BLINK at a well known location in there + */ + ++ if (tsk->state == TASK_RUNNING) ++ return -1; ++ + frame_info->task = tsk; + + frame_info->regs.r27 = TSK_K_FP(tsk); +@@ -104,6 +108,8 @@ static void seed_unwind_frame_info(struct task_struct *tsk, + frame_info->regs.r63 = regs->ret; + frame_info->call_frame = 0; + } ++ ++ return 0; + } + + #endif +@@ -113,11 +119,12 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, + int (*consumer_fn) (unsigned int, void *), void *arg) + { + #ifdef CONFIG_ARC_DW2_UNWIND +- int ret = 0; ++ int ret = 0, cnt = 0; + unsigned int address; + struct unwind_frame_info frame_info; + +- seed_unwind_frame_info(tsk, regs, &frame_info); ++ if (seed_unwind_frame_info(tsk, regs, &frame_info)) ++ return 0; + + while (1) { + address = UNW_PC(&frame_info); +@@ -133,6 +140,11 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, + break; + + frame_info.regs.r63 = frame_info.regs.r31; ++ ++ if (cnt++ > 128) { ++ printk("unwinder looping too long, aborting !\n"); ++ return 0; ++ } + } + + return address; /* return the last address it saw */ +diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S +index f35ed578e007..4d823d3f65bb 100644 +--- a/arch/arc/kernel/vmlinux.lds.S ++++ b/arch/arc/kernel/vmlinux.lds.S +@@ -92,6 +92,8 @@ SECTIONS + CPUIDLE_TEXT + LOCK_TEXT + KPROBES_TEXT ++ IRQENTRY_TEXT ++ SOFTIRQENTRY_TEXT + *(.fixup) + *(.gnu.warning) + } +diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h +index 3c401ce0351e..fb959828630c 100644 +--- a/arch/arc/plat-eznps/include/plat/ctop.h ++++ b/arch/arc/plat-eznps/include/plat/ctop.h +@@ -42,7 +42,6 @@ + #define CTOP_AUX_HW_COMPLY (CTOP_AUX_BASE + 0x024) + #define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030) + #define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080) +-#define CTOP_AUX_IACK (CTOP_AUX_BASE + 0x088) + #define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C) + #define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300) + +diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig +index 76b0f4131eaf..767c8b14ca2d 100644 +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -633,7 +633,9 @@ config ARCH_S3C24XX + select HAVE_S3C_RTC if RTC_CLASS + select MULTI_IRQ_HANDLER + select NEED_MACH_IO_H ++ select S3C2410_WATCHDOG + select SAMSUNG_ATAGS ++ select WATCHDOG + help + Samsung S3C2410, S3C2412, S3C2413, S3C2416, S3C2440, S3C2442, S3C2443 + and S3C2450 SoCs based systems, such as the Simtec Electronics BAST +@@ -1587,12 +1589,10 @@ config THUMB2_KERNEL + depends on (CPU_V7 || CPU_V7M) && !CPU_V6 && !CPU_V6K + default y if CPU_THUMBONLY + select AEABI +- select ARM_ASM_UNIFIED + select ARM_UNWIND + help + By enabling this option, the kernel will be compiled in +- Thumb-2 mode. A compiler/assembler that understand the unified +- ARM-Thumb syntax is needed. ++ Thumb-2 mode. + + If unsure, say N. 
+ +@@ -1627,9 +1627,6 @@ config THUMB2_AVOID_R_ARM_THM_JUMP11 + + Unless you are sure your tools don't have this problem, say Y. + +-config ARM_ASM_UNIFIED +- bool +- + config ARM_PATCH_IDIV + bool "Runtime patch udiv/sdiv instructions into __aeabi_{u}idiv()" + depends on CPU_32v7 && !XIP_KERNEL +diff --git a/arch/arm/Makefile b/arch/arm/Makefile +index 963a87972cc8..d212d6b8f2e1 100644 +--- a/arch/arm/Makefile ++++ b/arch/arm/Makefile +@@ -126,9 +126,11 @@ ifeq ($(CONFIG_ARM_UNWIND),y) + CFLAGS_ABI +=-funwind-tables + endif + ++# Accept old syntax despite ".syntax unified" ++AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W) ++ + ifeq ($(CONFIG_THUMB2_KERNEL),y) + AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mauto-it) +-AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W) + CFLAGS_ISA :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN) + AFLAGS_ISA :=$(CFLAGS_ISA) -Wa$(comma)-mthumb + # Work around buggy relocation from gas if requested: +@@ -136,7 +138,7 @@ ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y) + CFLAGS_MODULE +=-fno-optimize-sibling-calls + endif + else +-CFLAGS_ISA :=$(call cc-option,-marm,) ++CFLAGS_ISA :=$(call cc-option,-marm,) $(AFLAGS_NOWARN) + AFLAGS_ISA :=$(CFLAGS_ISA) + endif + +diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S +index f1b4ea9700b8..658efefff545 100644 +--- a/arch/arm/boot/compressed/head.S ++++ b/arch/arm/boot/compressed/head.S +@@ -1082,9 +1082,9 @@ __armv4_mmu_cache_off: + __armv7_mmu_cache_off: + mrc p15, 0, r0, c1, c0 + #ifdef CONFIG_MMU +- bic r0, r0, #0x000d ++ bic r0, r0, #0x0005 + #else +- bic r0, r0, #0x000c ++ bic r0, r0, #0x0004 + #endif + mcr p15, 0, r0, c1, c0 @ turn MMU and cache off + mov r12, lr +diff --git a/arch/arm/boot/dts/am335x-cm-t335.dts b/arch/arm/boot/dts/am335x-cm-t335.dts +index 947c81b7aaaf..56a04d3086c3 100644 +--- a/arch/arm/boot/dts/am335x-cm-t335.dts ++++ b/arch/arm/boot/dts/am335x-cm-t335.dts +@@ -552,7 +552,7 @@ status = "okay"; + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&spi0_pins>; +- ti,pindir-d0-out-d1-in = <1>; ++ ti,pindir-d0-out-d1-in; + /* WLS1271 WiFi */ + wlcore: wlcore@1 { + compatible = "ti,wl1271"; +diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts +index 21918807c9f6..f42a92391289 100644 +--- a/arch/arm/boot/dts/am43x-epos-evm.dts ++++ b/arch/arm/boot/dts/am43x-epos-evm.dts +@@ -411,7 +411,7 @@ + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c0_pins>; +- clock-frequency = <400000>; ++ clock-frequency = <100000>; + + tps65218: tps65218@24 { + reg = <0x24>; +diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts +index 5a53fcf542ab..07133c5ad294 100644 +--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts ++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts +@@ -231,6 +231,11 @@ + atmel,pins = + ; /* PE9, conflicts with A9 */ + }; ++ pinctrl_usb_default: usb_default { ++ atmel,pins = ++ ; ++ }; + }; + }; + }; +@@ -288,6 +293,8 @@ + &pioE 3 GPIO_ACTIVE_LOW + &pioE 4 GPIO_ACTIVE_LOW + >; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb_default>; + status = "okay"; + }; + +diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts +index 44d1171c7fc0..4ce865629383 100644 +--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts ++++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts +@@ -152,6 +152,11 @@ + atmel,pins = + ; + }; ++ 
pinctrl_usb_default: usb_default { ++ atmel,pins = ++ ; ++ }; + pinctrl_key_gpio: key_gpio_0 { + atmel,pins = + ; +@@ -177,6 +182,8 @@ + &pioE 11 GPIO_ACTIVE_HIGH + &pioE 14 GPIO_ACTIVE_HIGH + >; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb_default>; + status = "okay"; + }; + +diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi +index 70adf940d98c..620aa3f555f8 100644 +--- a/arch/arm/boot/dts/at91sam9rl.dtsi ++++ b/arch/arm/boot/dts/at91sam9rl.dtsi +@@ -266,23 +266,26 @@ + atmel,adc-use-res = "highres"; + + trigger0 { +- trigger-name = "timer-counter-0"; ++ trigger-name = "external-rising"; + trigger-value = <0x1>; ++ trigger-external; + }; ++ + trigger1 { +- trigger-name = "timer-counter-1"; +- trigger-value = <0x3>; ++ trigger-name = "external-falling"; ++ trigger-value = <0x2>; ++ trigger-external; + }; + + trigger2 { +- trigger-name = "timer-counter-2"; +- trigger-value = <0x5>; ++ trigger-name = "external-any"; ++ trigger-value = <0x3>; ++ trigger-external; + }; + + trigger3 { +- trigger-name = "external"; +- trigger-value = <0x13>; +- trigger-external; ++ trigger-name = "continuous"; ++ trigger-value = <0x6>; + }; + }; + +diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi +index 547369c69e96..aedbea888684 100644 +--- a/arch/arm/boot/dts/bcm63138.dtsi ++++ b/arch/arm/boot/dts/bcm63138.dtsi +@@ -174,7 +174,7 @@ + status = "disabled"; + }; + +- nand: nand@2000 { ++ nand_controller: nand-controller@2000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "brcm,nand-bcm63138", "brcm,brcmnand-v7.0", "brcm,brcmnand"; +diff --git a/arch/arm/boot/dts/bcm7445-bcm97445svmb.dts b/arch/arm/boot/dts/bcm7445-bcm97445svmb.dts +index 0bb8d17e4c2d..e51c9b079432 100644 +--- a/arch/arm/boot/dts/bcm7445-bcm97445svmb.dts ++++ b/arch/arm/boot/dts/bcm7445-bcm97445svmb.dts +@@ -13,10 +13,10 @@ + }; + }; + +-&nand { ++&nand_controller { + status = "okay"; + +- nandcs@1 { ++ nand@1 { + compatible = "brcm,nandcs"; + reg = <1>; + nand-ecc-step-size = <512>; +diff --git a/arch/arm/boot/dts/bcm7445.dtsi b/arch/arm/boot/dts/bcm7445.dtsi +index 4791321969b3..3f002f2047f1 100644 +--- a/arch/arm/boot/dts/bcm7445.dtsi ++++ b/arch/arm/boot/dts/bcm7445.dtsi +@@ -149,7 +149,7 @@ + reg-names = "aon-ctrl", "aon-sram"; + }; + +- nand: nand@3e2800 { ++ nand_controller: nand-controller@3e2800 { + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; +diff --git a/arch/arm/boot/dts/bcm963138dvt.dts b/arch/arm/boot/dts/bcm963138dvt.dts +index 370aa2cfddf2..439cff69e948 100644 +--- a/arch/arm/boot/dts/bcm963138dvt.dts ++++ b/arch/arm/boot/dts/bcm963138dvt.dts +@@ -29,10 +29,10 @@ + status = "okay"; + }; + +-&nand { ++&nand_controller { + status = "okay"; + +- nandcs@0 { ++ nand@0 { + compatible = "brcm,nandcs"; + reg = <0>; + nand-ecc-strength = <4>; +diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts +index a97a785ccc6b..f0906d67a107 100644 +--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts ++++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts +@@ -134,7 +134,7 @@ + compatible = "maxim,max77686"; + reg = <0x09>; + interrupt-parent = <&gpx3>; +- interrupts = <2 IRQ_TYPE_NONE>; ++ interrupts = <2 IRQ_TYPE_LEVEL_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&max77686_irq>; + wakeup-source; +diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi +index d5d51916bb74..b24a77781e75 100644 +--- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi ++++ 
b/arch/arm/boot/dts/exynos5250-snow-common.dtsi +@@ -280,7 +280,7 @@ + max77686: max77686@09 { + compatible = "maxim,max77686"; + interrupt-parent = <&gpx3>; +- interrupts = <2 IRQ_TYPE_NONE>; ++ interrupts = <2 IRQ_TYPE_LEVEL_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&max77686_irq>; + wakeup-source; +diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts +index 4d7bdb735ed3..e4433ecd9fe4 100644 +--- a/arch/arm/boot/dts/exynos5250-spring.dts ++++ b/arch/arm/boot/dts/exynos5250-spring.dts +@@ -112,7 +112,7 @@ + compatible = "samsung,s5m8767-pmic"; + reg = <0x66>; + interrupt-parent = <&gpx3>; +- interrupts = <2 IRQ_TYPE_NONE>; ++ interrupts = <2 IRQ_TYPE_LEVEL_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&s5m8767_irq &s5m8767_dvs &s5m8767_ds>; + wakeup-source; +diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts +index 3c271cb4b2be..a2f94cb2ded8 100644 +--- a/arch/arm/boot/dts/exynos5410-odroidxu.dts ++++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts +@@ -271,6 +271,8 @@ + regulator-name = "vddq_lcd"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; ++ /* Supplies also GPK and GPJ */ ++ regulator-always-on; + }; + + ldo8_reg: LDO8 { +@@ -560,11 +562,11 @@ + }; + + &usbdrd_dwc3_0 { +- dr_mode = "host"; ++ dr_mode = "peripheral"; + }; + + &usbdrd_dwc3_1 { +- dr_mode = "peripheral"; ++ dr_mode = "host"; + }; + + &usbdrd3_0 { +diff --git a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi +index a083d23fdee3..872096edd77f 100644 +--- a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi ++++ b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi +@@ -563,6 +563,34 @@ + interrupt-controller; + #interrupt-cells = <2>; + }; ++ ++ usb3_1_oc: usb3-1-oc { ++ samsung,pins = "gpk2-4", "gpk2-5"; ++ samsung,pin-function = ; ++ samsung,pin-pud = ; ++ samsung,pin-drv = ; ++ }; ++ ++ usb3_1_vbusctrl: usb3-1-vbusctrl { ++ samsung,pins = "gpk2-6", "gpk2-7"; ++ samsung,pin-function = ; ++ samsung,pin-pud = ; ++ samsung,pin-drv = ; ++ }; ++ ++ usb3_0_oc: usb3-0-oc { ++ samsung,pins = "gpk3-0", "gpk3-1"; ++ samsung,pin-function = ; ++ samsung,pin-pud = ; ++ samsung,pin-drv = ; ++ }; ++ ++ usb3_0_vbusctrl: usb3-0-vbusctrl { ++ samsung,pins = "gpk3-2", "gpk3-3"; ++ samsung,pin-function = ; ++ samsung,pin-pud = ; ++ samsung,pin-drv = ; ++ }; + }; + + &pinctrl_2 { +diff --git a/arch/arm/boot/dts/exynos5410.dtsi b/arch/arm/boot/dts/exynos5410.dtsi +index bb59fee072c0..132b83052439 100644 +--- a/arch/arm/boot/dts/exynos5410.dtsi ++++ b/arch/arm/boot/dts/exynos5410.dtsi +@@ -314,6 +314,8 @@ + &usbdrd3_0 { + clocks = <&clock CLK_USBD300>; + clock-names = "usbdrd30"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&usb3_0_oc>, <&usb3_0_vbusctrl>; + }; + + &usbdrd_phy0 { +@@ -325,6 +327,8 @@ + &usbdrd3_1 { + clocks = <&clock CLK_USBD301>; + clock-names = "usbdrd30"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&usb3_1_oc>, <&usb3_1_vbusctrl>; + }; + + &usbdrd_dwc3_1 { +diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts +index e664c33c3c64..4a71bbe1ce54 100644 +--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts ++++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts +@@ -88,7 +88,7 @@ + reg = <0x66>; + + interrupt-parent = <&gpx3>; +- interrupts = <2 IRQ_TYPE_EDGE_FALLING>; ++ interrupts = <2 IRQ_TYPE_LEVEL_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&s2mps11_irq>; + +diff --git a/arch/arm/boot/dts/exynos5422-odroidxu4.dts 
b/arch/arm/boot/dts/exynos5422-odroidxu4.dts +index 2faf88627a48..b45e2a0c3908 100644 +--- a/arch/arm/boot/dts/exynos5422-odroidxu4.dts ++++ b/arch/arm/boot/dts/exynos5422-odroidxu4.dts +@@ -26,7 +26,7 @@ + label = "blue:heartbeat"; + pwms = <&pwm 2 2000000 0>; + pwm-names = "pwm2"; +- max_brightness = <255>; ++ max-brightness = <255>; + linux,default-trigger = "heartbeat"; + }; + }; +diff --git a/arch/arm/boot/dts/exynos54xx-odroidxu-leds.dtsi b/arch/arm/boot/dts/exynos54xx-odroidxu-leds.dtsi +index 0ed30206625c..f547f67f2783 100644 +--- a/arch/arm/boot/dts/exynos54xx-odroidxu-leds.dtsi ++++ b/arch/arm/boot/dts/exynos54xx-odroidxu-leds.dtsi +@@ -25,7 +25,7 @@ + * Green LED is much brighter than the others + * so limit its max brightness + */ +- max_brightness = <127>; ++ max-brightness = <127>; + linux,default-trigger = "mmc0"; + }; + +@@ -33,7 +33,7 @@ + label = "blue:heartbeat"; + pwms = <&pwm 2 2000000 0>; + pwm-names = "pwm2"; +- max_brightness = <255>; ++ max-brightness = <255>; + linux,default-trigger = "heartbeat"; + }; + }; +diff --git a/arch/arm/boot/dts/imx50-evk.dts b/arch/arm/boot/dts/imx50-evk.dts +index 27d763c7a307..4dbd180e72ba 100644 +--- a/arch/arm/boot/dts/imx50-evk.dts ++++ b/arch/arm/boot/dts/imx50-evk.dts +@@ -66,7 +66,7 @@ + MX50_PAD_CSPI_MISO__CSPI_MISO 0x00 + MX50_PAD_CSPI_MOSI__CSPI_MOSI 0x00 + MX50_PAD_CSPI_SS0__GPIO4_11 0xc4 +- MX50_PAD_ECSPI1_MOSI__CSPI_SS1 0xf4 ++ MX50_PAD_ECSPI1_MOSI__GPIO4_13 0x84 + >; + }; + +diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi +index fed72a5f3ffa..4dede1fbfadb 100644 +--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi ++++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi +@@ -307,8 +307,8 @@ + fsl,pins = < + MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1 + MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1 +- MX6QDL_PAD_EIM_D30__UART3_RTS_B 0x1b0b1 +- MX6QDL_PAD_EIM_D31__UART3_CTS_B 0x1b0b1 ++ MX6QDL_PAD_EIM_D31__UART3_RTS_B 0x1b0b1 ++ MX6QDL_PAD_EIM_D30__UART3_CTS_B 0x1b0b1 + >; + }; + +@@ -395,6 +395,7 @@ + &uart3 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart3>; ++ uart-has-rtscts; + status = "disabled"; + }; + +diff --git a/arch/arm/boot/dts/imx6qdl-udoo.dtsi b/arch/arm/boot/dts/imx6qdl-udoo.dtsi +index c96c91d83678..fc4ae2e423bd 100644 +--- a/arch/arm/boot/dts/imx6qdl-udoo.dtsi ++++ b/arch/arm/boot/dts/imx6qdl-udoo.dtsi +@@ -94,7 +94,7 @@ + &fec { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_enet>; +- phy-mode = "rgmii"; ++ phy-mode = "rgmii-id"; + status = "okay"; + }; + +diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi +index 2802c9565b6c..976a75a4eb2c 100644 +--- a/arch/arm/boot/dts/lpc32xx.dtsi ++++ b/arch/arm/boot/dts/lpc32xx.dtsi +@@ -323,9 +323,6 @@ + + clocks = <&xtal_32k>, <&xtal>; + clock-names = "xtal_32k", "xtal"; +- +- assigned-clocks = <&clk LPC32XX_CLK_HCLK_PLL>; +- assigned-clock-rates = <208000000>; + }; + }; + +diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi +index 2008648b8c9f..0a7600d06fb5 100644 +--- a/arch/arm/boot/dts/omap3.dtsi ++++ b/arch/arm/boot/dts/omap3.dtsi +@@ -23,6 +23,9 @@ + i2c0 = &i2c1; + i2c1 = &i2c2; + i2c2 = &i2c3; ++ mmc0 = &mmc1; ++ mmc1 = &mmc2; ++ mmc2 = &mmc3; + serial0 = &uart1; + serial1 = &uart2; + serial2 = &uart3; +diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi +index 4d6584f15b17..7e5a09c3d2a6 100644 +--- a/arch/arm/boot/dts/omap4.dtsi ++++ b/arch/arm/boot/dts/omap4.dtsi +@@ -22,6 +22,11 @@ + i2c1 = &i2c2; + i2c2 = &i2c3; + i2c3 = 
&i2c4; ++ mmc0 = &mmc1; ++ mmc1 = &mmc2; ++ mmc2 = &mmc3; ++ mmc3 = &mmc4; ++ mmc4 = &mmc5; + serial0 = &uart1; + serial1 = &uart2; + serial2 = &uart3; +diff --git a/arch/arm/boot/dts/omap443x.dtsi b/arch/arm/boot/dts/omap443x.dtsi +index fc6a8610c24c..adcf9d141db6 100644 +--- a/arch/arm/boot/dts/omap443x.dtsi ++++ b/arch/arm/boot/dts/omap443x.dtsi +@@ -35,10 +35,12 @@ + }; + + ocp { ++ /* 4430 has only gpio_86 tshut and no talert interrupt */ + bandgap: bandgap@4a002260 { + reg = <0x4a002260 0x4 + 0x4a00232C 0x4>; + compatible = "ti,omap4430-bandgap"; ++ gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>; + + #thermal-sensor-cells = <0>; + }; +diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi +index a76266f242a1..586fe60b9921 100644 +--- a/arch/arm/boot/dts/omap5.dtsi ++++ b/arch/arm/boot/dts/omap5.dtsi +@@ -25,6 +25,11 @@ + i2c2 = &i2c3; + i2c3 = &i2c4; + i2c4 = &i2c5; ++ mmc0 = &mmc1; ++ mmc1 = &mmc2; ++ mmc2 = &mmc3; ++ mmc3 = &mmc4; ++ mmc4 = &mmc5; + serial0 = &uart1; + serial1 = &uart2; + serial2 = &uart3; +diff --git a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi +index 533919e96eae..f22a6b436317 100644 +--- a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi ++++ b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi +@@ -54,18 +54,21 @@ + emac: gem@30000 { + compatible = "cadence,gem"; + reg = <0x30000 0x10000>; ++ interrupt-parent = <&vic0>; + interrupts = <31>; + }; + + dmac1: dmac@40000 { + compatible = "snps,dw-dmac"; + reg = <0x40000 0x10000>; ++ interrupt-parent = <&vic0>; + interrupts = <25>; + }; + + dmac2: dmac@50000 { + compatible = "snps,dw-dmac"; + reg = <0x50000 0x10000>; ++ interrupt-parent = <&vic0>; + interrupts = <26>; + }; + +@@ -243,6 +246,7 @@ + axi2pico@c0000000 { + compatible = "picochip,axi2pico-pc3x2"; + reg = <0xc0000000 0x10000>; ++ interrupt-parent = <&vic0>; + interrupts = <13 14 15 16 17 18 19 20 21>; + }; + }; +diff --git a/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi b/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi +index fb88e76df546..991059869094 100644 +--- a/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi ++++ b/arch/arm/boot/dts/qcom/pmxpoorwills.dtsi +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2019, 2021, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -90,6 +90,16 @@ + }; + }; + ++ pmxpoorwills_tz: qcom,temp-alarm@2400 { ++ compatible = "qcom,qpnp-temp-alarm"; ++ reg = <0x2400 0x100>; ++ interrupts = <0x0 0x24 0x0 IRQ_TYPE_EDGE_RISING>; ++ label = "pmxpoorwills_tz"; ++ qcom,channel-num = <6>; ++ qcom,temp_alarm-vadc = <&pmxpoorwills_vadc>; ++ #thermal-sensor-cells = <0>; ++ }; ++ + pmxpoorwills_vadc: vadc@3100 { + compatible = "qcom,qpnp-vadc-hc"; + reg = <0x3100 0x100>; +@@ -188,3 +198,31 @@ + }; + }; + }; ++ ++&thermal_zones { ++ pmxpoorwills_temp_alarm: pmxpoorwills_tz { ++ polling-delay-passive = <0>; ++ polling-delay = <0>; ++ thermal-governor = "step_wise"; ++ thermal-sensors = <&pmxpoorwills_tz>; ++ wake-capable-sensor; ++ ++ trips { ++ pmxpoorwills_trip0: pmxpoorwills-trip0 { ++ temperature = <105000>; ++ hysteresis = <0>; ++ type = "passive"; ++ }; ++ pmxpoorwills_trip1: pmxpoorwills-trip1 { ++ temperature = <125000>; ++ hysteresis = <0>; ++ type = "passive"; ++ }; ++ pmxpoorwills_trip2: pmxpoorwills-trip2 { ++ temperature = <145000>; ++ hysteresis = <0>; ++ type = "critical"; ++ }; ++ }; ++ }; ++}; +diff --git a/arch/arm/boot/dts/qcom/sa415m-ccard-pcie-ep.dts b/arch/arm/boot/dts/qcom/sa415m-ccard-pcie-ep.dts +index 36fad2ae98ac..07d4494e764f 100644 +--- a/arch/arm/boot/dts/qcom/sa415m-ccard-pcie-ep.dts ++++ b/arch/arm/boot/dts/qcom/sa415m-ccard-pcie-ep.dts +@@ -46,6 +46,12 @@ + qcom,mhi-event-ring-id-limits = <7 11>; /* start and end */ + }; + ++&emac_hw { ++ emac_emb_smmu: emac_emb_smmu { ++ qcom,smmu-s1-bypass; ++ }; ++}; ++ + &cnss_pcie { + status = "disabled"; + }; +diff --git a/arch/arm/boot/dts/qcom/sa415m-ccard-usb-ep.dts b/arch/arm/boot/dts/qcom/sa415m-ccard-usb-ep.dts +index cc091b1d0d54..5d4fc1f55d41 100644 +--- a/arch/arm/boot/dts/qcom/sa415m-ccard-usb-ep.dts ++++ b/arch/arm/boot/dts/qcom/sa415m-ccard-usb-ep.dts +@@ -43,3 +43,7 @@ + &usb { + qcom,gadget-imod-val = <250>; + }; ++ ++&sdx_ext_ipc { ++ qcom,default-policy-nop; ++}; +diff --git a/arch/arm/boot/dts/qcom/sa415m-ccard.dtsi b/arch/arm/boot/dts/qcom/sa415m-ccard.dtsi +index 38cb8b9dd074..c5d8a25eb8b3 100644 +--- a/arch/arm/boot/dts/qcom/sa415m-ccard.dtsi ++++ b/arch/arm/boot/dts/qcom/sa415m-ccard.dtsi +@@ -149,7 +149,6 @@ + + sdx_ext_ipc: qcom,sdx_ext_ipc { + compatible = "qcom,sdx-ext-ipc"; +- qcom,default-policy-nop; + qcom,ap2mdm-status-gpio = <&tlmm 95 0x00>; + qcom,mdm2ap-status-gpio = <&tlmm 36 0x00>; + qcom,mdm2ap-status2-gpio = <&tlmm 88 0x00>; +@@ -465,7 +464,6 @@ + /delete-property/ vreg_rgmii-supply; + pinctrl-names = "default"; + pinctrl-0 = <&vreg_rgmii_off_default>; +- qcom,phy-reset-delay-msecs = <10>; + }; + + &vreg_rgmii_io_pads { +diff --git a/arch/arm/boot/dts/qcom/sa415m-ttp-pcie-ep.dts b/arch/arm/boot/dts/qcom/sa415m-ttp-pcie-ep.dts +index 619a60169a10..00bd10ed27b1 100644 +--- a/arch/arm/boot/dts/qcom/sa415m-ttp-pcie-ep.dts ++++ b/arch/arm/boot/dts/qcom/sa415m-ttp-pcie-ep.dts +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2019, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -58,6 +58,12 @@ + status = "disabled"; + }; + ++&emac_hw { ++ emac_emb_smmu: emac_emb_smmu { ++ qcom,smmu-s1-bypass; ++ }; ++}; ++ + &mhi_device { + status = "okay"; + }; +diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dtsi +index df7f8fa2489c..2d64b3910818 100644 +--- a/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dtsi ++++ b/arch/arm/boot/dts/qcom/sdxpoorwills-mtp.dtsi +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2018-2019, 2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -61,7 +61,7 @@ + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <4>; +- qcom,hw-settle-time = <2>; ++ qcom,hw-settle-time = <8>; + qcom,fast-avg-setup = <0>; + qcom,vadc-thermal-node; + }; +diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pm.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pm.dtsi +index 2c227b3cbd44..7e6f46f268a6 100644 +--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pm.dtsi ++++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pm.dtsi +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2018, 2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -103,5 +103,6 @@ + qcom,rpmh-master-stats@b211200 { + compatible = "qcom,rpmh-master-stats-v1"; + reg = <0xb211200 0x60>; ++ qcom,use-alt-unit = <3>; + }; + }; +diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi +index 2c6e485ef1e0..3252ff01b87b 100644 +--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi ++++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -85,7 +85,7 @@ + compatible = "shared-dma-pool"; + reusable; + alignment = <0x400000>; +- size = <0x400000>; ++ size = <0x700000>; + status = "disabled"; + }; + +@@ -1397,6 +1397,8 @@ + gdsc_emac-supply = <&gdsc_emac>; + mboxes = <&qmp_aop 0>; + mbox-names = "emac_aop"; ++ ipa-dma-rx-desc-cnt = <256>; ++ ipa-dma-tx-desc-cnt = <256>; + io-macro-info { + io-macro-bypass-mode = <0>; + io-interface = "rgmii"; +diff --git a/arch/arm/boot/dts/r8a7779-marzen.dts b/arch/arm/boot/dts/r8a7779-marzen.dts +index 541678df90a9..50ec24bb1d79 100644 +--- a/arch/arm/boot/dts/r8a7779-marzen.dts ++++ b/arch/arm/boot/dts/r8a7779-marzen.dts +@@ -136,7 +136,7 @@ + status = "okay"; + + clocks = <&mstp1_clks R8A7779_CLK_DU>, <&x3_clk>; +- clock-names = "du", "dclkin.0"; ++ clock-names = "du.0", "dclkin.0"; + + ports { + port@0 { +diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi +index 6c6d4893e92d..3fb0a8d2530b 100644 +--- a/arch/arm/boot/dts/r8a7779.dtsi ++++ b/arch/arm/boot/dts/r8a7779.dtsi +@@ -431,6 +431,7 @@ + reg = <0 0xfff80000 0 0x40000>; + interrupts = ; + clocks = <&mstp1_clks R8A7779_CLK_DU>; ++ clock-names = "du.0"; + power-domains = <&sysc R8A7779_PD_ALWAYS_ON>; + status = "disabled"; + +diff --git a/arch/arm/boot/dts/rk3036-kylin.dts b/arch/arm/boot/dts/rk3036-kylin.dts +index 1df1557a46c3..3080915cfa5f 100644 +--- a/arch/arm/boot/dts/rk3036-kylin.dts ++++ b/arch/arm/boot/dts/rk3036-kylin.dts +@@ -426,7 +426,7 @@ + }; + }; + +- sleep { ++ suspend { + global_pwroff: global-pwroff { + rockchip,pins = <2 7 RK_FUNC_1 &pcfg_pull_none>; + }; +diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi +index 30f1384f619b..7b727d738b69 100644 +--- a/arch/arm/boot/dts/rk3288.dtsi ++++ b/arch/arm/boot/dts/rk3288.dtsi +@@ -735,7 +735,7 @@ + * *_HDMI HDMI + * *_MIPI_* MIPI + */ +- pd_vio@RK3288_PD_VIO { ++ power-domain@RK3288_PD_VIO { + reg = ; + clocks = <&cru ACLK_IEP>, + <&cru ACLK_ISP>, +@@ -768,7 +768,7 @@ + * Note: The following 3 are HEVC(H.265) clocks, + * and on the ACLK_HEVC_NIU (NOC). + */ +- pd_hevc@RK3288_PD_HEVC { ++ power-domain@RK3288_PD_HEVC { + reg = ; + clocks = <&cru ACLK_HEVC>, + <&cru SCLK_HEVC_CABAC>, +@@ -780,7 +780,7 @@ + * (video endecoder & decoder) clocks that on the + * ACLK_VCODEC_NIU and HCLK_VCODEC_NIU (NOC). + */ +- pd_video@RK3288_PD_VIDEO { ++ power-domain@RK3288_PD_VIDEO { + reg = ; + clocks = <&cru ACLK_VCODEC>, + <&cru HCLK_VCODEC>; +@@ -790,7 +790,7 @@ + * Note: ACLK_GPU is the GPU clock, + * and on the ACLK_GPU_NIU (NOC). 
+ */ +- pd_gpu@RK3288_PD_GPU { ++ power-domain@RK3288_PD_GPU { + reg = ; + clocks = <&cru ACLK_GPU>; + }; +@@ -1278,7 +1278,7 @@ + drive-strength = <12>; + }; + +- sleep { ++ suspend { + global_pwroff: global-pwroff { + rockchip,pins = <0 0 RK_FUNC_1 &pcfg_pull_none>; + }; +diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi +index 0c10ba517cd0..798f676041e0 100644 +--- a/arch/arm/boot/dts/s5pv210.dtsi ++++ b/arch/arm/boot/dts/s5pv210.dtsi +@@ -101,19 +101,16 @@ + }; + + clocks: clock-controller@e0100000 { +- compatible = "samsung,s5pv210-clock", "simple-bus"; ++ compatible = "samsung,s5pv210-clock"; + reg = <0xe0100000 0x10000>; + clock-names = "xxti", "xusbxti"; + clocks = <&xxti>, <&xusbxti>; + #clock-cells = <1>; +- #address-cells = <1>; +- #size-cells = <1>; +- ranges; ++ }; + +- pmu_syscon: syscon@e0108000 { +- compatible = "samsung-s5pv210-pmu", "syscon"; +- reg = <0xe0108000 0x8000>; +- }; ++ pmu_syscon: syscon@e0108000 { ++ compatible = "samsung-s5pv210-pmu", "syscon"; ++ reg = <0xe0108000 0x8000>; + }; + + pinctrl0: pinctrl@e0200000 { +@@ -129,35 +126,28 @@ + }; + }; + +- amba { +- #address-cells = <1>; +- #size-cells = <1>; +- compatible = "simple-bus"; +- ranges; +- +- pdma0: dma@e0900000 { +- compatible = "arm,pl330", "arm,primecell"; +- reg = <0xe0900000 0x1000>; +- interrupt-parent = <&vic0>; +- interrupts = <19>; +- clocks = <&clocks CLK_PDMA0>; +- clock-names = "apb_pclk"; +- #dma-cells = <1>; +- #dma-channels = <8>; +- #dma-requests = <32>; +- }; ++ pdma0: dma@e0900000 { ++ compatible = "arm,pl330", "arm,primecell"; ++ reg = <0xe0900000 0x1000>; ++ interrupt-parent = <&vic0>; ++ interrupts = <19>; ++ clocks = <&clocks CLK_PDMA0>; ++ clock-names = "apb_pclk"; ++ #dma-cells = <1>; ++ #dma-channels = <8>; ++ #dma-requests = <32>; ++ }; + +- pdma1: dma@e0a00000 { +- compatible = "arm,pl330", "arm,primecell"; +- reg = <0xe0a00000 0x1000>; +- interrupt-parent = <&vic0>; +- interrupts = <20>; +- clocks = <&clocks CLK_PDMA1>; +- clock-names = "apb_pclk"; +- #dma-cells = <1>; +- #dma-channels = <8>; +- #dma-requests = <32>; +- }; ++ pdma1: dma@e0a00000 { ++ compatible = "arm,pl330", "arm,primecell"; ++ reg = <0xe0a00000 0x1000>; ++ interrupt-parent = <&vic0>; ++ interrupts = <20>; ++ clocks = <&clocks CLK_PDMA1>; ++ clock-names = "apb_pclk"; ++ #dma-cells = <1>; ++ #dma-channels = <8>; ++ #dma-requests = <32>; + }; + + spi0: spi@e1300000 { +@@ -230,43 +220,36 @@ + status = "disabled"; + }; + +- audio-subsystem { +- compatible = "samsung,s5pv210-audss", "simple-bus"; +- #address-cells = <1>; +- #size-cells = <1>; +- ranges; +- +- clk_audss: clock-controller@eee10000 { +- compatible = "samsung,s5pv210-audss-clock"; +- reg = <0xeee10000 0x1000>; +- clock-names = "hclk", "xxti", +- "fout_epll", +- "sclk_audio0"; +- clocks = <&clocks DOUT_HCLKP>, <&xxti>, +- <&clocks FOUT_EPLL>, +- <&clocks SCLK_AUDIO0>; +- #clock-cells = <1>; +- }; ++ clk_audss: clock-controller@eee10000 { ++ compatible = "samsung,s5pv210-audss-clock"; ++ reg = <0xeee10000 0x1000>; ++ clock-names = "hclk", "xxti", ++ "fout_epll", ++ "sclk_audio0"; ++ clocks = <&clocks DOUT_HCLKP>, <&xxti>, ++ <&clocks FOUT_EPLL>, ++ <&clocks SCLK_AUDIO0>; ++ #clock-cells = <1>; ++ }; + +- i2s0: i2s@eee30000 { +- compatible = "samsung,s5pv210-i2s"; +- reg = <0xeee30000 0x1000>; +- interrupt-parent = <&vic2>; +- interrupts = <16>; +- dma-names = "rx", "tx", "tx-sec"; +- dmas = <&pdma1 9>, <&pdma1 10>, <&pdma1 11>; +- clock-names = "iis", +- "i2s_opclk0", +- "i2s_opclk1"; +- clocks = <&clk_audss CLK_I2S>, +- 
<&clk_audss CLK_I2S>, +- <&clk_audss CLK_DOUT_AUD_BUS>; +- samsung,idma-addr = <0xc0010000>; +- pinctrl-names = "default"; +- pinctrl-0 = <&i2s0_bus>; +- #sound-dai-cells = <0>; +- status = "disabled"; +- }; ++ i2s0: i2s@eee30000 { ++ compatible = "samsung,s5pv210-i2s"; ++ reg = <0xeee30000 0x1000>; ++ interrupt-parent = <&vic2>; ++ interrupts = <16>; ++ dma-names = "rx", "tx", "tx-sec"; ++ dmas = <&pdma1 9>, <&pdma1 10>, <&pdma1 11>; ++ clock-names = "iis", ++ "i2s_opclk0", ++ "i2s_opclk1"; ++ clocks = <&clk_audss CLK_I2S>, ++ <&clk_audss CLK_I2S>, ++ <&clk_audss CLK_DOUT_AUD_BUS>; ++ samsung,idma-addr = <0xc0010000>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&i2s0_bus>; ++ #sound-dai-cells = <0>; ++ status = "disabled"; + }; + + i2s1: i2s@e2100000 { +diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi +index de0e189711f6..8c94bf57a38c 100644 +--- a/arch/arm/boot/dts/sama5d4.dtsi ++++ b/arch/arm/boot/dts/sama5d4.dtsi +@@ -1371,7 +1371,7 @@ + 0xffffffff 0x3ffcfe7c 0x1c010101 /* pioA */ + 0x7fffffff 0xfffccc3a 0x3f00cc3a /* pioB */ + 0xffffffff 0x3ff83fff 0xff00ffff /* pioC */ +- 0x0003ff00 0x8002a800 0x00000000 /* pioD */ ++ 0xb003ff00 0x8002a800 0x00000000 /* pioD */ + 0xffffffff 0x7fffffff 0x76fff1bf /* pioE */ + >; + +diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi +index f0702d8063d9..9a2db3df5ede 100644 +--- a/arch/arm/boot/dts/socfpga.dtsi ++++ b/arch/arm/boot/dts/socfpga.dtsi +@@ -676,7 +676,7 @@ + }; + }; + +- L2: l2-cache@fffef000 { ++ L2: cache-controller@fffef000 { + compatible = "arm,pl310-cache"; + reg = <0xfffef000 0x1000>; + interrupts = <0 38 0x04>; +diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi +index f520cbff5e1c..342ae7ef9f08 100644 +--- a/arch/arm/boot/dts/socfpga_arria10.dtsi ++++ b/arch/arm/boot/dts/socfpga_arria10.dtsi +@@ -567,7 +567,7 @@ + reg = <0xffcfb100 0x80>; + }; + +- L2: l2-cache@fffff000 { ++ L2: cache-controller@fffff000 { + compatible = "arm,pl310-cache"; + reg = <0xfffff000 0x1000>; + interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>; +@@ -710,7 +710,7 @@ + timer3: timer3@ffd00100 { + compatible = "snps,dw-apb-timer"; + interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>; +- reg = <0xffd01000 0x100>; ++ reg = <0xffd00100 0x100>; + clocks = <&l4_sys_free_clk>; + clock-names = "timer"; + }; +diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi +index 1077ceebb2d6..87494773f409 100644 +--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi ++++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi +@@ -755,14 +755,14 @@ + status = "disabled"; + }; + +- vica: intc@10140000 { ++ vica: interrupt-controller@10140000 { + compatible = "arm,versatile-vic"; + interrupt-controller; + #interrupt-cells = <1>; + reg = <0x10140000 0x20>; + }; + +- vicb: intc@10140020 { ++ vicb: interrupt-controller@10140020 { + compatible = "arm,versatile-vic"; + interrupt-controller; + #interrupt-cells = <1>; +diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi +index 336ee4fb587d..64dc50afc385 100644 +--- a/arch/arm/boot/dts/stm32f429.dtsi ++++ b/arch/arm/boot/dts/stm32f429.dtsi +@@ -334,7 +334,7 @@ + }; + }; + +- rcc: rcc@40023810 { ++ rcc: rcc@40023800 { + #reset-cells = <1>; + #clock-cells = <2>; + compatible = "st,stm32f42xx-rcc", "st,stm32-rcc"; +diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi +index 7e7dfc2b43db..d1af56d2f25e 100644 +--- a/arch/arm/boot/dts/sun4i-a10.dtsi ++++ 
b/arch/arm/boot/dts/sun4i-a10.dtsi +@@ -144,7 +144,7 @@ + trips { + cpu_alert0: cpu_alert0 { + /* milliCelsius */ +- temperature = <850000>; ++ temperature = <85000>; + hysteresis = <2000>; + type = "passive"; + }; +diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts +index 00d7d28e86f0..4633b79bf5ea 100644 +--- a/arch/arm/boot/dts/versatile-ab.dts ++++ b/arch/arm/boot/dts/versatile-ab.dts +@@ -154,16 +154,15 @@ + #size-cells = <1>; + ranges; + +- vic: intc@10140000 { ++ vic: interrupt-controller@10140000 { + compatible = "arm,versatile-vic"; + interrupt-controller; + #interrupt-cells = <1>; + reg = <0x10140000 0x1000>; +- clear-mask = <0xffffffff>; + valid-mask = <0xffffffff>; + }; + +- sic: intc@10003000 { ++ sic: interrupt-controller@10003000 { + compatible = "arm,versatile-sic"; + interrupt-controller; + #interrupt-cells = <1>; +diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts +index 33a8eb28374e..3a23164c2c2d 100644 +--- a/arch/arm/boot/dts/versatile-pb.dts ++++ b/arch/arm/boot/dts/versatile-pb.dts +@@ -6,7 +6,7 @@ + + amba { + /* The Versatile PB is using more SIC IRQ lines than the AB */ +- sic: intc@10003000 { ++ sic: interrupt-controller@10003000 { + clear-mask = <0xffffffff>; + /* + * Valid interrupt lines mask according to +diff --git a/arch/arm/configs/msm8909-perf_defconfig b/arch/arm/configs/msm8909-perf_defconfig +old mode 100755 +new mode 100644 +index c9cf52896037..912ad9e8c494 +--- a/arch/arm/configs/msm8909-perf_defconfig ++++ b/arch/arm/configs/msm8909-perf_defconfig +@@ -16,12 +16,9 @@ CONFIG_IKCONFIG=y + CONFIG_IKCONFIG_PROC=y + CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 + CONFIG_CGROUP_FREEZER=y +-CONFIG_CGROUP_DEVICE=y + CONFIG_CPUSETS=y + CONFIG_CGROUP_CPUACCT=y + CONFIG_CGROUP_SCHEDTUNE=y +-CONFIG_MEMCG=y +-CONFIG_MEMCG_SWAP=y + CONFIG_RT_GROUP_SCHED=y + CONFIG_CGROUP_BPF=y + CONFIG_SCHED_CORE_CTL=y +@@ -62,9 +59,11 @@ CONFIG_PREEMPT=y + CONFIG_AEABI=y + CONFIG_HIGHMEM=y + CONFIG_ARM_MODULE_PLTS=y ++CONFIG_KSM=y + CONFIG_CMA=y + CONFIG_ZSMALLOC=y + CONFIG_BALANCE_ANON_FILE_RECLAIM=y ++CONFIG_PROCESS_RECLAIM=y + CONFIG_SECCOMP=y + CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y + CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +@@ -223,7 +222,6 @@ CONFIG_IPC_ROUTER=y + CONFIG_IPC_ROUTER_SECURITY=y + CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y + CONFIG_DMA_CMA=y +-CONFIG_CMA_SIZE_MBYTES=8 + CONFIG_ZRAM=y + CONFIG_BLK_DEV_LOOP=y + CONFIG_BLK_DEV_RAM=y +@@ -231,14 +229,12 @@ CONFIG_BLK_DEV_RAM_SIZE=8192 + CONFIG_HDCP_QSEECOM=y + CONFIG_QSEECOM=y + CONFIG_UID_SYS_STATS=y +-CONFIG_MEMORY_STATE_TIME=y + CONFIG_MD=y + CONFIG_BLK_DEV_MD=y + CONFIG_MD_LINEAR=y + CONFIG_BLK_DEV_DM=y + CONFIG_DM_CRYPT=y + CONFIG_DM_REQ_CRYPT=y +-CONFIG_DM_DEFAULT_KEY=y + CONFIG_DM_UEVENT=y + CONFIG_DM_VERITY=y + CONFIG_DM_VERITY_FEC=y +@@ -266,8 +262,6 @@ CONFIG_INPUT_EVDEV=y + CONFIG_KEYBOARD_GPIO=y + CONFIG_KEYBOARD_MATRIX=y + # CONFIG_INPUT_MOUSE is not set +-CONFIG_INPUT_JOYSTICK=y +-CONFIG_JOYSTICK_XPAD=y + CONFIG_INPUT_TOUCHSCREEN=y + CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y + CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y +@@ -371,7 +365,8 @@ CONFIG_SND=y + CONFIG_SND_DYNAMIC_MINORS=y + CONFIG_SND_SOC=y + CONFIG_UHID=y +-CONFIG_USB_HIDDEV=y ++CONFIG_HID_MULTITOUCH=y ++CONFIG_USB=y + CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + CONFIG_USB_MON=y + CONFIG_USB_EHCI_HCD=y +@@ -407,7 +402,6 @@ CONFIG_MMC_BLOCK_DEFERRED_RESUME=y + CONFIG_MMC_SDHCI=y + CONFIG_MMC_SDHCI_PLTFM=y + CONFIG_MMC_SDHCI_MSM=y +-CONFIG_MMC_SDHCI_MSM_ICE=y + CONFIG_LEDS_CLASS_FLASH=y + 
CONFIG_LEDS_QPNP=y + CONFIG_LEDS_MSM_GPIO_FLASH=y +@@ -422,6 +416,7 @@ CONFIG_UIO_MSM_SHAREDMEM=y + CONFIG_STAGING=y + CONFIG_ASHMEM=y + CONFIG_ANDROID_LOW_MEMORY_KILLER=y ++CONFIG_ANDROID_LMK_NOTIFY_TRIGGER=y + CONFIG_ION=y + CONFIG_ION_MSM=y + CONFIG_IPA=y +@@ -466,6 +461,7 @@ CONFIG_MSM_PIL_SSR_GENERIC=y + CONFIG_MSM_PIL_MSS_QDSP6V5=y + CONFIG_MSM_EVENT_TIMER=y + CONFIG_MSM_AVTIMER=y ++CONFIG_MSM_PM=y + CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y + CONFIG_MEM_SHARE_QMI_SERVICE=y + CONFIG_MSM_BAM_DMUX=y +@@ -479,19 +475,13 @@ CONFIG_PWM=y + CONFIG_PWM_QPNP=y + CONFIG_QCOM_SHOW_RESUME_IRQ=y + CONFIG_QTI_MPM=y ++CONFIG_PHY_QCOM_UFS=y + CONFIG_ANDROID=y + CONFIG_ANDROID_BINDER_IPC=y + CONFIG_ANDROID_BINDER_IPC_32BIT=y +-CONFIG_STM=y + CONFIG_SENSORS_SSC=y + CONFIG_EXT4_FS=y + CONFIG_EXT4_FS_SECURITY=y +-CONFIG_EXT4_ENCRYPTION=y +-CONFIG_EXT4_FS_ENCRYPTION=y +-CONFIG_EXT4_FS_ICE_ENCRYPTION=y +-CONFIG_F2FS_FS=y +-CONFIG_F2FS_FS_SECURITY=y +-CONFIG_F2FS_FS_ENCRYPTION=y + CONFIG_FANOTIFY=y + CONFIG_QUOTA=y + CONFIG_QUOTA_NETLINK_INTERFACE=y +@@ -523,11 +513,21 @@ CONFIG_QCOM_RTB=y + CONFIG_QCOM_RTB_SEPARATE_CPUS=y + CONFIG_CPU_FREQ_SWITCH_PROFILER=y + CONFIG_LKDTM=y +-CONFIG_MEMTEST=y + CONFIG_PANIC_ON_DATA_CORRUPTION=y + CONFIG_PID_IN_CONTEXTIDR=y + CONFIG_CORESIGHT=y +-CONFIG_PFK=y ++CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y ++CONFIG_CORESIGHT_SINK_TPIU=y ++CONFIG_CORESIGHT_SOURCE_ETM3X=y ++CONFIG_CORESIGHT_REMOTE_ETM=y ++CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 ++CONFIG_CORESIGHT_QCOM_REPLICATOR=y ++CONFIG_CORESIGHT_DBGUI=y ++CONFIG_CORESIGHT_STM=y ++CONFIG_CORESIGHT_CTI=y ++CONFIG_CORESIGHT_EVENT=y ++CONFIG_CORESIGHT_HWEVENT=y ++CONFIG_CORESIGHT_DUMMY=y + CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y + CONFIG_SECURITY=y + CONFIG_LSM_MMAP_MIN_ADDR=4096 +@@ -536,6 +536,7 @@ CONFIG_SECURITY_SELINUX=y + CONFIG_SECURITY_SMACK=y + CONFIG_SECURITY_APPARMOR=y + CONFIG_CRYPTO_XCBC=y ++CONFIG_CRYPTO_CRC32=y + CONFIG_CRYPTO_MD4=y + CONFIG_CRYPTO_TWOFISH=y + CONFIG_CRYPTO_ANSI_CPRNG=y +diff --git a/arch/arm/configs/msm8909_defconfig b/arch/arm/configs/msm8909_defconfig +index b9df76428a96..18e5534652b5 100644 +--- a/arch/arm/configs/msm8909_defconfig ++++ b/arch/arm/configs/msm8909_defconfig +@@ -17,12 +17,9 @@ CONFIG_IKCONFIG_PROC=y + CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 + CONFIG_CGROUP_DEBUG=y + CONFIG_CGROUP_FREEZER=y +-CONFIG_CGROUP_DEVICE=y + CONFIG_CPUSETS=y + CONFIG_CGROUP_CPUACCT=y + CONFIG_CGROUP_SCHEDTUNE=y +-CONFIG_MEMCG=y +-CONFIG_MEMCG_SWAP=y + CONFIG_RT_GROUP_SCHED=y + CONFIG_CGROUP_BPF=y + CONFIG_SCHED_CORE_CTL=y +@@ -38,6 +35,7 @@ CONFIG_EMBEDDED=y + CONFIG_PROFILING=y + CONFIG_OPROFILE=m + CONFIG_KPROBES=y ++CONFIG_CC_STACKPROTECTOR_REGULAR=y + CONFIG_ARCH_MMAP_RND_BITS=16 + CONFIG_MODULES=y + CONFIG_MODULE_UNLOAD=y +@@ -55,10 +53,12 @@ CONFIG_PREEMPT=y + CONFIG_AEABI=y + CONFIG_HIGHMEM=y + CONFIG_ARM_MODULE_PLTS=y ++CONFIG_KSM=y + CONFIG_CMA=y + CONFIG_CMA_DEBUGFS=y + CONFIG_ZSMALLOC=y + CONFIG_BALANCE_ANON_FILE_RECLAIM=y ++CONFIG_PROCESS_RECLAIM=y + CONFIG_SECCOMP=y + CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y + CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +@@ -219,7 +219,6 @@ CONFIG_IPC_ROUTER=y + CONFIG_IPC_ROUTER_SECURITY=y + CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y + CONFIG_DMA_CMA=y +-CONFIG_CMA_SIZE_MBYTES=8 + CONFIG_ZRAM=y + CONFIG_BLK_DEV_LOOP=y + CONFIG_BLK_DEV_RAM=y +@@ -235,7 +234,6 @@ CONFIG_BLK_DEV_DM=y + CONFIG_DM_DEBUG=y + CONFIG_DM_CRYPT=y + CONFIG_DM_REQ_CRYPT=y +-CONFIG_DM_DEFAULT_KEY=y + CONFIG_DM_UEVENT=y + CONFIG_DM_VERITY=y + CONFIG_DM_VERITY_FEC=y +@@ -264,8 +262,6 @@ CONFIG_INPUT_EVBUG=y + 
CONFIG_KEYBOARD_GPIO=y + CONFIG_KEYBOARD_MATRIX=y + # CONFIG_INPUT_MOUSE is not set +-CONFIG_INPUT_JOYSTICK=y +-CONFIG_JOYSTICK_XPAD=y + CONFIG_INPUT_TOUCHSCREEN=y + CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y + CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y +@@ -374,7 +370,8 @@ CONFIG_SND=y + CONFIG_SND_DYNAMIC_MINORS=y + CONFIG_SND_SOC=y + CONFIG_UHID=y +-CONFIG_USB_HIDDEV=y ++CONFIG_HID_MULTITOUCH=y ++CONFIG_USB=y + CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + CONFIG_USB_MON=y + CONFIG_USB_EHCI_HCD=y +@@ -413,7 +410,6 @@ CONFIG_MMC_TEST=y + CONFIG_MMC_SDHCI=y + CONFIG_MMC_SDHCI_PLTFM=y + CONFIG_MMC_SDHCI_MSM=y +-CONFIG_MMC_SDHCI_MSM_ICE=y + CONFIG_LEDS_CLASS_FLASH=y + CONFIG_LEDS_QPNP=y + CONFIG_LEDS_MSM_GPIO_FLASH=y +@@ -428,6 +424,7 @@ CONFIG_UIO_MSM_SHAREDMEM=y + CONFIG_STAGING=y + CONFIG_ASHMEM=y + CONFIG_ANDROID_LOW_MEMORY_KILLER=y ++CONFIG_ANDROID_LMK_NOTIFY_TRIGGER=y + CONFIG_ION=y + CONFIG_ION_MSM=y + CONFIG_IPA=y +@@ -473,6 +470,7 @@ CONFIG_MSM_PIL_SSR_GENERIC=y + CONFIG_MSM_PIL_MSS_QDSP6V5=y + CONFIG_MSM_EVENT_TIMER=y + CONFIG_MSM_AVTIMER=y ++CONFIG_MSM_PM=y + CONFIG_QTI_RPM_STATS_LOG=y + CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y + CONFIG_MEM_SHARE_QMI_SERVICE=y +@@ -487,20 +485,14 @@ CONFIG_PWM=y + CONFIG_PWM_QPNP=y + CONFIG_QCOM_SHOW_RESUME_IRQ=y + CONFIG_QTI_MPM=y ++CONFIG_PHY_QCOM_UFS=y + CONFIG_ANDROID=y + CONFIG_ANDROID_BINDER_IPC=y + CONFIG_ANDROID_BINDER_IPC_32BIT=y +-CONFIG_STM=y + CONFIG_SENSORS_SSC=y + CONFIG_MSM_TZ_LOG=y + CONFIG_EXT4_FS=y + CONFIG_EXT4_FS_SECURITY=y +-CONFIG_EXT4_ENCRYPTION=y +-CONFIG_EXT4_FS_ENCRYPTION=y +-CONFIG_EXT4_FS_ICE_ENCRYPTION=y +-CONFIG_F2FS_FS=y +-CONFIG_F2FS_FS_SECURITY=y +-CONFIG_F2FS_FS_ENCRYPTION=y + CONFIG_FANOTIFY=y + CONFIG_QUOTA=y + CONFIG_QUOTA_NETLINK_INTERFACE=y +@@ -568,7 +560,18 @@ CONFIG_DEBUG_USER=y + CONFIG_PID_IN_CONTEXTIDR=y + CONFIG_DEBUG_SET_MODULE_RONX=y + CONFIG_CORESIGHT=y +-CONFIG_PFK=y ++CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y ++CONFIG_CORESIGHT_SINK_TPIU=y ++CONFIG_CORESIGHT_SOURCE_ETM3X=y ++CONFIG_CORESIGHT_REMOTE_ETM=y ++CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 ++CONFIG_CORESIGHT_QCOM_REPLICATOR=y ++CONFIG_CORESIGHT_DBGUI=y ++CONFIG_CORESIGHT_STM=y ++CONFIG_CORESIGHT_CTI=y ++CONFIG_CORESIGHT_EVENT=y ++CONFIG_CORESIGHT_HWEVENT=y ++CONFIG_CORESIGHT_DUMMY=y + CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y + CONFIG_SECURITY=y + CONFIG_LSM_MMAP_MIN_ADDR=4096 +@@ -577,6 +580,7 @@ CONFIG_SECURITY_SELINUX=y + CONFIG_SECURITY_SMACK=y + CONFIG_SECURITY_APPARMOR=y + CONFIG_CRYPTO_XCBC=y ++CONFIG_CRYPTO_CRC32=y + CONFIG_CRYPTO_MD4=y + CONFIG_CRYPTO_TWOFISH=y + CONFIG_CRYPTO_ANSI_CPRNG=y +diff --git a/arch/arm/configs/msm8937go-perf_defconfig b/arch/arm/configs/msm8937go-perf_defconfig +old mode 100755 +new mode 100644 +index cdfc9d5f0a51..e486da9e8dd5 +--- a/arch/arm/configs/msm8937go-perf_defconfig ++++ b/arch/arm/configs/msm8937go-perf_defconfig +@@ -36,6 +36,7 @@ CONFIG_BLK_DEV_INITRD=y + # CONFIG_RD_XZ is not set + # CONFIG_RD_LZO is not set + # CONFIG_RD_LZ4 is not set ++CONFIG_CC_OPTIMIZE_FOR_SIZE=y + CONFIG_KALLSYMS_ALL=y + # CONFIG_BASE_FULL is not set + CONFIG_BPF_SYSCALL=y +@@ -58,29 +59,22 @@ CONFIG_ARCH_QCOM=y + CONFIG_ARCH_MSM8937=y + CONFIG_ARCH_MSM8917=y + CONFIG_ARCH_QM215=y +-CONFIG_ARCH_SDM439=y +-CONFIG_ARCH_SDM429=y + # CONFIG_VDSO is not set + CONFIG_SMP=y + CONFIG_SCHED_MC=y +-CONFIG_NR_CPUS=8 + CONFIG_ARM_PSCI=y + CONFIG_PREEMPT=y + CONFIG_AEABI=y + CONFIG_HIGHMEM=y + CONFIG_ARM_MODULE_PLTS=y ++CONFIG_KSM=y + CONFIG_CMA=y +-CONFIG_CMA_DEBUGFS=y + CONFIG_ZSMALLOC=y + CONFIG_BALANCE_ANON_FILE_RECLAIM=y + 
CONFIG_PROCESS_RECLAIM=y + CONFIG_SECCOMP=y + CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y + CONFIG_CPU_FREQ=y +-CONFIG_CPU_FREQ_GOV_POWERSAVE=y +-CONFIG_CPU_FREQ_GOV_USERSPACE=y +-CONFIG_CPU_FREQ_GOV_ONDEMAND=y +-CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y + CONFIG_CPU_BOOST=y + CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y + CONFIG_CPU_FREQ_MSM=y +@@ -144,7 +138,6 @@ CONFIG_NETFILTER_XT_TARGET_CONNMARK=y + CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y + CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y + CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y +-CONFIG_NETFILTER_XT_TARGET_LOG=y + CONFIG_NETFILTER_XT_TARGET_MARK=y + CONFIG_NETFILTER_XT_TARGET_NFLOG=y + CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +@@ -209,7 +202,6 @@ CONFIG_IP6_NF_RAW=y + CONFIG_BRIDGE_NF_EBTABLES=y + CONFIG_BRIDGE_EBT_BROUTE=y + CONFIG_L2TP=y +-CONFIG_L2TP_DEBUGFS=y + CONFIG_L2TP_V3=y + CONFIG_L2TP_IP=y + CONFIG_L2TP_ETH=y +@@ -232,7 +224,7 @@ CONFIG_NET_EMATCH_TEXT=y + CONFIG_NET_CLS_ACT=y + CONFIG_RMNET_DATA=y + CONFIG_RMNET_DATA_FC=y +-CONFIG_RMNET_DATA_DEBUG_PKT=y ++CONFIG_SOCKEV_NLMCAST=y + CONFIG_BT=y + # CONFIG_BT_BREDR is not set + # CONFIG_BT_LE is not set +@@ -245,7 +237,6 @@ CONFIG_NFC_NQ=y + CONFIG_IPC_ROUTER=y + CONFIG_IPC_ROUTER_SECURITY=y + CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +-CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y + CONFIG_DMA_CMA=y + CONFIG_ZRAM=y + CONFIG_BLK_DEV_LOOP=y +@@ -255,17 +246,6 @@ CONFIG_BLK_DEV_RAM_SIZE=8192 + CONFIG_HDCP_QSEECOM=y + CONFIG_QSEECOM=y + CONFIG_UID_SYS_STATS=y +-CONFIG_SCSI=y +-CONFIG_BLK_DEV_SD=y +-CONFIG_CHR_DEV_SG=y +-CONFIG_CHR_DEV_SCH=y +-CONFIG_SCSI_CONSTANTS=y +-CONFIG_SCSI_LOGGING=y +-CONFIG_SCSI_SCAN_ASYNC=y +-CONFIG_SCSI_UFSHCD=y +-CONFIG_SCSI_UFSHCD_PLATFORM=y +-CONFIG_SCSI_UFS_QCOM=y +-CONFIG_SCSI_UFS_QCOM_ICE=y + CONFIG_MD=y + CONFIG_BLK_DEV_DM=y + CONFIG_DM_CRYPT=y +@@ -299,8 +279,6 @@ CONFIG_PPPOLAC=y + CONFIG_PPPOPNS=y + CONFIG_PPP_ASYNC=y + CONFIG_PPP_SYNC_TTY=y +-CONFIG_USB_RTL8152=y +-CONFIG_USB_USBNET=y + # CONFIG_WLAN_VENDOR_ADMTEK is not set + # CONFIG_WLAN_VENDOR_ATH is not set + # CONFIG_WLAN_VENDOR_ATMEL is not set +@@ -321,7 +299,6 @@ CONFIG_CLD_LL_CORE=y + CONFIG_INPUT_EVDEV=y + CONFIG_KEYBOARD_GPIO=y + # CONFIG_INPUT_MOUSE is not set +-CONFIG_INPUT_JOYSTICK=y + CONFIG_INPUT_TOUCHSCREEN=y + CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y + CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y +@@ -346,7 +323,6 @@ CONFIG_HW_RANDOM=y + CONFIG_HW_RANDOM_MSM_LEGACY=y + CONFIG_MSM_SMD_PKT=y + CONFIG_MSM_ADSPRPC=y +-CONFIG_MSM_RDBG=m + CONFIG_I2C_CHARDEV=y + CONFIG_I2C_MSM_V2=y + CONFIG_SPI=y +@@ -371,7 +347,6 @@ CONFIG_QPNP_SMB5=y + CONFIG_QPNP_SMBCHARGER=y + CONFIG_QPNP_VM_BMS=y + CONFIG_QPNP_LINEAR_CHARGER=y +-CONFIG_QPNP_TYPEC=y + CONFIG_QPNP_QG=y + CONFIG_MSM_APM=y + CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y +@@ -408,7 +383,6 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y + CONFIG_MEDIA_CONTROLLER=y + CONFIG_VIDEO_V4L2_SUBDEV_API=y + CONFIG_MEDIA_USB_SUPPORT=y +-CONFIG_USB_VIDEO_CLASS=y + CONFIG_V4L_PLATFORM_DRIVERS=y + CONFIG_MSM_CAMERA=y + CONFIG_MSMB_CAMERA=y +@@ -423,15 +397,6 @@ CONFIG_MSM_CSIPHY=y + CONFIG_MSM_CSID=y + CONFIG_MSM_EEPROM=y + CONFIG_MSM_ISPIF_V2=y +-CONFIG_IMX134=y +-CONFIG_IMX132=y +-CONFIG_OV9724=y +-CONFIG_OV5648=y +-CONFIG_GC0339=y +-CONFIG_OV8825=y +-CONFIG_OV8865=y +-CONFIG_s5k4e1=y +-CONFIG_OV12830=y + CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y + CONFIG_MSMB_JPEG=y + CONFIG_MSM_FD=y +@@ -446,7 +411,6 @@ CONFIG_FB_MSM_MDSS=y + CONFIG_FB_MSM_MDSS_WRITEBACK=y + CONFIG_FB_MSM_MDSS_SPI_PANEL=y + CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y +-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y + CONFIG_BACKLIGHT_LCD_SUPPORT=y + # 
CONFIG_BACKLIGHT_CLASS_DEVICE is not set + CONFIG_SOUND=y +@@ -455,40 +419,19 @@ CONFIG_SND_DYNAMIC_MINORS=y + CONFIG_SND_USB_AUDIO=y + CONFIG_SND_SOC=y + CONFIG_UHID=y +-CONFIG_HID_APPLE=y +-CONFIG_HID_ELECOM=y +-CONFIG_HID_MAGICMOUSE=y +-CONFIG_HID_MICROSOFT=y + CONFIG_HID_MULTITOUCH=y + CONFIG_HID_NINTENDO=y + CONFIG_HID_SONY=y +-CONFIG_USB_HIDDEV=y + CONFIG_USB=y + CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + CONFIG_USB_MON=y + CONFIG_USB_EHCI_HCD=y + CONFIG_USB_EHCI_MSM=y +-CONFIG_USB_EHCI_HCD_PLATFORM=y + CONFIG_USB_ACM=y +-CONFIG_USB_STORAGE=y +-CONFIG_USB_STORAGE_DATAFAB=y +-CONFIG_USB_STORAGE_FREECOM=y +-CONFIG_USB_STORAGE_ISD200=y +-CONFIG_USB_STORAGE_USBAT=y +-CONFIG_USB_STORAGE_SDDR09=y +-CONFIG_USB_STORAGE_SDDR55=y +-CONFIG_USB_STORAGE_JUMPSHOT=y +-CONFIG_USB_STORAGE_ALAUDA=y +-CONFIG_USB_STORAGE_ONETOUCH=y +-CONFIG_USB_STORAGE_KARMA=y +-CONFIG_USB_STORAGE_CYPRESS_ATACB=y + CONFIG_USB_SERIAL=y +-CONFIG_USB_EHSET_TEST_FIXTURE=y + CONFIG_NOP_USB_XCEIV=y +-CONFIG_DUAL_ROLE_USB_INTF=y + CONFIG_USB_GADGET=y + CONFIG_USB_GADGET_DEBUG_FILES=y +-CONFIG_USB_GADGET_DEBUG_FS=y + CONFIG_USB_GADGET_VBUS_DRAW=500 + CONFIG_USB_CI13XXX_MSM=y + CONFIG_USB_CONFIGFS=y +@@ -549,7 +492,6 @@ CONFIG_RMNET_IPA=y + CONFIG_RNDIS_IPA=y + CONFIG_SPS=y + CONFIG_SPS_SUPPORT_NDP_BAM=y +-CONFIG_QPNP_COINCELL=y + CONFIG_QPNP_REVID=y + CONFIG_USB_BAM=y + CONFIG_MSM_RMNET_BAM=y +@@ -559,7 +501,6 @@ CONFIG_MSM_TIMER_LEAP=y + CONFIG_MAILBOX=y + CONFIG_ARM_SMMU=y + CONFIG_QCOM_LAZY_MAPPING=y +-CONFIG_QCOM_RUN_QUEUE_STATS=y + CONFIG_MSM_SPM=y + CONFIG_MSM_L2_SPM=y + CONFIG_MSM_BOOT_STATS=y +@@ -571,7 +512,6 @@ CONFIG_QCOM_SECURE_BUFFER=y + CONFIG_QCOM_EARLY_RANDOM=y + CONFIG_MSM_SMEM=y + CONFIG_MSM_SMD=y +-CONFIG_MSM_SMD_DEBUG=y + CONFIG_MSM_TZ_SMMU=y + CONFIG_MSM_SMP2P=y + CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y +@@ -581,7 +521,6 @@ CONFIG_MSM_SYSMON_COMM=y + CONFIG_MSM_PIL=y + CONFIG_MSM_PIL_SSR_GENERIC=y + CONFIG_MSM_PIL_MSS_QDSP6V5=y +-CONFIG_MSM_PERFORMANCE=y + CONFIG_MSM_EVENT_TIMER=y + CONFIG_MSM_AVTIMER=y + CONFIG_MSM_PM=y +@@ -609,7 +548,6 @@ CONFIG_QTI_MPM=y + CONFIG_ANDROID=y + CONFIG_ANDROID_BINDER_IPC=y + CONFIG_SENSORS_SSC=y +-CONFIG_MSM_TZ_LOG=y + CONFIG_EXT4_FS=y + CONFIG_EXT4_FS_SECURITY=y + CONFIG_EXT4_ENCRYPTION=y +@@ -618,22 +556,18 @@ CONFIG_EXT4_FS_ICE_ENCRYPTION=y + CONFIG_F2FS_FS=y + CONFIG_F2FS_FS_SECURITY=y + CONFIG_F2FS_FS_ENCRYPTION=y ++CONFIG_FANOTIFY=y + CONFIG_QUOTA=y + CONFIG_QUOTA_NETLINK_INTERFACE=y +-CONFIG_QFMT_V2=y + CONFIG_FUSE_FS=y + CONFIG_MSDOS_FS=y + CONFIG_VFAT_FS=y + CONFIG_TMPFS=y + CONFIG_SDCARD_FS=y +-CONFIG_SQUASHFS=y +-CONFIG_SQUASHFS_XATTR=y +-CONFIG_SQUASHFS_LZ4=y + # CONFIG_NETWORK_FILESYSTEMS is not set + CONFIG_NLS_CODEPAGE_437=y + CONFIG_NLS_ISO8859_1=y + CONFIG_PRINTK_TIME=y +-CONFIG_DEBUG_INFO=y + CONFIG_FRAME_WARN=2048 + CONFIG_DEBUG_FS=y + CONFIG_MAGIC_SYSRQ=y +@@ -642,7 +576,6 @@ CONFIG_SCHED_STACK_END_CHECK=y + # CONFIG_DEBUG_PREEMPT is not set + CONFIG_STACKTRACE=y + # CONFIG_FTRACE is not set +-CONFIG_DEBUG_SET_MODULE_RONX=y + CONFIG_PFK=y + CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y + CONFIG_SECURITY=y +diff --git a/arch/arm/configs/msm8937go_defconfig b/arch/arm/configs/msm8937go_defconfig +index b64db18c1f44..f6c1b52afe60 100755 +--- a/arch/arm/configs/msm8937go_defconfig ++++ b/arch/arm/configs/msm8937go_defconfig +@@ -37,6 +37,7 @@ CONFIG_BLK_DEV_INITRD=y + # CONFIG_RD_XZ is not set + # CONFIG_RD_LZO is not set + # CONFIG_RD_LZ4 is not set ++CONFIG_CC_OPTIMIZE_FOR_SIZE=y + CONFIG_KALLSYMS_ALL=y + # CONFIG_BASE_FULL is not set + CONFIG_BPF_SYSCALL=y +@@ -71,6 +72,7 @@ 
CONFIG_PREEMPT=y + CONFIG_AEABI=y + CONFIG_HIGHMEM=y + CONFIG_ARM_MODULE_PLTS=y ++CONFIG_KSM=y + CONFIG_CMA=y + CONFIG_CMA_DEBUGFS=y + CONFIG_ZSMALLOC=y +@@ -237,6 +239,7 @@ CONFIG_DNS_RESOLVER=y + CONFIG_RMNET_DATA=y + CONFIG_RMNET_DATA_FC=y + CONFIG_RMNET_DATA_DEBUG_PKT=y ++CONFIG_SOCKEV_NLMCAST=y + CONFIG_BT=y + # CONFIG_BT_BREDR is not set + # CONFIG_BT_LE is not set +@@ -429,15 +432,6 @@ CONFIG_MSM_CSIPHY=y + CONFIG_MSM_CSID=y + CONFIG_MSM_EEPROM=y + CONFIG_MSM_ISPIF_V2=y +-CONFIG_IMX134=y +-CONFIG_IMX132=y +-CONFIG_OV9724=y +-CONFIG_OV5648=y +-CONFIG_GC0339=y +-CONFIG_OV8825=y +-CONFIG_OV8865=y +-CONFIG_s5k4e1=y +-CONFIG_OV12830=y + CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y + CONFIG_MSMB_JPEG=y + CONFIG_MSM_FD=y +@@ -635,6 +629,7 @@ CONFIG_EXT4_FS_ICE_ENCRYPTION=y + CONFIG_F2FS_FS=y + CONFIG_F2FS_FS_SECURITY=y + CONFIG_F2FS_FS_ENCRYPTION=y ++CONFIG_FANOTIFY=y + CONFIG_QUOTA=y + CONFIG_QUOTA_NETLINK_INTERFACE=y + CONFIG_QFMT_V2=y +diff --git a/arch/arm/configs/sa415m-perf_defconfig b/arch/arm/configs/sa415m-perf_defconfig +index aca827b951a6..a14833faf8ed 100644 +--- a/arch/arm/configs/sa415m-perf_defconfig ++++ b/arch/arm/configs/sa415m-perf_defconfig +@@ -315,7 +315,6 @@ CONFIG_USB_CONFIGFS_RNDIS=y + CONFIG_USB_CONFIGFS_MASS_STORAGE=y + CONFIG_USB_CONFIGFS_F_FS=y + CONFIG_USB_CONFIGFS_UEVENT=y +-CONFIG_USB_CONFIGFS_F_UAC1=y + CONFIG_USB_CONFIGFS_F_DIAG=y + CONFIG_USB_CONFIGFS_F_CDEV=y + CONFIG_USB_CONFIGFS_F_GSI=y +diff --git a/arch/arm/configs/sa415m_defconfig b/arch/arm/configs/sa415m_defconfig +index 6e31335e7910..2fbd4d678b41 100644 +--- a/arch/arm/configs/sa415m_defconfig ++++ b/arch/arm/configs/sa415m_defconfig +@@ -328,7 +328,6 @@ CONFIG_USB_CONFIGFS_RNDIS=y + CONFIG_USB_CONFIGFS_MASS_STORAGE=y + CONFIG_USB_CONFIGFS_F_FS=y + CONFIG_USB_CONFIGFS_UEVENT=y +-CONFIG_USB_CONFIGFS_F_UAC1=y + CONFIG_USB_CONFIGFS_F_DIAG=y + CONFIG_USB_CONFIGFS_F_CDEV=y + CONFIG_USB_CONFIGFS_F_GSI=y +diff --git a/arch/arm/configs/sdm670-perf_defconfig b/arch/arm/configs/sdm670-perf_defconfig +index a4d16315303c..23ca983cb5bc 100755 +--- a/arch/arm/configs/sdm670-perf_defconfig ++++ b/arch/arm/configs/sdm670-perf_defconfig +@@ -40,6 +40,7 @@ CONFIG_BPF_SYSCALL=y + # CONFIG_AIO is not set + # CONFIG_MEMBARRIER is not set + CONFIG_EMBEDDED=y ++# CONFIG_SLUB_DEBUG is not set + # CONFIG_COMPAT_BRK is not set + CONFIG_PROFILING=y + CONFIG_CC_STACKPROTECTOR_REGULAR=y +@@ -56,6 +57,7 @@ CONFIG_PARTITION_ADVANCED=y + CONFIG_CFQ_GROUP_IOSCHED=y + CONFIG_ARCH_QCOM=y + CONFIG_ARCH_SDM670=y ++# CONFIG_DEBUG_RODATA is not set + CONFIG_PCI_MSM=y + CONFIG_SMP=y + CONFIG_SCHED_MC=y +@@ -555,14 +557,18 @@ CONFIG_MAGIC_SYSRQ=y + CONFIG_PAGE_EXTENSION=y + CONFIG_DETECT_HUNG_TASK=y + CONFIG_PANIC_TIMEOUT=5 ++# CONFIG_SCHED_DEBUG is not set + CONFIG_SCHEDSTATS=y + # CONFIG_DEBUG_PREEMPT is not set ++# CONFIG_DEBUG_BUGVERBOSE is not set + CONFIG_IPC_LOGGING=y + CONFIG_TRACER_SNAPSHOT=y + CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y + CONFIG_CPU_FREQ_SWITCH_PROFILER=y + CONFIG_CORESIGHT=y + CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y ++CONFIG_CORESIGHT_REMOTE_ETM=y ++CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 + CONFIG_CORESIGHT_QCOM_REPLICATOR=y + CONFIG_CORESIGHT_STM=y + CONFIG_CORESIGHT_TPDA=y +diff --git a/arch/arm/configs/vendor/msm8909go-perf_defconfig b/arch/arm/configs/vendor/msm8909go-perf_defconfig +new file mode 100644 +index 000000000000..95598bd40a9f +--- /dev/null ++++ b/arch/arm/configs/vendor/msm8909go-perf_defconfig +@@ -0,0 +1,555 @@ ++CONFIG_LOCALVERSION="-perf" ++CONFIG_POSIX_MQUEUE=y ++# CONFIG_AUDITSYSCALL is not set 
++CONFIG_NO_HZ=y ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_IRQ_TIME_ACCOUNTING=y ++CONFIG_SCHED_WALT=y ++CONFIG_TASKSTATS=y ++CONFIG_TASK_XACCT=y ++CONFIG_TASK_IO_ACCOUNTING=y ++CONFIG_RCU_EXPERT=y ++CONFIG_RCU_FAST_NO_HZ=y ++CONFIG_RCU_NOCB_CPU=y ++CONFIG_RCU_NOCB_CPU_ALL=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 ++CONFIG_CGROUP_FREEZER=y ++CONFIG_CGROUP_DEVICE=y ++CONFIG_CPUSETS=y ++CONFIG_CGROUP_CPUACCT=y ++CONFIG_CGROUP_SCHEDTUNE=y ++CONFIG_MEMCG=y ++CONFIG_MEMCG_SWAP=y ++CONFIG_RT_GROUP_SCHED=y ++CONFIG_CGROUP_BPF=y ++CONFIG_SCHED_CORE_CTL=y ++CONFIG_NAMESPACES=y ++CONFIG_SCHED_AUTOGROUP=y ++CONFIG_SCHED_TUNE=y ++CONFIG_DEFAULT_USE_ENERGY_AWARE=y ++CONFIG_BLK_DEV_INITRD=y ++# CONFIG_RD_BZIP2 is not set ++# CONFIG_RD_LZMA is not set ++# CONFIG_RD_XZ is not set ++# CONFIG_RD_LZO is not set ++# CONFIG_RD_LZ4 is not set ++CONFIG_CC_OPTIMIZE_FOR_SIZE=y ++CONFIG_KALLSYMS_ALL=y ++CONFIG_BPF_SYSCALL=y ++# CONFIG_MEMBARRIER is not set ++CONFIG_EMBEDDED=y ++# CONFIG_SLUB_DEBUG is not set ++# CONFIG_COMPAT_BRK is not set ++CONFIG_PROFILING=y ++CONFIG_OPROFILE=m ++CONFIG_CC_STACKPROTECTOR_REGULAR=y ++CONFIG_ARCH_MMAP_RND_BITS=16 ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++CONFIG_MODULE_FORCE_UNLOAD=y ++CONFIG_MODVERSIONS=y ++CONFIG_MODULE_SIG=y ++CONFIG_MODULE_SIG_FORCE=y ++CONFIG_MODULE_SIG_SHA512=y ++CONFIG_PARTITION_ADVANCED=y ++CONFIG_ARCH_QCOM=y ++CONFIG_ARCH_MSM8909=y ++CONFIG_SMP=y ++CONFIG_SCHED_MC=y ++CONFIG_PREEMPT=y ++CONFIG_AEABI=y ++CONFIG_HIGHMEM=y ++CONFIG_ARM_MODULE_PLTS=y ++CONFIG_CMA=y ++CONFIG_ZSMALLOC=y ++CONFIG_BALANCE_ANON_FILE_RECLAIM=y ++CONFIG_SECCOMP=y ++CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y ++CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y ++CONFIG_CPU_IDLE=y ++CONFIG_VFP=y ++CONFIG_NEON=y ++CONFIG_KERNEL_MODE_NEON=y ++# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set ++CONFIG_PM_AUTOSLEEP=y ++CONFIG_PM_WAKELOCKS=y ++CONFIG_PM_WAKELOCKS_LIMIT=0 ++# CONFIG_PM_WAKELOCKS_GC is not set ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_XFRM_USER=y ++CONFIG_XFRM_STATISTICS=y ++CONFIG_NET_KEY=y ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++CONFIG_IP_ADVANCED_ROUTER=y ++CONFIG_IP_MULTIPLE_TABLES=y ++CONFIG_IP_ROUTE_VERBOSE=y ++CONFIG_IP_PNP=y ++CONFIG_IP_PNP_DHCP=y ++CONFIG_INET_AH=y ++CONFIG_INET_ESP=y ++CONFIG_INET_IPCOMP=y ++CONFIG_INET_DIAG_DESTROY=y ++CONFIG_IPV6_ROUTER_PREF=y ++CONFIG_IPV6_ROUTE_INFO=y ++CONFIG_IPV6_OPTIMISTIC_DAD=y ++CONFIG_INET6_AH=y ++CONFIG_INET6_ESP=y ++CONFIG_INET6_IPCOMP=y ++CONFIG_IPV6_MIP6=y ++CONFIG_IPV6_MULTIPLE_TABLES=y ++CONFIG_IPV6_SUBTREES=y ++CONFIG_NETFILTER=y ++CONFIG_NF_CONNTRACK=y ++CONFIG_NF_CONNTRACK_SECMARK=y ++CONFIG_NF_CONNTRACK_EVENTS=y ++CONFIG_NF_CT_PROTO_DCCP=y ++CONFIG_NF_CT_PROTO_SCTP=y ++CONFIG_NF_CT_PROTO_UDPLITE=y ++CONFIG_NF_CONNTRACK_AMANDA=y ++CONFIG_NF_CONNTRACK_FTP=y ++CONFIG_NF_CONNTRACK_H323=y ++CONFIG_NF_CONNTRACK_IRC=y ++CONFIG_NF_CONNTRACK_NETBIOS_NS=y ++CONFIG_NF_CONNTRACK_PPTP=y ++CONFIG_NF_CONNTRACK_SANE=y ++CONFIG_NF_CONNTRACK_TFTP=y ++CONFIG_NF_CT_NETLINK=y ++CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y ++CONFIG_NETFILTER_XT_TARGET_CONNMARK=y ++CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y ++CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y ++CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y ++CONFIG_NETFILTER_XT_TARGET_MARK=y ++CONFIG_NETFILTER_XT_TARGET_NFLOG=y ++CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y ++CONFIG_NETFILTER_XT_TARGET_NOTRACK=y ++CONFIG_NETFILTER_XT_TARGET_TEE=y ++CONFIG_NETFILTER_XT_TARGET_TPROXY=y ++CONFIG_NETFILTER_XT_TARGET_TRACE=y ++CONFIG_NETFILTER_XT_TARGET_SECMARK=y 
++CONFIG_NETFILTER_XT_TARGET_TCPMSS=y ++CONFIG_NETFILTER_XT_MATCH_COMMENT=y ++CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y ++CONFIG_NETFILTER_XT_MATCH_CONNMARK=y ++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y ++CONFIG_NETFILTER_XT_MATCH_DSCP=y ++CONFIG_NETFILTER_XT_MATCH_ESP=y ++CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y ++CONFIG_NETFILTER_XT_MATCH_HELPER=y ++CONFIG_NETFILTER_XT_MATCH_IPRANGE=y ++CONFIG_NETFILTER_XT_MATCH_LENGTH=y ++CONFIG_NETFILTER_XT_MATCH_LIMIT=y ++CONFIG_NETFILTER_XT_MATCH_MAC=y ++CONFIG_NETFILTER_XT_MATCH_MARK=y ++CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y ++CONFIG_NETFILTER_XT_MATCH_OWNER=y ++CONFIG_NETFILTER_XT_MATCH_POLICY=y ++CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y ++CONFIG_NETFILTER_XT_MATCH_QUOTA=y ++CONFIG_NETFILTER_XT_MATCH_QUOTA2=y ++CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y ++CONFIG_NETFILTER_XT_MATCH_SOCKET=y ++CONFIG_NETFILTER_XT_MATCH_STATE=y ++CONFIG_NETFILTER_XT_MATCH_STATISTIC=y ++CONFIG_NETFILTER_XT_MATCH_STRING=y ++CONFIG_NETFILTER_XT_MATCH_TIME=y ++CONFIG_NETFILTER_XT_MATCH_U32=y ++CONFIG_NF_CONNTRACK_IPV4=y ++CONFIG_IP_NF_IPTABLES=y ++CONFIG_IP_NF_MATCH_AH=y ++CONFIG_IP_NF_MATCH_ECN=y ++CONFIG_IP_NF_MATCH_RPFILTER=y ++CONFIG_IP_NF_MATCH_TTL=y ++CONFIG_IP_NF_FILTER=y ++CONFIG_IP_NF_TARGET_REJECT=y ++CONFIG_IP_NF_NAT=y ++CONFIG_IP_NF_TARGET_MASQUERADE=y ++CONFIG_IP_NF_TARGET_NETMAP=y ++CONFIG_IP_NF_TARGET_REDIRECT=y ++CONFIG_IP_NF_MANGLE=y ++CONFIG_IP_NF_RAW=y ++CONFIG_IP_NF_SECURITY=y ++CONFIG_IP_NF_ARPTABLES=y ++CONFIG_IP_NF_ARPFILTER=y ++CONFIG_IP_NF_ARP_MANGLE=y ++CONFIG_NF_CONNTRACK_IPV6=y ++CONFIG_IP6_NF_IPTABLES=y ++CONFIG_IP6_NF_MATCH_RPFILTER=y ++CONFIG_IP6_NF_FILTER=y ++CONFIG_IP6_NF_TARGET_REJECT=y ++CONFIG_IP6_NF_MANGLE=y ++CONFIG_IP6_NF_RAW=y ++CONFIG_BRIDGE_NF_EBTABLES=y ++CONFIG_BRIDGE_EBT_BROUTE=y ++CONFIG_L2TP=y ++CONFIG_L2TP_V3=y ++CONFIG_L2TP_IP=y ++CONFIG_L2TP_ETH=y ++CONFIG_BRIDGE=y ++CONFIG_NET_SCHED=y ++CONFIG_NET_SCH_HTB=y ++CONFIG_NET_SCH_PRIO=y ++CONFIG_NET_SCH_MULTIQ=y ++CONFIG_NET_SCH_INGRESS=y ++CONFIG_NET_CLS_FW=y ++CONFIG_NET_CLS_U32=y ++CONFIG_CLS_U32_MARK=y ++CONFIG_NET_CLS_FLOW=y ++CONFIG_NET_EMATCH=y ++CONFIG_NET_EMATCH_CMP=y ++CONFIG_NET_EMATCH_NBYTE=y ++CONFIG_NET_EMATCH_U32=y ++CONFIG_NET_EMATCH_META=y ++CONFIG_NET_EMATCH_TEXT=y ++CONFIG_NET_CLS_ACT=y ++CONFIG_NET_ACT_GACT=y ++CONFIG_NET_ACT_MIRRED=y ++CONFIG_NET_ACT_SKBEDIT=y ++CONFIG_DNS_RESOLVER=y ++CONFIG_RMNET_DATA=y ++CONFIG_RMNET_DATA_FC=y ++CONFIG_SOCKEV_NLMCAST=y ++CONFIG_BT=y ++CONFIG_MSM_BT_POWER=y ++CONFIG_CFG80211=y ++CONFIG_CFG80211_INTERNAL_REGDB=y ++CONFIG_RFKILL=y ++CONFIG_NFC_NQ=y ++CONFIG_IPC_ROUTER=y ++CONFIG_IPC_ROUTER_SECURITY=y ++CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y ++CONFIG_DMA_CMA=y ++CONFIG_CMA_SIZE_MBYTES=8 ++CONFIG_ZRAM=y ++CONFIG_BLK_DEV_LOOP=y ++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_SIZE=8192 ++CONFIG_HDCP_QSEECOM=y ++CONFIG_QSEECOM=y ++CONFIG_UID_SYS_STATS=y ++CONFIG_MEMORY_STATE_TIME=y ++CONFIG_MD=y ++CONFIG_BLK_DEV_MD=y ++CONFIG_MD_LINEAR=y ++CONFIG_BLK_DEV_DM=y ++CONFIG_DM_CRYPT=y ++CONFIG_DM_REQ_CRYPT=y ++CONFIG_DM_DEFAULT_KEY=y ++CONFIG_DM_UEVENT=y ++CONFIG_DM_VERITY=y ++CONFIG_DM_VERITY_FEC=y ++CONFIG_DM_ANDROID_VERITY=y ++CONFIG_NETDEVICES=y ++CONFIG_DUMMY=y ++CONFIG_TUN=y ++CONFIG_PPP=y ++CONFIG_PPP_BSDCOMP=y ++CONFIG_PPP_DEFLATE=y ++CONFIG_PPP_FILTER=y ++CONFIG_PPP_MPPE=y ++CONFIG_PPP_MULTILINK=y ++CONFIG_PPPOE=y ++CONFIG_PPPOL2TP=y ++CONFIG_PPPOLAC=y ++CONFIG_PPPOPNS=y ++CONFIG_PPP_ASYNC=y ++CONFIG_PPP_SYNC_TTY=y ++CONFIG_WCNSS_MEM_PRE_ALLOC=y ++CONFIG_CNSS=y ++CONFIG_CNSS_SDIO=y ++CONFIG_CLD_HL_SDIO_CORE=y ++CONFIG_INPUT_EVDEV=y 
++CONFIG_KEYBOARD_GPIO=y ++CONFIG_KEYBOARD_MATRIX=y ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_JOYSTICK=y ++CONFIG_JOYSTICK_XPAD=y ++CONFIG_INPUT_TOUCHSCREEN=y ++CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y ++CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y ++CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y ++CONFIG_TOUCHSCREEN_GEN_VKEYS=y ++CONFIG_INPUT_MISC=y ++CONFIG_INPUT_QPNP_POWER_ON=y ++CONFIG_STMVL53L0X=y ++CONFIG_INPUT_UINPUT=y ++CONFIG_INPUT_GPIO=y ++# CONFIG_LEGACY_PTYS is not set ++# CONFIG_DEVMEM is not set ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_MSM_HS=y ++CONFIG_SERIAL_MSM_SMD=y ++CONFIG_DIAG_CHAR=y ++CONFIG_DIAG_USES_SMD=y ++CONFIG_HW_RANDOM=y ++CONFIG_HW_RANDOM_MSM_LEGACY=y ++CONFIG_MSM_SMD_PKT=y ++CONFIG_MSM_ADSPRPC=y ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_MSM_V2=y ++CONFIG_SPI=y ++CONFIG_SPI_QUP=y ++CONFIG_SPI_SPIDEV=y ++CONFIG_SLIMBUS_MSM_NGD=y ++CONFIG_SPMI=y ++CONFIG_PINCTRL_MSM8909=y ++CONFIG_PINCTRL_QCOM_SPMI_PMIC=y ++CONFIG_GPIO_SYSFS=y ++CONFIG_POWER_RESET=y ++CONFIG_POWER_RESET_QCOM=y ++CONFIG_QCOM_DLOAD_MODE=y ++CONFIG_POWER_RESET_SYSCON=y ++CONFIG_POWER_RESET_SYSCON_POWEROFF=y ++CONFIG_POWER_SUPPLY=y ++CONFIG_SMB1360_CHARGER_FG=y ++CONFIG_QPNP_VM_BMS=y ++CONFIG_QPNP_LINEAR_CHARGER=y ++CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y ++CONFIG_THERMAL=y ++CONFIG_THERMAL_WRITABLE_TRIPS=y ++CONFIG_THERMAL_GOV_USER_SPACE=y ++CONFIG_THERMAL_GOV_LOW_LIMITS=y ++CONFIG_CPU_THERMAL=y ++CONFIG_DEVFREQ_THERMAL=y ++CONFIG_THERMAL_QPNP=y ++CONFIG_THERMAL_QPNP_ADC_TM=y ++CONFIG_THERMAL_TSENS=y ++CONFIG_MSM_BCL_PERIPHERAL_CTL=y ++CONFIG_QTI_THERMAL_LIMITS_DCVS=y ++CONFIG_QTI_QMI_COOLING_DEVICE=y ++CONFIG_REGULATOR_COOLING_DEVICE=y ++CONFIG_MFD_QCOM_RPM=y ++CONFIG_MFD_SPMI_PMIC=y ++CONFIG_REGULATOR=y ++CONFIG_REGULATOR_FIXED_VOLTAGE=y ++CONFIG_REGULATOR_PROXY_CONSUMER=y ++CONFIG_REGULATOR_QCOM_RPM=y ++CONFIG_REGULATOR_QCOM_SPMI=y ++CONFIG_REGULATOR_CPR=y ++CONFIG_REGULATOR_MEM_ACC=y ++CONFIG_REGULATOR_MSM_GFX_LDO=y ++CONFIG_REGULATOR_QPNP=y ++CONFIG_REGULATOR_RPM_SMD=y ++CONFIG_REGULATOR_SPM=y ++CONFIG_REGULATOR_STUB=y ++CONFIG_MEDIA_SUPPORT=y ++CONFIG_MEDIA_CAMERA_SUPPORT=y ++CONFIG_MEDIA_CONTROLLER=y ++CONFIG_VIDEO_V4L2_SUBDEV_API=y ++CONFIG_V4L_PLATFORM_DRIVERS=y ++CONFIG_SOC_CAMERA=y ++CONFIG_MSMB_CAMERA=y ++CONFIG_MSM_CAMERA_SENSOR=y ++CONFIG_MSM_CSI30_HEADER=y ++CONFIG_MSM_CSIPHY=y ++CONFIG_MSM_CSID=y ++CONFIG_MSM_EEPROM=y ++CONFIG_MSM_ISP_V1=y ++CONFIG_MSM_ISPIF=y ++CONFIG_MSM_VIDC_3X_V4L2=y ++CONFIG_MSM_VIDC_3X_GOVERNORS=y ++CONFIG_RADIO_IRIS=y ++CONFIG_RADIO_IRIS_TRANSPORT=y ++CONFIG_QCOM_KGSL=y ++CONFIG_FB=y ++CONFIG_FB_VIRTUAL=y ++CONFIG_FB_MSM=y ++CONFIG_FB_MSM_MDSS=y ++CONFIG_FB_MSM_MDSS_WRITEBACK=y ++CONFIG_FB_MSM_MDSS_SPI_PANEL=y ++CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y ++CONFIG_FB_MSM_MDSS_MDP3=y ++CONFIG_BACKLIGHT_LCD_SUPPORT=y ++CONFIG_BACKLIGHT_CLASS_DEVICE=y ++CONFIG_LOGO=y ++CONFIG_SOUND=y ++CONFIG_SND=y ++CONFIG_SND_DYNAMIC_MINORS=y ++CONFIG_SND_SOC=y ++CONFIG_UHID=y ++CONFIG_USB_HIDDEV=y ++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y ++CONFIG_USB_MON=y ++CONFIG_USB_EHCI_HCD=y ++CONFIG_USB_EHCI_MSM=y ++CONFIG_USB_ACM=y ++CONFIG_USB_SERIAL=y ++CONFIG_USB_SERIAL_CP210X=y ++CONFIG_USB_SERIAL_FTDI_SIO=y ++CONFIG_USB_GADGET=y ++CONFIG_USB_GADGET_DEBUG_FILES=y ++CONFIG_USB_GADGET_DEBUG_FS=y ++CONFIG_USB_GADGET_VBUS_DRAW=500 ++CONFIG_USB_CI13XXX_MSM=y ++CONFIG_USB_CONFIGFS=y ++CONFIG_USB_CONFIGFS_SERIAL=y ++CONFIG_USB_CONFIGFS_NCM=y ++CONFIG_USB_CONFIGFS_RNDIS=y ++CONFIG_USB_CONFIGFS_RMNET_BAM=y ++CONFIG_USB_CONFIGFS_MASS_STORAGE=y ++CONFIG_USB_CONFIGFS_F_FS=y 
++CONFIG_USB_CONFIGFS_F_MTP=y ++CONFIG_USB_CONFIGFS_F_PTP=y ++CONFIG_USB_CONFIGFS_UEVENT=y ++CONFIG_USB_CONFIGFS_F_DIAG=y ++CONFIG_USB_CONFIGFS_F_CDEV=y ++CONFIG_MMC=y ++CONFIG_MMC_PERF_PROFILING=y ++CONFIG_MMC_RING_BUFFER=y ++CONFIG_MMC_PARANOID_SD_INIT=y ++CONFIG_MMC_CLKGATE=y ++CONFIG_MMC_BLOCK_MINORS=32 ++CONFIG_MMC_BLOCK_DEFERRED_RESUME=y ++CONFIG_MMC_SDHCI=y ++CONFIG_MMC_SDHCI_PLTFM=y ++CONFIG_MMC_SDHCI_MSM=y ++CONFIG_MMC_SDHCI_MSM_ICE=y ++CONFIG_LEDS_CLASS_FLASH=y ++CONFIG_LEDS_QPNP=y ++CONFIG_LEDS_MSM_GPIO_FLASH=y ++CONFIG_LEDS_QPNP_VIBRATOR=y ++CONFIG_LEDS_TRIGGERS=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_QPNP=y ++CONFIG_DMADEVICES=y ++CONFIG_QCOM_SPS_DMA=y ++CONFIG_UIO=y ++CONFIG_UIO_MSM_SHAREDMEM=y ++CONFIG_STAGING=y ++CONFIG_ASHMEM=y ++CONFIG_ANDROID_LOW_MEMORY_KILLER=y ++CONFIG_ION=y ++CONFIG_ION_MSM=y ++CONFIG_IPA=y ++CONFIG_RMNET_IPA=y ++CONFIG_RNDIS_IPA=y ++CONFIG_SPS=y ++CONFIG_SPS_SUPPORT_NDP_BAM=y ++CONFIG_QPNP_REVID=y ++CONFIG_USB_BAM=y ++CONFIG_MSM_RMNET_BAM=y ++CONFIG_MSM_MDSS_PLL=y ++CONFIG_REMOTE_SPINLOCK_MSM=y ++CONFIG_MAILBOX=y ++CONFIG_ARM_SMMU=y ++CONFIG_QCOM_LAZY_MAPPING=y ++CONFIG_MSM_SPM=y ++CONFIG_MSM_L2_SPM=y ++CONFIG_MSM_BOOT_STATS=y ++CONFIG_MSM_CORE_HANG_DETECT=y ++CONFIG_MSM_GLADIATOR_HANG_DETECT=y ++CONFIG_QCOM_WATCHDOG_V2=y ++CONFIG_QCOM_MEMORY_DUMP_V2=y ++CONFIG_QCOM_BUS_SCALING=y ++CONFIG_QCOM_SECURE_BUFFER=y ++CONFIG_QCOM_EARLY_RANDOM=y ++CONFIG_MSM_SMEM=y ++CONFIG_MSM_SMD=y ++CONFIG_MSM_GLINK=y ++CONFIG_MSM_TZ_SMMU=y ++CONFIG_MSM_GLINK_LOOPBACK_SERVER=y ++CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y ++CONFIG_MSM_GLINK_SPI_XPRT=y ++CONFIG_TRACER_PKT=y ++CONFIG_MSM_SMP2P=y ++CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y ++CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y ++CONFIG_MSM_QMI_INTERFACE=y ++CONFIG_MSM_GLINK_PKT=y ++CONFIG_MSM_SUBSYSTEM_RESTART=y ++CONFIG_MSM_PIL=y ++CONFIG_MSM_PIL_SSR_GENERIC=y ++CONFIG_MSM_PIL_MSS_QDSP6V5=y ++CONFIG_MSM_EVENT_TIMER=y ++CONFIG_MSM_AVTIMER=y ++CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y ++CONFIG_MEM_SHARE_QMI_SERVICE=y ++CONFIG_MSM_BAM_DMUX=y ++CONFIG_WCNSS_CORE=y ++CONFIG_WCNSS_CORE_PRONTO=y ++CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y ++CONFIG_CNSS_CRYPTO=y ++CONFIG_IIO=y ++CONFIG_INV_ICM20602_IIO=y ++CONFIG_PWM=y ++CONFIG_PWM_QPNP=y ++CONFIG_QCOM_SHOW_RESUME_IRQ=y ++CONFIG_QTI_MPM=y ++CONFIG_ANDROID=y ++CONFIG_ANDROID_BINDER_IPC=y ++CONFIG_ANDROID_BINDER_IPC_32BIT=y ++CONFIG_STM=y ++CONFIG_SENSORS_SSC=y ++CONFIG_EXT4_FS=y ++CONFIG_EXT4_FS_SECURITY=y ++CONFIG_EXT4_ENCRYPTION=y ++CONFIG_EXT4_FS_ENCRYPTION=y ++CONFIG_EXT4_FS_ICE_ENCRYPTION=y ++CONFIG_F2FS_FS=y ++CONFIG_F2FS_FS_SECURITY=y ++CONFIG_F2FS_FS_ENCRYPTION=y ++CONFIG_FANOTIFY=y ++CONFIG_QUOTA=y ++CONFIG_QUOTA_NETLINK_INTERFACE=y ++# CONFIG_PRINT_QUOTA_WARNING is not set ++CONFIG_FUSE_FS=y ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++CONFIG_TMPFS=y ++CONFIG_ECRYPT_FS=y ++CONFIG_ECRYPT_FS_MESSAGING=y ++CONFIG_SDCARD_FS=y ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_ASCII=y ++CONFIG_NLS_ISO8859_1=y ++CONFIG_PRINTK_TIME=y ++CONFIG_FRAME_WARN=2048 ++CONFIG_MAGIC_SYSRQ=y ++CONFIG_LOCKUP_DETECTOR=y ++CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y ++# CONFIG_DETECT_HUNG_TASK is not set ++CONFIG_WQ_WATCHDOG=y ++CONFIG_PANIC_TIMEOUT=5 ++CONFIG_PANIC_ON_SCHED_BUG=y ++CONFIG_PANIC_ON_RT_THROTTLING=y ++CONFIG_SCHED_STACK_END_CHECK=y ++CONFIG_FAULT_INJECTION=y ++CONFIG_FAIL_PAGE_ALLOC=y ++CONFIG_QCOM_RTB=y ++CONFIG_QCOM_RTB_SEPARATE_CPUS=y ++CONFIG_CPU_FREQ_SWITCH_PROFILER=y ++CONFIG_LKDTM=y ++CONFIG_MEMTEST=y ++CONFIG_PANIC_ON_DATA_CORRUPTION=y ++CONFIG_PID_IN_CONTEXTIDR=y ++CONFIG_CORESIGHT=y ++CONFIG_PFK=y 
++CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y ++CONFIG_SECURITY=y ++CONFIG_LSM_MMAP_MIN_ADDR=4096 ++CONFIG_HARDENED_USERCOPY=y ++CONFIG_SECURITY_SELINUX=y ++CONFIG_SECURITY_SMACK=y ++CONFIG_SECURITY_APPARMOR=y ++CONFIG_CRYPTO_XCBC=y ++CONFIG_CRYPTO_MD4=y ++CONFIG_CRYPTO_TWOFISH=y ++CONFIG_CRYPTO_ANSI_CPRNG=y ++CONFIG_CRYPTO_DEV_QCE=y ++CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y ++CONFIG_CRYPTO_DEV_QCRYPTO=y ++CONFIG_CRYPTO_DEV_QCEDEV=y ++CONFIG_CRYPTO_DEV_OTA_CRYPTO=y ++CONFIG_CRYPTO_DEV_QCOM_ICE=y ++CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem" ++CONFIG_ARM_CRYPTO=y ++CONFIG_CRYPTO_SHA1_ARM_NEON=y ++CONFIG_CRYPTO_SHA2_ARM_CE=y ++CONFIG_CRYPTO_AES_ARM_BS=y ++CONFIG_CRYPTO_AES_ARM_CE=y ++CONFIG_QMI_ENCDEC=y +diff --git a/arch/arm/configs/vendor/msm8909go_defconfig b/arch/arm/configs/vendor/msm8909go_defconfig +new file mode 100644 +index 000000000000..b1b56d757c35 +--- /dev/null ++++ b/arch/arm/configs/vendor/msm8909go_defconfig +@@ -0,0 +1,596 @@ ++CONFIG_POSIX_MQUEUE=y ++# CONFIG_AUDITSYSCALL is not set ++CONFIG_NO_HZ=y ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_IRQ_TIME_ACCOUNTING=y ++CONFIG_SCHED_WALT=y ++CONFIG_TASKSTATS=y ++CONFIG_TASK_DELAY_ACCT=y ++CONFIG_TASK_XACCT=y ++CONFIG_TASK_IO_ACCOUNTING=y ++CONFIG_RCU_EXPERT=y ++CONFIG_RCU_FAST_NO_HZ=y ++CONFIG_RCU_NOCB_CPU=y ++CONFIG_RCU_NOCB_CPU_ALL=y ++CONFIG_IKCONFIG=y ++CONFIG_IKCONFIG_PROC=y ++CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 ++CONFIG_CGROUP_DEBUG=y ++CONFIG_CGROUP_FREEZER=y ++CONFIG_CGROUP_DEVICE=y ++CONFIG_CPUSETS=y ++CONFIG_CGROUP_CPUACCT=y ++CONFIG_CGROUP_SCHEDTUNE=y ++CONFIG_MEMCG=y ++CONFIG_MEMCG_SWAP=y ++CONFIG_RT_GROUP_SCHED=y ++CONFIG_CGROUP_BPF=y ++CONFIG_SCHED_CORE_CTL=y ++CONFIG_NAMESPACES=y ++CONFIG_SCHED_AUTOGROUP=y ++CONFIG_SCHED_TUNE=y ++CONFIG_DEFAULT_USE_ENERGY_AWARE=y ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_CC_OPTIMIZE_FOR_SIZE=y ++CONFIG_KALLSYMS_ALL=y ++CONFIG_BPF_SYSCALL=y ++CONFIG_EMBEDDED=y ++CONFIG_PROFILING=y ++CONFIG_OPROFILE=m ++CONFIG_KPROBES=y ++CONFIG_ARCH_MMAP_RND_BITS=16 ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++CONFIG_MODULE_FORCE_UNLOAD=y ++CONFIG_MODVERSIONS=y ++CONFIG_MODULE_SIG=y ++CONFIG_MODULE_SIG_FORCE=y ++CONFIG_MODULE_SIG_SHA512=y ++CONFIG_PARTITION_ADVANCED=y ++CONFIG_ARCH_QCOM=y ++CONFIG_ARCH_MSM8909=y ++CONFIG_SMP=y ++CONFIG_SCHED_MC=y ++CONFIG_PREEMPT=y ++CONFIG_AEABI=y ++CONFIG_HIGHMEM=y ++CONFIG_ARM_MODULE_PLTS=y ++CONFIG_CMA=y ++CONFIG_CMA_DEBUGFS=y ++CONFIG_ZSMALLOC=y ++CONFIG_BALANCE_ANON_FILE_RECLAIM=y ++CONFIG_SECCOMP=y ++CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y ++CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y ++CONFIG_CPU_IDLE=y ++CONFIG_VFP=y ++CONFIG_NEON=y ++CONFIG_KERNEL_MODE_NEON=y ++CONFIG_PM_AUTOSLEEP=y ++CONFIG_PM_WAKELOCKS=y ++CONFIG_PM_WAKELOCKS_LIMIT=0 ++CONFIG_PM_DEBUG=y ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_XFRM_USER=y ++CONFIG_XFRM_STATISTICS=y ++CONFIG_NET_KEY=y ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++CONFIG_IP_ADVANCED_ROUTER=y ++CONFIG_IP_MULTIPLE_TABLES=y ++CONFIG_IP_ROUTE_VERBOSE=y ++CONFIG_IP_PNP=y ++CONFIG_IP_PNP_DHCP=y ++CONFIG_INET_AH=y ++CONFIG_INET_ESP=y ++CONFIG_INET_IPCOMP=y ++CONFIG_INET_DIAG_DESTROY=y ++CONFIG_IPV6_ROUTER_PREF=y ++CONFIG_IPV6_ROUTE_INFO=y ++CONFIG_IPV6_OPTIMISTIC_DAD=y ++CONFIG_INET6_AH=y ++CONFIG_INET6_ESP=y ++CONFIG_INET6_IPCOMP=y ++CONFIG_IPV6_MIP6=y ++CONFIG_IPV6_MULTIPLE_TABLES=y ++CONFIG_IPV6_SUBTREES=y ++CONFIG_NETFILTER=y ++CONFIG_NF_CONNTRACK=y ++CONFIG_NF_CONNTRACK_SECMARK=y ++CONFIG_NF_CONNTRACK_EVENTS=y ++CONFIG_NF_CT_PROTO_DCCP=y ++CONFIG_NF_CT_PROTO_SCTP=y ++CONFIG_NF_CT_PROTO_UDPLITE=y ++CONFIG_NF_CONNTRACK_AMANDA=y 
++CONFIG_NF_CONNTRACK_FTP=y ++CONFIG_NF_CONNTRACK_H323=y ++CONFIG_NF_CONNTRACK_IRC=y ++CONFIG_NF_CONNTRACK_NETBIOS_NS=y ++CONFIG_NF_CONNTRACK_PPTP=y ++CONFIG_NF_CONNTRACK_SANE=y ++CONFIG_NF_CONNTRACK_TFTP=y ++CONFIG_NF_CT_NETLINK=y ++CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y ++CONFIG_NETFILTER_XT_TARGET_CONNMARK=y ++CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y ++CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y ++CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y ++CONFIG_NETFILTER_XT_TARGET_LOG=y ++CONFIG_NETFILTER_XT_TARGET_MARK=y ++CONFIG_NETFILTER_XT_TARGET_NFLOG=y ++CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y ++CONFIG_NETFILTER_XT_TARGET_NOTRACK=y ++CONFIG_NETFILTER_XT_TARGET_TEE=y ++CONFIG_NETFILTER_XT_TARGET_TPROXY=y ++CONFIG_NETFILTER_XT_TARGET_TRACE=y ++CONFIG_NETFILTER_XT_TARGET_SECMARK=y ++CONFIG_NETFILTER_XT_TARGET_TCPMSS=y ++CONFIG_NETFILTER_XT_MATCH_COMMENT=y ++CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y ++CONFIG_NETFILTER_XT_MATCH_CONNMARK=y ++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y ++CONFIG_NETFILTER_XT_MATCH_DSCP=y ++CONFIG_NETFILTER_XT_MATCH_ESP=y ++CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y ++CONFIG_NETFILTER_XT_MATCH_HELPER=y ++CONFIG_NETFILTER_XT_MATCH_IPRANGE=y ++CONFIG_NETFILTER_XT_MATCH_LENGTH=y ++CONFIG_NETFILTER_XT_MATCH_LIMIT=y ++CONFIG_NETFILTER_XT_MATCH_MAC=y ++CONFIG_NETFILTER_XT_MATCH_MARK=y ++CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y ++CONFIG_NETFILTER_XT_MATCH_OWNER=y ++CONFIG_NETFILTER_XT_MATCH_POLICY=y ++CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y ++CONFIG_NETFILTER_XT_MATCH_QUOTA=y ++CONFIG_NETFILTER_XT_MATCH_QUOTA2=y ++CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y ++CONFIG_NETFILTER_XT_MATCH_SOCKET=y ++CONFIG_NETFILTER_XT_MATCH_STATE=y ++CONFIG_NETFILTER_XT_MATCH_STATISTIC=y ++CONFIG_NETFILTER_XT_MATCH_STRING=y ++CONFIG_NETFILTER_XT_MATCH_TIME=y ++CONFIG_NETFILTER_XT_MATCH_U32=y ++CONFIG_NF_CONNTRACK_IPV4=y ++CONFIG_IP_NF_IPTABLES=y ++CONFIG_IP_NF_MATCH_AH=y ++CONFIG_IP_NF_MATCH_ECN=y ++CONFIG_IP_NF_MATCH_RPFILTER=y ++CONFIG_IP_NF_MATCH_TTL=y ++CONFIG_IP_NF_FILTER=y ++CONFIG_IP_NF_TARGET_REJECT=y ++CONFIG_IP_NF_NAT=y ++CONFIG_IP_NF_TARGET_MASQUERADE=y ++CONFIG_IP_NF_TARGET_NETMAP=y ++CONFIG_IP_NF_TARGET_REDIRECT=y ++CONFIG_IP_NF_MANGLE=y ++CONFIG_IP_NF_RAW=y ++CONFIG_IP_NF_SECURITY=y ++CONFIG_IP_NF_ARPTABLES=y ++CONFIG_IP_NF_ARPFILTER=y ++CONFIG_IP_NF_ARP_MANGLE=y ++CONFIG_NF_CONNTRACK_IPV6=y ++CONFIG_IP6_NF_IPTABLES=y ++CONFIG_IP6_NF_MATCH_RPFILTER=y ++CONFIG_IP6_NF_FILTER=y ++CONFIG_IP6_NF_TARGET_REJECT=y ++CONFIG_IP6_NF_MANGLE=y ++CONFIG_IP6_NF_RAW=y ++CONFIG_BRIDGE_NF_EBTABLES=y ++CONFIG_BRIDGE_EBT_BROUTE=y ++CONFIG_L2TP=y ++CONFIG_L2TP_DEBUGFS=y ++CONFIG_L2TP_V3=y ++CONFIG_L2TP_IP=y ++CONFIG_L2TP_ETH=y ++CONFIG_BRIDGE=y ++CONFIG_NET_SCHED=y ++CONFIG_NET_SCH_HTB=y ++CONFIG_NET_SCH_PRIO=y ++CONFIG_NET_SCH_MULTIQ=y ++CONFIG_NET_SCH_INGRESS=y ++CONFIG_NET_CLS_FW=y ++CONFIG_NET_CLS_U32=y ++CONFIG_CLS_U32_MARK=y ++CONFIG_NET_CLS_FLOW=y ++CONFIG_NET_EMATCH=y ++CONFIG_NET_EMATCH_CMP=y ++CONFIG_NET_EMATCH_NBYTE=y ++CONFIG_NET_EMATCH_U32=y ++CONFIG_NET_EMATCH_META=y ++CONFIG_NET_EMATCH_TEXT=y ++CONFIG_NET_CLS_ACT=y ++CONFIG_NET_ACT_GACT=y ++CONFIG_NET_ACT_MIRRED=y ++CONFIG_NET_ACT_SKBEDIT=y ++CONFIG_DNS_RESOLVER=y ++CONFIG_RMNET_DATA=y ++CONFIG_RMNET_DATA_FC=y ++CONFIG_RMNET_DATA_DEBUG_PKT=y ++CONFIG_SOCKEV_NLMCAST=y ++CONFIG_BT=y ++CONFIG_MSM_BT_POWER=y ++CONFIG_CFG80211=y ++CONFIG_CFG80211_INTERNAL_REGDB=y ++CONFIG_RFKILL=y ++CONFIG_NFC_NQ=y ++CONFIG_IPC_ROUTER=y ++CONFIG_IPC_ROUTER_SECURITY=y ++CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y ++CONFIG_DMA_CMA=y ++CONFIG_CMA_SIZE_MBYTES=8 ++CONFIG_ZRAM=y ++CONFIG_BLK_DEV_LOOP=y 
++CONFIG_BLK_DEV_RAM=y ++CONFIG_BLK_DEV_RAM_SIZE=8192 ++CONFIG_HDCP_QSEECOM=y ++CONFIG_QSEECOM=y ++CONFIG_UID_SYS_STATS=y ++CONFIG_MEMORY_STATE_TIME=y ++CONFIG_MD=y ++CONFIG_BLK_DEV_MD=y ++CONFIG_MD_LINEAR=y ++CONFIG_BLK_DEV_DM=y ++CONFIG_DM_DEBUG=y ++CONFIG_DM_CRYPT=y ++CONFIG_DM_REQ_CRYPT=y ++CONFIG_DM_DEFAULT_KEY=y ++CONFIG_DM_UEVENT=y ++CONFIG_DM_VERITY=y ++CONFIG_DM_VERITY_FEC=y ++CONFIG_DM_ANDROID_VERITY=y ++CONFIG_NETDEVICES=y ++CONFIG_DUMMY=y ++CONFIG_TUN=y ++CONFIG_PPP=y ++CONFIG_PPP_BSDCOMP=y ++CONFIG_PPP_DEFLATE=y ++CONFIG_PPP_FILTER=y ++CONFIG_PPP_MPPE=y ++CONFIG_PPP_MULTILINK=y ++CONFIG_PPPOE=y ++CONFIG_PPPOL2TP=y ++CONFIG_PPPOLAC=y ++CONFIG_PPPOPNS=y ++CONFIG_PPP_ASYNC=y ++CONFIG_PPP_SYNC_TTY=y ++CONFIG_WCNSS_MEM_PRE_ALLOC=y ++CONFIG_CNSS=y ++CONFIG_CNSS_SDIO=y ++CONFIG_CLD_HL_SDIO_CORE=y ++CONFIG_INPUT_EVDEV=y ++CONFIG_INPUT_EVBUG=y ++CONFIG_KEYBOARD_GPIO=y ++CONFIG_KEYBOARD_MATRIX=y ++# CONFIG_INPUT_MOUSE is not set ++CONFIG_INPUT_JOYSTICK=y ++CONFIG_JOYSTICK_XPAD=y ++CONFIG_INPUT_TOUCHSCREEN=y ++CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y ++CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y ++CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y ++CONFIG_TOUCHSCREEN_GEN_VKEYS=y ++CONFIG_INPUT_MISC=y ++CONFIG_INPUT_QPNP_POWER_ON=y ++CONFIG_STMVL53L0X=y ++CONFIG_INPUT_UINPUT=y ++CONFIG_INPUT_GPIO=y ++# CONFIG_LEGACY_PTYS is not set ++# CONFIG_DEVMEM is not set ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_MSM=y ++CONFIG_SERIAL_MSM_CONSOLE=y ++CONFIG_SERIAL_MSM_HS=y ++CONFIG_SERIAL_MSM_SMD=y ++CONFIG_DIAG_CHAR=y ++CONFIG_DIAG_USES_SMD=y ++CONFIG_HW_RANDOM=y ++CONFIG_HW_RANDOM_MSM_LEGACY=y ++CONFIG_MSM_SMD_PKT=y ++CONFIG_MSM_ADSPRPC=y ++CONFIG_MSM_RDBG=y ++CONFIG_I2C_CHARDEV=y ++CONFIG_I2C_MSM_V2=y ++CONFIG_SPI=y ++CONFIG_SPI_QUP=y ++CONFIG_SPI_SPIDEV=y ++CONFIG_SLIMBUS_MSM_NGD=y ++CONFIG_SPMI=y ++CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y ++CONFIG_PINCTRL_MSM8909=y ++CONFIG_PINCTRL_QCOM_SPMI_PMIC=y ++CONFIG_GPIO_SYSFS=y ++CONFIG_POWER_RESET=y ++CONFIG_POWER_RESET_QCOM=y ++CONFIG_QCOM_DLOAD_MODE=y ++CONFIG_POWER_RESET_SYSCON=y ++CONFIG_POWER_RESET_SYSCON_POWEROFF=y ++CONFIG_POWER_SUPPLY=y ++CONFIG_SMB1360_CHARGER_FG=y ++CONFIG_QPNP_VM_BMS=y ++CONFIG_QPNP_LINEAR_CHARGER=y ++CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y ++CONFIG_THERMAL=y ++CONFIG_THERMAL_WRITABLE_TRIPS=y ++CONFIG_THERMAL_GOV_USER_SPACE=y ++CONFIG_THERMAL_GOV_LOW_LIMITS=y ++CONFIG_CPU_THERMAL=y ++CONFIG_DEVFREQ_THERMAL=y ++CONFIG_THERMAL_QPNP=y ++CONFIG_THERMAL_QPNP_ADC_TM=y ++CONFIG_THERMAL_TSENS=y ++CONFIG_MSM_BCL_PERIPHERAL_CTL=y ++CONFIG_QTI_THERMAL_LIMITS_DCVS=y ++CONFIG_QTI_QMI_COOLING_DEVICE=y ++CONFIG_REGULATOR_COOLING_DEVICE=y ++CONFIG_MFD_QCOM_RPM=y ++CONFIG_MFD_SPMI_PMIC=y ++CONFIG_REGULATOR=y ++CONFIG_REGULATOR_FIXED_VOLTAGE=y ++CONFIG_REGULATOR_PROXY_CONSUMER=y ++CONFIG_REGULATOR_QCOM_RPM=y ++CONFIG_REGULATOR_QCOM_SPMI=y ++CONFIG_REGULATOR_CPR=y ++CONFIG_REGULATOR_MEM_ACC=y ++CONFIG_REGULATOR_MSM_GFX_LDO=y ++CONFIG_REGULATOR_QPNP=y ++CONFIG_REGULATOR_RPM_SMD=y ++CONFIG_REGULATOR_SPM=y ++CONFIG_REGULATOR_STUB=y ++CONFIG_MEDIA_SUPPORT=y ++CONFIG_MEDIA_CAMERA_SUPPORT=y ++CONFIG_MEDIA_CONTROLLER=y ++CONFIG_VIDEO_V4L2_SUBDEV_API=y ++CONFIG_V4L_PLATFORM_DRIVERS=y ++CONFIG_SOC_CAMERA=y ++CONFIG_MSMB_CAMERA=y ++CONFIG_MSM_CAMERA_SENSOR=y ++CONFIG_MSM_CSI30_HEADER=y ++CONFIG_MSM_CSIPHY=y ++CONFIG_MSM_CSID=y ++CONFIG_MSM_EEPROM=y ++CONFIG_MSM_ISP_V1=y ++CONFIG_MSM_ISPIF=y ++CONFIG_MSM_VIDC_3X_V4L2=y ++CONFIG_MSM_VIDC_3X_GOVERNORS=y ++CONFIG_RADIO_IRIS=y ++CONFIG_RADIO_IRIS_TRANSPORT=y ++CONFIG_QCOM_KGSL=y ++CONFIG_FB=y 
++CONFIG_FB_VIRTUAL=y ++CONFIG_FB_MSM=y ++CONFIG_FB_MSM_MDSS=y ++CONFIG_FB_MSM_MDSS_WRITEBACK=y ++CONFIG_FB_MSM_MDSS_SPI_PANEL=y ++CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y ++CONFIG_FB_MSM_MDSS_MDP3=y ++CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y ++CONFIG_BACKLIGHT_LCD_SUPPORT=y ++CONFIG_BACKLIGHT_CLASS_DEVICE=y ++CONFIG_LOGO=y ++CONFIG_SOUND=y ++CONFIG_SND=y ++CONFIG_SND_DYNAMIC_MINORS=y ++CONFIG_SND_SOC=y ++CONFIG_UHID=y ++CONFIG_USB_HIDDEV=y ++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y ++CONFIG_USB_MON=y ++CONFIG_USB_EHCI_HCD=y ++CONFIG_USB_EHCI_MSM=y ++CONFIG_USB_ACM=y ++CONFIG_USB_SERIAL=y ++CONFIG_USB_SERIAL_CP210X=y ++CONFIG_USB_SERIAL_FTDI_SIO=y ++CONFIG_USB_SERIAL_DEBUG=y ++CONFIG_USB_EHSET_TEST_FIXTURE=y ++CONFIG_USB_GADGET=y ++CONFIG_USB_GADGET_DEBUG_FILES=y ++CONFIG_USB_GADGET_DEBUG_FS=y ++CONFIG_USB_GADGET_VBUS_DRAW=500 ++CONFIG_USB_CI13XXX_MSM=y ++CONFIG_USB_CONFIGFS=y ++CONFIG_USB_CONFIGFS_SERIAL=y ++CONFIG_USB_CONFIGFS_NCM=y ++CONFIG_USB_CONFIGFS_RNDIS=y ++CONFIG_USB_CONFIGFS_RMNET_BAM=y ++CONFIG_USB_CONFIGFS_MASS_STORAGE=y ++CONFIG_USB_CONFIGFS_F_FS=y ++CONFIG_USB_CONFIGFS_F_MTP=y ++CONFIG_USB_CONFIGFS_F_PTP=y ++CONFIG_USB_CONFIGFS_UEVENT=y ++CONFIG_USB_CONFIGFS_F_DIAG=y ++CONFIG_USB_CONFIGFS_F_CDEV=y ++CONFIG_MMC=y ++CONFIG_MMC_PERF_PROFILING=y ++CONFIG_MMC_RING_BUFFER=y ++CONFIG_MMC_PARANOID_SD_INIT=y ++CONFIG_MMC_CLKGATE=y ++CONFIG_MMC_BLOCK_MINORS=32 ++CONFIG_MMC_BLOCK_DEFERRED_RESUME=y ++CONFIG_MMC_TEST=y ++CONFIG_MMC_SDHCI=y ++CONFIG_MMC_SDHCI_PLTFM=y ++CONFIG_MMC_SDHCI_MSM=y ++CONFIG_MMC_SDHCI_MSM_ICE=y ++CONFIG_LEDS_CLASS_FLASH=y ++CONFIG_LEDS_QPNP=y ++CONFIG_LEDS_MSM_GPIO_FLASH=y ++CONFIG_LEDS_QPNP_VIBRATOR=y ++CONFIG_LEDS_TRIGGERS=y ++CONFIG_RTC_CLASS=y ++CONFIG_RTC_DRV_QPNP=y ++CONFIG_DMADEVICES=y ++CONFIG_QCOM_SPS_DMA=y ++CONFIG_UIO=y ++CONFIG_UIO_MSM_SHAREDMEM=y ++CONFIG_STAGING=y ++CONFIG_ASHMEM=y ++CONFIG_ANDROID_LOW_MEMORY_KILLER=y ++CONFIG_ION=y ++CONFIG_ION_MSM=y ++CONFIG_IPA=y ++CONFIG_RMNET_IPA=y ++CONFIG_RNDIS_IPA=y ++CONFIG_SPS=y ++CONFIG_SPS_SUPPORT_NDP_BAM=y ++CONFIG_QPNP_REVID=y ++CONFIG_USB_BAM=y ++CONFIG_MSM_RMNET_BAM=y ++CONFIG_MSM_MDSS_PLL=y ++CONFIG_REMOTE_SPINLOCK_MSM=y ++CONFIG_MAILBOX=y ++CONFIG_ARM_SMMU=y ++CONFIG_QCOM_LAZY_MAPPING=y ++CONFIG_MSM_SPM=y ++CONFIG_MSM_L2_SPM=y ++CONFIG_MSM_BOOT_STATS=y ++CONFIG_MSM_CORE_HANG_DETECT=y ++CONFIG_MSM_GLADIATOR_HANG_DETECT=y ++CONFIG_QCOM_WATCHDOG_V2=y ++CONFIG_QCOM_MEMORY_DUMP_V2=y ++CONFIG_QCOM_BUS_SCALING=y ++CONFIG_QCOM_SECURE_BUFFER=y ++CONFIG_QCOM_EARLY_RANDOM=y ++CONFIG_MSM_SMEM=y ++CONFIG_MSM_SMD=y ++CONFIG_MSM_SMD_DEBUG=y ++CONFIG_MSM_GLINK=y ++CONFIG_MSM_TZ_SMMU=y ++CONFIG_MSM_GLINK_LOOPBACK_SERVER=y ++CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y ++CONFIG_MSM_GLINK_SPI_XPRT=y ++CONFIG_TRACER_PKT=y ++CONFIG_MSM_SMP2P=y ++CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y ++CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y ++CONFIG_MSM_QMI_INTERFACE=y ++CONFIG_MSM_GLINK_PKT=y ++CONFIG_MSM_SUBSYSTEM_RESTART=y ++CONFIG_MSM_PIL=y ++CONFIG_MSM_PIL_SSR_GENERIC=y ++CONFIG_MSM_PIL_MSS_QDSP6V5=y ++CONFIG_MSM_EVENT_TIMER=y ++CONFIG_MSM_AVTIMER=y ++CONFIG_QTI_RPM_STATS_LOG=y ++CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y ++CONFIG_MEM_SHARE_QMI_SERVICE=y ++CONFIG_MSM_BAM_DMUX=y ++CONFIG_WCNSS_CORE=y ++CONFIG_WCNSS_CORE_PRONTO=y ++CONFIG_WCNSS_REGISTER_DUMP_ON_BITE=y ++CONFIG_CNSS_CRYPTO=y ++CONFIG_IIO=y ++CONFIG_INV_ICM20602_IIO=y ++CONFIG_PWM=y ++CONFIG_PWM_QPNP=y ++CONFIG_QCOM_SHOW_RESUME_IRQ=y ++CONFIG_QTI_MPM=y ++CONFIG_ANDROID=y ++CONFIG_ANDROID_BINDER_IPC=y ++CONFIG_ANDROID_BINDER_IPC_32BIT=y ++CONFIG_STM=y ++CONFIG_SENSORS_SSC=y ++CONFIG_MSM_TZ_LOG=y 
++CONFIG_EXT4_FS=y ++CONFIG_EXT4_FS_SECURITY=y ++CONFIG_EXT4_ENCRYPTION=y ++CONFIG_EXT4_FS_ENCRYPTION=y ++CONFIG_EXT4_FS_ICE_ENCRYPTION=y ++CONFIG_F2FS_FS=y ++CONFIG_F2FS_FS_SECURITY=y ++CONFIG_F2FS_FS_ENCRYPTION=y ++CONFIG_FANOTIFY=y ++CONFIG_QUOTA=y ++CONFIG_QUOTA_NETLINK_INTERFACE=y ++# CONFIG_PRINT_QUOTA_WARNING is not set ++CONFIG_FUSE_FS=y ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++CONFIG_TMPFS=y ++CONFIG_ECRYPT_FS=y ++CONFIG_ECRYPT_FS_MESSAGING=y ++CONFIG_SDCARD_FS=y ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_ASCII=y ++CONFIG_NLS_ISO8859_1=y ++CONFIG_PRINTK_TIME=y ++CONFIG_DYNAMIC_DEBUG=y ++CONFIG_DEBUG_INFO=y ++CONFIG_FRAME_WARN=2048 ++CONFIG_PAGE_OWNER=y ++CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y ++CONFIG_MAGIC_SYSRQ=y ++CONFIG_DEBUG_PAGEALLOC=y ++CONFIG_SLUB_DEBUG_PANIC_ON=y ++CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y ++CONFIG_DEBUG_OBJECTS=y ++CONFIG_DEBUG_OBJECTS_FREE=y ++CONFIG_DEBUG_OBJECTS_TIMERS=y ++CONFIG_DEBUG_OBJECTS_WORK=y ++CONFIG_DEBUG_OBJECTS_RCU_HEAD=y ++CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y ++CONFIG_DEBUG_KMEMLEAK=y ++CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000 ++CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y ++CONFIG_DEBUG_STACK_USAGE=y ++CONFIG_DEBUG_MEMORY_INIT=y ++CONFIG_LOCKUP_DETECTOR=y ++CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y ++# CONFIG_DETECT_HUNG_TASK is not set ++CONFIG_WQ_WATCHDOG=y ++CONFIG_PANIC_TIMEOUT=5 ++CONFIG_PANIC_ON_SCHED_BUG=y ++CONFIG_PANIC_ON_RT_THROTTLING=y ++CONFIG_SCHEDSTATS=y ++CONFIG_SCHED_STACK_END_CHECK=y ++CONFIG_DEBUG_SPINLOCK=y ++CONFIG_DEBUG_MUTEXES=y ++CONFIG_DEBUG_ATOMIC_SLEEP=y ++CONFIG_DEBUG_LIST=y ++CONFIG_FAULT_INJECTION=y ++CONFIG_FAIL_PAGE_ALLOC=y ++CONFIG_FAULT_INJECTION_DEBUG_FS=y ++CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y ++CONFIG_IPC_LOGGING=y ++CONFIG_QCOM_RTB=y ++CONFIG_QCOM_RTB_SEPARATE_CPUS=y ++CONFIG_FUNCTION_TRACER=y ++CONFIG_IRQSOFF_TRACER=y ++CONFIG_PREEMPT_TRACER=y ++CONFIG_BLK_DEV_IO_TRACE=y ++CONFIG_CPU_FREQ_SWITCH_PROFILER=y ++CONFIG_LKDTM=y ++CONFIG_MEMTEST=y ++CONFIG_PANIC_ON_DATA_CORRUPTION=y ++CONFIG_DEBUG_USER=y ++CONFIG_PID_IN_CONTEXTIDR=y ++CONFIG_DEBUG_SET_MODULE_RONX=y ++CONFIG_CORESIGHT=y ++CONFIG_PFK=y ++CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y ++CONFIG_SECURITY=y ++CONFIG_LSM_MMAP_MIN_ADDR=4096 ++CONFIG_HARDENED_USERCOPY=y ++CONFIG_SECURITY_SELINUX=y ++CONFIG_SECURITY_SMACK=y ++CONFIG_SECURITY_APPARMOR=y ++CONFIG_CRYPTO_XCBC=y ++CONFIG_CRYPTO_MD4=y ++CONFIG_CRYPTO_TWOFISH=y ++CONFIG_CRYPTO_ANSI_CPRNG=y ++CONFIG_CRYPTO_DEV_QCE=y ++CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y ++CONFIG_CRYPTO_DEV_QCRYPTO=y ++CONFIG_CRYPTO_DEV_QCEDEV=y ++CONFIG_CRYPTO_DEV_OTA_CRYPTO=y ++CONFIG_CRYPTO_DEV_QCOM_ICE=y ++CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem" ++CONFIG_ARM_CRYPTO=y ++CONFIG_CRYPTO_SHA1_ARM_NEON=y ++CONFIG_CRYPTO_SHA2_ARM_CE=y ++CONFIG_CRYPTO_AES_ARM_BS=y ++CONFIG_CRYPTO_AES_ARM_CE=y ++CONFIG_QMI_ENCDEC=y +diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h +index a89b4076cde4..72821b4721ad 100644 +--- a/arch/arm/include/asm/percpu.h ++++ b/arch/arm/include/asm/percpu.h +@@ -16,6 +16,8 @@ + #ifndef _ASM_ARM_PERCPU_H_ + #define _ASM_ARM_PERCPU_H_ + ++#include ++ + /* + * Same as asm-generic/percpu.h, except that we store the per cpu offset + * in the TPIDRPRW. 
TPIDRPRW only exists on V6K and V7 +diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h +index a91ae499614c..2c3b952be63e 100644 +--- a/arch/arm/include/asm/unified.h ++++ b/arch/arm/include/asm/unified.h +@@ -20,8 +20,10 @@ + #ifndef __ASM_UNIFIED_H + #define __ASM_UNIFIED_H + +-#if defined(__ASSEMBLY__) && defined(CONFIG_ARM_ASM_UNIFIED) ++#if defined(__ASSEMBLY__) + .syntax unified ++#else ++__asm__(".syntax unified"); + #endif + + #ifdef CONFIG_CPU_V7M +@@ -64,77 +66,4 @@ + + #endif /* CONFIG_THUMB2_KERNEL */ + +-#ifndef CONFIG_ARM_ASM_UNIFIED +- +-/* +- * If the unified assembly syntax isn't used (in ARM mode), these +- * macros expand to an empty string +- */ +-#ifdef __ASSEMBLY__ +- .macro it, cond +- .endm +- .macro itt, cond +- .endm +- .macro ite, cond +- .endm +- .macro ittt, cond +- .endm +- .macro itte, cond +- .endm +- .macro itet, cond +- .endm +- .macro itee, cond +- .endm +- .macro itttt, cond +- .endm +- .macro ittte, cond +- .endm +- .macro ittet, cond +- .endm +- .macro ittee, cond +- .endm +- .macro itett, cond +- .endm +- .macro itete, cond +- .endm +- .macro iteet, cond +- .endm +- .macro iteee, cond +- .endm +-#else /* !__ASSEMBLY__ */ +-__asm__( +-" .macro it, cond\n" +-" .endm\n" +-" .macro itt, cond\n" +-" .endm\n" +-" .macro ite, cond\n" +-" .endm\n" +-" .macro ittt, cond\n" +-" .endm\n" +-" .macro itte, cond\n" +-" .endm\n" +-" .macro itet, cond\n" +-" .endm\n" +-" .macro itee, cond\n" +-" .endm\n" +-" .macro itttt, cond\n" +-" .endm\n" +-" .macro ittte, cond\n" +-" .endm\n" +-" .macro ittet, cond\n" +-" .endm\n" +-" .macro ittee, cond\n" +-" .endm\n" +-" .macro itett, cond\n" +-" .endm\n" +-" .macro itete, cond\n" +-" .endm\n" +-" .macro iteet, cond\n" +-" .endm\n" +-" .macro iteee, cond\n" +-" .endm\n"); +-#endif /* __ASSEMBLY__ */ +- +-#endif /* CONFIG_ARM_ASM_UNIFIED */ +- + #endif /* !__ASM_UNIFIED_H */ +diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S +index 04286fd9e09c..2e336acd68b0 100644 +--- a/arch/arm/kernel/head.S ++++ b/arch/arm/kernel/head.S +@@ -673,12 +673,8 @@ ARM_BE8(rev16 ip, ip) + ldrcc r7, [r4], #4 @ use branch for delay slot + bcc 1b + bx lr +-#else +-#ifdef CONFIG_CPU_ENDIAN_BE8 +- moveq r0, #0x00004000 @ set bit 22, mov to mvn instruction + #else + moveq r0, #0x400000 @ set bit 22, mov to mvn instruction +-#endif + b 2f + 1: ldr ip, [r7, r3] + #ifdef CONFIG_CPU_ENDIAN_BE8 +@@ -687,7 +683,7 @@ ARM_BE8(rev16 ip, ip) + tst ip, #0x000f0000 @ check the rotation field + orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24 + biceq ip, ip, #0x00004000 @ clear bit 22 +- orreq ip, ip, r0 @ mask in offset bits 7-0 ++ orreq ip, ip, r0, ror #8 @ mask in offset bits 7-0 + #else + bic ip, ip, #0x000000ff + tst ip, #0xf00 @ check the rotation field +diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c +index 25538a935874..59e04e2d9d9d 100644 +--- a/arch/arm/kernel/hw_breakpoint.c ++++ b/arch/arm/kernel/hw_breakpoint.c +@@ -688,26 +688,68 @@ static void disable_single_step(struct perf_event *bp) + arch_install_hw_breakpoint(bp); + } + ++/* ++ * Arm32 hardware does not always report a watchpoint hit address that matches ++ * one of the watchpoints set. It can also report an address "near" the ++ * watchpoint if a single instruction access both watched and unwatched ++ * addresses. There is no straight-forward way, short of disassembling the ++ * offending instruction, to map that address back to the watchpoint. 
This ++ * function computes the distance of the memory access from the watchpoint as a ++ * heuristic for the likelyhood that a given access triggered the watchpoint. ++ * ++ * See this same function in the arm64 platform code, which has the same ++ * problem. ++ * ++ * The function returns the distance of the address from the bytes watched by ++ * the watchpoint. In case of an exact match, it returns 0. ++ */ ++static u32 get_distance_from_watchpoint(unsigned long addr, u32 val, ++ struct arch_hw_breakpoint_ctrl *ctrl) ++{ ++ u32 wp_low, wp_high; ++ u32 lens, lene; ++ ++ lens = __ffs(ctrl->len); ++ lene = __fls(ctrl->len); ++ ++ wp_low = val + lens; ++ wp_high = val + lene; ++ if (addr < wp_low) ++ return wp_low - addr; ++ else if (addr > wp_high) ++ return addr - wp_high; ++ else ++ return 0; ++} ++ ++static int watchpoint_fault_on_uaccess(struct pt_regs *regs, ++ struct arch_hw_breakpoint *info) ++{ ++ return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER; ++} ++ + static void watchpoint_handler(unsigned long addr, unsigned int fsr, + struct pt_regs *regs) + { +- int i, access; +- u32 val, ctrl_reg, alignment_mask; ++ int i, access, closest_match = 0; ++ u32 min_dist = -1, dist; ++ u32 val, ctrl_reg; + struct perf_event *wp, **slots; + struct arch_hw_breakpoint *info; + struct arch_hw_breakpoint_ctrl ctrl; + + slots = this_cpu_ptr(wp_on_reg); + ++ /* ++ * Find all watchpoints that match the reported address. If no exact ++ * match is found. Attribute the hit to the closest watchpoint. ++ */ ++ rcu_read_lock(); + for (i = 0; i < core_num_wrps; ++i) { +- rcu_read_lock(); +- + wp = slots[i]; +- + if (wp == NULL) +- goto unlock; ++ continue; + +- info = counter_arch_bp(wp); + /* + * The DFAR is an unknown value on debug architectures prior + * to 7.1. Since we only allow a single watchpoint on these +@@ -716,50 +758,69 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, + */ + if (debug_arch < ARM_DEBUG_ARCH_V7_1) { + BUG_ON(i > 0); ++ info = counter_arch_bp(wp); + info->trigger = wp->attr.bp_addr; + } else { +- if (info->ctrl.len == ARM_BREAKPOINT_LEN_8) +- alignment_mask = 0x7; +- else +- alignment_mask = 0x3; +- +- /* Check if the watchpoint value matches. */ +- val = read_wb_reg(ARM_BASE_WVR + i); +- if (val != (addr & ~alignment_mask)) +- goto unlock; +- +- /* Possible match, check the byte address select. */ +- ctrl_reg = read_wb_reg(ARM_BASE_WCR + i); +- decode_ctrl_reg(ctrl_reg, &ctrl); +- if (!((1 << (addr & alignment_mask)) & ctrl.len)) +- goto unlock; +- + /* Check that the access type matches. */ + if (debug_exception_updates_fsr()) { + access = (fsr & ARM_FSR_ACCESS_MASK) ? + HW_BREAKPOINT_W : HW_BREAKPOINT_R; + if (!(access & hw_breakpoint_type(wp))) +- goto unlock; ++ continue; + } + ++ val = read_wb_reg(ARM_BASE_WVR + i); ++ ctrl_reg = read_wb_reg(ARM_BASE_WCR + i); ++ decode_ctrl_reg(ctrl_reg, &ctrl); ++ dist = get_distance_from_watchpoint(addr, val, &ctrl); ++ if (dist < min_dist) { ++ min_dist = dist; ++ closest_match = i; ++ } ++ /* Is this an exact match? */ ++ if (dist != 0) ++ continue; ++ + /* We have a winner. */ ++ info = counter_arch_bp(wp); + info->trigger = addr; + } + + pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); ++ ++ /* ++ * If we triggered a user watchpoint from a uaccess routine, ++ * then handle the stepping ourselves since userspace really ++ * can't help us with this. 
++ */ ++ if (watchpoint_fault_on_uaccess(regs, info)) ++ goto step; ++ + perf_bp_event(wp, regs); + + /* +- * If no overflow handler is present, insert a temporary +- * mismatch breakpoint so we can single-step over the +- * watchpoint trigger. ++ * Defer stepping to the overflow handler if one is installed. ++ * Otherwise, insert a temporary mismatch breakpoint so that ++ * we can single-step over the watchpoint trigger. + */ ++ if (!is_default_overflow_handler(wp)) ++ continue; ++step: ++ enable_single_step(wp, instruction_pointer(regs)); ++ } ++ ++ if (min_dist > 0 && min_dist != -1) { ++ /* No exact match found. */ ++ wp = slots[closest_match]; ++ info = counter_arch_bp(wp); ++ info->trigger = addr; ++ pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); ++ perf_bp_event(wp, regs); + if (is_default_overflow_handler(wp)) + enable_single_step(wp, instruction_pointer(regs)); +- +-unlock: +- rcu_read_unlock(); + } ++ ++ rcu_read_unlock(); + } + + static void watchpoint_single_step_handler(unsigned long pc) +@@ -830,7 +891,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs) + info->trigger = addr; + pr_debug("breakpoint fired: address = 0x%x\n", addr); + perf_bp_event(bp, regs); +- if (!bp->overflow_handler) ++ if (is_default_overflow_handler(bp)) + enable_single_step(bp, addr); + goto unlock; + } +diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c +index ae738a6319f6..364985c96a92 100644 +--- a/arch/arm/kernel/ptrace.c ++++ b/arch/arm/kernel/ptrace.c +@@ -227,8 +227,8 @@ static struct undef_hook arm_break_hook = { + }; + + static struct undef_hook thumb_break_hook = { +- .instr_mask = 0xffff, +- .instr_val = 0xde01, ++ .instr_mask = 0xffffffff, ++ .instr_val = 0x0000de01, + .cpsr_mask = PSR_T_BIT, + .cpsr_val = PSR_T_BIT, + .fn = break_trap, +diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c +index 775d70c81587..108b5d17e89e 100755 +--- a/arch/arm/kernel/setup.c ++++ b/arch/arm/kernel/setup.c +@@ -590,9 +590,11 @@ void notrace cpu_init(void) + * In Thumb-2, msr with an immediate value is not allowed. 
+ */ + #ifdef CONFIG_THUMB2_KERNEL +-#define PLC "r" ++#define PLC_l "l" ++#define PLC_r "r" + #else +-#define PLC "I" ++#define PLC_l "I" ++#define PLC_r "I" + #endif + + /* +@@ -614,15 +616,15 @@ void notrace cpu_init(void) + "msr cpsr_c, %9" + : + : "r" (stk), +- PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), ++ PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), + "I" (offsetof(struct stack, irq[0])), +- PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE), ++ PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE), + "I" (offsetof(struct stack, abt[0])), +- PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE), ++ PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE), + "I" (offsetof(struct stack, und[0])), +- PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE), ++ PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE), + "I" (offsetof(struct stack, fiq[0])), +- PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE) ++ PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE) + : "r14"); + #endif + } +diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c +index 7abc908ebea0..f82a1ac22164 100644 +--- a/arch/arm/kernel/signal.c ++++ b/arch/arm/kernel/signal.c +@@ -626,18 +626,20 @@ struct page *get_signal_page(void) + + addr = page_address(page); + ++ /* Poison the entire page */ ++ memset32(addr, __opcode_to_mem_arm(0xe7fddef1), ++ PAGE_SIZE / sizeof(u32)); ++ + /* Give the signal return code some randomness */ + offset = 0x200 + (get_random_int() & 0x7fc); + signal_return_offset = offset; + +- /* +- * Copy signal return handlers into the vector page, and +- * set sigreturn to be a pointer to these. +- */ ++ /* Copy signal return handlers into the page */ + memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes)); + +- ptr = (unsigned long)addr + offset; +- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes)); ++ /* Flush out all instructions in this page */ ++ ptr = (unsigned long)addr; ++ flush_icache_range(ptr, ptr + PAGE_SIZE); + + return page; + } +diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c +index fe76010f75cf..7e939c7b0a21 100644 +--- a/arch/arm/kernel/stacktrace.c ++++ b/arch/arm/kernel/stacktrace.c +@@ -19,6 +19,19 @@ + * A simple function epilogue looks like this: + * ldm sp, {fp, sp, pc} + * ++ * When compiled with clang, pc and sp are not pushed. A simple function ++ * prologue looks like this when built with clang: ++ * ++ * stmdb {..., fp, lr} ++ * add fp, sp, #x ++ * sub sp, sp, #y ++ * ++ * A simple function epilogue looks like this when built with clang: ++ * ++ * sub sp, fp, #x ++ * ldm {..., fp, pc} ++ * ++ * + * Note that with framepointer enabled, even the leaf functions have the same + * prologue and epilogue, therefore we can ignore the LR value in this case. 
+ */ +@@ -31,6 +44,16 @@ int notrace unwind_frame(struct stackframe *frame) + low = frame->sp; + high = ALIGN(low, THREAD_SIZE); + ++#ifdef CONFIG_CC_IS_CLANG ++ /* check current frame pointer is within bounds */ ++ if (fp < low + 4 || fp > high - 4) ++ return -EINVAL; ++ ++ frame->sp = frame->fp; ++ frame->fp = *(unsigned long *)(fp); ++ frame->pc = frame->lr; ++ frame->lr = *(unsigned long *)(fp + 4); ++#else + /* check current frame pointer is within bounds */ + if (fp < low + 12 || fp > high - 4) + return -EINVAL; +@@ -39,6 +62,7 @@ int notrace unwind_frame(struct stackframe *frame) + frame->fp = *(unsigned long *)(fp - 12); + frame->sp = *(unsigned long *)(fp - 8); + frame->pc = *(unsigned long *)(fp - 4); ++#endif + + return 0; + } +diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c +index bb0d5e21d60b..16615ae6e257 100644 +--- a/arch/arm/kvm/mmu.c ++++ b/arch/arm/kvm/mmu.c +@@ -298,12 +298,6 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) + next = stage2_pgd_addr_end(addr, end); + if (!stage2_pgd_none(*pgd)) + unmap_stage2_puds(kvm, pgd, addr, next); +- /* +- * If the range is too large, release the kvm->mmu_lock +- * to prevent starvation and lockup detector warnings. +- */ +- if (next != end) +- cond_resched_lock(&kvm->mmu_lock); + } while (pgd++, addr = next, addr != end); + } + +@@ -1840,7 +1834,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, + * Prevent userspace from creating a memory region outside of the IPA + * space addressable by the KVM guest IPA space. + */ +- if (memslot->base_gfn + memslot->npages >= ++ if (memslot->base_gfn + memslot->npages > + (KVM_PHYS_SIZE >> PAGE_SHIFT)) + return -EFAULT; + +diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c +index 8ba0e2e5ad97..0efac1404418 100644 +--- a/arch/arm/mach-at91/pm.c ++++ b/arch/arm/mach-at91/pm.c +@@ -411,13 +411,13 @@ static void __init at91_pm_sram_init(void) + sram_pool = gen_pool_get(&pdev->dev, NULL); + if (!sram_pool) { + pr_warn("%s: sram pool unavailable!\n", __func__); +- return; ++ goto out_put_device; + } + + sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz); + if (!sram_base) { + pr_warn("%s: unable to alloc sram!\n", __func__); +- return; ++ goto out_put_device; + } + + sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base); +@@ -425,12 +425,17 @@ static void __init at91_pm_sram_init(void) + at91_pm_suspend_in_sram_sz, false); + if (!at91_suspend_sram_fn) { + pr_warn("SRAM: Could not map\n"); +- return; ++ goto out_put_device; + } + + /* Copy the pm suspend handler to SRAM */ + at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn, + &at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz); ++ return; ++ ++out_put_device: ++ put_device(&pdev->dev); ++ return; + } + + static const struct of_device_id atmel_pmc_ids[] __initconst = { +diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c +index 96a3d73ef4bf..fd6c9169fa78 100644 +--- a/arch/arm/mach-footbridge/dc21285.c ++++ b/arch/arm/mach-footbridge/dc21285.c +@@ -69,15 +69,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where, + if (addr) + switch (size) { + case 1: +- asm("ldrb %0, [%1, %2]" ++ asm volatile("ldrb %0, [%1, %2]" + : "=r" (v) : "r" (addr), "r" (where) : "cc"); + break; + case 2: +- asm("ldrh %0, [%1, %2]" ++ asm volatile("ldrh %0, [%1, %2]" + : "=r" (v) : "r" (addr), "r" (where) : "cc"); + break; + case 4: +- asm("ldr %0, [%1, %2]" ++ asm volatile("ldr %0, [%1, %2]" + : "=r" (v) : "r" (addr), "r" (where) : "cc"); + break; 
+ } +@@ -103,17 +103,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where, + if (addr) + switch (size) { + case 1: +- asm("strb %0, [%1, %2]" ++ asm volatile("strb %0, [%1, %2]" + : : "r" (value), "r" (addr), "r" (where) + : "cc"); + break; + case 2: +- asm("strh %0, [%1, %2]" ++ asm volatile("strh %0, [%1, %2]" + : : "r" (value), "r" (addr), "r" (where) + : "cc"); + break; + case 4: +- asm("str %0, [%1, %2]" ++ asm volatile("str %0, [%1, %2]" + : : "r" (value), "r" (addr), "r" (where) + : "cc"); + break; +diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c +index 868781fd460c..14c630c899c5 100644 +--- a/arch/arm/mach-imx/pm-imx5.c ++++ b/arch/arm/mach-imx/pm-imx5.c +@@ -301,14 +301,14 @@ static int __init imx_suspend_alloc_ocram( + if (!ocram_pool) { + pr_warn("%s: ocram pool unavailable!\n", __func__); + ret = -ENODEV; +- goto put_node; ++ goto put_device; + } + + ocram_base = gen_pool_alloc(ocram_pool, size); + if (!ocram_base) { + pr_warn("%s: unable to alloc ocram!\n", __func__); + ret = -ENOMEM; +- goto put_node; ++ goto put_device; + } + + phys = gen_pool_virt_to_phys(ocram_pool, ocram_base); +@@ -318,6 +318,8 @@ static int __init imx_suspend_alloc_ocram( + if (virt_out) + *virt_out = virt; + ++put_device: ++ put_device(&pdev->dev); + put_node: + of_node_put(node); + +diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c +index dd9eb3f14f45..6da26692f2fd 100644 +--- a/arch/arm/mach-imx/pm-imx6.c ++++ b/arch/arm/mach-imx/pm-imx6.c +@@ -481,14 +481,14 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) + if (!ocram_pool) { + pr_warn("%s: ocram pool unavailable!\n", __func__); + ret = -ENODEV; +- goto put_node; ++ goto put_device; + } + + ocram_base = gen_pool_alloc(ocram_pool, MX6Q_SUSPEND_OCRAM_SIZE); + if (!ocram_base) { + pr_warn("%s: unable to alloc ocram!\n", __func__); + ret = -ENOMEM; +- goto put_node; ++ goto put_device; + } + + ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base); +@@ -511,7 +511,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) + ret = imx6_pm_get_base(&pm_info->mmdc_base, socdata->mmdc_compat); + if (ret) { + pr_warn("%s: failed to get mmdc base %d!\n", __func__, ret); +- goto put_node; ++ goto put_device; + } + + ret = imx6_pm_get_base(&pm_info->src_base, socdata->src_compat); +@@ -558,7 +558,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) + &imx6_suspend, + MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info)); + +- goto put_node; ++ goto put_device; + + pl310_cache_map_failed: + iounmap(pm_info->gpc_base.vbase); +@@ -568,6 +568,8 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) + iounmap(pm_info->src_base.vbase); + src_map_failed: + iounmap(pm_info->mmdc_base.vbase); ++put_device: ++ put_device(&pdev->dev); + put_node: + of_node_put(node); + +diff --git a/arch/arm/mach-imx/suspend-imx53.S b/arch/arm/mach-imx/suspend-imx53.S +index 5ed078ad110a..f12d24104075 100644 +--- a/arch/arm/mach-imx/suspend-imx53.S ++++ b/arch/arm/mach-imx/suspend-imx53.S +@@ -33,11 +33,11 @@ + * ^ + * ^ + * imx53_suspend code +- * PM_INFO structure(imx53_suspend_info) ++ * PM_INFO structure(imx5_cpu_suspend_info) + * ======================== low address ======================= + */ + +-/* Offsets of members of struct imx53_suspend_info */ ++/* Offsets of members of struct imx5_cpu_suspend_info */ + #define SUSPEND_INFO_MX53_M4IF_V_OFFSET 0x0 + #define SUSPEND_INFO_MX53_IOMUXC_V_OFFSET 0x4 + #define 
SUSPEND_INFO_MX53_IO_COUNT_OFFSET 0x8 +diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S +index 7d84b617af48..99d2e296082c 100644 +--- a/arch/arm/mach-imx/suspend-imx6.S ++++ b/arch/arm/mach-imx/suspend-imx6.S +@@ -73,6 +73,7 @@ + #define MX6Q_CCM_CCR 0x0 + + .align 3 ++ .arm + + .macro sync_l2_cache + +diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig +index cefe44f6889b..ba124f8704fa 100644 +--- a/arch/arm/mach-integrator/Kconfig ++++ b/arch/arm/mach-integrator/Kconfig +@@ -3,6 +3,8 @@ menuconfig ARCH_INTEGRATOR + depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V6 + select ARM_AMBA + select COMMON_CLK_VERSATILE ++ select CMA ++ select DMA_CMA + select HAVE_TCM + select ICST + select MFD_SYSCON +@@ -34,14 +36,13 @@ config INTEGRATOR_IMPD1 + select ARM_VIC + select GPIO_PL061 + select GPIOLIB ++ select REGULATOR ++ select REGULATOR_FIXED_VOLTAGE + help + The IM-PD1 is an add-on logic module for the Integrator which + allows ARM(R) Ltd PrimeCells to be developed and evaluated. + The IM-PD1 can be found on the Integrator/PP2 platform. + +- To compile this driver as a module, choose M here: the +- module will be called impd1. +- + config INTEGRATOR_CM7TDMI + bool "Integrator/CM7TDMI core module" + depends on ARCH_INTEGRATOR_AP +diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c +index 84613abf35a3..79ff5b953431 100644 +--- a/arch/arm/mach-keystone/keystone.c ++++ b/arch/arm/mach-keystone/keystone.c +@@ -65,7 +65,7 @@ static void __init keystone_init(void) + static long long __init keystone_pv_fixup(void) + { + long long offset; +- phys_addr_t mem_start, mem_end; ++ u64 mem_start, mem_end; + + mem_start = memblock_start_of_DRAM(); + mem_end = memblock_end_of_DRAM(); +@@ -78,7 +78,7 @@ static long long __init keystone_pv_fixup(void) + if (mem_start < KEYSTONE_HIGH_PHYS_START || + mem_end > KEYSTONE_HIGH_PHYS_END) { + pr_crit("Invalid address space for memory (%08llx-%08llx)\n", +- (u64)mem_start, (u64)mem_end); ++ mem_start, mem_end); + return 0; + } + +diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c +index 6b6fda65fb3b..5eeecf83c9e6 100644 +--- a/arch/arm/mach-omap2/board-n8x0.c ++++ b/arch/arm/mach-omap2/board-n8x0.c +@@ -327,6 +327,7 @@ static int n8x0_mmc_get_cover_state(struct device *dev, int slot) + + static void n8x0_mmc_callback(void *data, u8 card_mask) + { ++#ifdef CONFIG_MMC_OMAP + int bit, *openp, index; + + if (board_is_n800()) { +@@ -344,7 +345,6 @@ static void n8x0_mmc_callback(void *data, u8 card_mask) + else + *openp = 0; + +-#ifdef CONFIG_MMC_OMAP + omap_mmc_notify_cover_event(mmc_device, index, *openp); + #else + pr_warn("MMC: notify cover event not available\n"); +diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c +index f989145480c8..bf236b7af8c1 100644 +--- a/arch/arm/mach-omap2/omap_device.c ++++ b/arch/arm/mach-omap2/omap_device.c +@@ -224,10 +224,12 @@ static int _omap_device_notifier_call(struct notifier_block *nb, + break; + case BUS_NOTIFY_BIND_DRIVER: + od = to_omap_device(pdev); +- if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) && +- pm_runtime_status_suspended(dev)) { ++ if (od) { + od->_driver_status = BUS_NOTIFY_BIND_DRIVER; +- pm_runtime_set_active(dev); ++ if (od->_state == OMAP_DEVICE_STATE_ENABLED && ++ pm_runtime_status_suspended(dev)) { ++ pm_runtime_set_active(dev); ++ } + } + break; + case BUS_NOTIFY_ADD_DEVICE: +diff --git a/arch/arm/mach-socfpga/pm.c b/arch/arm/mach-socfpga/pm.c 
+index c378ab0c2431..93f2245c9775 100644 +--- a/arch/arm/mach-socfpga/pm.c ++++ b/arch/arm/mach-socfpga/pm.c +@@ -60,14 +60,14 @@ static int socfpga_setup_ocram_self_refresh(void) + if (!ocram_pool) { + pr_warn("%s: ocram pool unavailable!\n", __func__); + ret = -ENODEV; +- goto put_node; ++ goto put_device; + } + + ocram_base = gen_pool_alloc(ocram_pool, socfpga_sdram_self_refresh_sz); + if (!ocram_base) { + pr_warn("%s: unable to alloc ocram!\n", __func__); + ret = -ENOMEM; +- goto put_node; ++ goto put_device; + } + + ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base); +@@ -78,7 +78,7 @@ static int socfpga_setup_ocram_self_refresh(void) + if (!suspend_ocram_base) { + pr_warn("%s: __arm_ioremap_exec failed!\n", __func__); + ret = -ENOMEM; +- goto put_node; ++ goto put_device; + } + + /* Copy the code that puts DDR in self refresh to ocram */ +@@ -92,6 +92,8 @@ static int socfpga_setup_ocram_self_refresh(void) + if (!socfpga_sdram_self_refresh_in_ocram) + ret = -EFAULT; + ++put_device: ++ put_device(&pdev->dev); + put_node: + of_node_put(np); + +diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c +index e01cbca196b5..a67fcf7a5643 100644 +--- a/arch/arm/mach-tegra/tegra.c ++++ b/arch/arm/mach-tegra/tegra.c +@@ -137,8 +137,8 @@ static const char * const tegra_dt_board_compat[] = { + }; + + DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)") +- .l2c_aux_val = 0x3c400001, +- .l2c_aux_mask = 0xc20fc3fe, ++ .l2c_aux_val = 0x3c400000, ++ .l2c_aux_mask = 0xc20fc3ff, + .smp = smp_ops(tegra_smp_ops), + .map_io = tegra_map_common_io, + .init_early = tegra_init_early, +diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c +index d1870c777c6e..3a465bfa7d4a 100644 +--- a/arch/arm/mm/cache-l2x0.c ++++ b/arch/arm/mm/cache-l2x0.c +@@ -1252,20 +1252,28 @@ static void __init l2c310_of_parse(const struct device_node *np, + + ret = of_property_read_u32(np, "prefetch-data", &val); + if (ret == 0) { +- if (val) ++ if (val) { + prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH; +- else ++ *aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH; ++ } else { + prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH; ++ *aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH; ++ } ++ *aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH; + } else if (ret != -EINVAL) { + pr_err("L2C-310 OF prefetch-data property value is missing\n"); + } + + ret = of_property_read_u32(np, "prefetch-instr", &val); + if (ret == 0) { +- if (val) ++ if (val) { + prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH; +- else ++ *aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH; ++ } else { + prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH; ++ *aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH; ++ } ++ *aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH; + } else if (ret != -EINVAL) { + pr_err("L2C-310 OF prefetch-instr property value is missing\n"); + } +diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S +index cb41b18eef0e..516fefc2d7bd 100644 +--- a/arch/arm/mm/proc-macros.S ++++ b/arch/arm/mm/proc-macros.S +@@ -4,6 +4,7 @@ + * VMA_VM_FLAGS + * VM_EXEC + */ ++#include + #include + #include + +@@ -34,7 +35,7 @@ + * act_mm - get current->active_mm + */ + .macro act_mm, rd +- bic \rd, sp, #8128 ++ bic \rd, sp, #(THREAD_SIZE - 1) & ~63 + bic \rd, \rd, #63 + ldr \rd, [\rd, #TI_TASK] + ldr \rd, [\rd, #TSK_ACTIVE_MM] +diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig +index 3265b8f86069..6ec01491735e 100644 +--- a/arch/arm/plat-samsung/Kconfig ++++ b/arch/arm/plat-samsung/Kconfig +@@ -242,6 +242,7 @@ config 
SAMSUNG_PM_DEBUG + bool "Samsung PM Suspend debug" + depends on PM && DEBUG_KERNEL + depends on DEBUG_EXYNOS_UART || DEBUG_S3C24XX_UART || DEBUG_S3C2410_UART ++ depends on DEBUG_LL && MMU + help + Say Y here if you want verbose debugging from the PM Suspend and + Resume code. See +diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c +index 3eb018fa1a1f..c3362ddd6c4c 100644 +--- a/arch/arm/probes/kprobes/core.c ++++ b/arch/arm/probes/kprobes/core.c +@@ -270,6 +270,7 @@ void __kprobes kprobe_handler(struct pt_regs *regs) + switch (kcb->kprobe_status) { + case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SSDONE: ++ case KPROBE_HIT_SS: + /* A pre- or post-handler probe got us here. */ + kprobes_inc_nmissed_count(p); + save_previous_kprobe(kcb); +@@ -278,6 +279,11 @@ void __kprobes kprobe_handler(struct pt_regs *regs) + singlestep(p, regs, kcb); + restore_previous_kprobe(kcb); + break; ++ case KPROBE_REENTER: ++ /* A nested probe was hit in FIQ, it is a BUG */ ++ pr_warn("Unrecoverable kprobe detected at %p.\n", ++ p->addr); ++ /* fall through */ + default: + /* impossible cases */ + BUG(); +diff --git a/arch/arm/probes/kprobes/test-thumb.c b/arch/arm/probes/kprobes/test-thumb.c +index b683b4517458..4254391f3906 100644 +--- a/arch/arm/probes/kprobes/test-thumb.c ++++ b/arch/arm/probes/kprobes/test-thumb.c +@@ -444,21 +444,21 @@ void kprobe_thumb32_test_cases(void) + "3: mvn r0, r0 \n\t" + "2: nop \n\t") + +- TEST_RX("tbh [pc, r",7, (9f-(1f+4))>>1,"]", ++ TEST_RX("tbh [pc, r",7, (9f-(1f+4))>>1,", lsl #1]", + "9: \n\t" + ".short (2f-1b-4)>>1 \n\t" + ".short (3f-1b-4)>>1 \n\t" + "3: mvn r0, r0 \n\t" + "2: nop \n\t") + +- TEST_RX("tbh [pc, r",12, ((9f-(1f+4))>>1)+1,"]", ++ TEST_RX("tbh [pc, r",12, ((9f-(1f+4))>>1)+1,", lsl #1]", + "9: \n\t" + ".short (2f-1b-4)>>1 \n\t" + ".short (3f-1b-4)>>1 \n\t" + "3: mvn r0, r0 \n\t" + "2: nop \n\t") + +- TEST_RRX("tbh [r",1,9f, ", r",14,1,"]", ++ TEST_RRX("tbh [r",1,9f, ", r",14,1,", lsl #1]", + "9: \n\t" + ".short (2f-1b-4)>>1 \n\t" + ".short (3f-1b-4)>>1 \n\t" +@@ -471,10 +471,10 @@ void kprobe_thumb32_test_cases(void) + + TEST_UNSUPPORTED("strexb r0, r1, [r2]") + TEST_UNSUPPORTED("strexh r0, r1, [r2]") +- TEST_UNSUPPORTED("strexd r0, r1, [r2]") ++ TEST_UNSUPPORTED("strexd r0, r1, r2, [r2]") + TEST_UNSUPPORTED("ldrexb r0, [r1]") + TEST_UNSUPPORTED("ldrexh r0, [r1]") +- TEST_UNSUPPORTED("ldrexd r0, [r1]") ++ TEST_UNSUPPORTED("ldrexd r0, r1, [r1]") + + TEST_GROUP("Data-processing (shifted register) and (modified immediate)") + +diff --git a/arch/arm/probes/uprobes/core.c b/arch/arm/probes/uprobes/core.c +index d1329f1ba4e4..b97230704b74 100644 +--- a/arch/arm/probes/uprobes/core.c ++++ b/arch/arm/probes/uprobes/core.c +@@ -207,7 +207,7 @@ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) + static struct undef_hook uprobes_arm_break_hook = { + .instr_mask = 0x0fffffff, + .instr_val = (UPROBE_SWBP_ARM_INSN & 0x0fffffff), +- .cpsr_mask = MODE_MASK, ++ .cpsr_mask = (PSR_T_BIT | MODE_MASK), + .cpsr_val = USR_MODE, + .fn = uprobe_trap_handler, + }; +@@ -215,7 +215,7 @@ static struct undef_hook uprobes_arm_break_hook = { + static struct undef_hook uprobes_arm_ss_hook = { + .instr_mask = 0x0fffffff, + .instr_val = (UPROBE_SS_ARM_INSN & 0x0fffffff), +- .cpsr_mask = MODE_MASK, ++ .cpsr_mask = (PSR_T_BIT | MODE_MASK), + .cpsr_val = USR_MODE, + .fn = uprobe_trap_handler, + }; +diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c +index 0ed01f2d5ee4..b4ec8d1b0bef 100644 +--- a/arch/arm/xen/p2m.c ++++ b/arch/arm/xen/p2m.c +@@ -91,10 +91,39 @@ int 
set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, + int i; + + for (i = 0; i < count; i++) { ++ struct gnttab_unmap_grant_ref unmap; ++ int rc; ++ + if (map_ops[i].status) + continue; +- set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT, +- map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT); ++ if (likely(set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT, ++ map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT))) ++ continue; ++ ++ /* ++ * Signal an error for this slot. This in turn requires ++ * immediate unmapping. ++ */ ++ map_ops[i].status = GNTST_general_error; ++ unmap.host_addr = map_ops[i].host_addr, ++ unmap.handle = map_ops[i].handle; ++ map_ops[i].handle = ~0; ++ if (map_ops[i].flags & GNTMAP_device_map) ++ unmap.dev_bus_addr = map_ops[i].dev_bus_addr; ++ else ++ unmap.dev_bus_addr = 0; ++ ++ /* ++ * Pre-populate the status field, to be recognizable in ++ * the log message below. ++ */ ++ unmap.status = 1; ++ ++ rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, ++ &unmap, 1); ++ if (rc || unmap.status != GNTST_okay) ++ pr_err_once("gnttab unmap failed: rc=%d st=%d\n", ++ rc, unmap.status); + } + + return 0; +diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms +index 0b3990b7db6f..8a9030c4e633 100644 +--- a/arch/arm64/Kconfig.platforms ++++ b/arch/arm64/Kconfig.platforms +@@ -41,6 +41,7 @@ config ARCH_BCM_IPROC + config ARCH_BERLIN + bool "Marvell Berlin SoC Family" + select DW_APB_ICTL ++ select DW_APB_TIMER_OF + select GPIOLIB + select PINCTRL + help +diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi +index 7d3a2acc6a55..2aa01eaa0cd1 100644 +--- a/arch/arm64/boot/dts/arm/juno-base.dtsi ++++ b/arch/arm64/boot/dts/arm/juno-base.dtsi +@@ -414,13 +414,13 @@ + clocks { + compatible = "arm,scpi-clocks"; + +- scpi_dvfs: scpi-dvfs { ++ scpi_dvfs: clocks-0 { + compatible = "arm,scpi-dvfs-clocks"; + #clock-cells = <1>; + clock-indices = <0>, <1>, <2>; + clock-output-names = "atlclk", "aplclk","gpuclk"; + }; +- scpi_clk: scpi-clk { ++ scpi_clk: clocks-1 { + compatible = "arm,scpi-variable-clocks"; + #clock-cells = <1>; + clock-indices = <3>; +@@ -428,7 +428,7 @@ + }; + }; + +- scpi_devpd: scpi-power-domains { ++ scpi_devpd: power-controller { + compatible = "arm,scpi-power-domains"; + num-domains = <2>; + #power-domain-cells = <1>; +diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts +index c528dd52ba2d..e43e804c42c3 100644 +--- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts ++++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts +@@ -64,7 +64,7 @@ + s2mps15_pmic@66 { + compatible = "samsung,s2mps15-pmic"; + reg = <0x66>; +- interrupts = <2 IRQ_TYPE_NONE>; ++ interrupts = <2 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&gpa0>; + pinctrl-names = "default"; + pinctrl-0 = <&pmic_irq>; +@@ -131,6 +131,7 @@ + regulator-min-microvolt = <700000>; + regulator-max-microvolt = <1150000>; + regulator-enable-ramp-delay = <125>; ++ regulator-always-on; + }; + + ldo8_reg: LDO8 { +diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi +index 6328a66ed97e..4c7c40ce5066 100644 +--- a/arch/arm64/boot/dts/exynos/exynos7.dtsi ++++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi +@@ -65,8 +65,10 @@ + }; + + psci { +- compatible = "arm,psci-0.2"; ++ compatible = "arm,psci"; + method = "smc"; ++ cpu_off = <0x84000002>; ++ cpu_on = <0xC4000003>; + }; + + soc: soc { +diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi 
b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi +index 97d331ec2500..cd8db85f7c11 100644 +--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi +@@ -177,6 +177,7 @@ + ranges = <0x0 0x00 0x1700000 0x100000>; + reg = <0x00 0x1700000 0x0 0x100000>; + interrupts = <0 75 0x4>; ++ dma-coherent; + + sec_jr0: jr@10000 { + compatible = "fsl,sec-v5.4-job-ring", +diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi +index b307d6b6357e..ad34be46c361 100644 +--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi ++++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi +@@ -914,7 +914,7 @@ + <&mmsys CLK_MM_DSI1_DIGITAL>, + <&mipi_tx1>; + clock-names = "engine", "digital", "hs"; +- phy = <&mipi_tx1>; ++ phys = <&mipi_tx1>; + phy-names = "dphy"; + status = "disabled"; + }; +diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi +index 87ef72bffd86..7501bfc2b0f1 100644 +--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi ++++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi +@@ -727,6 +727,7 @@ + <&tegra_car 128>, /* hda2hdmi */ + <&tegra_car 111>; /* hda2codec_2x */ + reset-names = "hda", "hda2hdmi", "hda2codec_2x"; ++ power-domains = <&pd_sor>; + status = "disabled"; + }; + +diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile +index eb1ed96a92f7..e58254cb22e6 100644 +--- a/arch/arm64/boot/dts/qcom/Makefile ++++ b/arch/arm64/boot/dts/qcom/Makefile +@@ -242,7 +242,8 @@ ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y) + sxr1130-usbc-mtp-overlay.dtbo \ + sxr1130-usbc-pm660a-mtp-overlay.dtbo \ + sxr1130-qrd-overlay.dtbo \ +- sxr1130-svr-overlay.dtbo ++ sxr1130-svr-overlay.dtbo \ ++ sxr1130-arglass-overlay.dtbo + + sdm670-cdp-overlay.dtbo-base := sdm670.dtb + sdm670-mtp-overlay.dtbo-base := sdm670.dtb +@@ -318,6 +319,7 @@ sxr1130-usbc-mtp-overlay.dtbo-base := sxr1130.dtb + sxr1130-usbc-pm660a-mtp-overlay.dtbo-base := sxr1130.dtb + sxr1130-qrd-overlay.dtbo-base := sxr1130.dtb + sxr1130-svr-overlay.dtbo-base := sxr1130.dtb ++sxr1130-arglass-overlay.dtbo-base := sxr1130.dtb + + else + dtb-$(CONFIG_ARCH_SDM670) += sdm670-rumi.dtb \ +@@ -390,7 +392,8 @@ dtb-$(CONFIG_ARCH_SDM670) += sdm670-rumi.dtb \ + sxr1130-usbc-mtp.dtb \ + sxr1130-usbc-pm660a-mtp.dtb \ + sxr1130-qrd.dtb \ +- sxr1130-svr.dtb ++ sxr1130-svr.dtb \ ++ sxr1130-arglass.dtb + endif + + ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y) +@@ -667,7 +670,8 @@ dtb-$(CONFIG_ARCH_MSM8909) += msm8909-pm8916-mtp.dtb \ + msm8905-qrd-skub.dtb \ + msm8905-qrd-sku3.dtb \ + msm8909-mtp.dtb \ +- msm8909-1gb-mtp.dtb ++ msm8909-1gb-mtp.dtb \ ++ msm8909-1gb-qrd-skue.dtb + + dtb-$(CONFIG_ARCH_SDM450) += sdm450-rcm.dtb \ + sdm450-cdp.dtb \ +diff --git a/arch/arm64/boot/dts/qcom/batterydata-qrd-skue-4v35-2000mah.dtsi b/arch/arm64/boot/dts/qcom/batterydata-qrd-skue-4v35-2000mah.dtsi +new file mode 100644 +index 000000000000..98d6bd9288f4 +--- /dev/null ++++ b/arch/arm64/boot/dts/qcom/batterydata-qrd-skue-4v35-2000mah.dtsi +@@ -0,0 +1,121 @@ ++/* Copyright (c) 2017,2020 The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the ++ * GNU General Public License for more details. ++ */ ++ ++qcom,qrd-skue-4v35-2000mah-data { ++ qcom,battery-type = "qrd_skue_4v35_2000mah"; ++ qcom,batt-id-kohm = <0>; ++ qcom,chg-term-ua = <100000>; ++ qcom,default-rbatt-mohm = <148>; ++ qcom,fcc-mah = <2000>; ++ qcom,max-voltage-uv = <4350000>; ++ qcom,rbatt-capacitive-mohm = <50>; ++ qcom,v-cutoff-uv = <3400000>; ++ qcom,flat-ocv-threshold-uv = <3800000>; ++ ++ qcom,fcc-temp-lut { ++ qcom,lut-col-legend = <(-20) 0 25 40 60>; ++ qcom,lut-data = <2084 2082 2080 2073 2064>; ++ }; ++ ++ qcom,ibat-acc-lut { ++ qcom,lut-col-legend = <(-20) 0 25>; ++ qcom,lut-row-legend = <0 250 500 1000>; ++ qcom,lut-data = <2020 2060 2038>, ++ <514 1883 2019>, ++ <87 1619 1999>, ++ <6 1036 1942>; ++ }; ++ ++ qcom,pc-temp-ocv-lut { ++ qcom,lut-col-legend = <(-20) 0 25 40 60>; ++ qcom,lut-row-legend = <100 95 90 85 80>, ++ <75 70 65 60 55>, ++ <50 45 40 35 30>, ++ <25 20 16 13 11>, ++ <10 9 8 7 6>, ++ <5 4 3 2 1>, ++ <0>; ++ qcom,lut-data = <4332 4331 4326 4322 4314>, ++ <4235 4255 4257 4254 4250>, ++ <4168 4195 4198 4196 4192>, ++ <4107 4142 4142 4140 4136>, ++ <4058 4090 4090 4088 4084>, ++ <3971 4039 4044 4039 4035>, ++ <3925 3963 3986 3992 3990>, ++ <3889 3927 3953 3953 3951>, ++ <3856 3892 3912 3914 3912>, ++ <3830 3858 3867 3870 3868>, ++ <3808 3829 3834 3835 3833>, ++ <3790 3806 3811 3811 3810>, ++ <3775 3788 3793 3794 3792>, ++ <3760 3775 3780 3779 3778>, ++ <3746 3764 3769 3765 3758>, ++ <3730 3749 3757 3749 3735>, ++ <3711 3730 3739 3730 3715>, ++ <3693 3715 3715 3708 3693>, ++ <3674 3706 3692 3684 3671>, ++ <3656 3699 3688 3679 3668>, ++ <3646 3695 3686 3678 3667>, ++ <3633 3692 3685 3677 3666>, ++ <3617 3686 3683 3676 3664>, ++ <3597 3680 3680 3673 3661>, ++ <3573 3667 3672 3665 3651>, ++ <3542 3643 3647 3641 3622>, ++ <3500 3603 3602 3598 3577>, ++ <3440 3545 3540 3539 3520>, ++ <3347 3463 3458 3463 3434>, ++ <3189 3340 3337 3345 3302>, ++ <3000 3106 3000 3000 3000>; ++ }; ++ ++ qcom,rbatt-sf-lut { ++ qcom,lut-col-legend = <(-20) 0 25 40 60>; ++ qcom,lut-row-legend = <100 95 90 85 80>, ++ <75 70 65 60 55>, ++ <50 45 40 35 30>, ++ <25 20 16 13 11>, ++ <10 9 8 7 6>, ++ <5 4 3 2 1>, ++ <0>; ++ qcom,lut-data = <1410 265 100 84 79>, ++ <1408 265 100 84 79>, ++ <1342 268 101 85 79>, ++ <1273 273 103 87 80>, ++ <1243 276 106 88 81>, ++ <1164 286 112 92 82>, ++ <1164 268 115 94 84>, ++ <1173 268 123 98 88>, ++ <1189 264 122 102 90>, ++ <1212 265 109 96 86>, ++ <1242 268 101 87 80>, ++ <1276 276 102 87 79>, ++ <1314 287 104 90 81>, ++ <1358 302 106 93 84>, ++ <1411 318 109 93 83>, ++ <1485 336 110 90 79>, ++ <1584 357 110 91 80>, ++ <1693 396 110 91 81>, ++ <1720 421 108 89 78>, ++ <1960 465 110 89 80>, ++ <2175 494 112 91 81>, ++ <2445 530 114 94 83>, ++ <2825 573 119 96 86>, ++ <3334 629 125 99 89>, ++ <4062 689 130 100 89>, ++ <5168 783 129 99 83>, ++ <6923 916 132 99 85>, ++ <10172 1160 145 105 90>, ++ <16692 1530 169 119 103>, ++ <104534 2708 230 145 150>, ++ <279842 43438 178115 106625 131239>; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-arglass-boe-dual-1080p-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-arglass-boe-dual-1080p-video.dtsi +new file mode 100644 +index 000000000000..ab2de32b61f5 +--- /dev/null ++++ b/arch/arm64/boot/dts/qcom/dsi-panel-arglass-boe-dual-1080p-video.dtsi +@@ -0,0 +1,99 @@ ++/* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++&mdss_mdp { ++ dsi_dual_arglass_boe_dual_1080p_video: qcom,mdss_dsi_arglass_cmd_boe { ++ qcom,mdss-dsi-panel-name = ++ "Dual ARGlass video mode dsi BOE panel without DSC"; ++ qcom,mdss-dsi-panel-type = "dsi_video_mode"; ++ qcom,mdss-dsi-virtual-channel-id = <0>; ++ qcom,mdss-dsi-stream = <0>; ++ qcom,mdss-dsi-panel-hdr-enabled; ++ qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000 ++ 17000 15500 30000 8000 3000>; ++ qcom,mdss-dsi-panel-peak-brightness = <4200000>; ++ qcom,mdss-dsi-panel-blackness-level = <3230>; ++ qcom,mdss-dsi-traffic-mode = "non_burst_sync_event"; ++ qcom,mdss-dsi-bllp-eof-power-mode; ++ qcom,mdss-dsi-bllp-power-mode; ++ qcom,mdss-dsi-lane-0-state; ++ qcom,mdss-dsi-lane-1-state; ++ qcom,mdss-dsi-lane-2-state; ++ qcom,mdss-dsi-lane-3-state; ++ qcom,mdss-dsi-dma-trigger = "trigger_sw"; ++ qcom,mdss-dsi-mdp-trigger = "none"; ++ qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>; ++ qcom,mdss-pan-physical-width-dimension = <74>; ++ qcom,mdss-pan-physical-height-dimension = <131>; ++ qcom,mdss-dsi-tx-eot-append; ++ qcom,mdss-dsi-color-order = "rgb_swap_rgb"; ++ qcom,mdss-dsi-underflow-color = <0xff>; ++ qcom,mdss-dsi-border-color = <0>; ++ qcom,mdss-dsi-bpp = <24>; ++ qcom,mdss-dsi-display-timings { ++ timing@0{ ++ qcom,mdss-dsi-panel-width = <1920>; ++ qcom,mdss-dsi-panel-height = <1080>; ++ qcom,mdss-dsi-h-front-porch = <56>; ++ qcom,mdss-dsi-h-back-porch = <72>; ++ qcom,mdss-dsi-h-pulse-width = <32>; ++ qcom,mdss-dsi-h-sync-skew = <0>; ++ qcom,mdss-dsi-v-back-porch = <8>; ++ qcom,mdss-dsi-v-front-porch = <8>; ++ qcom,mdss-dsi-v-pulse-width = <4>; ++ qcom,mdss-dsi-panel-framerate = <60>; ++ qcom,mdss-dsi-on-command = [ ++ 39 01 00 00 00 00 02 03 00 ++ 39 01 00 00 00 00 02 53 20 ++ /* Brightness control, max: 0xff */ ++ 39 01 00 00 00 00 03 51 7F 00 ++ 39 01 00 00 00 00 02 6B 00 ++ 39 01 00 00 00 00 05 80 01 E0 0E 11 ++ 39 01 00 00 00 00 08 81 02 20 00 0C 00 ++ 08 01 ++ 39 01 00 00 00 00 08 82 02 20 00 08 00 ++ 10 01 ++ /* Flip by 180 */ ++ 39 01 00 00 00 00 02 36 03 ++ 39 01 00 04 00 00 02 36 00 ++ 39 01 00 00 00 00 02 35 00 ++ /* CMD2 P1 */ ++ 39 01 00 00 00 00 03 F0 AA 11 ++ 39 01 00 00 00 00 0A C2 00 00 01 99 03 ++ FF 00 90 02 ++ /* CMD2 P2 */ ++ 39 01 00 00 00 00 03 F0 AA 12 ++ 39 01 00 00 00 00 02 B0 93 ++ 39 01 00 00 00 00 03 B6 14 14 ++ 39 01 00 00 00 00 03 B9 01 73 ++ 39 01 00 00 00 00 05 BA 01 20 01 83 ++ 39 01 00 00 00 00 03 FF 5A 81 ++ 39 01 00 00 00 00 02 65 14 ++ 39 01 00 00 00 00 12 F9 64 68 6C 70 74 ++ 78 7C 80 84 88 8C 90 95 9A A0 A5 B0 ++ 39 01 00 00 00 00 02 26 20 ++ 39 01 00 00 00 00 03 F0 AA 13 ++ 39 01 00 00 00 00 02 D5 00 ++ 05 01 00 00 cb 00 02 11 00 ++ 05 01 00 00 00 00 02 29 00 ++ ]; ++ ++ qcom,mdss-dsi-off-command = [05 01 00 00 0a 00 ++ 02 28 00 05 01 00 00 3c 00 02 10 00]; ++ ++ qcom,mdss-dsi-on-command-state = "dsi_lp_mode"; ++ qcom,mdss-dsi-off-command-state = "dsi_hs_mode"; ++ ++ }; ++ }; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-hx8379c-fwvga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-hx8379c-fwvga-video.dtsi +new file mode 
100644 +index 000000000000..4b43c40eaacc +--- /dev/null ++++ b/arch/arm64/boot/dts/qcom/dsi-panel-hx8379c-fwvga-video.dtsi +@@ -0,0 +1,90 @@ ++/* Copyright (c) 2015, 2017,2020 The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++&mdss_mdp { ++ dsi_hx8379c_fwvga_video: qcom,mdss_dsi_hx8379c_fwvga_video { ++ qcom,mdss-dsi-panel-name = "hx8379c fwvga video mode dsi panel"; ++ qcom,mdss-dsi-panel-type = "dsi_video_mode"; ++ qcom,mdss-dsi-panel-framerate = <60>; ++ qcom,mdss-dsi-virtual-channel-id = <0>; ++ qcom,mdss-dsi-stream = <0>; ++ qcom,mdss-dsi-panel-width = <480>; ++ qcom,mdss-dsi-panel-height = <854>; ++ qcom,mdss-dsi-h-front-porch = <60>; ++ qcom,mdss-dsi-h-back-porch = <60>; ++ qcom,mdss-dsi-h-pulse-width = <60>; ++ qcom,mdss-dsi-h-sync-skew = <0>; ++ qcom,mdss-dsi-v-back-porch = <6>; ++ qcom,mdss-dsi-v-front-porch = <6>; ++ qcom,mdss-dsi-v-pulse-width = <5>; ++ qcom,mdss-dsi-h-left-border = <0>; ++ qcom,mdss-dsi-h-right-border = <0>; ++ qcom,mdss-dsi-v-top-border = <0>; ++ qcom,mdss-dsi-v-bottom-border = <0>; ++ qcom,mdss-dsi-bpp = <24>; ++ qcom,mdss-dsi-color-order = <0>; ++ qcom,mdss-dsi-underflow-color = <0xff>; ++ qcom,mdss-dsi-border-color = <0>; ++ qcom,ulps-enabled; ++ qcom,mdss-dsi-on-command = [39 01 00 00 00 00 04 B9 FF 83 79 ++ 39 01 00 00 00 00 11 B1 44 1C 1C 31 31 50 D0 EE ++ 54 80 38 38 F8 32 22 22 ++ 39 01 00 00 00 00 0A B2 80 FE 0B 04 00 50 11 42 ++ 15 ++ 39 01 00 00 00 00 0B B4 69 6A 69 6A 69 6A 22 70 ++ 23 70 ++ 39 01 00 00 00 00 0B BA 41 83 A8 4D B2 24 00 00 ++ 50 90 ++ 39 01 00 00 00 00 05 C7 00 00 00 C0 ++ 39 01 00 00 00 00 02 CC 02 ++ 39 01 00 00 00 00 02 D2 77 ++ 39 01 00 00 00 00 1E D3 00 07 00 00 00 00 00 32 ++ 10 03 00 03 03 60 03 60 00 08 00 08 45 44 08 08 ++ 37 08 08 37 09 ++ 39 01 00 00 00 00 23 D5 18 18 19 19 18 18 20 21 ++ 24 25 18 18 18 18 00 01 04 05 02 03 06 07 18 18 ++ 18 18 18 18 18 18 18 18 00 00 ++ 39 01 00 00 00 00 21 D6 18 18 18 18 19 19 25 24 ++ 21 20 18 18 18 18 05 04 01 00 03 02 07 06 18 18 ++ 18 18 18 18 18 18 18 18 ++ 39 01 00 00 00 00 2B E0 00 04 0B 2F 39 3F 21 46 ++ 07 0A 0C 17 0F 13 16 14 15 07 11 13 30 00 04 0B ++ 2F 3A 3F 21 46 07 0A 0C 17 0F 14 16 14 15 07 11 ++ 13 16 ++ 39 01 00 00 00 00 03 B6 4E 4E ++ 05 01 00 00 78 00 02 11 00 ++ 05 01 00 00 14 00 02 29 00]; ++ qcom,mdss-dsi-off-command = [05 01 00 00 32 00 02 28 00 ++ 05 01 00 00 78 00 02 10 00]; ++ qcom,mdss-dsi-on-command-state = "dsi_lp_mode"; ++ qcom,mdss-dsi-off-command-state = "dsi_hs_mode"; ++ qcom,mdss-dsi-h-sync-pulse = <1>; ++ qcom,mdss-dsi-traffic-mode = "burst_mode"; ++ qcom,mdss-dsi-bllp-eof-power-mode; ++ qcom,mdss-dsi-bllp-power-mode; ++ qcom,mdss-dsi-lane-0-state; ++ qcom,mdss-dsi-lane-1-state; ++ qcom,mdss-dsi-panel-timings = [7B 21 1A 00 31 2D 1E 23 2B 03 04 ++ 00]; ++ qcom,mdss-dsi-t-clk-post = <0x20>; ++ qcom,mdss-dsi-t-clk-pre = <0x2C>; ++ qcom,mdss-dsi-bl-min-level = <1>; ++ qcom,mdss-dsi-bl-max-level = <4095>; ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm"; ++ qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>; ++ qcom,mdss-dsi-bl-pmic-bank-select = <0>; ++ qcom,mdss-dsi-dma-trigger = 
"trigger_sw"; ++ qcom,mdss-dsi-mdp-trigger = "none"; ++ qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 20>; ++ qcom,mdss-dsi-lp11-init; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-goertek-merlin-230mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-goertek-merlin-230mah.dtsi +new file mode 100644 +index 000000000000..b7095889688a +--- /dev/null ++++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-goertek-merlin-230mah.dtsi +@@ -0,0 +1,80 @@ ++/* Copyright (c) 2021, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++qcom,fg-gen3-batterydata-goertek-merlin-230mah { ++ qcom,max-voltage-uv = <4350000>; ++ qcom,fg-cc-cv-threshold-mv = <4340>; ++ qcom,nom-batt-capacity-mah = <230>; ++ qcom,batt-id-kohm = <0>; ++ qcom,battery-beta = <3440>; ++ qcom,battery-type = "fg-gen3-batterydata-goertek-merlin-230mah"; ++ qcom,checksum = <0x163C>; ++ qcom,gui-version = "PM660GUI - 0.0.0.45"; ++ qcom,fg-profile-data = [ ++ 2A 1E 8D EC ++ 6A 01 88 E2 ++ CF 1C 2E 02 ++ 5A 0D F6 0A ++ 2A 19 81 22 ++ F0 45 5E 52 ++ 63 00 00 00 ++ 12 00 00 00 ++ 00 00 AF CD ++ 44 BC 06 C2 ++ 3A 00 08 00 ++ 8A F2 1E 07 ++ C3 F4 2A EA ++ 1D EB 57 0B ++ EC 04 96 32 ++ 1C 06 09 20 ++ 27 00 14 00 ++ BD 1E 06 07 ++ 42 02 CD ED ++ BA 1C FC 02 ++ F7 15 35 12 ++ 95 18 82 23 ++ 6E 44 86 5A ++ 61 00 00 00 ++ 0F 00 00 00 ++ 00 00 B8 D5 ++ 8D CA 00 9C ++ 36 00 00 00 ++ 22 01 1E 07 ++ 72 ED 47 CC ++ F3 06 ED EB ++ 69 FA 37 FA ++ 99 33 CC FF ++ 07 10 00 00 ++ F6 00 99 45 ++ 36 00 40 00 ++ EC 05 0A FA ++ FF 00 00 00 ++ 00 00 00 00 ++ 00 00 00 00 ++ 00 00 00 00 ++ 00 00 00 00 ++ 00 00 00 00 ++ 00 00 00 00 ++ 00 00 00 00 ++ 00 00 00 00 ++ 00 00 00 00 ++ 00 00 00 00 ++ 00 00 00 00 ++ 00 00 00 00 ++ 00 00 00 00 ++ 00 00 00 00 ++ 00 00 00 00 ++ 00 00 00 00 ++ 00 00 00 00 ++ 00 00 00 00 ++ ]; ++}; +diff --git a/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi +index b43c87633365..4e9c90eef774 100644 +--- a/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2016-2017, 2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -207,6 +207,7 @@ + regulator-name = "gpu_gx_gdsc"; + reg = <0x509100c 0x4>; + qcom,poll-cfg-gdscr; ++ qcom,skip-disable-before-sw-enable; + status = "disabled"; + }; + +diff --git a/arch/arm64/boot/dts/qcom/msm8905-qrd-skub.dtsi b/arch/arm64/boot/dts/qcom/msm8905-qrd-skub.dtsi +index 4593eadac55a..e4b0fced3fd7 100644 +--- a/arch/arm64/boot/dts/qcom/msm8905-qrd-skub.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm8905-qrd-skub.dtsi +@@ -226,6 +226,18 @@ + qcom,fast-avg-setup = <0>; + qcom,vadc-thermal-node; + }; ++ ++ chan@36 { ++ qcom,vadc-thermal-node; ++ }; ++ ++ chan@32 { ++ qcom,vadc-thermal-node; ++ }; ++ ++ chan@3c { ++ qcom,vadc-thermal-node; ++ }; + }; + + &soc { +@@ -289,7 +301,8 @@ + <&afe_pcm_rx>, <&afe_pcm_tx>, + <&afe_proxy_rx>, <&afe_proxy_tx>, + <&incall_record_rx>, <&incall_record_tx>, +- <&incall_music_rx>, <&incall_music_2_rx>; ++ <&incall_music_rx>, <&incall_music_2_rx>, ++ <&proxy_rx>, <&proxy_tx>; + asoc-cpu-names = "msm-dai-q6-auxpcm.1", "msm-dai-q6-hdmi.8", + "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1", + "msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3", +@@ -304,7 +317,8 @@ + "msm-dai-q6-dev.224", "msm-dai-q6-dev.225", + "msm-dai-q6-dev.241", "msm-dai-q6-dev.240", + "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772", +- "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770"; ++ "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770", ++ "msm-dai-q6-dev.8194", "msm-dai-q6-dev.8195"; + asoc-codec = <&stub_codec>, <&msm_digital_codec>, + <&pmic_analog_codec>; + asoc-codec-names = "msm-stub-codec.1", "msm-dig-codec", +diff --git a/arch/arm64/boot/dts/qcom/msm8909-1gb-qrd-skue.dts b/arch/arm64/boot/dts/qcom/msm8909-1gb-qrd-skue.dts +new file mode 100644 +index 000000000000..a397d11dd96f +--- /dev/null ++++ b/arch/arm64/boot/dts/qcom/msm8909-1gb-qrd-skue.dts +@@ -0,0 +1,20 @@ ++/* Copyright (c) 2016,2020 The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++/dts-v1/; ++ ++#include "msm8909-qrd-skue.dtsi" ++ ++/ { ++ qcom,board-id= <0x1000b 0x9>; ++}; ++ +diff --git a/arch/arm64/boot/dts/qcom/msm8909-camera-sensor-skue.dtsi b/arch/arm64/boot/dts/qcom/msm8909-camera-sensor-skue.dtsi +new file mode 100644 +index 000000000000..8a67a01cfa63 +--- /dev/null ++++ b/arch/arm64/boot/dts/qcom/msm8909-camera-sensor-skue.dtsi +@@ -0,0 +1,129 @@ ++/* ++ * Copyright (c) 2017 The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++&soc { ++ SY7803_default: SY7803_default { ++ mux { ++ /* CLK, DATA */ ++ pins = "gpio31", "gpio32"; ++ function = "gpio"; ++ }; ++ ++ config { ++ pins = "gpio31", "gpio32"; ++ bias-disable; /* No PULL */ ++ drive-strength = <2>; /* 2 MA */ ++ }; ++ }; ++ ++ flash_SY7803:flashlight { ++ compatible = "qcom,leds-gpio-flash"; ++ status = "okay"; ++ pinctrl-names = "flash_default"; ++ pinctrl-0 = <&SY7803_default>; ++ qcom,flash-en = <&msm_gpio 31 0>; ++ qcom,flash-now = <&msm_gpio 32 0>; ++ qcom,op-seq = "flash_en", "flash_now"; ++ qcom,torch-seq-val = <1 0>; ++ qcom,flash-seq-val = <1 1>; ++ linux,name = "flashlight"; ++ linux,default-trigger = "flashlight-trigger"; ++ }; ++ ++ led_flash0: qcom,camera-led-flash { ++ cell-index = <0>; ++ compatible = "qcom,camera-led-flash"; ++ qcom,flash-type = <3>; ++ qcom,flash-source = <&flash_SY7803>; ++ qcom,torch-source = <&flash_SY7803>; ++ }; ++}; ++ ++&i2c_3 { ++ ++ qcom,camera@0 { ++ cell-index = <0>; ++ compatible = "qcom,camera"; ++ reg = <0x2>; ++ qcom,csiphy-sd-index = <0>; ++ qcom,csid-sd-index = <0>; ++ qcom,mount-angle = <90>; ++ qcom,led-flash-src = <&led_flash0>; ++ cam_vana-supply = <&pm8909_l17>; ++ cam_vio-supply = <&pm8909_l6>; ++ cam_vaf-supply = <&pm8909_l8>; ++ qcom,cam-vreg-name = "cam_vio", "cam_vana", ++ "cam_vaf"; ++ qcom,cam-vreg-type = <1 0 0>; ++ qcom,cam-vreg-min-voltage = <0 2800000 2850000>; ++ qcom,cam-vreg-max-voltage = <0 2850000 2900000>; ++ qcom,cam-vreg-op-mode = <0 80000 100000>; ++ pinctrl-names = "cam_default", "cam_suspend"; ++ pinctrl-0 = <&cam_sensor_mclk0_default ++ &cam_sensor_rear_default>; ++ pinctrl-1 = <&cam_sensor_mclk0_sleep &cam_sensor_rear_sleep>; ++ gpios = <&msm_gpio 26 0>, ++ <&msm_gpio 35 0>, ++ <&msm_gpio 34 0>; ++ qcom,gpio-reset = <1>; ++ qcom,gpio-standby = <2>; ++ qcom,gpio-req-tbl-num = <0 1 2>; ++ qcom,gpio-req-tbl-flags = <1 0 0>; ++ qcom,gpio-req-tbl-label = "CAMIF_MCLK", ++ "CAM_RESET1", ++ "CAM_STANDBY"; ++ qcom,sensor-position = <0>; ++ qcom,sensor-mode = <0>; ++ qcom,cci-master = <0>; ++ status = "ok"; ++ clocks = <&clock_gcc clk_mclk0_clk_src>, ++ <&clock_gcc clk_gcc_camss_mclk0_clk>; ++ clock-names = "cam_src_clk", "cam_clk"; ++ qcom,clock-rates = <24000000 0>; ++ }; ++ ++ qcom,camera@1 { ++ cell-index = <1>; ++ compatible = "qcom,camera"; ++ reg = <0x1>; ++ qcom,csiphy-sd-index = <0>; ++ qcom,csid-sd-index = <0>; ++ qcom,mount-angle = <90>; ++ cam_vana-supply = <&pm8909_l17>; ++ cam_vio-supply = <&pm8909_l6>; ++ qcom,cam-vreg-name = "cam_vio", "cam_vana"; ++ qcom,cam-vreg-type = <1 0>; ++ qcom,cam-vreg-min-voltage = <0 2800000>; ++ qcom,cam-vreg-max-voltage = <0 2850000>; ++ qcom,cam-vreg-op-mode = <0 80000>; ++ pinctrl-names = "cam_default", "cam_suspend"; ++ pinctrl-0 = <&cam_sensor_mclk1_default ++ &cam_sensor_front_default>; ++ pinctrl-1 = <&cam_sensor_mclk1_sleep &cam_sensor_front_sleep>; ++ gpios = <&msm_gpio 27 0>, ++ <&msm_gpio 28 0>, ++ <&msm_gpio 33 0>; ++ qcom,gpio-reset = <1>; ++ qcom,gpio-standby = <2>; ++ qcom,gpio-req-tbl-num = <0 1 2>; ++ qcom,gpio-req-tbl-flags = <1 0 0>; ++ qcom,gpio-req-tbl-label = "CAMIF_MCLK", ++ "CAM_RESET", ++ "CAM_STANDBY"; ++ qcom,cci-master = <0>; ++ status = "ok"; ++ clocks = <&clock_gcc clk_mclk1_clk_src>, ++ <&clock_gcc clk_gcc_camss_mclk1_clk>; ++ clock-names = "cam_src_clk", "cam_clk"; ++ qcom,clock-rates = <24000000 0>; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/qcom/msm8909-coresight.dtsi b/arch/arm64/boot/dts/qcom/msm8909-coresight.dtsi +index 9d35b0628db6..8822da898295 100644 +--- 
a/arch/arm64/boot/dts/qcom/msm8909-coresight.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm8909-coresight.dtsi +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 an +@@ -626,34 +626,6 @@ + clock-names = "apb_pclk"; + }; + +- /* Venus CTI */ +- cti_video_cpu0: cti@830000 { +- compatible = "arm,primecell"; +- arm,primecell-periphid = <0x0003b966>; +- +- reg = <0x830000 0x1000>; +- reg-names = "cti-base"; +- coresight-name = "coresight-cti-video-cpu0"; +- +- clocks = <&clock_rpm clk_qdss_clk>, +- <&clock_rpm clk_qdss_a_clk>; +- clock-names = "apb_pclk"; +- }; +- +- /* Pronto CTI */ +- cti_wcn_cpu0: cti@835000 { +- compatible = "arm,primecell"; +- arm,primecell-periphid = <0x0003b966>; +- +- reg = <0x835000 0x1000>; +- reg-names = "cti-base"; +- coresight-name = "coresight-cti-wcn-cpu0"; +- +- clocks = <&clock_rpm clk_qdss_clk>, +- <&clock_rpm clk_qdss_a_clk>; +- clock-names = "apb_pclk"; +- }; +- + wcn_etm0 { + compatible = "qcom,coresight-remote-etm"; + coresight-name = "coresight-wcn-etm0"; +diff --git a/arch/arm64/boot/dts/qcom/msm8909-mdss-panels.dtsi b/arch/arm64/boot/dts/qcom/msm8909-mdss-panels.dtsi +index 15f0c31d498a..35625963b471 100644 +--- a/arch/arm64/boot/dts/qcom/msm8909-mdss-panels.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm8909-mdss-panels.dtsi +@@ -12,6 +12,7 @@ + + #include "dsi-panel-390p-auo-cmd.dtsi" + #include "dsi-panel-hx8394d-720p-video.dtsi" ++#include "dsi-panel-hx8379c-fwvga-video.dtsi" + + &soc { + dsi_panel_pwr_supply: dsi_panel_pwr_supply { +diff --git a/arch/arm64/boot/dts/qcom/msm8909-pm8909.dtsi b/arch/arm64/boot/dts/qcom/msm8909-pm8909.dtsi +index 4f59e2d3fd7a..d60cf8917260 100644 +--- a/arch/arm64/boot/dts/qcom/msm8909-pm8909.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm8909-pm8909.dtsi +@@ -20,7 +20,6 @@ + }; + + &pm8909_vadc { +- #thermal-sensor-cells = <1>; + chan@0 { + label = "usb_in"; + reg = <0>; +@@ -118,7 +117,6 @@ + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,fast-avg-setup = <0>; +- qcom,vadc-thermal-node; + }; + + chan@32 { +@@ -130,7 +128,6 @@ + qcom,scale-function = <4>; + qcom,hw-settle-time = <2>; + qcom,fast-avg-setup = <0>; +- qcom,vadc-thermal-node; + }; + + chan@3c { +@@ -142,7 +139,6 @@ + qcom,scale-function = <4>; + qcom,hw-settle-time = <2>; + qcom,fast-avg-setup = <0>; +- qcom,vadc-thermal-node; + }; + }; + +diff --git a/arch/arm64/boot/dts/qcom/msm8909-qrd-skue.dtsi b/arch/arm64/boot/dts/qcom/msm8909-qrd-skue.dtsi +new file mode 100644 +index 000000000000..d2f4dfd9f46e +--- /dev/null ++++ b/arch/arm64/boot/dts/qcom/msm8909-qrd-skue.dtsi +@@ -0,0 +1,490 @@ ++/* Copyright (c) 2014-2018, The Linux Foundation. All rights ++ * reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#include "msm8909-qrd.dtsi" ++#include "msm8909-camera-sensor-skue.dtsi" ++ ++/ { ++ model = "Qualcomm Technologies, Inc. 
MSM8909 QRD SKUE"; ++ compatible = "qcom,msm8909-qrd", "qcom,msm8909", "qcom,qrd"; ++}; ++ ++&soc { ++ i2c@78b9000 { /* BLSP1 QUP5 */ ++ focaltech@38 { ++ compatible = "focaltech,5x06"; ++ reg = <0x38>; ++ interrupt-parent = <&msm_gpio>; ++ interrupts = <13 0x2>; ++ vdd-supply = <&pm8909_l17>; ++ vcc_i2c-supply = <&pm8909_l6>; ++ /* pins used by touchscreen */ ++ pinctrl-names = "pmx_ts_active","pmx_ts_suspend", ++ "pmx_ts_release"; ++ pinctrl-0 = <&ts_int_active &ts_reset_active>; ++ pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>; ++ pinctrl-2 = <&ts_release>; ++ focaltech,name = "ft6306"; ++ focaltech,family-id = <0x06>; ++ focaltech,reset-gpio = <&msm_gpio 12 0x0>; ++ focaltech,irq-gpio = <&msm_gpio 13 0x0>; ++ focaltech,display-coords = <0 0 480 854>; ++ focaltech,panel-coords = <0 0 480 950>; ++ focaltech,button-map= <139 102 158>; ++ focaltech,no-force-update; ++ focaltech,i2c-pull-up; ++ focaltech,group-id = <1>; ++ focaltech,hard-reset-delay-ms = <20>; ++ focaltech,soft-reset-delay-ms = <200>; ++ focaltech,num-max-touches = <2>; ++ focaltech,fw-delay-aa-ms = <30>; ++ focaltech,fw-delay-55-ms = <30>; ++ focaltech,fw-upgrade-id1 = <0x79>; ++ focaltech,fw-upgrade-id2 = <0x08>; ++ focaltech,fw-delay-readid-ms = <10>; ++ focaltech,fw-delay-era-flsh-ms = <2000>; ++ focaltech,fw-auto-cal; ++ focaltech,ignore-id-check; ++ }; ++ }; ++ ++ i2c@78b5000 { /* BLSP1 QUP1 */ ++ aw2013@45 { ++ compatible = "awinic,aw2013"; ++ reg = <0x45>; ++ vdd-supply = <&pm8909_l17>; ++ vcc-supply = <&pm8909_l6>; ++ ++ aw2013,red { ++ aw2013,name = "red"; ++ aw2013,id = <0>; ++ aw2013,max-brightness = <255>; ++ aw2013,max-current = <1>; ++ aw2013,rise-time-ms = <2>; ++ aw2013,hold-time-ms = <1>; ++ aw2013,fall-time-ms = <2>; ++ aw2013,off-time-ms = <1>; ++ }; ++ ++ aw2013,green { ++ aw2013,name = "green"; ++ aw2013,id = <1>; ++ aw2013,max-brightness = <255>; ++ aw2013,max-current = <1>; ++ aw2013,rise-time-ms = <2>; ++ aw2013,hold-time-ms = <1>; ++ aw2013,fall-time-ms = <2>; ++ aw2013,off-time-ms = <1>; ++ }; ++ ++ aw2013,blue { ++ aw2013,name = "blue"; ++ aw2013,id = <2>; ++ aw2013,max-brightness = <255>; ++ aw2013,max-current = <1>; ++ aw2013,rise-time-ms = <2>; ++ aw2013,hold-time-ms = <1>; ++ aw2013,fall-time-ms = <2>; ++ aw2013,off-time-ms = <1>; ++ }; ++ }; ++ }; ++ ++ gen-vkeys { ++ compatible = "qcom,gen-vkeys"; ++ label = "ft5x06_ts"; ++ qcom,disp-maxx = <480>; ++ qcom,disp-maxy = <854>; ++ qcom,panel-maxx = <480>; ++ qcom,panel-maxy = <946>; ++ qcom,key-codes = <139 172 158>; ++ qcom,y-offset = <0>; ++ }; ++ ++ i2c@78b5000 { /* BLSP1 QUP1 */ ++ liteon@23 { ++ compatible = "liteon,ltr553"; ++ reg = <0x23>; ++ vdd-supply = <&pm8909_l17>; ++ vio-supply = <&pm8909_l6>; ++ interrupt-parent = <&msm_gpio>; ++ interrupts = <94 0x2002>; ++ pinctrl-names = "default","sleep"; ++ pinctrl-0 = <<r553_default>; ++ pinctrl-1 = <<r553_sleep>; ++ liteon,irq-gpio = <&msm_gpio 94 0x2002>; ++ liteon,als-ps-persist = <0>; ++ liteon,ps-led = <0x7f>; ++ liteon,ps-pulses = <4>; ++ liteon,wakeup-threshold = <4>; ++ liteon,als-integration-time = <0>; ++ liteon,ps-distance-table = \ ++ <1376 566 343 287 200 170 155>; ++ }; ++ ++ bosch@18 { /* Accelerometer sensor */ ++ compatible = "bosch,bma2x2"; ++ reg = <0x18>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&bma2x2_int1_default &bma2x2_int2_default>; ++ interrupt-parent = <&msm_gpio>; ++ interrupts = <96 0x2002>; ++ vdd-supply = <&pm8909_l17>; ++ vio-supply = <&pm8909_l6>; ++ bosch,init-interval = <200>; ++ bosch,place = <4>; ++ bosch,gpio-int1 = <&msm_gpio 96 0x2002>; ++ 
bosch,gpio-int2 = <&msm_gpio 65 0x2002>; ++ }; ++ }; ++ ++ hall { ++ compatible = "hall-switch"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&hall_sensor_int_default>; ++ interrupt-parent = <&msm_gpio>; ++ interrupts = <36 0x2003>; ++ vddio-supply = <&pm8909_l5>; ++ linux,gpio-int = <&msm_gpio 36 0x1>; ++ linux,wakeup; ++ linux,min-uv = <1650000>; ++ linux,max-uv = <3300000>; ++ }; ++ ++ sound { ++ compatible = "qcom,msm8952-audio-codec"; ++ qcom,model = "msm8909-skue-snd-card"; ++ reg = <0x7702000 0x4>, ++ <0x7702004 0x4>, ++ <0x7702008 0x4>; ++ reg-names = ++ "csr_gp_io_mux_mic_ctl", ++ "csr_gp_io_mux_spkr_ctl", ++ "csr_gp_io_lpaif_pri_pcm_pri_mode_muxsel"; ++ qcom,msm-snd-card-id = <0>; ++ qcom,msm-codec-type = "internal"; ++ qcom,msm-ext-pa = "primary"; ++ qcom,msm-mclk-freq = <9600000>; ++ qcom,msm-mbhc-hphl-swh = <1>; ++ qcom,msm-mbhc-gnd-swh = <0>; ++ qcom,msm-hs-micbias-type = "internal"; ++ qcom,msm-micbias1-ext-cap; ++ qcom,msm-micbias2-ext-cap; ++ qcom,audio-routing = ++ "RX_BIAS", "MCLK", ++ "SPK_RX_BIAS", "MCLK", ++ "INT_LDO_H", "MCLK", ++ "MIC BIAS Internal1", "Handset Mic", ++ "MIC BIAS Internal2", "Headset Mic", ++ "AMIC1", "MIC BIAS Internal1", ++ "AMIC2", "MIC BIAS Internal2"; ++ qcom,msm-gpios = ++ "pri_i2s", ++ "us_eu_gpio"; ++ qcom,pinctrl-names = ++ "all_off", ++ "pri_i2s_act", ++ "us_eu_gpio_act", ++ "pri_i2s_us_eu_gpio_act"; ++ pinctrl-names = ++ "all_off", ++ "pri_i2s_act", ++ "us_eu_gpio_act", ++ "pri_i2s_us_eu_gpio_act"; ++ pinctrl-0 = <&cdc_pdm_lines_sus &cross_conn_det_sus ++ &vdd_spkdrv_sus>; ++ pinctrl-1 = <&cdc_pdm_lines_act &cross_conn_det_sus ++ &vdd_spkdrv_act>; ++ pinctrl-2 = <&cdc_pdm_lines_sus &cross_conn_det_act ++ &vdd_spkdrv_sus>; ++ pinctrl-3 = <&cdc_pdm_lines_act &cross_conn_det_act ++ &vdd_spkdrv_act>; ++ asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>, ++ <&loopback>, <&compress>, <&hostless>, ++ <&afe>, <&lsm>, <&routing>, <&lpa>, ++ <&voice_svc>; ++ asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1", ++ "msm-pcm-dsp.2", "msm-voip-dsp", ++ "msm-pcm-voice", "msm-pcm-loopback", ++ "msm-compress-dsp", "msm-pcm-hostless", ++ "msm-pcm-afe", "msm-lsm-client", ++ "msm-pcm-routing", "msm-pcm-lpa", ++ "msm-voice-svc"; ++ asoc-cpu = <&dai_pri_auxpcm>, <&dai_hdmi>, ++ <&dai_mi2s0>, <&dai_mi2s1>, ++ <&dai_mi2s2>, <&dai_mi2s3>, ++ <&dai_mi2s4>, <&dai_mi2s5>, ++ <&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>, ++ <&sb_3_rx>, <&sb_3_tx>, <&sb_4_rx>, <&sb_4_tx>, ++ <&bt_sco_rx>, <&bt_sco_tx>, ++ <&int_fm_rx>, <&int_fm_tx>, ++ <&afe_pcm_rx>, <&afe_pcm_tx>, ++ <&afe_proxy_rx>, <&afe_proxy_tx>, ++ <&incall_record_rx>, <&incall_record_tx>, ++ <&incall_music_rx>, <&incall_music_2_rx>; ++ asoc-cpu-names = "msm-dai-q6-auxpcm.1", "msm-dai-q6-hdmi.8", ++ "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1", ++ "msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3", ++ "msm-dai-q6-mi2s.5", "msm-dai-q6-mi2s.6", ++ "msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385", ++ "msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387", ++ "msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391", ++ "msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393", ++ "msm-dai-q6-dev.12288", "msm-dai-q6-dev.12289", ++ "msm-dai-q6-dev.12292", "msm-dai-q6-dev.12293", ++ "msm-dai-q6-dev.224", "msm-dai-q6-dev.225", ++ "msm-dai-q6-dev.241", "msm-dai-q6-dev.240", ++ "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772", ++ "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770"; ++ asoc-codec = <&stub_codec>, <&pm8909_conga_dig>; ++ asoc-codec-names = "msm-stub-codec.1", "cajon_codec"; ++ }; ++}; ++ ++&spmi_bus { ++ qcom,pm8909@0 { ++ qcom,leds@a300 { ++ 
status = "okay"; ++ qcom,led_mpp_4 { ++ label = "mpp"; ++ linux,name = "button-backlight"; ++ linux,default-trigger = "none"; ++ qcom,default-state = "off"; ++ qcom,max-current = <40>; ++ qcom,current-setting = <5>; ++ qcom,id = <6>; ++ qcom,mode = "manual"; ++ qcom,source-sel = <1>; ++ qcom,mode-ctrl = <0x60>; ++ }; ++ }; ++ }; ++}; ++ ++&sdc2_cd_on { ++ /delete-property/ bias-pull-up; ++ bias-pull-down; ++}; ++ ++&sdc2_cd_off { ++ /delete-property/ bias-disable; ++ bias-pull-down; ++}; ++ ++&sdhc_2 { ++ qcom,nonremovable; ++ ++ interrupts = <0 1>; ++ interrupt-map = <0 &intc 0 125 0 ++ 1 &intc 0 221 0>; ++ interrupt-names = "hc_irq", "pwr_irq"; ++ /delete-property/ cd-gpios; ++ pinctrl-names = "active", "sleep"; ++ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on>; ++ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>; ++}; ++ ++&pm8909_mpps { ++ mpp@a000 { /* MPP 1 */ ++ /* VDD_PX */ ++ status = "disabled"; ++ }; ++ ++ mpp@a100 { /* MPP 2 */ ++ /* Backlight PWM */ ++ qcom,mode = <1>; /* Digital output */ ++ qcom,invert = <0>; /* Disable invert */ ++ qcom,src-sel = <4>; /* DTEST1 */ ++ qcom,vin-sel = <0>; /* VPH_PWR */ ++ qcom,master-en = <1>; /* Enable MPP */ ++ }; ++ ++ mpp@a200 { /* MPP 3 */ ++ /* VREF DAC */ ++ status = "disabled"; ++ }; ++ ++ mpp@a300 { /* MPP 4 */ ++ /* HR LED */ ++ status = "disabled"; ++ }; ++}; ++ ++&mdss_mdp { ++ qcom,mdss-pref-prim-intf = "dsi"; ++}; ++ ++&vendor_fstab { ++ status = "ok"; ++}; ++ ++&system_fstab { ++ status = "disabled"; ++}; ++ ++&dsi_hx8379c_fwvga_video { ++ qcom,cont-splash-enabled; ++ qcom,mdss-dsi-pwm-gpio = <&pm8909_mpps 2 0>; ++ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; ++}; ++ ++&msm_gpio { ++ pmx_mdss { ++ mdss_dsi_active: mdss_dsi_active { ++ mux { ++ pins = "gpio25"; ++ }; ++ config { ++ pins = "gpio25"; ++ }; ++ }; ++ mdss_dsi_suspend: mdss_dsi_suspend { ++ mux { ++ pins = "gpio25"; ++ }; ++ config { ++ pins = "gpio25"; ++ }; ++ }; ++ }; ++ pmx_mdss_te { ++ mdss_te_active: mdss_te_active { ++ mux { ++ pins = "gpio24"; ++ }; ++ config { ++ pins = "gpio24"; ++ }; ++ }; ++ mdss_te_suspend: mdss_te_suspend { ++ mux { ++ pins = "gpio24"; ++ }; ++ config { ++ pins = "gpio24"; ++ }; ++ }; ++ }; ++ bma2x2_int1_pin { ++ bma2x2_int1_default: int1_default { ++ mux { ++ pins = "gpio96"; ++ function = "gpio"; ++ }; ++ config { ++ pins = "gpio96"; ++ drive-dtrength = <6>; ++ bias-pull-up; ++ }; ++ }; ++ }; ++ bma2x2_int2_pin { ++ bma2x2_int2_default: int2_default { ++ mux { ++ pins = "gpio65"; ++ function = "gpio"; ++ }; ++ config { ++ pins = "gpio65"; ++ drive-dtrength = <6>; ++ bias-pull-up; ++ }; ++ }; ++ }; ++ ltr553_int_pin { ++ ltr553_default: ltr553_default { ++ mux { ++ pins = "gpio94"; ++ function = "gpio"; ++ }; ++ config { ++ pins = "gpio94"; ++ drive-dtrength = <6>; ++ bias-pull-up; ++ }; ++ }; ++ ltr553_sleep: ltr553_sleep { ++ mux { ++ pins = "gpio94"; ++ function = "gpio"; ++ }; ++ config { ++ pins = "gpio94"; ++ drive-dtrength = <2>; ++ bias-pull-down; ++ }; ++ }; ++ }; ++ hall_int_pin { ++ hall_sensor_int_default: hall_sensor_int_default { ++ mux { ++ pins = "gpio36"; ++ function = "gpio"; ++ }; ++ config { ++ pins = "gpio36"; ++ drive-dtrength = <6>; ++ bias-pull-up; ++ }; ++ }; ++ }; ++}; ++ ++&mdss_dsi0 { ++ qcom,dsi-pref-prim-pan = <&dsi_hx8379c_fwvga_video>; ++ pinctrl-names = "mdss_default", "mdss_sleep"; ++ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>; ++ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>; ++ ++ qcom,platform-reset-gpio = <&msm_gpio 25 0>; ++}; ++ ++&pm8909_vadc { ++ chan@30 { ++ 
qcom,scale-function = <13>; ++ }; ++}; ++ ++&pm8909_adc_tm { ++ chan@30 { ++ qcom,scale-function = <8>; ++ }; ++}; ++ ++&pm8909_chg { ++ qcom,vddmax-mv = <4350>; ++ qcom,vddsafe-mv = <4380>; ++ qcom,vinmin-mv = <4470>; ++ qcom,batt-hot-percentage = <25>; ++ qcom,batt-cold-percentage = <80>; ++ qcom,tchg-mins = <360>; ++ qcom,disable-vbatdet-based-recharge; ++ status = "okay"; ++}; ++ ++/ { ++ qrd_batterydata: qcom,battery-data { ++ qcom,rpull-up-kohm = <0>; ++ qcom,vref-batt-therm = <1800000>; ++ ++ #include "batterydata-qrd-skue-4v35-2000mah.dtsi" ++ }; ++}; ++ ++&pm8909_bms { ++ status = "okay"; ++ qcom,resume-soc = <95>; ++ qcom,use-reported-soc; ++ qcom,force-bms-active-on-charger; ++ qcom,battery-data = <&qrd_batterydata>; ++}; +diff --git a/arch/arm64/boot/dts/qcom/msm8909-qrd.dtsi b/arch/arm64/boot/dts/qcom/msm8909-qrd.dtsi +index 2ab9a8e31e13..4d5e3dadb8a9 100644 +--- a/arch/arm64/boot/dts/qcom/msm8909-qrd.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm8909-qrd.dtsi +@@ -149,3 +149,12 @@ + &android_usb { + qcom,android-usb-cdrom; + }; ++ ++&mdss_dsi { ++ vdda-supply = <&pm8909_l2>; ++ vddio-supply = <&pm8909_l6>; ++ qcom,mdss_dsi_ctrl0@1ac8000 { ++ vdd-supply = <&pm8909_l17>; ++ vddio-supply = <&pm8909_l6>; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/qcom/msm8909-vidc.dtsi b/arch/arm64/boot/dts/qcom/msm8909-vidc.dtsi +index 5eeaa210be62..b6239fbd3f8e 100644 +--- a/arch/arm64/boot/dts/qcom/msm8909-vidc.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm8909-vidc.dtsi +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2015-2018, 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -26,6 +26,8 @@ + qcom,clock-configs = <0x1 0x0 0x0 0x0>; + qcom,sw-power-collapse; + qcom,slave-side-cp; ++ qcom,qos-type-all-cores; ++ qcom,pm-qos-latency-us = <2>; + qcom,hfi = "venus"; + qcom,reg-presets = <0xe0020 0x05555556>, + <0xe0024 0x05555556>, +diff --git a/arch/arm64/boot/dts/qcom/msm8909.dtsi b/arch/arm64/boot/dts/qcom/msm8909.dtsi +index 7d968c2e79f1..57206388ab81 100644 +--- a/arch/arm64/boot/dts/qcom/msm8909.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm8909.dtsi +@@ -1665,6 +1665,16 @@ + qcom,msm-dai-q6-dev-id = <240>; + }; + ++ proxy_rx: qcom,msm-dai-q6-proxy-rx { ++ compatible = "qcom,msm-dai-q6-dev"; ++ qcom,msm-dai-q6-dev-id = <8194>; ++ }; ++ ++ proxy_tx: qcom,msm-dai-q6-proxy-tx { ++ compatible = "qcom,msm-dai-q6-dev"; ++ qcom,msm-dai-q6-dev-id = <8195>; ++ }; ++ + afe_loopback_tx: qcom,msm-dai-q6-afe-loopback-tx { + compatible = "qcom,msm-dai-q6-dev"; + qcom,msm-dai-q6-dev-id = <24577>; +diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi +index 10c83e11c272..1f9ff2cea215 100644 +--- a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi +@@ -542,7 +542,7 @@ + pins = "gpio63", "gpio64", "gpio65", "gpio66", + "gpio67", "gpio68"; + drive-strength = <8>; +- bias-pull-none; ++ bias-disable; + }; + }; + cdc_pdm_lines_sus: pdm_lines_off { +@@ -555,7 +555,7 @@ + pins = "gpio63", "gpio64", "gpio65", "gpio66", + "gpio67", "gpio68"; + drive-strength = <2>; +- bias-disable; ++ bias-pull-down; + }; + }; + }; +@@ -571,7 +571,7 @@ + pins = "gpio113", "gpio114", "gpio115", + "gpio116"; + drive-strength = <8>; +- bias-pull-none; ++ bias-disable; + }; + }; + +@@ -599,7 +599,7 @@ + pinconf { + pins = "gpio110"; + drive-strength = <8>; 
+- bias-pull-none; ++ bias-disable; + }; + }; + +@@ -625,7 +625,7 @@ + pinconf { + pins = "gpio116"; + drive-strength = <8>; +- bias-pull-none; ++ bias-disable; + }; + }; + ext_mclk_tlmm_lines_sus: mclk_lines_off { +@@ -653,7 +653,7 @@ + pins = "gpio112", "gpio117", "gpio118", + "gpio119"; + drive-strength = <8>; +- bias-pull-none; ++ bias-disable; + }; + }; + ext_sec_tlmm_lines_sus: tlmm_lines_off { +diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi +index 08b88f6791be..c2557cf43b3d 100644 +--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi +@@ -62,7 +62,7 @@ + no-map; + }; + +- reserved@8668000 { ++ reserved@86680000 { + reg = <0x0 0x86680000 0x0 0x80000>; + no-map; + }; +@@ -72,7 +72,7 @@ + no-map; + }; + +- rfsa@867e00000 { ++ rfsa@867e0000 { + reg = <0x0 0x867e0000 0x0 0x20000>; + no-map; + }; +@@ -715,7 +715,7 @@ + reg-names = "mdp_phys"; + + interrupt-parent = <&mdss>; +- interrupts = <0 0>; ++ interrupts = <0>; + + clocks = <&gcc GCC_MDSS_AHB_CLK>, + <&gcc GCC_MDSS_AXI_CLK>, +@@ -745,7 +745,7 @@ + reg-names = "dsi_ctrl"; + + interrupt-parent = <&mdss>; +- interrupts = <4 0>; ++ interrupts = <4>; + + assigned-clocks = <&gcc BYTE0_CLK_SRC>, + <&gcc PCLK0_CLK_SRC>; +diff --git a/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi b/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi +index 8efb7c1948d1..7fc81cab5a87 100644 +--- a/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi ++++ b/arch/arm64/boot/dts/qcom/qcs605-pm660-pm8005-regulator.dtsi +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2018, 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -134,7 +134,7 @@ + qcom,set = ; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1200000>; +- qcom,init-voltage = <1000000>; ++ qcom,init-voltage = <1128000>; + }; + }; + +diff --git a/arch/arm64/boot/dts/qcom/qcs605.dtsi b/arch/arm64/boot/dts/qcom/qcs605.dtsi +index b6bc964347b1..fad4b595dcd4 100644 +--- a/arch/arm64/boot/dts/qcom/qcs605.dtsi ++++ b/arch/arm64/boot/dts/qcom/qcs605.dtsi +@@ -435,3 +435,20 @@ + &kgsl_smmu { + /delete-property/qcom,deferred-regulator-disable-delay; + }; ++ ++&usb0 { ++ qcom,core-clk-rate = <200000000>; ++ qcom,msm-bus,vectors-KBps = ++ , ++ , ++ , ++ , ++ , ++ ; ++ ++ dwc3@a600000 { ++ snps,usb3-u1u2-disable; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/qcom/qm215.dtsi b/arch/arm64/boot/dts/qcom/qm215.dtsi +index 172af2692918..7dfafd726aa1 100644 +--- a/arch/arm64/boot/dts/qcom/qm215.dtsi ++++ b/arch/arm64/boot/dts/qcom/qm215.dtsi +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -258,3 +258,23 @@ + }; + }; + }; ++ ++&other_ext_mem { ++ reg = <0x0 0x86000000 0x0 0x800000>; ++}; ++ ++&modem_mem { ++ reg = <0x0 0x86800000 0x0 0x4900000>; ++}; ++ ++&adsp_fw_mem { ++ reg = <0x0 0x8b100000 0x0 0x1100000>; ++}; ++ ++&wcnss_fw_mem { ++ reg = <0x0 0x8c200000 0x0 0x700000>; ++}; ++ ++&qcom_seecom { ++ reg = <0x86000000 0x300000>; ++}; +diff --git a/arch/arm64/boot/dts/qcom/sdm429-bg-soc.dtsi b/arch/arm64/boot/dts/qcom/sdm429-bg-soc.dtsi +index 44bc91c73d23..7a420ffd52f1 100644 +--- a/arch/arm64/boot/dts/qcom/sdm429-bg-soc.dtsi ++++ b/arch/arm64/boot/dts/qcom/sdm429-bg-soc.dtsi +@@ -103,6 +103,8 @@ + + spi_3: spi@78b7000 { /* BLSP1 QUP3*/ + status = "ok"; ++ qcom,shared_ee; ++ + qcom,bg-spi { + compatible = "qcom,bg-spi"; + reg = <0>; +diff --git a/arch/arm64/boot/dts/qcom/sdm429-spyro.dtsi b/arch/arm64/boot/dts/qcom/sdm429-spyro.dtsi +index d83cc5f3dd3e..68e4515aa5f4 100644 +--- a/arch/arm64/boot/dts/qcom/sdm429-spyro.dtsi ++++ b/arch/arm64/boot/dts/qcom/sdm429-spyro.dtsi +@@ -51,4 +51,5 @@ + + &usb_otg { + qcom,enumeration-check-for-sdp; ++ qcom,hsusb-otg-mode = <1>; /* DEVICE mode */ + }; +diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-arglass.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-arglass.dtsi +new file mode 100644 +index 000000000000..abceb23c6ed0 +--- /dev/null ++++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-arglass.dtsi +@@ -0,0 +1,431 @@ ++/* ++ * Copyright (c) 2020, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++&soc { ++ led_flash_rear: qcom,camera-flash@0 { ++ cell-index = <0>; ++ reg = <0x00 0x00>; ++ compatible = "qcom,camera-flash"; ++ flash-source = <&pm660l_flash0 &pm660l_flash1>; ++ torch-source = <&pm660l_torch0 &pm660l_torch1>; ++ switch-source = <&pm660l_switch0>; ++ status = "disabled"; ++ }; ++ ++ led_flash_rear_aux: qcom,camera-flash@1 { ++ cell-index = <1>; ++ reg = <0x01 0x00>; ++ compatible = "qcom,camera-flash"; ++ flash-source = <&pm660l_flash0 &pm660l_flash1>; ++ torch-source = <&pm660l_torch0 &pm660l_torch1>; ++ switch-source = <&pm660l_switch0>; ++ status = "disabled"; ++ }; ++ ++ led_flash_front: qcom,camera-flash@2 { ++ cell-index = <2>; ++ reg = <0x02 0x00>; ++ compatible = "qcom,camera-flash"; ++ flash-source = <&pm660l_flash2>; ++ torch-source = <&pm660l_torch2>; ++ switch-source = <&pm660l_switch1>; ++ gpios = <&tlmm 0 0>, ++ <&tlmm 1 0>, ++ <&tlmm 23 0>; ++ gpio-req-tbl-num = <0 1 2>; ++ gpio-req-tbl-flags = <0 0 0>; ++ gpio-req-tbl-label = "TCKING_LED_3V3_EN", ++ "TCKING_LED_1V2_EN", ++ "TCKing_LED_EN"; ++ gpio-req-tbl-delay = <20 20 20>; ++ status = "ok"; ++ }; ++ ++ actuator_regulator: gpio-regulator@0 { ++ compatible = "regulator-fixed"; ++ reg = <0x00 0x00>; ++ regulator-name = "actuator_regulator"; ++ regulator-min-microvolt = <2800000>; ++ regulator-max-microvolt = <2800000>; ++ regulator-enable-ramp-delay = <100>; ++ enable-active-high; ++ gpio = <&tlmm 27 0>; ++ status = "disabled"; ++ }; ++ ++ camera_ldo: gpio-regulator@2 { ++ compatible = "regulator-fixed"; ++ reg = <0x02 0x00>; ++ regulator-name = "camera_ldo"; ++ regulator-min-microvolt = <1352000>; ++ regulator-max-microvolt = <1352000>; ++ regulator-enable-ramp-delay = <233>; ++ enable-active-high; ++ gpio = <&pm660l_gpios 3 0>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&camera_dvdd_en_default>; ++ vin-supply = <&pm660_s6>; ++ status = "disabled"; ++ }; ++ ++ camera_rear_ldo: gpio-regulator@1 { ++ compatible = "regulator-fixed"; ++ reg = <0x01 0x00>; ++ regulator-name = "camera_rear_ldo"; ++ regulator-min-microvolt = <1352000>; ++ regulator-max-microvolt = <1352000>; ++ regulator-enable-ramp-delay = <135>; ++ enable-active-high; ++ gpio = <&pm660l_gpios 4 0>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&camera_rear_dvdd_en_default>; ++ vin-supply = <&pm660_s6>; ++ status = "disabled"; ++ }; ++ ++ camera_vio_ldo: gpio-regulator@3 { ++ compatible = "regulator-fixed"; ++ reg = <0x03 0x00>; ++ regulator-name = "camera_vio_ldo"; ++ regulator-min-microvolt = <1800000>; ++ regulator-max-microvolt = <1800000>; ++ regulator-enable-ramp-delay = <233>; ++ enable-active-high; ++ gpio = <&tlmm 29 0>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&cam_sensor_rear_vio>; ++ vin-supply = <&pm660_s4>; ++ status = "disabled"; ++ }; ++ ++ camera_vana_ldo: gpio-regulator@4 { ++ compatible = "regulator-fixed"; ++ reg = <0x04 0x00>; ++ regulator-name = "camera_vana_ldo"; ++ regulator-min-microvolt = <2850000>; ++ regulator-max-microvolt = <2850000>; ++ regulator-enable-ramp-delay = <233>; ++ enable-active-high; ++ gpio = <&tlmm 8 0>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&cam_sensor_rear_vana>; ++ vin-supply = <&pm660l_bob>; ++ status = "disabled"; ++ }; ++}; ++ ++&cam_cci { ++ qcom,cam-res-mgr { ++ compatible = "qcom,cam-res-mgr"; ++ status = "ok"; ++ shared-gpios = <144>; ++ pinctrl-names = "cam_res_mgr_default", "cam_res_mgr_suspend"; ++ pinctrl-0 = <&cam_sensor_6dof_vdig_active>; ++ pinctrl-1 = <&cam_sensor_6dof_vdig_suspend>; ++ }; ++ ++ actuator_rear: qcom,actuator@0 { ++ cell-index = <0>; ++ reg 
= <0x0>; ++ compatible = "qcom,actuator"; ++ cci-master = <0>; ++ cam_vaf-supply = <&actuator_regulator>; ++ regulator-names = "cam_vaf"; ++ rgltr-cntrl-support; ++ rgltr-min-voltage = <2800000>; ++ rgltr-max-voltage = <2800000>; ++ rgltr-load-current = <0>; ++ }; ++ ++ actuator_rear_aux: qcom,actuator@1 { ++ cell-index = <1>; ++ reg = <0x1>; ++ compatible = "qcom,actuator"; ++ cci-master = <1>; ++ cam_vaf-supply = <&actuator_regulator>; ++ regulator-names = "cam_vaf"; ++ rgltr-cntrl-support; ++ rgltr-min-voltage = <2800000>; ++ rgltr-max-voltage = <2800000>; ++ rgltr-load-current = <0>; ++ }; ++ ++ actuator_front: qcom,actuator@2 { ++ cell-index = <2>; ++ reg = <0x2>; ++ compatible = "qcom,actuator"; ++ cci-master = <1>; ++ cam_vaf-supply = <&actuator_regulator>; ++ regulator-names = "cam_vaf"; ++ rgltr-cntrl-support; ++ rgltr-min-voltage = <2800000>; ++ rgltr-max-voltage = <2800000>; ++ rgltr-load-current = <0>; ++ }; ++ ++ ois_rear: qcom,ois@0 { ++ cell-index = <0>; ++ reg = <0x0>; ++ compatible = "qcom,ois"; ++ cci-master = <0>; ++ cam_vaf-supply = <&actuator_regulator>; ++ regulator-names = "cam_vaf"; ++ rgltr-cntrl-support; ++ rgltr-min-voltage = <2800000>; ++ rgltr-max-voltage = <2800000>; ++ rgltr-load-current = <0>; ++ status = "disabled"; ++ }; ++ ++ eeprom_rear: qcom,eeprom@0 { ++ cell-index = <0>; ++ reg = <0>; ++ compatible = "qcom,eeprom"; ++ cam_vio-supply = <&camera_vio_ldo>; ++ cam_vana-supply = <&camera_vana_ldo>; ++ cam_vdig-supply = <&camera_rear_ldo>; ++ cam_clk-supply = <&titan_top_gdsc>; ++ cam_vaf-supply = <&actuator_regulator>; ++ regulator-names = "cam_vio", "cam_vana", "cam_vdig", ++ "cam_clk", "cam_vaf"; ++ rgltr-cntrl-support; ++ rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>; ++ rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>; ++ rgltr-load-current = <0 80000 105000 0 0>; ++ gpio-no-mux = <0>; ++ pinctrl-names = "cam_default", "cam_suspend"; ++ pinctrl-0 = <&cam_sensor_mclk0_active ++ &cam_sensor_rear2_active>; ++ pinctrl-1 = <&cam_sensor_mclk0_suspend ++ &cam_sensor_rear2_suspend>; ++ gpios = <&tlmm 13 0>, ++ <&tlmm 28 0>; ++ gpio-reset = <1>; ++ gpio-req-tbl-num = <0 1>; ++ gpio-req-tbl-flags = <1 0>; ++ gpio-req-tbl-label = "CAMIF_MCLK0", ++ "CAM_RESET0"; ++ sensor-mode = <0>; ++ cci-master = <0>; ++ status = "disabled"; ++ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>; ++ clock-names = "cam_clk"; ++ clock-cntl-level = "turbo"; ++ clock-rates = <24000000>; ++ }; ++ ++ eeprom_rear_aux: qcom,eeprom@1 { ++ cell-index = <1>; ++ reg = <0x1>; ++ compatible = "qcom,eeprom"; ++ cam_vio-supply = <&camera_vio_ldo>; ++ cam_vana-supply = <&camera_vana_ldo>; ++ cam_vdig-supply = <&camera_ldo>; ++ cam_clk-supply = <&titan_top_gdsc>; ++ cam_vaf-supply = <&actuator_regulator>; ++ regulator-names = "cam_vdig", "cam_vio", "cam_vana", ++ "cam_clk", "cam_vaf"; ++ rgltr-cntrl-support; ++ rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>; ++ rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>; ++ rgltr-load-current = <105000 0 80000 0 0>; ++ gpio-no-mux = <0>; ++ pinctrl-names = "cam_default", "cam_suspend"; ++ pinctrl-0 = <&cam_sensor_mclk1_active ++ &cam_sensor_rear2_active>; ++ pinctrl-1 = <&cam_sensor_mclk1_suspend ++ &cam_sensor_rear2_suspend>; ++ gpios = <&tlmm 14 0>, ++ <&tlmm 28 0>; ++ gpio-reset = <1>; ++ gpio-req-tbl-num = <0 1>; ++ gpio-req-tbl-flags = <1 0>; ++ gpio-req-tbl-label = "CAMIF_MCLK1", ++ "CAM_RESET1"; ++ sensor-position = <0>; ++ sensor-mode = <0>; ++ cci-master = <1>; ++ status = "disabled"; ++ clock-names = "cam_clk"; ++ 
clock-cntl-level = "turbo"; ++ clock-rates = <24000000>; ++ }; ++ ++ eeprom_front: qcom,eeprom@2 { ++ cell-index = <2>; ++ reg = <0x2>; ++ compatible = "qcom,eeprom"; ++ cam_vio-supply = <&camera_vio_ldo>; ++ cam_vana-supply = <&camera_vana_ldo>; ++ cam_vdig-supply = <&camera_ldo>; ++ cam_clk-supply = <&titan_top_gdsc>; ++ cam_vaf-supply = <&actuator_regulator>; ++ regulator-names = "cam_vio", "cam_vana", "cam_vdig", ++ "cam_clk", "cam_vaf"; ++ rgltr-cntrl-support; ++ rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>; ++ rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>; ++ rgltr-load-current = <0 80000 105000 0 0>; ++ gpio-no-mux = <0>; ++ pinctrl-names = "cam_default", "cam_suspend"; ++ pinctrl-0 = <&cam_sensor_mclk2_active ++ &cam_sensor_front_active>; ++ pinctrl-1 = <&cam_sensor_mclk2_suspend ++ &cam_sensor_front_suspend>; ++ gpios = <&tlmm 15 0>, ++ <&tlmm 9 0>; ++ gpio-reset = <1>; ++ gpio-req-tbl-num = <0 1>; ++ gpio-req-tbl-flags = <1 0>; ++ gpio-req-tbl-label = "CAMIF_MCLK2", ++ "CAM_RESET2"; ++ sensor-mode = <0>; ++ cci-master = <1>; ++ status = "disabled"; ++ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>; ++ clock-names = "cam_clk"; ++ clock-cntl-level = "turbo"; ++ clock-rates = <24000000>; ++ }; ++ ++ /* RGB R RGB R- MCLK0(GPIO13), RESET(GPIO 27) CCI1*/ ++ qcom,cam-sensor@0 { ++ cell-index = <0>; ++ compatible = "qcom,cam-sensor"; ++ reg = <0x0>; ++ csiphy-sd-index = <0>; ++ sensor-position-roll = <270>; ++ sensor-position-pitch = <0>; ++ sensor-position-yaw = <180>; ++ cam_vio-supply = <&pm660_l9>; ++ cam_vana-supply = <&pm660_l16>; ++ cam_vdig-supply = <&pm660_l7>; ++ cam_clk-supply = <&titan_top_gdsc>; ++ regulator-names = "cam_vio", "cam_vana", "cam_vdig","cam_clk"; ++ rgltr-cntrl-support; ++ rgltr-min-voltage = <1800000 2700000 1200000 0>; ++ rgltr-max-voltage = <1800000 2700000 1200000 0>; ++ rgltr-load-current = <150000 600000 1200000 0>; ++ gpio-no-mux = <0>; ++ pinctrl-names = "cam_default", "cam_suspend"; ++ pinctrl-0 = <&cam_sensor_mclk0_active ++ &cam_sensor_rgbr_active>; ++ pinctrl-1 = <&cam_sensor_mclk0_suspend ++ &cam_sensor_rgbr_suspend>; ++ gpios = <&tlmm 13 0>, ++ <&tlmm 27 0>, ++ <&tlmm 29 0>; ++ gpio-reset = <1>; ++ gpio-vio = <2>; ++ gpio-req-tbl-num = <0 1 2>; ++ gpio-req-tbl-flags = <1 0 0>; ++ gpio-req-tbl-label = "CAMIF_MCLK", ++ "CAM_RESET", ++ "CAM_VIO"; ++ sensor-mode = <0>; ++ cci-master = <1>; ++ status = "ok"; ++ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>; ++ clock-names = "cam_clk"; ++ clock-cntl-level = "turbo"; ++ clock-rates = <24000000>; ++ }; ++ ++ /* 6DOF Left */ ++ qcom,cam-sensor@1 { ++ cell-index = <1>; ++ compatible = "qcom,cam-sensor"; ++ reg = <0x1>; ++ csiphy-sd-index = <1>; ++ sensor-position-roll = <270>; ++ sensor-position-pitch = <0>; ++ sensor-position-yaw = <180>; ++ cam_vio-supply = <&pm660_l11>; ++ cam_vana-supply = <&pm660l_l3>; ++ cam_vdig-supply = <&pm660_l7>; ++ cam_clk-supply = <&titan_top_gdsc>; ++ regulator-names = "cam_vio", "cam_vana", "cam_vdig", ++ "cam_clk"; ++ rgltr-cntrl-support; ++ rgltr-min-voltage = <1800000 2800000 1200000 0>; ++ rgltr-max-voltage = <1800000 2800000 1200000 0>; ++ rgltr-load-current = <300000 600000 300000 0>; ++ gpio-no-mux = <0>; ++ pinctrl-names = "cam_default", "cam_suspend"; ++ pinctrl-0 = <&cam_sensor_mclk1_active ++ &cam_sensor_6dof_active>; ++ pinctrl-1 = <&cam_sensor_mclk1_suspend ++ &cam_sensor_6dof_suspend>; ++ gpios = <&tlmm 14 0>, ++ <&tlmm 24 0>, ++ <&tlmm 144 0>; ++ gpio-reset = <1>; ++ gpio-vdig = <2>; ++ gpio-req-tbl-num = <0 1 2>; ++ gpio-req-tbl-flags = <1 0 0>; ++ 
gpio-req-tbl-label = "CAMIF_MCLK1", ++ "CAM_RESET1", ++ "CAM_VDIG1"; ++ sensor-mode = <0>; ++ cci-master = <0>; ++ status = "ok"; ++ clocks = <&clock_camcc CAM_CC_MCLK1_CLK>; ++ clock-names = "cam_clk"; ++ clock-cntl-level = "turbo"; ++ clock-rates = <24000000>; ++ }; ++ ++ /* 6DOF Right */ ++ qcom,cam-sensor@2 { ++ cell-index = <2>; ++ compatible = "qcom,cam-sensor"; ++ reg = <0x02>; ++ csiphy-sd-index = <2>; ++ sensor-position-roll = <270>; ++ sensor-position-pitch = <0>; ++ sensor-position-yaw = <0>; ++ cam_vio-supply = <&pm660_l11>; ++ cam_vana-supply = <&pm660l_l3>; ++ cam_vdig-supply = <&pm660_l7>; ++ cam_clk-supply = <&titan_top_gdsc>; ++ regulator-names = "cam_vio", "cam_vana", "cam_vdig", ++ "cam_clk"; ++ rgltr-cntrl-support; ++ rgltr-min-voltage = <1800000 2800000 1200000 0>; ++ rgltr-max-voltage = <1800000 2800000 1200000 0>; ++ rgltr-load-current = <150000 600000 1200000 0>; ++ gpio-no-mux = <0>; ++ pinctrl-names = "cam_default", "cam_suspend"; ++ pinctrl-0 = <&cam_sensor_mclk2_active ++ &cam_sensor_eyet_active>; ++ pinctrl-1 = <&cam_sensor_mclk2_suspend ++ &cam_sensor_eyet_suspend>; ++ gpios = <&tlmm 15 0>, ++ <&tlmm 25 0>, ++ <&tlmm 144 0>; ++ gpio-reset = <1>; ++ gpio-vdig = <2>; ++ gpio-req-tbl-num = <0 1 2>; ++ gpio-req-tbl-flags = <1 0 0>; ++ gpio-req-tbl-label = "CAMIF_MCLK2", ++ "CAM_RESET2", ++ "CAM_VDIG2"; ++ sensor-mode = <0>; ++ cci-master = <0>; ++ status = "ok"; ++ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>; ++ clock-names = "cam_clk"; ++ clock-cntl-level = "turbo"; ++ clock-rates = <24000000>; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-svr.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-svr.dtsi +index f4aaad2a345c..34b87ff3ad6a 100644 +--- a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-svr.dtsi ++++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-svr.dtsi +@@ -374,7 +374,7 @@ + rgltr-cntrl-support; + rgltr-min-voltage = <1800000 2800000 1800000 0>; + rgltr-max-voltage = <1800000 2800000 1800000 0>; +- rgltr-load-current = <300000 600000 300000 0>; ++ rgltr-load-current = <300000 600000 105000 0>; + gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk3_active +diff --git a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi +index 5324581504c5..9063f1ff320f 100644 +--- a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi ++++ b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2018, 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -725,10 +725,11 @@ + clocks = <&clock_gpucc GPU_CC_CX_GMU_CLK>, + <&clock_gpucc GPU_CC_CXO_CLK>, + <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>, +- <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>; ++ <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>, ++ <&clock_aop QDSS_CLK>; + + clock-names = "gmu_clk", "cxo_clk", "axi_clk", +- "memnoc_clk"; ++ "memnoc_clk", "apb_pclk"; + + qcom,gmu-pwrlevels { + #address-cells = <1>; +diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi +index 9747adca69e2..0bb1d8b26c3a 100644 +--- a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi ++++ b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2020, The Linux Foundation. 
All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -31,6 +31,7 @@ + #include "dsi-panel-hx8399-truly-singlemipi-fhd-video.dtsi" + #include "dsi-panel-hx83112a-truly-singlemipi-fhd-video.dtsi" + #include "dsi-panel-xrsmrtvwr-jdi-dual-video.dtsi" ++#include "dsi-panel-arglass-boe-dual-1080p-video.dtsi" + #include + + &soc { +@@ -623,6 +624,26 @@ + ibb-supply = <&lcdb_ncp_vreg>; + }; + ++ dsi_dual_arglass_boe_dual_1080p_video_display: qcom,dsi-display@22 { ++ compatible = "qcom,dsi-display"; ++ label = "dsi_dual_arglass_boe_dual_1080p_video_display"; ++ qcom,display-type = "primary"; ++ ++ qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>; ++ qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>; ++ clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>, ++ <&mdss_dsi0_pll PCLK_MUX_0_CLK>; ++ clock-names = "mux_byte_clk", "mux_pixel_clk"; ++ ++ pinctrl-names = "panel_active", "panel_suspend"; ++ pinctrl-0 = <&sde_dsi_active &sde_te_active>; ++ pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>; ++ qcom,platform-reset-gpio = <&tlmm 75 0>; ++ qcom,dsi-panel = <&dsi_dual_arglass_boe_dual_1080p_video>; ++ ++ vddio-supply = <&pm660_l13>; ++ }; ++ + sde_wb: qcom,wb-display@0 { + compatible = "qcom,wb-display"; + cell-index = <0>; +@@ -654,6 +675,28 @@ + connectors = <&sde_rscc &sde_wb &sde_dp>; + }; + ++&dsi_dual_arglass_boe_dual_1080p_video { ++ qcom,mdss-dsi-t-clk-post = <0x10>; ++ qcom,mdss-dsi-t-clk-pre = <0x3D>; ++ qcom,mdss-dsi-min-refresh-rate = <60>; ++ qcom,mdss-dsi-max-refresh-rate = <90>; ++ qcom,esd-check-enabled; ++ qcom,mdss-dsi-panel-status-check-mode = "te_signal_check"; ++ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a]; ++ qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode"; ++ qcom,mdss-dsi-panel-status-value = <0x9c>; ++ qcom,mdss-dsi-panel-on-check-value = <0x9c>; ++ qcom,mdss-dsi-panel-status-read-length = <1>; ++ qcom,mdss-dsi-display-timings { ++ timing@0{ ++ qcom,mdss-dsi-panel-phy-timings = [00 29 0A 0B 27 26 ++ 0A 0B 07 02 04 00]; ++ qcom,display-topology = <2 0 2>; ++ qcom,default-topology-index = <0>; ++ }; ++ }; ++}; ++ + &dsi_dual_xrsmrtvwr_jdi_video { + qcom,mdss-dsi-t-clk-post = <0x10>; + qcom,mdss-dsi-t-clk-pre = <0x3D>; +diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi +index d956c8f44307..52613085b76d 100644 +--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi ++++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -349,10 +349,11 @@ + clocks = <&clock_gpucc GPU_CC_CX_GMU_CLK>, + <&clock_gpucc GPU_CC_CXO_CLK>, + <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>, +- <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>; ++ <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>, ++ <&clock_aop QDSS_CLK>; + + clock-names = "gmu_clk", "cxo_clk", "axi_clk", +- "memnoc_clk"; ++ "memnoc_clk", "apb_pclk"; + + qcom,gmu-pwrlevels { + #address-cells = <1>; +diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi +index 7ebc146f4aa2..018e2bb2cf1f 100644 +--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi ++++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
++/* Copyright (c) 2017-2018, 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -51,6 +51,7 @@ + mem_client_3_size: qcom,client_3 { + compatible = "qcom,memshare-peripheral"; + qcom,peripheral-size = <0x500000>; ++ memory-region = <&memshare_mem>; + qcom,client-id = <1>; + qcom,allocate-on-request; + label = "modem"; +diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi +index 8bc98dd5654c..03c165741808 100644 +--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi ++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -655,6 +655,14 @@ + size = <0 0x2400000>; + }; + ++ memshare_mem: memshare_region { ++ compatible = "shared-dma-pool"; ++ no-map; ++ alloc-ranges = <0x0 0x00000000 0x0 0xdfffffff>; ++ alignment = <0x0 0x100000>; ++ size = <0x0 0x800000>; ++ }; ++ + /* global autoconfigured region for contiguous allocations */ + linux,cma { + compatible = "shared-dma-pool"; +diff --git a/arch/arm64/boot/dts/qcom/sdw3100-apq8009w-alpha.dts b/arch/arm64/boot/dts/qcom/sdw3100-apq8009w-alpha.dts +index 63638fc58cba..c9d17005d8c5 100644 +--- a/arch/arm64/boot/dts/qcom/sdw3100-apq8009w-alpha.dts ++++ b/arch/arm64/boot/dts/qcom/sdw3100-apq8009w-alpha.dts +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. ++ * Copyright (c) 2017-2018, 2020 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -128,6 +128,8 @@ + + spi@78B8000 { /* BLSP1 QUP4 */ + status = "ok"; ++ qcom,shared_ee; ++ + qcom,bg-spi { + compatible = "qcom,bg-spi"; + reg = <0>; +diff --git a/arch/arm64/boot/dts/qcom/sdw3100-apq8009w-wtp.dts b/arch/arm64/boot/dts/qcom/sdw3100-apq8009w-wtp.dts +index e23d1a6e19d4..21686c3762b6 100644 +--- a/arch/arm64/boot/dts/qcom/sdw3100-apq8009w-wtp.dts ++++ b/arch/arm64/boot/dts/qcom/sdw3100-apq8009w-wtp.dts +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. ++ * Copyright (c) 2017-2018, 2020 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -148,6 +148,8 @@ + + spi@78B8000 { /* BLSP1 QUP4 */ + status = "ok"; ++ qcom,shared_ee; ++ + qcom,bg-spi { + compatible = "qcom,bg-spi"; + reg = <0>; +diff --git a/arch/arm64/boot/dts/qcom/sdw3100-msm8909w-1gb-wtp.dts b/arch/arm64/boot/dts/qcom/sdw3100-msm8909w-1gb-wtp.dts +index 8d180443b087..35dae8d1e9b6 100644 +--- a/arch/arm64/boot/dts/qcom/sdw3100-msm8909w-1gb-wtp.dts ++++ b/arch/arm64/boot/dts/qcom/sdw3100-msm8909w-1gb-wtp.dts +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -149,6 +149,8 @@ + + spi@78B8000 { /* BLSP1 QUP4 */ + status = "ok"; ++ qcom,shared_ee; ++ + qcom,bg-spi { + compatible = "qcom,bg-spi"; + reg = <0>; +diff --git a/arch/arm64/boot/dts/qcom/sdw3100-msm8909w-wtp.dts b/arch/arm64/boot/dts/qcom/sdw3100-msm8909w-wtp.dts +index 607b3542d92d..b640e64cc77b 100644 +--- a/arch/arm64/boot/dts/qcom/sdw3100-msm8909w-wtp.dts ++++ b/arch/arm64/boot/dts/qcom/sdw3100-msm8909w-wtp.dts +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. ++ * Copyright (c) 2017-2018, 2020 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -148,6 +148,8 @@ + + spi@78B8000 { /* BLSP1 QUP4 */ + status = "ok"; ++ qcom,shared_ee; ++ + qcom,bg-spi { + compatible = "qcom,bg-spi"; + reg = <0>; +diff --git a/arch/arm64/boot/dts/qcom/sxr1130-arglass-overlay.dts b/arch/arm64/boot/dts/qcom/sxr1130-arglass-overlay.dts +new file mode 100644 +index 000000000000..3df0ea7efa11 +--- /dev/null ++++ b/arch/arm64/boot/dts/qcom/sxr1130-arglass-overlay.dts +@@ -0,0 +1,35 @@ ++/* ++ * Copyright (c) 2020, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++/dts-v1/; ++/plugin/; ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "sdm670-audio-overlay.dtsi" ++#include "sxr1130-arglass.dtsi" ++ ++/ { ++ model = "Qualcomm Technologies, Inc. SXR1130 AR GLASS"; ++ compatible = "qcom,sxr1130-arglass", "qcom,sxr1130", "qcom,xr-arglass"; ++ qcom,msm-id = <371 0x0>; ++ qcom,board-id = <8 0x0B>; ++ qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, ++ <0x0001001b 0x0102001a 0x0 0x0>, ++ <0x0001001b 0x0201011a 0x0 0x0>; ++}; ++ +diff --git a/arch/arm64/boot/dts/qcom/sxr1130-arglass-pinctrl-overlay.dtsi b/arch/arm64/boot/dts/qcom/sxr1130-arglass-pinctrl-overlay.dtsi +new file mode 100644 +index 000000000000..9551e73a4a7a +--- /dev/null ++++ b/arch/arm64/boot/dts/qcom/sxr1130-arglass-pinctrl-overlay.dtsi +@@ -0,0 +1,150 @@ ++/* Copyright (c) 2020, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++&cam_sensor_mclk0_active { ++ /* MCLK0 */ ++ mux { ++ pins = "gpio13"; ++ function = "cam_mclk"; ++ }; ++ ++ config { ++ pins = "gpio13"; ++ bias-disable; /* No PULL */ ++ drive-strength = <16>; /* 16 MA */ ++ }; ++}; ++ ++&cam_sensor_mclk0_suspend { ++ /* MCLK0 */ ++ mux { ++ pins = "gpio13"; ++ function = "cam_mclk"; ++ }; ++ ++ config { ++ pins = "gpio13"; ++ bias-pull-down; /* PULL DOWN */ ++ drive-strength = <16>; /* 16 MA */ ++ }; ++}; ++ ++&cam_sensor_mclk1_active { ++ /* MCLK1 */ ++ mux { ++ pins = "gpio14"; ++ function = "cam_mclk"; ++ }; ++ ++ config { ++ pins = "gpio14"; ++ bias-disable; /* No PULL */ ++ drive-strength = <16>; /* 16 MA */ ++ }; ++}; ++ ++&cam_sensor_mclk1_suspend { ++ /* MCLK1 */ ++ mux { ++ pins = "gpio14"; ++ function = "cam_mclk"; ++ }; ++ ++ config { ++ pins = "gpio14"; ++ bias-pull-down; /* PULL DOWN */ ++ drive-strength = <16>; /* 16 MA */ ++ }; ++}; ++ ++&cam_sensor_mclk2_active { ++ /* MCLK2 */ ++ mux { ++ pins = "gpio15"; ++ function = "cam_mclk"; ++ }; ++ ++ config { ++ pins = "gpio15"; ++ bias-disable; /* No PULL */ ++ drive-strength = <16>; /* 16 MA */ ++ }; ++}; ++ ++&cam_sensor_mclk2_suspend { ++ /* MCLK2 */ ++ mux { ++ pins = "gpio15"; ++ function = "cam_mclk"; ++ }; ++ ++ config { ++ pins = "gpio15"; ++ bias-pull-down; /* PULL DOWN */ ++ drive-strength = <16>; /* 16 MA */ ++ }; ++}; ++ ++&cam_sensor_mclk3_active { ++ /* MCLK3 */ ++ mux { ++ pins = "gpio16"; ++ function = "cam_mclk"; ++ }; ++ ++ config { ++ pins = "gpio16"; ++ bias-disable; /* No PULL */ ++ drive-strength = <16>; /* 16 MA */ ++ }; ++}; ++ ++&cam_sensor_mclk3_suspend { ++ /* MCLK3 */ ++ mux { ++ pins = "gpio16"; ++ function = "cam_mclk"; ++ }; ++ ++ config { ++ pins = "gpio16"; ++ bias-pull-down; /* PULL DOWN */ ++ drive-strength = <16>; /* 16 MA */ ++ }; ++}; ++ ++&sde_dsi_active { ++ mux { ++ pins = "gpio75", "gpio26"; ++ function = "gpio"; ++ }; ++ ++ config { ++ pins = "gpio75", "gpio26"; ++ bias-disable = <0>; /* PULL DOWN */ ++ drive-strength = <2>; /* 2 MA */ ++ }; ++}; ++ ++&sde_dsi_suspend { ++ mux { ++ pins = "gpio75", "gpio26"; ++ function = "gpio"; ++ }; ++ ++ config { ++ pins = "gpio75", "gpio26"; ++ bias-pull-down; /* PULL DOWN */ ++ drive-strength = <2>; /* 2 MA */ ++ }; ++}; ++ +diff --git a/arch/arm64/boot/dts/qcom/sxr1130-arglass.dts b/arch/arm64/boot/dts/qcom/sxr1130-arglass.dts +new file mode 100644 +index 000000000000..8b5cc580bb64 +--- /dev/null ++++ b/arch/arm64/boot/dts/qcom/sxr1130-arglass.dts +@@ -0,0 +1,72 @@ ++/* ++ * Copyright (c) 2020, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++ ++/dts-v1/; ++ ++#include "sxr1130.dtsi" ++#include "sdm670-audio-overlay.dtsi" ++#include "sxr1130-arglass.dtsi" ++ ++/ { ++ model = "Qualcomm Technologies, Inc. 
SXR1130 AR Glass"; ++ compatible = "qcom,sxr1130-arglass", "qcom,sxr1130", "qcom,xr-arglass"; ++ qcom,board-id = <8 0xB>; ++ qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, ++ <0x0001001b 0x0102001a 0x0 0x0>, ++ <0x0001001b 0x0201011a 0x0 0x0>; ++}; ++ ++&cam_cci { ++ /delete-node/ qcom,cam-sensor@1; ++ qcom,cam-sensor@1 { ++ cell-index = <1>; ++ compatible = "qcom,cam-sensor"; ++ reg = <0x1>; ++ csiphy-sd-index = <1>; ++ sensor-position-roll = <90>; ++ sensor-position-pitch = <0>; ++ sensor-position-yaw = <180>; ++ eeprom-src = <&eeprom_rear_aux>; ++ cam_vio-supply = <&camera_vio_ldo>; ++ cam_vana-supply = <&camera_vana_ldo>; ++ cam_vdig-supply = <&camera_ldo>; ++ cam_clk-supply = <&titan_top_gdsc>; ++ regulator-names = "cam_vdig", "cam_vio", "cam_vana", ++ "cam_clk"; ++ rgltr-cntrl-support; ++ rgltr-min-voltage = <1352000 1800000 2850000 0>; ++ rgltr-max-voltage = <1352000 1800000 2850000 0>; ++ rgltr-load-current = <105000 0 80000 0>; ++ gpio-no-mux = <0>; ++ pinctrl-names = "cam_default", "cam_suspend"; ++ pinctrl-0 = <&cam_sensor_mclk0_active ++ &cam_sensor_rear2_active>; ++ pinctrl-1 = <&cam_sensor_mclk0_suspend ++ &cam_sensor_rear2_suspend>; ++ gpios = <&tlmm 13 0>, ++ <&tlmm 28 0>; ++ gpio-reset = <1>; ++ gpio-req-tbl-num = <0 1>; ++ gpio-req-tbl-flags = <1 0>; ++ gpio-req-tbl-label = "CAMIF_MCLK0", ++ "CAM_RESET1"; ++ sensor-mode = <0>; ++ cci-master = <1>; ++ status = "ok"; ++ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>; ++ clock-names = "cam_clk"; ++ clock-cntl-level = "turbo"; ++ clock-rates = <24000000>; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/qcom/sxr1130-arglass.dtsi b/arch/arm64/boot/dts/qcom/sxr1130-arglass.dtsi +new file mode 100644 +index 000000000000..7d90c5576f9f +--- /dev/null ++++ b/arch/arm64/boot/dts/qcom/sxr1130-arglass.dtsi +@@ -0,0 +1,536 @@ ++/* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#include ++#include "sxr1130-arglass-pinctrl-overlay.dtsi" ++#include "sdm670-pmic-overlay.dtsi" ++#include "sdm670-sde-display.dtsi" ++#include "sdm670-camera-sensor-arglass.dtsi" ++#include ++ ++&qupv3_se10_i2c { ++#include "smb1355.dtsi" ++}; ++ ++&tlmm { ++ mag_rst_gpio_default: mag_rst_gpio_default { ++ mux { ++ pins = "gpio116"; ++ function = "gpio"; ++ }; ++ ++ config { ++ pins = "gpio116"; ++ drive-streangth = <8>; ++ bias-disable = <0>; ++ output-high; ++ }; ++ }; ++ ++ mag_rst_gpio_sleep: mag_rst_gpio_sleep { ++ mux { ++ pins = "gpio116"; ++ function = "gpio"; ++ }; ++ ++ config { ++ pins = "gpio116"; ++ drive-streangth = <8>; ++ bias-pull-down; ++ input-enable; ++ }; ++ }; ++ ++ arglass_psp_gpio: arglass_psp_gpio { ++ mux { ++ pins = "gpio78"; ++ function = "gpio"; ++ }; ++ ++ config { ++ pins = "gpio78"; ++ bias-disable; ++ input-enable; ++ }; ++ }; ++ ++ arglass_pv_gpio: arglass_pv_gpio { ++ mux { ++ pins = "gpio77"; ++ function = "gpio"; ++ }; ++ config { ++ pins = "gpio77"; ++ bias-pull-down; ++ drive-strength = <16>; ++ }; ++ }; ++ ++ arglass_nv_gpio: arglass_nv_gpio { ++ mux { ++ pins = "gpio76"; ++ function = "gpio"; ++ }; ++ config { ++ pins = "gpio76"; ++ bias-pull-down; ++ drive-strength = <16>; ++ }; ++ }; ++ ++ cam_sensor_6dof_vdig_active: cam_sensor_6dof_vdig_active { ++ /* VDIG Shared GPIO */ ++ mux { ++ pins = "gpio144"; ++ function = "gpio"; ++ }; ++ ++ config { ++ pins = "gpio144"; ++ bias-disable; /* No PULL */ ++ drive-strength = <2>; /* 2 MA */ ++ }; ++ }; ++ ++ cam_sensor_6dof_vdig_suspend: cam_sensor_6dof_vdig_suspend { ++ /* VDIG Shared GPIO */ ++ mux { ++ pins = "gpio144"; ++ function = "gpio"; ++ }; ++ ++ config { ++ pins = "gpio144"; ++ bias-pull-down; /* PULL DOWN */ ++ drive-strength = <2>; /* 2 MA */ ++ output-low; ++ }; ++ }; ++}; ++ ++&ufsphy_mem { ++ compatible = "qcom,ufs-phy-qmp-v3"; ++ ++ vdda-phy-supply = <&pm660l_l1>; /* 0.88v */ ++ vdda-pll-supply = <&pm660_l1>; /* 1.2v */ ++ vdda-phy-max-microamp = <62900>; ++ vdda-pll-max-microamp = <18300>; ++ ++ status = "ok"; ++}; ++ ++&ufshc_mem { ++ vdd-hba-supply = <&ufs_phy_gdsc>; ++ vdd-hba-fixed-regulator; ++ vcc-supply = <&pm660l_l4>; ++ vcc-voltage-level = <2960000 2960000>; ++ vccq2-supply = <&pm660_l8>; ++ vcc-max-microamp = <600000>; ++ vccq2-max-microamp = <600000>; ++ ++ qcom,vddp-ref-clk-supply = <&pm660_l1>; ++ qcom,vddp-ref-clk-max-microamp = <100>; ++ ++ status = "ok"; ++}; ++ ++&pm660l_switch1 { ++ pinctrl-names = "led_enable", "led_disable"; ++ pinctrl-0 = <&flash_led3_front_en>; ++ pinctrl-1 = <&flash_led3_front_dis>; ++}; ++ ++&qupv3_se9_2uart { ++ status = "disabled"; ++}; ++ ++&qupv3_se12_2uart { ++ status = "ok"; ++}; ++ ++&qupv3_se8_spi { ++ status = "disabled"; ++}; ++ ++&qupv3_se3_i2c { ++ status = "disabled"; ++ nq@28 { ++ compatible = "qcom,nq-nci"; ++ reg = <0x28>; ++ qcom,nq-irq = <&tlmm 44 0x00>; ++ qcom,nq-ven = <&tlmm 12 0x00>; ++ qcom,nq-firm = <&tlmm 43 0x00>; ++ qcom,nq-clkreq = <&pm660_gpios 4 0x00>; ++ qcom,nq-esepwr = <&tlmm 116 0x00>; ++ interrupt-parent = <&tlmm>; ++ interrupts = <44 0>; ++ interrupt-names = "nfc_irq"; ++ pinctrl-names = "nfc_active", "nfc_suspend"; ++ pinctrl-0 = <&nfc_int_active ++ &nfc_enable_active ++ &nfc_clk_default>; ++ pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>; ++ }; ++}; ++ ++&qupv3_se9_i2c { ++ status = "ok"; ++}; ++ ++ ++&qupv3_se10_i2c { ++ status = "ok"; ++}; ++ ++&qupv3_se6_4uart { ++ status = "ok"; ++}; ++ ++&sdhc_1 { ++ vdd-supply = <&pm660l_l4>; ++ qcom,vdd-voltage-level = <2960000 2960000>; ++ 
qcom,vdd-current-level = <0 570000>; ++ ++ vdd-io-supply = <&pm660_l8>; ++ qcom,vdd-io-always-on; ++ qcom,vdd-io-lpm-sup; ++ qcom,vdd-io-voltage-level = <1800000 1800000>; ++ qcom,vdd-io-current-level = <0 325000>; ++ ++ pinctrl-names = "active", "sleep"; ++ pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>; ++ pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>; ++ ++ status = "ok"; ++}; ++ ++&sdhc_2 { ++ vdd-supply = <&pm660l_l5>; ++ qcom,vdd-voltage-level = <2960000 2960000>; ++ qcom,vdd-current-level = <0 800000>; ++ ++ vdd-io-supply = <&pm660l_l2>; ++ qcom,vdd-io-voltage-level = <1800000 2960000>; ++ qcom,vdd-io-current-level = <0 22000>; ++ ++ pinctrl-names = "active", "sleep"; ++ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>; ++ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>; ++ ++ cd-gpios = <&tlmm 96 0x1>; ++ ++ status = "disabled"; ++}; ++ ++&vendor { ++ ar_batterydata: qcom,battery-data { ++ qcom,batt-id-range-pct = <15>; ++ #include "fg-gen3-batterydata-goertek-merlin-230mah.dtsi" ++ }; ++}; ++ ++&pm660_fg { ++ qcom,battery-data = <&ar_batterydata>; ++}; ++ ++&pm660_charger { ++ qcom,battery-data = <&ar_batterydata>; ++}; ++ ++&tlmm { ++ smb_int_default: smb_int_default { ++ mux { ++ pins = "gpio54"; ++ function = "gpio"; ++ }; ++ config { ++ pins = "gpio54"; ++ drive-strength = <2>; ++ bias-pull-up; ++ input-enable; ++ }; ++ }; ++}; ++ ++&smb1355_0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&smb_int_default ++ &smb_shutdown_default>; ++ interrupt-parent = <&tlmm>; ++ interrupts = <54 IRQ_TYPE_LEVEL_LOW>; ++ smb1355_charger_0: qcom,smb1355-charger@1000 { ++ io-channels = <&pm660_rradc 2>, ++ <&pm660_rradc 12>; ++ io-channel-names = "charger_temp", ++ "charger_temp_max"; ++ status ="ok"; ++ }; ++}; ++ ++&smb1355_1 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&smb_int_default ++ &smb_shutdown_default>; ++ interrupt-parent = <&tlmm>; ++ interrupts = <54 IRQ_TYPE_LEVEL_LOW>; ++ smb1355_charger_1: qcom,smb1355-charger@1000 { ++ io-channels = <&pm660_rradc 2>, ++ <&pm660_rradc 12>; ++ io-channel-names = "charger_temp", ++ "charger_temp_max"; ++ status ="ok"; ++ }; ++}; ++ ++&soc { ++ qcom,xr-smrtvwr-misc { ++ compatible = "qcom,xr-smrtvwr-misc"; ++ /* Manetometer gpio */ ++ mag_rst_gpio = <&tlmm 116 0>; ++ enable-active-high; ++ pinctrl-names = "default", "sleep"; ++ pinctrl-0 = <&mag_rst_gpio_default>; ++ pinctrl-1 = <&mag_rst_gpio_sleep>; ++ }; ++ ++ rpmh-regulator-ldoa14 { ++ pm660_l14: regulator-pm660-l14 { ++ qcom,init-mode = ; ++ }; ++ }; ++}; ++ ++&dsi_dual_arglass_boe_dual_1080p_video { ++ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; ++ qcom,mdss-dsi-bl-min-level = <1>; ++ qcom,mdss-dsi-bl-max-level = <255>; ++ /*Restricting max brightness to 127 */ ++ qcom,mdss-brightness-max-level = <255>; ++ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port"; ++ qcom,platform-reset-gpio = <&tlmm 77 0>; ++ qcom,1p8-en-gpio = <&tlmm 76 0>; /*AVDD GPIO */ ++ qcom,switch-power-gpio = <&tlmm 75 20>; /* AVEE GPIO */ ++ qcom,led-5v-en-gpio = <&tlmm 134 0>; ++ qcom,selab-gpio = <&tlmm 105 0>;/* High for DSI */ ++ qcom,oenab-gpio = <&tlmm 106 0>; /*Low for DSI */ ++ qcom,led-driver-en1-gpio = <&tlmm 110 0>; ++ qcom,led-driver-en2-gpio = <&tlmm 111 0>; ++}; ++ ++&dsi_dual_xrsmrtvwr_jdi_video { ++ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; ++ qcom,mdss-dsi-bl-min-level = <1>; ++ 
qcom,mdss-dsi-bl-max-level = <4095>; ++ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port"; ++ qcom,panel-mode-gpio = <&tlmm 79 0>; ++ qcom,platform-reset-gpio = <&tlmm 75 0>; ++ qcom,led-5v-en-gpio = <&tlmm 134 0>; ++ qcom,selab-gpio = <&tlmm 105 0>;/* High for DSI */ ++ qcom,oenab-gpio = <&tlmm 106 0>; /*Low for DSI */ ++ qcom,1p8-en-gpio = <&tlmm 2 0>; ++ qcom,led-driver-en1-gpio = <&tlmm 110 0>; ++ qcom,led-driver-en2-gpio = <&tlmm 111 0>; ++ qcom,switch-power-gpio = <&tlmm 112 0>; ++}; ++ ++ ++&dsi_dual_nt35597_truly_video { ++ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; ++ qcom,mdss-dsi-bl-min-level = <1>; ++ qcom,mdss-dsi-bl-max-level = <4095>; ++ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port"; ++ qcom,panel-mode-gpio = <&tlmm 76 0>; ++ qcom,platform-reset-gpio = <&tlmm 75 0>; ++}; ++ ++&dsi_dual_nt35597_truly_cmd { ++ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; ++ qcom,mdss-dsi-bl-min-level = <1>; ++ qcom,mdss-dsi-bl-max-level = <4095>; ++ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port"; ++ qcom,panel-mode-gpio = <&tlmm 76 0>; ++ qcom,platform-reset-gpio = <&tlmm 75 0>; ++ qcom,platform-te-gpio = <&tlmm 10 0>; ++}; ++ ++&dsi_nt35597_truly_dsc_video { ++ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; ++ qcom,mdss-dsi-bl-min-level = <1>; ++ qcom,mdss-dsi-bl-max-level = <4095>; ++ qcom,mdss-dsi-mode-sel-gpio-state = "single_port"; ++ qcom,panel-mode-gpio = <&tlmm 76 0>; ++ qcom,platform-reset-gpio = <&tlmm 75 0>; ++}; ++ ++&dsi_nt35597_truly_dsc_cmd { ++ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; ++ qcom,mdss-dsi-bl-min-level = <1>; ++ qcom,mdss-dsi-bl-max-level = <4095>; ++ qcom,mdss-dsi-mode-sel-gpio-state = "single_port"; ++ qcom,panel-mode-gpio = <&tlmm 76 0>; ++ qcom,platform-reset-gpio = <&tlmm 75 0>; ++ qcom,platform-te-gpio = <&tlmm 10 0>; ++}; ++ ++&dsi_sim_vid { ++ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; ++ qcom,platform-reset-gpio = <&tlmm 90 0>; ++}; ++ ++&dsi_dual_sim_vid { ++ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; ++ qcom,platform-reset-gpio = <&tlmm 75 0>; ++}; ++ ++&dsi_sim_cmd { ++ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; ++ qcom,platform-reset-gpio = <&tlmm 75 0>; ++}; ++ ++&dsi_dual_sim_cmd { ++ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; ++ qcom,platform-reset-gpio = <&tlmm 75 0>; ++}; ++ ++&dsi_sim_dsc_375_cmd { ++ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; ++ qcom,platform-reset-gpio = <&tlmm 75 0>; ++}; ++ ++&dsi_dual_sim_dsc_375_cmd { ++ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; ++ qcom,platform-reset-gpio = <&tlmm 75 0>; ++}; ++ ++&dsi_dual_nt35597_video { ++ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; ++ qcom,mdss-dsi-bl-min-level = <1>; ++ qcom,mdss-dsi-bl-max-level = <4095>; ++ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port"; ++ qcom,panel-mode-gpio = <&tlmm 76 0>; ++ qcom,platform-reset-gpio = <&tlmm 75 0>; ++}; ++ ++&dsi_dual_nt35597_cmd { ++ 
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; ++ qcom,mdss-dsi-bl-min-level = <1>; ++ qcom,mdss-dsi-bl-max-level = <4095>; ++ qcom,mdss-dsi-mode-sel-gpio-state = "dual_port"; ++ qcom,panel-mode-gpio = <&tlmm 76 0>; ++ qcom,platform-reset-gpio = <&tlmm 75 0>; ++ qcom,platform-te-gpio = <&tlmm 10 0>; ++}; ++ ++&dsi_rm67195_amoled_fhd_cmd { ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; ++ qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>; ++ qcom,platform-reset-gpio = <&tlmm 75 0>; ++ qcom,platform-te-gpio = <&tlmm 10 0>; ++}; ++ ++&dsi_nt35695b_truly_fhd_video { ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; ++ qcom,mdss-dsi-bl-min-level = <1>; ++ qcom,mdss-dsi-bl-max-level = <4095>; ++ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; ++ qcom,platform-reset-gpio = <&tlmm 75 0>; ++}; ++ ++&dsi_nt35695b_truly_fhd_cmd { ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; ++ qcom,mdss-dsi-bl-min-level = <1>; ++ qcom,mdss-dsi-bl-max-level = <4095>; ++ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; ++ qcom,platform-reset-gpio = <&tlmm 75 0>; ++ qcom,platform-te-gpio = <&tlmm 10 0>; ++}; ++ ++&dsi_hx8399_truly_cmd { ++ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; ++ qcom,mdss-dsi-bl-min-level = <1>; ++ qcom,mdss-dsi-bl-max-level = <4095>; ++ qcom,platform-reset-gpio = <&tlmm 75 0>; ++ qcom,platform-te-gpio = <&tlmm 10 0>; ++ qcom,panel-mode-gpio = <&tlmm 76 0>; ++ qcom,mdss-dsi-mode-sel-gpio-state = "single_port"; ++}; ++ ++&pm660l_wled { ++ status = "disabled"; ++ qcom,led-strings-list = [01 02]; ++}; ++ ++&mdss_mdp { ++ #cooling-cells = <2>; ++}; ++ ++&thermal_zones { ++ xo-therm-step { ++ status = "disabled"; ++ }; ++}; ++ ++&dsi_dual_arglass_boe_dual_1080p_video_display { ++ qcom,dsi-display-active; ++}; ++ ++&pm660_charger { ++ qcom,ufp-only-mode; ++ qcom,shutdown-enable; ++}; ++ ++&pm660_pdphy { ++ qcom,sxr1130-sxr-dp-sink; ++}; ++ ++&bluetooth { ++ qca,bt-disable; ++}; ++ ++&icnss { ++ qcom,icnss-disable; ++}; ++ ++&usb0 { ++ qcom,shutdown-enable; ++}; ++ ++&mdss_dsi_phy0 { ++ qcom,phy-supply-entries { ++ qcom,phy-supply-entry@0 { ++ qcom,supply-post-on-sleep = <20>; ++ }; ++ }; ++}; ++ ++&mdss_dsi_phy1 { ++ qcom,phy-supply-entries { ++ qcom,phy-supply-entry@0 { ++ qcom,supply-post-on-sleep = <20>; ++ }; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/qcom/sxr1130-svr.dtsi b/arch/arm64/boot/dts/qcom/sxr1130-svr.dtsi +index 16ed7648dc20..00d08fbccc14 100644 +--- a/arch/arm64/boot/dts/qcom/sxr1130-svr.dtsi ++++ b/arch/arm64/boot/dts/qcom/sxr1130-svr.dtsi +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -284,7 +284,7 @@ + + &dsi_dual_xrsmrtvwr_jdi_video { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; +- qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-dsi-mode-sel-gpio-state = "dual_port"; +@@ -344,7 +344,7 @@ + + &dsi_sim_vid { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; +- qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; ++ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,platform-reset-gpio = <&tlmm 90 0>; + }; + +@@ -435,8 +435,7 @@ + }; + + &pm660l_wled { +- status = "okay"; +- qcom,led-strings-list = [01 02]; ++ status = "disabled"; + }; + + &mdss_mdp { +@@ -532,3 +531,22 @@ + &pm660_pdphy { + qcom,sxr1130-sxr-dp-sink; + }; ++ ++/*Adding delays so that display power settles down properly*/ ++&mdss_dsi_phy0 { ++ qcom,phy-supply-entries { ++ qcom,phy-supply-entry@0 { ++ qcom,supply-pre-on-sleep = <40>; ++ qcom,supply-post-on-sleep = <40>; ++ }; ++ }; ++}; ++ ++&mdss_dsi_phy1 { ++ qcom,phy-supply-entries { ++ qcom,phy-supply-entry@0 { ++ qcom,supply-pre-on-sleep = <40>; ++ qcom,supply-post-on-sleep = <40>; ++ }; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi +index 7afbfb0f96a3..dd211dbdaaae 100644 +--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi ++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi +@@ -65,6 +65,9 @@ + i2c6 = &i2c6; + i2c7 = &i2c7; + i2c8 = &i2c8; ++ mmc0 = &sdio0; ++ mmc1 = &sdmmc; ++ mmc2 = &sdhci; + serial0 = &uart0; + serial1 = &uart1; + serial2 = &uart2; +diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi +index 54dc28351c8c..b9ff69798733 100644 +--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi ++++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi +@@ -175,7 +175,7 @@ + }; + + i2c0: i2c@ff020000 { +- compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10"; ++ compatible = "cdns,i2c-r1p14"; + status = "disabled"; + interrupt-parent = <&gic>; + interrupts = <0 17 4>; +@@ -185,7 +185,7 @@ + }; + + i2c1: i2c@ff030000 { +- compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10"; ++ compatible = "cdns,i2c-r1p14"; + status = "disabled"; + interrupt-parent = <&gic>; + interrupts = <0 18 4>; +diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig +index f1b3aa19524f..54b9b78a673d 100644 +--- a/arch/arm64/configs/cuttlefish_defconfig ++++ b/arch/arm64/configs/cuttlefish_defconfig +@@ -16,6 +16,7 @@ CONFIG_CGROUP_CPUACCT=y + CONFIG_CGROUP_SCHEDTUNE=y + CONFIG_MEMCG=y + CONFIG_MEMCG_SWAP=y ++CONFIG_BLK_CGROUP=y + CONFIG_RT_GROUP_SCHED=y + CONFIG_CGROUP_BPF=y + CONFIG_NAMESPACES=y +@@ -48,7 +49,6 @@ CONFIG_PCI_HOST_GENERIC=y + CONFIG_PREEMPT=y + CONFIG_HZ_100=y + # CONFIG_SPARSEMEM_VMEMMAP is not set +-CONFIG_KSM=y + CONFIG_ZSMALLOC=y + CONFIG_SECCOMP=y + CONFIG_PARAVIRT=y +diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig +index b8cf9736fe95..ceab0c9c24f9 100755 +--- a/arch/arm64/configs/sdm670-perf_defconfig ++++ b/arch/arm64/configs/sdm670-perf_defconfig +@@ -22,7 +22,6 @@ CONFIG_CPUSETS=y + CONFIG_CGROUP_CPUACCT=y + CONFIG_CGROUP_SCHEDTUNE=y + CONFIG_BLK_CGROUP=y +-CONFIG_RT_GROUP_SCHED=y + CONFIG_CGROUP_BPF=y + CONFIG_SCHED_CORE_CTL=y + CONFIG_NAMESPACES=y +@@ -627,6 +626,8 @@ 
CONFIG_CPU_FREQ_SWITCH_PROFILER=y + CONFIG_DEBUG_ALIGN_RODATA=y + CONFIG_CORESIGHT=y + CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y ++CONFIG_CORESIGHT_REMOTE_ETM=y ++CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 + CONFIG_CORESIGHT_QCOM_REPLICATOR=y + CONFIG_CORESIGHT_STM=y + CONFIG_CORESIGHT_TPDA=y +diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig +index 8c04ca5c09f5..57da388a6d88 100755 +--- a/arch/arm64/configs/sdm670_defconfig ++++ b/arch/arm64/configs/sdm670_defconfig +@@ -24,7 +24,6 @@ CONFIG_CGROUP_CPUACCT=y + CONFIG_CGROUP_SCHEDTUNE=y + CONFIG_BLK_CGROUP=y + CONFIG_DEBUG_BLK_CGROUP=y +-CONFIG_RT_GROUP_SCHED=y + CONFIG_CGROUP_BPF=y + CONFIG_SCHED_CORE_CTL=y + CONFIG_NAMESPACES=y +diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig +index c9b3dda39168..e970e8962916 100755 +--- a/arch/arm64/configs/sdm845-perf_defconfig ++++ b/arch/arm64/configs/sdm845-perf_defconfig +@@ -21,7 +21,6 @@ CONFIG_CGROUP_FREEZER=y + CONFIG_CPUSETS=y + CONFIG_CGROUP_CPUACCT=y + CONFIG_CGROUP_SCHEDTUNE=y +-CONFIG_RT_GROUP_SCHED=y + CONFIG_CGROUP_BPF=y + CONFIG_SCHED_CORE_CTL=y + CONFIG_SCHED_CORE_ROTATE=y +diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig +index 270075fed538..28d0031905e1 100644 +--- a/arch/arm64/configs/sdm845_defconfig ++++ b/arch/arm64/configs/sdm845_defconfig +@@ -22,7 +22,6 @@ CONFIG_CGROUP_FREEZER=y + CONFIG_CPUSETS=y + CONFIG_CGROUP_CPUACCT=y + CONFIG_CGROUP_SCHEDTUNE=y +-CONFIG_RT_GROUP_SCHED=y + CONFIG_CGROUP_BPF=y + CONFIG_SCHED_CORE_CTL=y + CONFIG_SCHED_CORE_ROTATE=y +diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h +index f819fdcff1ac..1cc42441bc67 100644 +--- a/arch/arm64/include/asm/atomic_ll_sc.h ++++ b/arch/arm64/include/asm/atomic_ll_sc.h +@@ -37,7 +37,7 @@ + * (the optimize attribute silently ignores these options). 
+ */ + +-#define ATOMIC_OP(op, asm_op) \ ++#define ATOMIC_OP(op, asm_op, constraint) \ + __LL_SC_INLINE void \ + __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \ + { \ +@@ -51,11 +51,11 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \ + " stxr %w1, %w0, %2\n" \ + " cbnz %w1, 1b" \ + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ +- : "Ir" (i)); \ ++ : #constraint "r" (i)); \ + } \ + __LL_SC_EXPORT(atomic_##op); + +-#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \ ++#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\ + __LL_SC_INLINE int \ + __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \ + { \ +@@ -70,14 +70,14 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \ + " cbnz %w1, 1b\n" \ + " " #mb \ + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ +- : "Ir" (i) \ ++ : #constraint "r" (i) \ + : cl); \ + \ + return result; \ + } \ + __LL_SC_EXPORT(atomic_##op##_return##name); + +-#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ ++#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \ + __LL_SC_INLINE int \ + __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \ + { \ +@@ -92,7 +92,7 @@ __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \ + " cbnz %w2, 1b\n" \ + " " #mb \ + : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ +- : "Ir" (i) \ ++ : #constraint "r" (i) \ + : cl); \ + \ + return result; \ +@@ -110,8 +110,8 @@ __LL_SC_EXPORT(atomic_fetch_##op##name); + ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\ + ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__) + +-ATOMIC_OPS(add, add) +-ATOMIC_OPS(sub, sub) ++ATOMIC_OPS(add, add, I) ++ATOMIC_OPS(sub, sub, J) + + #undef ATOMIC_OPS + #define ATOMIC_OPS(...) 
\ +@@ -121,17 +121,17 @@ ATOMIC_OPS(sub, sub) + ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\ + ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__) + +-ATOMIC_OPS(and, and) +-ATOMIC_OPS(andnot, bic) +-ATOMIC_OPS(or, orr) +-ATOMIC_OPS(xor, eor) ++ATOMIC_OPS(and, and, ) ++ATOMIC_OPS(andnot, bic, ) ++ATOMIC_OPS(or, orr, ) ++ATOMIC_OPS(xor, eor, ) + + #undef ATOMIC_OPS + #undef ATOMIC_FETCH_OP + #undef ATOMIC_OP_RETURN + #undef ATOMIC_OP + +-#define ATOMIC64_OP(op, asm_op) \ ++#define ATOMIC64_OP(op, asm_op, constraint) \ + __LL_SC_INLINE void \ + __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \ + { \ +@@ -145,11 +145,11 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \ + " stxr %w1, %0, %2\n" \ + " cbnz %w1, 1b" \ + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ +- : "Ir" (i)); \ ++ : #constraint "r" (i)); \ + } \ + __LL_SC_EXPORT(atomic64_##op); + +-#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \ ++#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\ + __LL_SC_INLINE long \ + __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \ + { \ +@@ -164,14 +164,14 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \ + " cbnz %w1, 1b\n" \ + " " #mb \ + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ +- : "Ir" (i) \ ++ : #constraint "r" (i) \ + : cl); \ + \ + return result; \ + } \ + __LL_SC_EXPORT(atomic64_##op##_return##name); + +-#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ ++#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\ + __LL_SC_INLINE long \ + __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \ + { \ +@@ -186,7 +186,7 @@ __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \ + " cbnz %w2, 1b\n" \ + " " #mb \ + : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ +- : "Ir" (i) \ ++ : #constraint "r" (i) \ + : cl); \ + \ + return result; \ +@@ -204,8 +204,8 @@ __LL_SC_EXPORT(atomic64_fetch_##op##name); + ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \ + ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__) + +-ATOMIC64_OPS(add, add) +-ATOMIC64_OPS(sub, sub) ++ATOMIC64_OPS(add, add, I) ++ATOMIC64_OPS(sub, sub, J) + + #undef ATOMIC64_OPS + #define ATOMIC64_OPS(...) 
\ +@@ -215,10 +215,10 @@ ATOMIC64_OPS(sub, sub) + ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \ + ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__) + +-ATOMIC64_OPS(and, and) +-ATOMIC64_OPS(andnot, bic) +-ATOMIC64_OPS(or, orr) +-ATOMIC64_OPS(xor, eor) ++ATOMIC64_OPS(and, and, L) ++ATOMIC64_OPS(andnot, bic, ) ++ATOMIC64_OPS(or, orr, L) ++ATOMIC64_OPS(xor, eor, L) + + #undef ATOMIC64_OPS + #undef ATOMIC64_FETCH_OP +@@ -248,49 +248,54 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v)) + } + __LL_SC_EXPORT(atomic64_dec_if_positive); + +-#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl) \ +-__LL_SC_INLINE unsigned long \ +-__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \ +- unsigned long old, \ +- unsigned long new)) \ ++#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \ ++__LL_SC_INLINE u##sz \ ++__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \ ++ unsigned long old, \ ++ u##sz new)) \ + { \ +- unsigned long tmp, oldval; \ ++ unsigned long tmp; \ ++ u##sz oldval; \ + \ + asm volatile( \ + " prfm pstl1strm, %[v]\n" \ +- "1: ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n" \ ++ "1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \ + " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \ + " cbnz %" #w "[tmp], 2f\n" \ +- " st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \ ++ " st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \ + " cbnz %w[tmp], 1b\n" \ + " " #mb "\n" \ +- " mov %" #w "[oldval], %" #w "[old]\n" \ + "2:" \ + : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \ +- [v] "+Q" (*(unsigned long *)ptr) \ +- : [old] "Lr" (old), [new] "r" (new) \ ++ [v] "+Q" (*(u##sz *)ptr) \ ++ : [old] #constraint "r" (old), [new] "r" (new) \ + : cl); \ + \ + return oldval; \ + } \ +-__LL_SC_EXPORT(__cmpxchg_case_##name); ++__LL_SC_EXPORT(__cmpxchg_case_##name##sz); + +-__CMPXCHG_CASE(w, b, 1, , , , ) +-__CMPXCHG_CASE(w, h, 2, , , , ) +-__CMPXCHG_CASE(w, , 4, , , , ) +-__CMPXCHG_CASE( , , 8, , , , ) +-__CMPXCHG_CASE(w, b, acq_1, , a, , "memory") +-__CMPXCHG_CASE(w, h, acq_2, , a, , "memory") +-__CMPXCHG_CASE(w, , acq_4, , a, , "memory") +-__CMPXCHG_CASE( , , acq_8, , a, , "memory") +-__CMPXCHG_CASE(w, b, rel_1, , , l, "memory") +-__CMPXCHG_CASE(w, h, rel_2, , , l, "memory") +-__CMPXCHG_CASE(w, , rel_4, , , l, "memory") +-__CMPXCHG_CASE( , , rel_8, , , l, "memory") +-__CMPXCHG_CASE(w, b, mb_1, dmb ish, , l, "memory") +-__CMPXCHG_CASE(w, h, mb_2, dmb ish, , l, "memory") +-__CMPXCHG_CASE(w, , mb_4, dmb ish, , l, "memory") +-__CMPXCHG_CASE( , , mb_8, dmb ish, , l, "memory") ++/* ++ * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly ++ * handle the 'K' constraint for the value 4294967295 - thus we use no ++ * constraint for 32 bit operations. 
++ */ ++__CMPXCHG_CASE(w, b, , 8, , , , , ) ++__CMPXCHG_CASE(w, h, , 16, , , , , ) ++__CMPXCHG_CASE(w, , , 32, , , , , ) ++__CMPXCHG_CASE( , , , 64, , , , , L) ++__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", ) ++__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", ) ++__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", ) ++__CMPXCHG_CASE( , , acq_, 64, , a, , "memory", L) ++__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", ) ++__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", ) ++__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", ) ++__CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L) ++__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", ) ++__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", ) ++__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", ) ++__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L) + + #undef __CMPXCHG_CASE + +diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h +index d32a0160c89f..982fe05e5058 100644 +--- a/arch/arm64/include/asm/atomic_lse.h ++++ b/arch/arm64/include/asm/atomic_lse.h +@@ -446,22 +446,22 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) + + #define __LL_SC_CMPXCHG(op) __LL_SC_CALL(__cmpxchg_case_##op) + +-#define __CMPXCHG_CASE(w, sz, name, mb, cl...) \ +-static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \ +- unsigned long old, \ +- unsigned long new) \ ++#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \ ++static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \ ++ unsigned long old, \ ++ u##sz new) \ + { \ + register unsigned long x0 asm ("x0") = (unsigned long)ptr; \ + register unsigned long x1 asm ("x1") = old; \ +- register unsigned long x2 asm ("x2") = new; \ ++ register u##sz x2 asm ("x2") = new; \ + \ + asm volatile(ARM64_LSE_ATOMIC_INSN( \ + /* LL/SC */ \ +- __LL_SC_CMPXCHG(name) \ ++ __LL_SC_CMPXCHG(name##sz) \ + __nops(2), \ + /* LSE atomics */ \ + " mov " #w "30, %" #w "[old]\n" \ +- " cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n" \ ++ " cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n" \ + " mov %" #w "[ret], " #w "30") \ + : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \ + : [old] "r" (x1), [new] "r" (x2) \ +@@ -470,22 +470,22 @@ static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \ + return x0; \ + } + +-__CMPXCHG_CASE(w, b, 1, ) +-__CMPXCHG_CASE(w, h, 2, ) +-__CMPXCHG_CASE(w, , 4, ) +-__CMPXCHG_CASE(x, , 8, ) +-__CMPXCHG_CASE(w, b, acq_1, a, "memory") +-__CMPXCHG_CASE(w, h, acq_2, a, "memory") +-__CMPXCHG_CASE(w, , acq_4, a, "memory") +-__CMPXCHG_CASE(x, , acq_8, a, "memory") +-__CMPXCHG_CASE(w, b, rel_1, l, "memory") +-__CMPXCHG_CASE(w, h, rel_2, l, "memory") +-__CMPXCHG_CASE(w, , rel_4, l, "memory") +-__CMPXCHG_CASE(x, , rel_8, l, "memory") +-__CMPXCHG_CASE(w, b, mb_1, al, "memory") +-__CMPXCHG_CASE(w, h, mb_2, al, "memory") +-__CMPXCHG_CASE(w, , mb_4, al, "memory") +-__CMPXCHG_CASE(x, , mb_8, al, "memory") ++__CMPXCHG_CASE(w, b, , 8, ) ++__CMPXCHG_CASE(w, h, , 16, ) ++__CMPXCHG_CASE(w, , , 32, ) ++__CMPXCHG_CASE(x, , , 64, ) ++__CMPXCHG_CASE(w, b, acq_, 8, a, "memory") ++__CMPXCHG_CASE(w, h, acq_, 16, a, "memory") ++__CMPXCHG_CASE(w, , acq_, 32, a, "memory") ++__CMPXCHG_CASE(x, , acq_, 64, a, "memory") ++__CMPXCHG_CASE(w, b, rel_, 8, l, "memory") ++__CMPXCHG_CASE(w, h, rel_, 16, l, "memory") ++__CMPXCHG_CASE(w, , rel_, 32, l, "memory") ++__CMPXCHG_CASE(x, , rel_, 64, l, "memory") ++__CMPXCHG_CASE(w, b, mb_, 8, al, "memory") ++__CMPXCHG_CASE(w, h, mb_, 16, al, "memory") ++__CMPXCHG_CASE(w, , mb_, 32, al, "memory") ++__CMPXCHG_CASE(x, , mb_, 
64, al, "memory") + + #undef __LL_SC_CMPXCHG + #undef __CMPXCHG_CASE +diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h +index 09f65339d66d..e6d66c508d81 100644 +--- a/arch/arm64/include/asm/checksum.h ++++ b/arch/arm64/include/asm/checksum.h +@@ -30,16 +30,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) + { + __uint128_t tmp; + u64 sum; ++ int n = ihl; /* we want it signed */ + + tmp = *(const __uint128_t *)iph; + iph += 16; +- ihl -= 4; ++ n -= 4; + tmp += ((tmp >> 64) | (tmp << 64)); + sum = tmp >> 64; + do { + sum += *(const u32 *)iph; + iph += 4; +- } while (--ihl); ++ } while (--n > 0); + + sum += ((sum >> 32) | (sum << 32)); + return csum_fold(sum >> 32); +diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h +index 9b2e2e2e728a..ed6a1aae6fbb 100644 +--- a/arch/arm64/include/asm/cmpxchg.h ++++ b/arch/arm64/include/asm/cmpxchg.h +@@ -29,46 +29,46 @@ + * barrier case is generated as release+dmb for the former and + * acquire+release for the latter. + */ +-#define __XCHG_CASE(w, sz, name, mb, nop_lse, acq, acq_lse, rel, cl) \ +-static inline unsigned long __xchg_case_##name(unsigned long x, \ +- volatile void *ptr) \ +-{ \ +- unsigned long ret, tmp; \ +- \ +- asm volatile(ARM64_LSE_ATOMIC_INSN( \ +- /* LL/SC */ \ +- " prfm pstl1strm, %2\n" \ +- "1: ld" #acq "xr" #sz "\t%" #w "0, %2\n" \ +- " st" #rel "xr" #sz "\t%w1, %" #w "3, %2\n" \ +- " cbnz %w1, 1b\n" \ +- " " #mb, \ +- /* LSE atomics */ \ +- " swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \ +- __nops(3) \ +- " " #nop_lse) \ +- : "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \ +- : "r" (x) \ +- : cl); \ +- \ +- return ret; \ ++#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl) \ ++static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr) \ ++{ \ ++ u##sz ret; \ ++ unsigned long tmp; \ ++ \ ++ asm volatile(ARM64_LSE_ATOMIC_INSN( \ ++ /* LL/SC */ \ ++ " prfm pstl1strm, %2\n" \ ++ "1: ld" #acq "xr" #sfx "\t%" #w "0, %2\n" \ ++ " st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n" \ ++ " cbnz %w1, 1b\n" \ ++ " " #mb, \ ++ /* LSE atomics */ \ ++ " swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n" \ ++ __nops(3) \ ++ " " #nop_lse) \ ++ : "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr) \ ++ : "r" (x) \ ++ : cl); \ ++ \ ++ return ret; \ + } + +-__XCHG_CASE(w, b, 1, , , , , , ) +-__XCHG_CASE(w, h, 2, , , , , , ) +-__XCHG_CASE(w, , 4, , , , , , ) +-__XCHG_CASE( , , 8, , , , , , ) +-__XCHG_CASE(w, b, acq_1, , , a, a, , "memory") +-__XCHG_CASE(w, h, acq_2, , , a, a, , "memory") +-__XCHG_CASE(w, , acq_4, , , a, a, , "memory") +-__XCHG_CASE( , , acq_8, , , a, a, , "memory") +-__XCHG_CASE(w, b, rel_1, , , , , l, "memory") +-__XCHG_CASE(w, h, rel_2, , , , , l, "memory") +-__XCHG_CASE(w, , rel_4, , , , , l, "memory") +-__XCHG_CASE( , , rel_8, , , , , l, "memory") +-__XCHG_CASE(w, b, mb_1, dmb ish, nop, , a, l, "memory") +-__XCHG_CASE(w, h, mb_2, dmb ish, nop, , a, l, "memory") +-__XCHG_CASE(w, , mb_4, dmb ish, nop, , a, l, "memory") +-__XCHG_CASE( , , mb_8, dmb ish, nop, , a, l, "memory") ++__XCHG_CASE(w, b, , 8, , , , , , ) ++__XCHG_CASE(w, h, , 16, , , , , , ) ++__XCHG_CASE(w, , , 32, , , , , , ) ++__XCHG_CASE( , , , 64, , , , , , ) ++__XCHG_CASE(w, b, acq_, 8, , , a, a, , "memory") ++__XCHG_CASE(w, h, acq_, 16, , , a, a, , "memory") ++__XCHG_CASE(w, , acq_, 32, , , a, a, , "memory") ++__XCHG_CASE( , , acq_, 64, , , a, a, , "memory") ++__XCHG_CASE(w, b, rel_, 8, , , , , l, "memory") ++__XCHG_CASE(w, h, rel_, 16, , , 
, , l, "memory") ++__XCHG_CASE(w, , rel_, 32, , , , , l, "memory") ++__XCHG_CASE( , , rel_, 64, , , , , l, "memory") ++__XCHG_CASE(w, b, mb_, 8, dmb ish, nop, , a, l, "memory") ++__XCHG_CASE(w, h, mb_, 16, dmb ish, nop, , a, l, "memory") ++__XCHG_CASE(w, , mb_, 32, dmb ish, nop, , a, l, "memory") ++__XCHG_CASE( , , mb_, 64, dmb ish, nop, , a, l, "memory") + + #undef __XCHG_CASE + +@@ -79,13 +79,13 @@ static __always_inline unsigned long __xchg##sfx(unsigned long x, \ + { \ + switch (size) { \ + case 1: \ +- return __xchg_case##sfx##_1(x, ptr); \ ++ return __xchg_case##sfx##_8(x, ptr); \ + case 2: \ +- return __xchg_case##sfx##_2(x, ptr); \ ++ return __xchg_case##sfx##_16(x, ptr); \ + case 4: \ +- return __xchg_case##sfx##_4(x, ptr); \ ++ return __xchg_case##sfx##_32(x, ptr); \ + case 8: \ +- return __xchg_case##sfx##_8(x, ptr); \ ++ return __xchg_case##sfx##_64(x, ptr); \ + default: \ + BUILD_BUG(); \ + } \ +@@ -122,13 +122,13 @@ static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \ + { \ + switch (size) { \ + case 1: \ +- return __cmpxchg_case##sfx##_1(ptr, (u8)old, new); \ ++ return __cmpxchg_case##sfx##_8(ptr, (u8)old, new); \ + case 2: \ +- return __cmpxchg_case##sfx##_2(ptr, (u16)old, new); \ ++ return __cmpxchg_case##sfx##_16(ptr, (u16)old, new); \ + case 4: \ +- return __cmpxchg_case##sfx##_4(ptr, old, new); \ ++ return __cmpxchg_case##sfx##_32(ptr, old, new); \ + case 8: \ +- return __cmpxchg_case##sfx##_8(ptr, old, new); \ ++ return __cmpxchg_case##sfx##_64(ptr, old, new); \ + default: \ + BUILD_BUG(); \ + } \ +@@ -222,16 +222,16 @@ __CMPXCHG_GEN(_mb) + __ret; \ + }) + +-#define __CMPWAIT_CASE(w, sz, name) \ +-static inline void __cmpwait_case_##name(volatile void *ptr, \ +- unsigned long val) \ ++#define __CMPWAIT_CASE(w, sfx, sz) \ ++static inline void __cmpwait_case_##sz(volatile void *ptr, \ ++ unsigned long val) \ + { \ + unsigned long tmp; \ + \ + asm volatile( \ + " sevl\n" \ + " wfe\n" \ +- " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \ ++ " ldxr" #sfx "\t%" #w "[tmp], %[v]\n" \ + " eor %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \ + " cbnz %" #w "[tmp], 1f\n" \ + " wfe\n" \ +@@ -240,10 +240,10 @@ static inline void __cmpwait_case_##name(volatile void *ptr, \ + : [val] "r" (val)); \ + } + +-__CMPWAIT_CASE(w, b, 1); +-__CMPWAIT_CASE(w, h, 2); +-__CMPWAIT_CASE(w, , 4); +-__CMPWAIT_CASE( , , 8); ++__CMPWAIT_CASE(w, b, 8); ++__CMPWAIT_CASE(w, h, 16); ++__CMPWAIT_CASE(w, , 32); ++__CMPWAIT_CASE( , , 64); + + #undef __CMPWAIT_CASE + +@@ -254,13 +254,13 @@ static __always_inline void __cmpwait##sfx(volatile void *ptr, \ + { \ + switch (size) { \ + case 1: \ +- return __cmpwait_case##sfx##_1(ptr, (u8)val); \ ++ return __cmpwait_case##sfx##_8(ptr, (u8)val); \ + case 2: \ +- return __cmpwait_case##sfx##_2(ptr, (u16)val); \ ++ return __cmpwait_case##sfx##_16(ptr, (u16)val); \ + case 4: \ +- return __cmpwait_case##sfx##_4(ptr, val); \ ++ return __cmpwait_case##sfx##_32(ptr, val); \ + case 8: \ +- return __cmpwait_case##sfx##_8(ptr, val); \ ++ return __cmpwait_case##sfx##_64(ptr, val); \ + default: \ + BUILD_BUG(); \ + } \ +diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h +index b71420a12f26..12e2f82341b9 100644 +--- a/arch/arm64/include/asm/debug-monitors.h ++++ b/arch/arm64/include/asm/debug-monitors.h +@@ -116,6 +116,8 @@ void disable_debug_monitors(enum dbg_active_el el); + + void user_rewind_single_step(struct task_struct *task); + void user_fastforward_single_step(struct task_struct *task); ++void 
user_regs_reset_single_step(struct user_pt_regs *regs, ++ struct task_struct *task); + + void kernel_enable_single_step(struct pt_regs *regs); + void kernel_disable_single_step(void); +diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h +index e69df2816ae4..6b9e92db84ad 100755 +--- a/arch/arm64/include/asm/futex.h ++++ b/arch/arm64/include/asm/futex.h +@@ -23,26 +23,34 @@ + + #include + ++#define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */ ++ + #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ + do { \ ++ unsigned int loops = FUTEX_MAX_LOOPS; \ + uaccess_enable(); \ ++ \ + asm volatile( \ + " prfm pstl1strm, %2\n" \ + "1: ldxr %w1, %2\n" \ + insn "\n" \ + "2: stlxr %w0, %w3, %2\n" \ +-" cbnz %w0, 1b\n" \ +-" dmb ish\n" \ ++" cbz %w0, 3f\n" \ ++" sub %w4, %w4, %w0\n" \ ++" cbnz %w4, 1b\n" \ ++" mov %w0, %w7\n" \ + "3:\n" \ ++" dmb ish\n" \ + " .pushsection .fixup,\"ax\"\n" \ + " .align 2\n" \ +-"4: mov %w0, %w5\n" \ ++"4: mov %w0, %w6\n" \ + " b 3b\n" \ + " .popsection\n" \ + _ASM_EXTABLE(1b, 4b) \ + _ASM_EXTABLE(2b, 4b) \ +- : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ +- : "r" (oparg), "Ir" (-EFAULT) \ ++ : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp), \ ++ "+r" (loops) \ ++ : "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN) \ + : "memory"); \ + uaccess_disable(); \ + } while (0) +@@ -56,23 +64,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) + + switch (op) { + case FUTEX_OP_SET: +- __futex_atomic_op("mov %w3, %w4", ++ __futex_atomic_op("mov %w3, %w5", + ret, oldval, uaddr, tmp, oparg); + break; + case FUTEX_OP_ADD: +- __futex_atomic_op("add %w3, %w1, %w4", ++ __futex_atomic_op("add %w3, %w1, %w5", + ret, oldval, uaddr, tmp, oparg); + break; + case FUTEX_OP_OR: +- __futex_atomic_op("orr %w3, %w1, %w4", ++ __futex_atomic_op("orr %w3, %w1, %w5", + ret, oldval, uaddr, tmp, oparg); + break; + case FUTEX_OP_ANDN: +- __futex_atomic_op("and %w3, %w1, %w4", ++ __futex_atomic_op("and %w3, %w1, %w5", + ret, oldval, uaddr, tmp, ~oparg); + break; + case FUTEX_OP_XOR: +- __futex_atomic_op("eor %w3, %w1, %w4", ++ __futex_atomic_op("eor %w3, %w1, %w5", + ret, oldval, uaddr, tmp, oparg); + break; + default: +@@ -92,6 +100,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, + u32 oldval, u32 newval) + { + int ret = 0; ++ unsigned int loops = FUTEX_MAX_LOOPS; + u32 val, tmp; + u32 __user *uaddr; + +@@ -103,20 +112,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, + asm volatile("// futex_atomic_cmpxchg_inatomic\n" + " prfm pstl1strm, %2\n" + "1: ldxr %w1, %2\n" +-" sub %w3, %w1, %w4\n" +-" cbnz %w3, 3f\n" +-"2: stlxr %w3, %w5, %2\n" +-" cbnz %w3, 1b\n" +-" dmb ish\n" ++" sub %w3, %w1, %w5\n" ++" cbnz %w3, 4f\n" ++"2: stlxr %w3, %w6, %2\n" ++" cbz %w3, 3f\n" ++" sub %w4, %w4, %w3\n" ++" cbnz %w4, 1b\n" ++" mov %w0, %w8\n" + "3:\n" ++" dmb ish\n" ++"4:\n" + " .pushsection .fixup,\"ax\"\n" +-"4: mov %w0, %w6\n" +-" b 3b\n" ++"5: mov %w0, %w7\n" ++" b 4b\n" + " .popsection\n" +- _ASM_EXTABLE(1b, 4b) +- _ASM_EXTABLE(2b, 4b) +- : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp) +- : "r" (oldval), "r" (newval), "Ir" (-EFAULT) ++ _ASM_EXTABLE(1b, 5b) ++ _ASM_EXTABLE(2b, 5b) ++ : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops) ++ : "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN) + : "memory"); + uaccess_disable(); + +diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h +index a11c8c2915c9..e8cb69b0cf4f 
100644 +--- a/arch/arm64/include/asm/kvm_arm.h ++++ b/arch/arm64/include/asm/kvm_arm.h +@@ -78,10 +78,11 @@ + * IMO: Override CPSR.I and enable signaling with VI + * FMO: Override CPSR.F and enable signaling with VF + * SWIO: Turn set/way invalidates into set/way clean+invalidate ++ * PTW: Take a stage2 fault if a stage1 walk steps in device memory + */ + #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ + HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \ +- HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW) ++ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_PTW) + #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF) + #define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO) + #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK) +diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h +index 8f5cf83b2339..3d2fddac25b9 100644 +--- a/arch/arm64/include/asm/kvm_asm.h ++++ b/arch/arm64/include/asm/kvm_asm.h +@@ -82,6 +82,34 @@ extern u32 __init_stage2_translation(void); + *__hyp_this_cpu_ptr(sym); \ + }) + ++#define __KVM_EXTABLE(from, to) \ ++ " .pushsection __kvm_ex_table, \"a\"\n" \ ++ " .align 3\n" \ ++ " .long (" #from " - .), (" #to " - .)\n" \ ++ " .popsection\n" ++ ++ ++#define __kvm_at(at_op, addr) \ ++( { \ ++ int __kvm_at_err = 0; \ ++ u64 spsr, elr; \ ++ asm volatile( \ ++ " mrs %1, spsr_el2\n" \ ++ " mrs %2, elr_el2\n" \ ++ "1: at "at_op", %3\n" \ ++ " isb\n" \ ++ " b 9f\n" \ ++ "2: msr spsr_el2, %1\n" \ ++ " msr elr_el2, %2\n" \ ++ " mov %w0, %4\n" \ ++ "9:\n" \ ++ __KVM_EXTABLE(1b, 2b) \ ++ : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr) \ ++ : "r" (addr), "i" (-EFAULT)); \ ++ __kvm_at_err; \ ++} ) ++ ++ + #else /* __ASSEMBLY__ */ + + .macro hyp_adr_this_cpu reg, sym, tmp +@@ -106,6 +134,21 @@ extern u32 __init_stage2_translation(void); + kern_hyp_va \vcpu + .endm + ++/* ++ * KVM extable for unexpected exceptions. ++ * In the same format _asm_extable, but output to a different section so that ++ * it can be mapped to EL2. The KVM version is not sorted. The caller must ++ * ensure: ++ * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented ++ * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup. ++ */ ++.macro _kvm_extable, from, to ++ .pushsection __kvm_ex_table, "a" ++ .align 3 ++ .long (\from - .), (\to - .) ++ .popsection ++.endm ++ + #endif + + #endif /* __ARM_KVM_ASM_H__ */ +diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h +index 4cdfbd01b2de..a152a7bbc85a 100644 +--- a/arch/arm64/include/asm/kvm_host.h ++++ b/arch/arm64/include/asm/kvm_host.h +@@ -161,6 +161,7 @@ enum vcpu_sysreg { + #define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */ + #define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */ + #define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */ ++#define c2_TTBCR2 (c2_TTBCR + 1) /* Translation Table Base Control R. 2 */ + #define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */ + #define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */ + #define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */ +@@ -188,6 +189,7 @@ enum vcpu_sysreg { + #define cp14_DBGWCR0 (DBGWCR0_EL1 * 2) + #define cp14_DBGWVR0 (DBGWVR0_EL1 * 2) + #define cp14_DBGDCCINT (MDCCINT_EL1 * 2) ++#define cp14_DBGVCR (DBGVCR32_EL2 * 2) + + #define NR_COPRO_REGS (NR_SYS_REGS * 2) + +@@ -290,8 +292,10 @@ struct kvm_vcpu_arch { + * CP14 and CP15 live in the same array, as they are backed by the + * same system registers. 
+ */ +-#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)]) +-#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)]) ++#define CPx_BIAS IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ++ ++#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS]) ++#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS]) + + #ifdef CONFIG_CPU_BIG_ENDIAN + #define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r)) +diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h +index 600887e491fd..496070f97c54 100644 +--- a/arch/arm64/include/asm/numa.h ++++ b/arch/arm64/include/asm/numa.h +@@ -25,6 +25,9 @@ const struct cpumask *cpumask_of_node(int node); + /* Returns a pointer to the cpumask of CPUs on Node 'node'. */ + static inline const struct cpumask *cpumask_of_node(int node) + { ++ if (node == NUMA_NO_NODE) ++ return cpu_all_mask; ++ + return node_to_cpumask_map[node]; + } + #endif +diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h +index 5bc3de78306a..94f30e6c327c 100644 +--- a/arch/arm64/include/asm/pgtable-prot.h ++++ b/arch/arm64/include/asm/pgtable-prot.h +@@ -65,7 +65,7 @@ + #define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN) + #define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY) + #define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) +-#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) ++#define PAGE_HYP_DEVICE __pgprot(_PROT_DEFAULT | PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_HYP | PTE_HYP_XN) + + #define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) + #define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h +index d0eeb96c8407..46e95557b8c2 100644 +--- a/arch/arm64/include/asm/pgtable.h ++++ b/arch/arm64/include/asm/pgtable.h +@@ -86,8 +86,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; + #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID)) + #define pte_valid_not_user(pte) \ + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) +-#define pte_valid_young(pte) \ +- ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF)) + #define pte_valid_user(pte) \ + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) + +@@ -95,9 +93,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; + * Could the pte be present in the TLB? We must check mm_tlb_flush_pending + * so that we don't erroneously return false for pages that have been + * remapped as PROT_NONE but are yet to be flushed from the TLB. ++ * Note that we can't make any assumptions based on the state of the access ++ * flag, since ptep_clear_flush_young() elides a DSB when invalidating the ++ * TLB. + */ + #define pte_accessible(mm, pte) \ +- (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte)) ++ (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte)) + + /* + * p??_access_permitted() is true for valid user mappings (subject to the +diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c +index 9f1adca3c346..3101d1b61713 100644 +--- a/arch/arm64/kernel/debug-monitors.c ++++ b/arch/arm64/kernel/debug-monitors.c +@@ -149,17 +149,20 @@ postcore_initcall(debug_monitors_init); + /* + * Single step API and exception handling. 
+ */ +-static void set_regs_spsr_ss(struct pt_regs *regs) ++static void set_user_regs_spsr_ss(struct user_pt_regs *regs) + { + regs->pstate |= DBG_SPSR_SS; + } +-NOKPROBE_SYMBOL(set_regs_spsr_ss); ++NOKPROBE_SYMBOL(set_user_regs_spsr_ss); + +-static void clear_regs_spsr_ss(struct pt_regs *regs) ++static void clear_user_regs_spsr_ss(struct user_pt_regs *regs) + { + regs->pstate &= ~DBG_SPSR_SS; + } +-NOKPROBE_SYMBOL(clear_regs_spsr_ss); ++NOKPROBE_SYMBOL(clear_user_regs_spsr_ss); ++ ++#define set_regs_spsr_ss(r) set_user_regs_spsr_ss(&(r)->user_regs) ++#define clear_regs_spsr_ss(r) clear_user_regs_spsr_ss(&(r)->user_regs) + + /* EL1 Single Step Handler hooks */ + static LIST_HEAD(step_hook); +@@ -377,17 +380,26 @@ void user_rewind_single_step(struct task_struct *task) + * If single step is active for this thread, then set SPSR.SS + * to 1 to avoid returning to the active-pending state. + */ +- if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) ++ if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) + set_regs_spsr_ss(task_pt_regs(task)); + } + NOKPROBE_SYMBOL(user_rewind_single_step); + + void user_fastforward_single_step(struct task_struct *task) + { +- if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) ++ if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) + clear_regs_spsr_ss(task_pt_regs(task)); + } + ++void user_regs_reset_single_step(struct user_pt_regs *regs, ++ struct task_struct *task) ++{ ++ if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) ++ set_user_regs_spsr_ss(regs); ++ else ++ clear_user_regs_spsr_ss(regs); ++} ++ + /* Kernel API */ + void kernel_enable_single_step(struct pt_regs *regs) + { +diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S +index 9c32d0eab888..a064aa18fecb 100644 +--- a/arch/arm64/kernel/head.S ++++ b/arch/arm64/kernel/head.S +@@ -847,6 +847,7 @@ __primary_switch: + + tlbi vmalle1 // Remove any stale TLB entries + dsb nsh ++ isb + + msr sctlr_el1, x19 // re-enable the MMU + isb +diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c +index 72a660a74ff9..44d757308e47 100644 +--- a/arch/arm64/kernel/kgdb.c ++++ b/arch/arm64/kernel/kgdb.c +@@ -256,7 +256,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) + if (user_mode(regs)) + return DBG_HOOK_ERROR; + +- kgdb_handle_exception(1, SIGTRAP, 0, regs); ++ kgdb_handle_exception(0, SIGTRAP, 0, regs); + return DBG_HOOK_HANDLED; + } + NOKPROBE_SYMBOL(kgdb_step_brk_fn); +diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c +index 64b892d9c7cd..e6f2efd67513 100644 +--- a/arch/arm64/kernel/perf_event.c ++++ b/arch/arm64/kernel/perf_event.c +@@ -750,6 +750,28 @@ static void armv8pmu_disable_event(struct perf_event *event) + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + } + ++static void armv8pmu_start(struct arm_pmu *cpu_pmu) ++{ ++ unsigned long flags; ++ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); ++ ++ raw_spin_lock_irqsave(&events->pmu_lock, flags); ++ /* Enable all counters */ ++ armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E); ++ raw_spin_unlock_irqrestore(&events->pmu_lock, flags); ++} ++ ++static void armv8pmu_stop(struct arm_pmu *cpu_pmu) ++{ ++ unsigned long flags; ++ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); ++ ++ raw_spin_lock_irqsave(&events->pmu_lock, flags); ++ /* Disable all counters */ ++ armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E); ++ raw_spin_unlock_irqrestore(&events->pmu_lock, flags); ++} ++ + static irqreturn_t 
armv8pmu_handle_irq(int irq_num, void *dev) + { + u32 pmovsr; +@@ -775,6 +797,11 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) + */ + regs = get_irq_regs(); + ++ /* ++ * Stop the PMU while processing the counter overflows ++ * to prevent skews in group events. ++ */ ++ armv8pmu_stop(cpu_pmu); + for (idx = 0; idx < cpu_pmu->num_events; ++idx) { + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc; +@@ -799,6 +826,7 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) + if (perf_event_overflow(event, &data, regs)) + cpu_pmu->disable(event); + } ++ armv8pmu_start(cpu_pmu); + + /* + * Handle the pending perf events. +@@ -812,28 +840,6 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) + return IRQ_HANDLED; + } + +-static void armv8pmu_start(struct arm_pmu *cpu_pmu) +-{ +- unsigned long flags; +- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); +- +- raw_spin_lock_irqsave(&events->pmu_lock, flags); +- /* Enable all counters */ +- armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E); +- raw_spin_unlock_irqrestore(&events->pmu_lock, flags); +-} +- +-static void armv8pmu_stop(struct arm_pmu *cpu_pmu) +-{ +- unsigned long flags; +- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); +- +- raw_spin_lock_irqsave(&events->pmu_lock, flags); +- /* Disable all counters */ +- armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E); +- raw_spin_unlock_irqrestore(&events->pmu_lock, flags); +-} +- + static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, + struct perf_event *event) + { +diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c +index 3f62b35fb6f1..815c395a1076 100644 +--- a/arch/arm64/kernel/perf_regs.c ++++ b/arch/arm64/kernel/perf_regs.c +@@ -13,15 +13,34 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) + return 0; + + /* +- * Compat (i.e. 32 bit) mode: +- * - PC has been set in the pt_regs struct in kernel_entry, +- * - Handle SP and LR here. ++ * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but ++ * we're stuck with it for ABI compatability reasons. ++ * ++ * For a 32-bit consumer inspecting a 32-bit task, then it will look at ++ * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h). ++ * These correspond directly to a prefix of the registers saved in our ++ * 'struct pt_regs', with the exception of the PC, so we copy that down ++ * (x15 corresponds to SP_hyp in the architecture). ++ * ++ * So far, so good. ++ * ++ * The oddity arises when a 64-bit consumer looks at a 32-bit task and ++ * asks for registers beyond PERF_REG_ARM_MAX. In this case, we return ++ * SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and ++ * PC registers would normally live. The initial idea was to allow a ++ * 64-bit unwinder to unwind a 32-bit task and, although it's not clear ++ * how well that works in practice, somebody might be relying on it. ++ * ++ * At the time we make a sample, we don't know whether the consumer is ++ * 32-bit or 64-bit, so we have to cater for both possibilities. 
+ */ + if (compat_user_mode(regs)) { + if ((u32)idx == PERF_REG_ARM64_SP) + return regs->compat_sp; + if ((u32)idx == PERF_REG_ARM64_LR) + return regs->compat_lr; ++ if (idx == 15) ++ return regs->pc; + } + + if ((u32)idx == PERF_REG_ARM64_SP) +diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c +index 35e0e6464e62..884b8d5b1cdf 100755 +--- a/arch/arm64/kernel/psci.c ++++ b/arch/arm64/kernel/psci.c +@@ -69,7 +69,6 @@ static int cpu_psci_cpu_disable(unsigned int cpu) + + static void cpu_psci_cpu_die(unsigned int cpu) + { +- int ret; + /* + * There are no known implementations of PSCI actually using the + * power state field, pass a sensible default for now. +@@ -77,9 +76,7 @@ static void cpu_psci_cpu_die(unsigned int cpu) + u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN << + PSCI_0_2_POWER_STATE_TYPE_SHIFT; + +- ret = psci_ops.cpu_off(state); +- +- pr_crit("unable to power off CPU%u (%d)\n", cpu, ret); ++ psci_ops.cpu_off(state); + } + + static int cpu_psci_cpu_kill(unsigned int cpu) +diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c +index a22161ccf447..7990377dbbbc 100644 +--- a/arch/arm64/kernel/ptrace.c ++++ b/arch/arm64/kernel/ptrace.c +@@ -1448,8 +1448,8 @@ static int valid_native_regs(struct user_pt_regs *regs) + */ + int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task) + { +- if (!test_tsk_thread_flag(task, TIF_SINGLESTEP)) +- regs->pstate &= ~DBG_SPSR_SS; ++ /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */ ++ user_regs_reset_single_step(regs, task); + + if (is_compat_thread(task_thread_info(task))) + return valid_compat_regs(regs); +diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c +index e5733cbd0c84..e8c9fe3753a2 100755 +--- a/arch/arm64/kernel/traps.c ++++ b/arch/arm64/kernel/traps.c +@@ -737,14 +737,6 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs) + } + #endif + +- if (show_unhandled_signals_ratelimited()) { +- pr_info("%s[%d]: syscall %d\n", current->comm, +- task_pid_nr(current), (int)regs->syscallno); +- dump_instr("", regs); +- if (user_mode(regs)) +- __show_regs(regs); +- } +- + return sys_ni_syscall(); + } + +diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S +index beca249bc2f3..b3e6c4d5b75c 100644 +--- a/arch/arm64/kernel/vdso/vdso.lds.S ++++ b/arch/arm64/kernel/vdso/vdso.lds.S +@@ -39,6 +39,13 @@ SECTIONS + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + ++ /* ++ * Discard .note.gnu.property sections which are unused and have ++ * different alignment requirement from vDSO note sections. ++ */ ++ /DISCARD/ : { ++ *(.note.GNU-stack .note.gnu.property) ++ } + .note : { *(.note.*) } :text :note + + . = ALIGN(16); +@@ -59,7 +66,6 @@ SECTIONS + PROVIDE(end = .); + + /DISCARD/ : { +- *(.note.GNU-stack) + *(.data .data.* .gnu.linkonce.d.* .sdata*) + *(.bss .sbss .dynbss .dynsbss) + } +diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S +index 402a62165d40..ad1142a7780c 100644 +--- a/arch/arm64/kernel/vmlinux.lds.S ++++ b/arch/arm64/kernel/vmlinux.lds.S +@@ -23,6 +23,13 @@ ENTRY(_text) + + jiffies = jiffies_64; + ++ ++#define HYPERVISOR_EXTABLE \ ++ . 
= ALIGN(SZ_8); \ ++ VMLINUX_SYMBOL(__start___kvm_ex_table) = .; \ ++ *(__kvm_ex_table) \ ++ VMLINUX_SYMBOL(__stop___kvm_ex_table) = .; ++ + #define HYPERVISOR_TEXT \ + /* \ + * Align to 4 KB so that \ +@@ -38,6 +45,7 @@ jiffies = jiffies_64; + VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \ + VMLINUX_SYMBOL(__hyp_text_start) = .; \ + *(.hyp.text) \ ++ HYPERVISOR_EXTABLE \ + VMLINUX_SYMBOL(__hyp_text_end) = .; + + #define IDMAP_TEXT \ +diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S +index a360ac6e89e9..4e0eac361f87 100644 +--- a/arch/arm64/kvm/hyp/entry.S ++++ b/arch/arm64/kvm/hyp/entry.S +@@ -17,6 +17,7 @@ + + #include + ++#include + #include + #include + #include +@@ -62,6 +63,15 @@ ENTRY(__guest_enter) + // Store the host regs + save_callee_saved_regs x1 + ++ // Now the host state is stored if we have a pending RAS SError it must ++ // affect the host. If any asynchronous exception is pending we defer ++ // the guest entry. ++ mrs x1, isr_el1 ++ cbz x1, 1f ++ mov x0, #ARM_EXCEPTION_IRQ ++ ret ++ ++1: + add x18, x0, #VCPU_CONTEXT + + // Restore guest regs x0-x17 +@@ -135,18 +145,22 @@ ENTRY(__guest_exit) + // This is our single instruction exception window. A pending + // SError is guaranteed to occur at the earliest when we unmask + // it, and at the latest just after the ISB. +- .global abort_guest_exit_start + abort_guest_exit_start: + + isb + +- .global abort_guest_exit_end + abort_guest_exit_end: ++ msr daifset, #4 // Mask aborts ++ ret ++ ++ _kvm_extable abort_guest_exit_start, 9997f ++ _kvm_extable abort_guest_exit_end, 9997f ++9997: ++ msr daifset, #4 // Mask aborts ++ mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT) + +- // If the exception took place, restore the EL1 exception +- // context so that we can report some information. +- // Merge the exception code with the SError pending bit. +- tbz x0, #ARM_EXIT_WITH_SERROR_BIT, 1f ++ // restore the EL1 exception context so that we can report some ++ // information. Merge the exception code with the SError pending bit. + msr elr_el2, x2 + msr esr_el2, x3 + msr spsr_el2, x4 +diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S +index bf4988f9dae8..7ced1fb93d07 100644 +--- a/arch/arm64/kvm/hyp/hyp-entry.S ++++ b/arch/arm64/kvm/hyp/hyp-entry.S +@@ -25,6 +25,30 @@ + #include + #include + ++.macro save_caller_saved_regs_vect ++ stp x0, x1, [sp, #-16]! ++ stp x2, x3, [sp, #-16]! ++ stp x4, x5, [sp, #-16]! ++ stp x6, x7, [sp, #-16]! ++ stp x8, x9, [sp, #-16]! ++ stp x10, x11, [sp, #-16]! ++ stp x12, x13, [sp, #-16]! ++ stp x14, x15, [sp, #-16]! ++ stp x16, x17, [sp, #-16]! ++.endm ++ ++.macro restore_caller_saved_regs_vect ++ ldp x16, x17, [sp], #16 ++ ldp x14, x15, [sp], #16 ++ ldp x12, x13, [sp], #16 ++ ldp x10, x11, [sp], #16 ++ ldp x8, x9, [sp], #16 ++ ldp x6, x7, [sp], #16 ++ ldp x4, x5, [sp], #16 ++ ldp x2, x3, [sp], #16 ++ ldp x0, x1, [sp], #16 ++.endm ++ + .text + .pushsection .hyp.text, "ax" + +@@ -177,26 +201,24 @@ el1_error: + mov x0, #ARM_EXCEPTION_EL1_SERROR + b __guest_exit + ++el2_sync: ++ save_caller_saved_regs_vect ++ stp x29, x30, [sp, #-16]! ++ bl kvm_unexpected_el2_exception ++ ldp x29, x30, [sp], #16 ++ restore_caller_saved_regs_vect ++ ++ eret ++ + el2_error: +- /* +- * Only two possibilities: +- * 1) Either we come from the exit path, having just unmasked +- * PSTATE.A: change the return code to an EL2 fault, and +- * carry on, as we're already in a sane state to handle it. +- * 2) Or we come from anywhere else, and that's a bug: we panic. 
+- * +- * For (1), x0 contains the original return code and x1 doesn't +- * contain anything meaningful at that stage. We can reuse them +- * as temp registers. +- * For (2), who cares? +- */ +- mrs x0, elr_el2 +- adr x1, abort_guest_exit_start +- cmp x0, x1 +- adr x1, abort_guest_exit_end +- ccmp x0, x1, #4, ne +- b.ne __hyp_panic +- mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT) ++ save_caller_saved_regs_vect ++ stp x29, x30, [sp, #-16]! ++ ++ bl kvm_unexpected_el2_exception ++ ++ ldp x29, x30, [sp], #16 ++ restore_caller_saved_regs_vect ++ + eret + + ENTRY(__hyp_do_panic) +@@ -225,7 +247,6 @@ ENDPROC(\label) + invalid_vector el2t_irq_invalid + invalid_vector el2t_fiq_invalid + invalid_vector el2t_error_invalid +- invalid_vector el2h_sync_invalid + invalid_vector el2h_irq_invalid + invalid_vector el2h_fiq_invalid + invalid_vector el1_sync_invalid +@@ -242,7 +263,7 @@ ENTRY(__kvm_hyp_vector) + ventry el2t_fiq_invalid // FIQ EL2t + ventry el2t_error_invalid // Error EL2t + +- ventry el2h_sync_invalid // Synchronous EL2h ++ ventry el2_sync // Synchronous EL2h + ventry el2h_irq_invalid // IRQ EL2h + ventry el2h_fiq_invalid // FIQ EL2h + ventry el2_error // Error EL2h +diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c +index 993d7ad96acd..37b3fd8137e1 100644 +--- a/arch/arm64/kvm/hyp/switch.c ++++ b/arch/arm64/kvm/hyp/switch.c +@@ -25,6 +25,10 @@ + #include + #include + #include ++#include ++ ++extern struct exception_table_entry __start___kvm_ex_table; ++extern struct exception_table_entry __stop___kvm_ex_table; + + static bool __hyp_text __fpsimd_enabled_nvhe(void) + { +@@ -202,10 +206,10 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar) + * saved the guest context yet, and we may return early... + */ + par = read_sysreg(par_el1); +- asm volatile("at s1e1r, %0" : : "r" (far)); +- isb(); +- +- tmp = read_sysreg(par_el1); ++ if (!__kvm_at("s1e1r", far)) ++ tmp = read_sysreg(par_el1); ++ else ++ tmp = 1; /* back to the guest */ + write_sysreg(par, par_el1); + + if (unlikely(tmp & 1)) +@@ -426,7 +430,7 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par, + * making sure it is a kernel address and not a PC-relative + * reference. + */ +- asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va)); ++ asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string)); + + __hyp_do_panic(str_va, + spsr, elr, +@@ -468,3 +472,30 @@ void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) + + unreachable(); + } ++ ++asmlinkage void __hyp_text kvm_unexpected_el2_exception(void) ++{ ++ unsigned long addr, fixup; ++ struct kvm_cpu_context *host_ctxt; ++ struct exception_table_entry *entry, *end; ++ unsigned long elr_el2 = read_sysreg(elr_el2); ++ ++ entry = hyp_symbol_addr(__start___kvm_ex_table); ++ end = hyp_symbol_addr(__stop___kvm_ex_table); ++ host_ctxt = __hyp_this_cpu_ptr(kvm_host_cpu_state); ++ ++ while (entry < end) { ++ addr = (unsigned long)&entry->insn + entry->insn; ++ fixup = (unsigned long)&entry->fixup + entry->fixup; ++ ++ if (addr != elr_el2) { ++ entry++; ++ continue; ++ } ++ ++ write_sysreg(fixup, elr_el2); ++ return; ++ } ++ ++ hyp_panic(host_ctxt); ++} +diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c +index 7cee552ce0bf..10d80456f38f 100644 +--- a/arch/arm64/kvm/sys_regs.c ++++ b/arch/arm64/kvm/sys_regs.c +@@ -450,6 +450,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) + { + u64 pmcr, val; + ++ /* No PMU available, PMCR_EL0 may UNDEF... 
*/ ++ if (!kvm_arm_support_pmu_v3()) ++ return; ++ + pmcr = read_sysreg(pmcr_el0); + /* + * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN +@@ -1207,9 +1211,9 @@ static const struct sys_reg_desc cp14_regs[] = { + { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi }, + DBG_BCR_BVR_WCR_WVR(1), + /* DBGDCCINT */ +- { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 }, ++ { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32, NULL, cp14_DBGDCCINT }, + /* DBGDSCRext */ +- { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 }, ++ { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32, NULL, cp14_DBGDSCRext }, + DBG_BCR_BVR_WCR_WVR(2), + /* DBGDTR[RT]Xint */ + { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi }, +@@ -1224,7 +1228,7 @@ static const struct sys_reg_desc cp14_regs[] = { + { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi }, + DBG_BCR_BVR_WCR_WVR(6), + /* DBGVCR */ +- { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 }, ++ { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32, NULL, cp14_DBGVCR }, + DBG_BCR_BVR_WCR_WVR(7), + DBG_BCR_BVR_WCR_WVR(8), + DBG_BCR_BVR_WCR_WVR(9), +@@ -1315,6 +1319,7 @@ static const struct sys_reg_desc cp15_regs[] = { + { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, + { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, + { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR }, ++ { Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, c2_TTBCR2 }, + { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR }, + { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR }, + { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR }, +diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c +index b1e42bad69ac..fddae9b8e1bf 100644 +--- a/arch/arm64/mm/numa.c ++++ b/arch/arm64/mm/numa.c +@@ -58,7 +58,11 @@ EXPORT_SYMBOL(node_to_cpumask_map); + */ + const struct cpumask *cpumask_of_node(int node) + { +- if (WARN_ON(node >= nr_node_ids)) ++ ++ if (node == NUMA_NO_NODE) ++ return cpu_all_mask; ++ ++ if (WARN_ON(node < 0 || node >= nr_node_ids)) + return cpu_none_mask; + + if (WARN_ON(node_to_cpumask_map[node] == NULL)) +diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c +index dc2d16ce8a0d..3e33a9844d99 100644 +--- a/arch/h8300/kernel/asm-offsets.c ++++ b/arch/h8300/kernel/asm-offsets.c +@@ -62,6 +62,9 @@ int main(void) + OFFSET(TI_FLAGS, thread_info, flags); + OFFSET(TI_CPU, thread_info, cpu); + OFFSET(TI_PRE, thread_info, preempt_count); ++#ifdef CONFIG_PREEMPTION ++ DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); ++#endif + + return 0; + } +diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S +index ec87e67feb19..22c10102712a 100644 +--- a/arch/hexagon/kernel/vmlinux.lds.S ++++ b/arch/hexagon/kernel/vmlinux.lds.S +@@ -71,13 +71,8 @@ SECTIONS + + _end = .; + +- /DISCARD/ : { +- EXIT_TEXT +- EXIT_DATA +- EXIT_CALL +- } +- + STABS_DEBUG + DWARF_DEBUG + ++ DISCARDS + } +diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h +index 845143990a1d..9d3d4fb87a7a 100644 +--- a/arch/ia64/include/asm/ptrace.h ++++ b/arch/ia64/include/asm/ptrace.h +@@ -53,8 +53,7 @@ + + static inline unsigned long user_stack_pointer(struct pt_regs *regs) + { +- /* FIXME: should this be bspstore + nr_dirty regs? 
*/ +- return regs->ar_bspstore; ++ return regs->r12; + } + + static inline int is_syscall_success(struct pt_regs *regs) +@@ -78,11 +77,6 @@ static inline long regs_return_value(struct pt_regs *regs) + unsigned long __ip = instruction_pointer(regs); \ + (__ip & ~3UL) + ((__ip & 3UL) << 2); \ + }) +-/* +- * Why not default? Because user_stack_pointer() on ia64 gives register +- * stack backing store instead... +- */ +-#define current_user_stack_pointer() (current_pt_regs()->r12) + + /* given a pointer to a task_struct, return the user's pt_regs */ + # define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1) +diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h +index 1d0b875fec44..ec909eec0b4c 100644 +--- a/arch/ia64/include/asm/syscall.h ++++ b/arch/ia64/include/asm/syscall.h +@@ -35,7 +35,7 @@ static inline void syscall_rollback(struct task_struct *task, + static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) + { +- return regs->r10 == -1 ? regs->r8:0; ++ return regs->r10 == -1 ? -regs->r8:0; + } + + static inline long syscall_get_return_value(struct task_struct *task, +diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile +index 9edda5466020..bcd3668f1bb8 100644 +--- a/arch/ia64/kernel/Makefile ++++ b/arch/ia64/kernel/Makefile +@@ -42,7 +42,7 @@ endif + obj-$(CONFIG_INTEL_IOMMU) += pci-dma.o + obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o + +-obj-$(CONFIG_BINFMT_ELF) += elfcore.o ++obj-$(CONFIG_ELF_CORE) += elfcore.o + + # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. + CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 +diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c +index 9509cc73b9c6..64ae9cde8bdb 100644 +--- a/arch/ia64/kernel/mca.c ++++ b/arch/ia64/kernel/mca.c +@@ -1858,7 +1858,7 @@ ia64_mca_cpu_init(void *cpu_data) + data = mca_bootmem(); + first_time = 0; + } else +- data = (void *)__get_free_pages(GFP_KERNEL, ++ data = (void *)__get_free_pages(GFP_ATOMIC, + get_order(sz)); + if (!data) + panic("Could not allocate MCA memory for cpu %d\n", +diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c +index 94f8bf777afa..3503d488e9b3 100644 +--- a/arch/ia64/kernel/mca_drv.c ++++ b/arch/ia64/kernel/mca_drv.c +@@ -343,7 +343,7 @@ init_record_index_pools(void) + + /* - 2 - */ + sect_min_size = sal_log_sect_min_sizes[0]; +- for (i = 1; i < sizeof sal_log_sect_min_sizes/sizeof(size_t); i++) ++ for (i = 1; i < ARRAY_SIZE(sal_log_sect_min_sizes); i++) + if (sect_min_size > sal_log_sect_min_sizes[i]) + sect_min_size = sal_log_sect_min_sizes[i]; + +diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c +index 36f660da8124..56007258c014 100644 +--- a/arch/ia64/kernel/ptrace.c ++++ b/arch/ia64/kernel/ptrace.c +@@ -2144,27 +2144,39 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data) + { + struct syscall_get_set_args *args = data; + struct pt_regs *pt = args->regs; +- unsigned long *krbs, cfm, ndirty; ++ unsigned long *krbs, cfm, ndirty, nlocals, nouts; + int i, count; + + if (unw_unwind_to_user(info) < 0) + return; + ++ /* ++ * We get here via a few paths: ++ * - break instruction: cfm is shared with caller. ++ * syscall args are in out= regs, locals are non-empty. ++ * - epsinstruction: cfm is set by br.call ++ * locals don't exist. ++ * ++ * For both cases argguments are reachable in cfm.sof - cfm.sol. ++ * CFM: [ ... 
| sor: 17..14 | sol : 13..7 | sof : 6..0 ] ++ */ + cfm = pt->cr_ifs; ++ nlocals = (cfm >> 7) & 0x7f; /* aka sol */ ++ nouts = (cfm & 0x7f) - nlocals; /* aka sof - sol */ + krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8; + ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19)); + + count = 0; + if (in_syscall(pt)) +- count = min_t(int, args->n, cfm & 0x7f); ++ count = min_t(int, args->n, nouts); + ++ /* Iterate over outs. */ + for (i = 0; i < count; i++) { ++ int j = ndirty + nlocals + i + args->i; + if (args->rw) +- *ia64_rse_skip_regs(krbs, ndirty + i + args->i) = +- args->args[i]; ++ *ia64_rse_skip_regs(krbs, j) = args->args[i]; + else +- args->args[i] = *ia64_rse_skip_regs(krbs, +- ndirty + i + args->i); ++ args->args[i] = *ia64_rse_skip_regs(krbs, j); + } + + if (!args->rw) { +diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c +index 878626805369..3b0c892953ab 100644 +--- a/arch/ia64/mm/discontig.c ++++ b/arch/ia64/mm/discontig.c +@@ -99,7 +99,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len, + * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been + * called yet. Note that node 0 will also count all non-existent cpus. + */ +-static int __meminit early_nr_cpus_node(int node) ++static int early_nr_cpus_node(int node) + { + int cpu, n = 0; + +@@ -114,7 +114,7 @@ static int __meminit early_nr_cpus_node(int node) + * compute_pernodesize - compute size of pernode data + * @node: the node id. + */ +-static unsigned long __meminit compute_pernodesize(int node) ++static unsigned long compute_pernodesize(int node) + { + unsigned long pernodesize = 0, cpus; + +@@ -411,7 +411,7 @@ static void __init reserve_pernode_space(void) + } + } + +-static void __meminit scatter_node_data(void) ++static void scatter_node_data(void) + { + pg_data_t **dst; + int node; +diff --git a/arch/ia64/scripts/unwcheck.py b/arch/ia64/scripts/unwcheck.py +index 2bfd941ff7c7..c27849889e19 100644 +--- a/arch/ia64/scripts/unwcheck.py ++++ b/arch/ia64/scripts/unwcheck.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/python ++#!/usr/bin/env python + # + # Usage: unwcheck.py FILE + # +diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h +index 3177ce8331d6..baee0c77b981 100644 +--- a/arch/m68k/include/asm/m53xxacr.h ++++ b/arch/m68k/include/asm/m53xxacr.h +@@ -88,9 +88,9 @@ + * coherency though in all cases. And for copyback caches we will need + * to push cached data as well. 
+ */ +-#define CACHE_INIT CACR_CINVA +-#define CACHE_INVALIDATE CACR_CINVA +-#define CACHE_INVALIDATED CACR_CINVA ++#define CACHE_INIT (CACHE_MODE + CACR_CINVA - CACR_EC) ++#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINVA) ++#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA) + + #define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \ + (0x000f0000) + \ +diff --git a/arch/m68k/include/asm/mac_via.h b/arch/m68k/include/asm/mac_via.h +index 53c632c85b03..dff6db19ae4d 100644 +--- a/arch/m68k/include/asm/mac_via.h ++++ b/arch/m68k/include/asm/mac_via.h +@@ -256,6 +256,7 @@ extern int rbv_present,via_alt_mapping; + + struct irq_desc; + ++extern void via_l2_flush(int writeback); + extern void via_register_interrupts(void); + extern void via_irq_enable(int); + extern void via_irq_disable(int); +diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c +index e46895316eb0..dcf18e1ca0bb 100644 +--- a/arch/m68k/mac/config.c ++++ b/arch/m68k/mac/config.c +@@ -61,7 +61,6 @@ extern void iop_preinit(void); + extern void iop_init(void); + extern void via_init(void); + extern void via_init_clock(irq_handler_t func); +-extern void via_flush_cache(void); + extern void oss_init(void); + extern void psc_init(void); + extern void baboon_init(void); +@@ -132,21 +131,6 @@ int __init mac_parse_bootinfo(const struct bi_record *record) + return unknown; + } + +-/* +- * Flip into 24bit mode for an instant - flushes the L2 cache card. We +- * have to disable interrupts for this. Our IRQ handlers will crap +- * themselves if they take an IRQ in 24bit mode! +- */ +- +-static void mac_cache_card_flush(int writeback) +-{ +- unsigned long flags; +- +- local_irq_save(flags); +- via_flush_cache(); +- local_irq_restore(flags); +-} +- + void __init config_mac(void) + { + if (!MACH_IS_MAC) +@@ -179,9 +163,8 @@ void __init config_mac(void) + * not. 
+ */ + +- if (macintosh_config->ident == MAC_MODEL_IICI +- || macintosh_config->ident == MAC_MODEL_IIFX) +- mach_l2_flush = mac_cache_card_flush; ++ if (macintosh_config->ident == MAC_MODEL_IICI) ++ mach_l2_flush = via_l2_flush; + } + + +diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c +index 7990b6f50105..cb516cacc819 100644 +--- a/arch/m68k/mac/iop.c ++++ b/arch/m68k/mac/iop.c +@@ -173,7 +173,7 @@ static __inline__ void iop_writeb(volatile struct mac_iop *iop, __u16 addr, __u8 + + static __inline__ void iop_stop(volatile struct mac_iop *iop) + { +- iop->status_ctrl &= ~IOP_RUN; ++ iop->status_ctrl = IOP_AUTOINC; + } + + static __inline__ void iop_start(volatile struct mac_iop *iop) +@@ -181,14 +181,9 @@ static __inline__ void iop_start(volatile struct mac_iop *iop) + iop->status_ctrl = IOP_RUN | IOP_AUTOINC; + } + +-static __inline__ void iop_bypass(volatile struct mac_iop *iop) +-{ +- iop->status_ctrl |= IOP_BYPASS; +-} +- + static __inline__ void iop_interrupt(volatile struct mac_iop *iop) + { +- iop->status_ctrl |= IOP_IRQ; ++ iop->status_ctrl = IOP_IRQ | IOP_RUN | IOP_AUTOINC; + } + + static int iop_alive(volatile struct mac_iop *iop) +@@ -239,7 +234,6 @@ void __init iop_preinit(void) + } else { + iop_base[IOP_NUM_SCC] = (struct mac_iop *) SCC_IOP_BASE_QUADRA; + } +- iop_base[IOP_NUM_SCC]->status_ctrl = 0x87; + iop_scc_present = 1; + } else { + iop_base[IOP_NUM_SCC] = NULL; +@@ -251,7 +245,7 @@ void __init iop_preinit(void) + } else { + iop_base[IOP_NUM_ISM] = (struct mac_iop *) ISM_IOP_BASE_QUADRA; + } +- iop_base[IOP_NUM_ISM]->status_ctrl = 0; ++ iop_stop(iop_base[IOP_NUM_ISM]); + iop_ism_present = 1; + } else { + iop_base[IOP_NUM_ISM] = NULL; +@@ -416,7 +410,8 @@ static void iop_handle_send(uint iop_num, uint chan) + iop_free_msg(msg2); + + iop_send_queue[iop_num][chan] = msg; +- if (msg) iop_do_send(msg); ++ if (msg && iop_readb(iop, IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE) ++ iop_do_send(msg); + } + + /* +@@ -497,16 +492,12 @@ int iop_send_message(uint iop_num, uint chan, void *privdata, + + if (!(q = iop_send_queue[iop_num][chan])) { + iop_send_queue[iop_num][chan] = msg; ++ iop_do_send(msg); + } else { + while (q->next) q = q->next; + q->next = msg; + } + +- if (iop_readb(iop_base[iop_num], +- IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE) { +- iop_do_send(msg); +- } +- + return 0; + } + +diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c +index a435aced6e43..35382c1b563f 100644 +--- a/arch/m68k/mac/via.c ++++ b/arch/m68k/mac/via.c +@@ -299,10 +299,14 @@ void via_debug_dump(void) + * the system into 24-bit mode for an instant. 
+ */ + +-void via_flush_cache(void) ++void via_l2_flush(int writeback) + { ++ unsigned long flags; ++ ++ local_irq_save(flags); + via2[gBufB] &= ~VIA2B_vMode32; + via2[gBufB] |= VIA2B_vMode32; ++ local_irq_restore(flags); + } + + /* +diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c +index ea89a24f4600..cc0f924bbdd2 100644 +--- a/arch/m68k/q40/config.c ++++ b/arch/m68k/q40/config.c +@@ -303,6 +303,7 @@ static int q40_get_rtc_pll(struct rtc_pll_info *pll) + { + int tmp = Q40_RTC_CTRL; + ++ pll->pll_ctrl = 0; + pll->pll_value = tmp & Q40_RTC_PLL_MASK; + if (tmp & Q40_RTC_PLL_SIGN) + pll->pll_value = -pll->pll_value; +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig +index f8a529c85279..24eb7fe7922e 100644 +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -848,6 +848,7 @@ config SNI_RM + select I8253 + select I8259 + select ISA ++ select MIPS_L1_CACHE_SHIFT_6 + select SWAP_IO_SPACE if CPU_BIG_ENDIAN + select SYS_HAS_CPU_R4X00 + select SYS_HAS_CPU_R5000 +diff --git a/arch/mips/Makefile b/arch/mips/Makefile +index 1a6bac7b076f..af4eff7d22ec 100644 +--- a/arch/mips/Makefile ++++ b/arch/mips/Makefile +@@ -256,12 +256,23 @@ ifdef CONFIG_64BIT + endif + endif + ++# When linking a 32-bit executable the LLVM linker cannot cope with a ++# 32-bit load address that has been sign-extended to 64 bits. Simply ++# remove the upper 32 bits then, as it is safe to do so with other ++# linkers. ++ifdef CONFIG_64BIT ++ load-ld = $(load-y) ++else ++ load-ld = $(subst 0xffffffff,0x,$(load-y)) ++endif ++ + KBUILD_AFLAGS += $(cflags-y) + KBUILD_CFLAGS += $(cflags-y) +-KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) ++KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y) -DLINKER_LOAD_ADDRESS=$(load-ld) + KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0) + + bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \ ++ LINKER_LOAD_ADDRESS=$(load-ld) \ + VMLINUX_ENTRY_ADDRESS=$(entry-y) \ + PLATFORM="$(platform-y)" + ifdef CONFIG_32BIT +@@ -275,7 +286,7 @@ LDFLAGS += -m $(ld-emul) + + ifdef CONFIG_MIPS + CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \ +- egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \ ++ egrep -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \ + sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g') + ifdef CONFIG_64BIT + CHECKFLAGS += -m64 +diff --git a/arch/mips/alchemy/board-xxs1500.c b/arch/mips/alchemy/board-xxs1500.c +index 0fc53e08a894..c05f7376148a 100644 +--- a/arch/mips/alchemy/board-xxs1500.c ++++ b/arch/mips/alchemy/board-xxs1500.c +@@ -30,6 +30,7 @@ + #include + #include + #include ++#include + #include + + const char *get_system_type(void) +diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c +index 7ba7ea0a22f8..e6d0044393b0 100644 +--- a/arch/mips/alchemy/common/clock.c ++++ b/arch/mips/alchemy/common/clock.c +@@ -151,6 +151,7 @@ static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name, + { + struct clk_init_data id; + struct clk_hw *h; ++ struct clk *clk; + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) +@@ -163,7 +164,13 @@ static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name, + id.ops = &alchemy_clkops_cpu; + h->init = &id; + +- return clk_register(NULL, h); ++ clk = clk_register(NULL, h); ++ if (IS_ERR(clk)) { ++ pr_err("failed to register clock\n"); ++ kfree(h); ++ } ++ ++ return clk; + } + + /* AUXPLLs ************************************************************/ +diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig +index e970fd9cf769..7ca7384fd5c9 
100644 +--- a/arch/mips/bcm47xx/Kconfig ++++ b/arch/mips/bcm47xx/Kconfig +@@ -26,6 +26,7 @@ config BCM47XX_BCMA + select BCMA + select BCMA_HOST_SOC + select BCMA_DRIVER_MIPS ++ select BCMA_DRIVER_PCI if PCI + select BCMA_DRIVER_PCI_HOSTMODE if PCI + select BCMA_DRIVER_GPIO + default y +diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile +index 2f77e250b91d..3e93eea5a5f5 100644 +--- a/arch/mips/boot/compressed/Makefile ++++ b/arch/mips/boot/compressed/Makefile +@@ -33,7 +33,7 @@ KBUILD_AFLAGS := $(LINUXINCLUDE) $(KBUILD_AFLAGS) -D__ASSEMBLY__ \ + KCOV_INSTRUMENT := n + + # decompressor objects (linked with vmlinuz) +-vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o ++vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o $(obj)/bswapsi.o + + ifdef CONFIG_DEBUG_ZBOOT + vmlinuzobjs-$(CONFIG_DEBUG_ZBOOT) += $(obj)/dbg.o +@@ -47,7 +47,7 @@ extra-y += uart-ath79.c + $(obj)/uart-ath79.c: $(srctree)/arch/mips/ath79/early_printk.c + $(call cmd,shipped) + +-vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o $(obj)/bswapsi.o ++vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o + + extra-y += ashldi3.c bswapsi.c + $(obj)/ashldi3.o $(obj)/bswapsi.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib +@@ -87,7 +87,7 @@ ifneq ($(zload-y),) + VMLINUZ_LOAD_ADDRESS := $(zload-y) + else + VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \ +- $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS)) ++ $(obj)/vmlinux.bin $(LINKER_LOAD_ADDRESS)) + endif + + vmlinuzobjs-y += $(obj)/piggy.o +diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c +index fdf99e9dd4c3..66096c766a60 100644 +--- a/arch/mips/boot/compressed/decompress.c ++++ b/arch/mips/boot/compressed/decompress.c +@@ -11,12 +11,15 @@ + * option) any later version. + */ + ++#define DISABLE_BRANCH_PROFILING ++ + #include + #include + #include + #include + + #include ++#include + + /* + * These two variables specify the free mem region +@@ -124,7 +127,7 @@ void decompress_kernel(unsigned long boot_heap_start) + dtb_size = fdt_totalsize((void *)&__appended_dtb); + + /* last four bytes is always image size in little endian */ +- image_size = le32_to_cpup((void *)&__image_end - 4); ++ image_size = get_unaligned_le32((void *)&__image_end - 4); + + /* copy dtb to where the booted kernel will expect it */ + memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size, +diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h +index bdd6dc18e65c..941efd878334 100644 +--- a/arch/mips/include/asm/cpu-type.h ++++ b/arch/mips/include/asm/cpu-type.h +@@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type) + case CPU_34K: + case CPU_1004K: + case CPU_74K: ++ case CPU_1074K: + case CPU_M14KC: + case CPU_M14KEC: + case CPU_INTERAPTIV: +diff --git a/arch/mips/include/asm/div64.h b/arch/mips/include/asm/div64.h +index dc5ea5736440..ceece76fc971 100644 +--- a/arch/mips/include/asm/div64.h ++++ b/arch/mips/include/asm/div64.h +@@ -1,5 +1,5 @@ + /* +- * Copyright (C) 2000, 2004 Maciej W. Rozycki ++ * Copyright (C) 2000, 2004, 2021 Maciej W. Rozycki + * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org) + * + * This file is subject to the terms and conditions of the GNU General Public +@@ -9,25 +9,18 @@ + #ifndef __ASM_DIV64_H + #define __ASM_DIV64_H + +-#include +- +-#if BITS_PER_LONG == 64 ++#include + +-#include ++#if BITS_PER_LONG == 32 + + /* + * No traps on overflows for any of these... 
+ */ + +-#define __div64_32(n, base) \ +-({ \ ++#define do_div64_32(res, high, low, base) ({ \ + unsigned long __cf, __tmp, __tmp2, __i; \ + unsigned long __quot32, __mod32; \ +- unsigned long __high, __low; \ +- unsigned long long __n; \ + \ +- __high = *__n >> 32; \ +- __low = __n; \ + __asm__( \ + " .set push \n" \ + " .set noat \n" \ +@@ -51,18 +44,48 @@ + " subu %0, %0, %z6 \n" \ + " addiu %2, %2, 1 \n" \ + "3: \n" \ +- " bnez %4, 0b\n\t" \ +- " srl %5, %1, 0x1f\n\t" \ ++ " bnez %4, 0b \n" \ ++ " srl %5, %1, 0x1f \n" \ + " .set pop" \ + : "=&r" (__mod32), "=&r" (__tmp), \ + "=&r" (__quot32), "=&r" (__cf), \ + "=&r" (__i), "=&r" (__tmp2) \ +- : "Jr" (base), "0" (__high), "1" (__low)); \ ++ : "Jr" (base), "0" (high), "1" (low)); \ + \ +- (__n) = __quot32; \ ++ (res) = __quot32; \ + __mod32; \ + }) + +-#endif /* BITS_PER_LONG == 64 */ ++#define __div64_32(n, base) ({ \ ++ unsigned long __upper, __low, __high, __radix; \ ++ unsigned long long __quot; \ ++ unsigned long long __div; \ ++ unsigned long __mod; \ ++ \ ++ __div = (*n); \ ++ __radix = (base); \ ++ \ ++ __high = __div >> 32; \ ++ __low = __div; \ ++ \ ++ if (__high < __radix) { \ ++ __upper = __high; \ ++ __high = 0; \ ++ } else { \ ++ __upper = __high % __radix; \ ++ __high /= __radix; \ ++ } \ ++ \ ++ __mod = do_div64_32(__low, __upper, __low, __radix); \ ++ \ ++ __quot = __high; \ ++ __quot = __quot << 32 | __low; \ ++ (*n) = __quot; \ ++ __mod; \ ++}) ++ ++#endif /* BITS_PER_LONG == 32 */ ++ ++#include + + #endif /* __ASM_DIV64_H */ +diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h +index 982bc0685330..4747a4694669 100644 +--- a/arch/mips/include/asm/hugetlb.h ++++ b/arch/mips/include/asm/hugetlb.h +@@ -67,7 +67,13 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) + { +- flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma))); ++ /* ++ * clear the huge pte entry firstly, so that the other smp threads will ++ * not get old pte entry after finishing flush_tlb_page and before ++ * setting new huge pte entry ++ */ ++ huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); ++ flush_tlb_page(vma, addr); + } + + static inline int huge_pte_none(pte_t pte) +diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h +index bebec370324f..22573b4f25b6 100644 +--- a/arch/mips/include/asm/kvm_host.h ++++ b/arch/mips/include/asm/kvm_host.h +@@ -243,8 +243,12 @@ enum emulation_result { + #define MIPS3_PG_SHIFT 6 + #define MIPS3_PG_FRAME 0x3fffffc0 + ++#if defined(CONFIG_64BIT) ++#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13) ++#else + #define VPN2_MASK 0xffffe000 +-#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID ++#endif ++#define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data) + #define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G) + #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) + #define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID) +diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S +index ae810da4d499..59ed811eb32a 100644 +--- a/arch/mips/kernel/genex.S ++++ b/arch/mips/kernel/genex.S +@@ -429,20 +429,20 @@ NESTED(nmi_handler, PT_SIZE, sp) + .endm + + .macro __build_clear_fpe ++ CLI ++ TRACE_IRQS_OFF + .set push + /* gas fails to assemble cfc1 for some archs (octeon).*/ \ + .set mips1 + SET_HARDFLOAT + cfc1 a1, fcr31 + .set pop +- CLI +- TRACE_IRQS_OFF + .endm + + .macro __build_clear_msa_fpe +- _cfcmsa a1, MSA_CSR + CLI + TRACE_IRQS_OFF ++ _cfcmsa 
a1, MSA_CSR + .endm + + .macro __build_clear_ade +diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c +index 60177a612cb1..df65516778a2 100644 +--- a/arch/mips/kernel/mips-cm.c ++++ b/arch/mips/kernel/mips-cm.c +@@ -123,9 +123,9 @@ static char *cm2_causes[32] = { + "COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07", + "0x08", "0x09", "0x0a", "0x0b", + "0x0c", "0x0d", "0x0e", "0x0f", +- "0x10", "0x11", "0x12", "0x13", +- "0x14", "0x15", "0x16", "INTVN_WR_ERR", +- "INTVN_RD_ERR", "0x19", "0x1a", "0x1b", ++ "0x10", "INTVN_WR_ERR", "INTVN_RD_ERR", "0x13", ++ "0x14", "0x15", "0x16", "0x17", ++ "0x18", "0x19", "0x1a", "0x1b", + "0x1c", "0x1d", "0x1e", "0x1f" + }; + +diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c +index 1958910b75c0..b4a7c303019b 100644 +--- a/arch/mips/kernel/relocate.c ++++ b/arch/mips/kernel/relocate.c +@@ -175,8 +175,14 @@ static int __init relocate_exception_table(long offset) + static inline __init unsigned long rotate_xor(unsigned long hash, + const void *area, size_t size) + { +- size_t i; +- unsigned long *ptr = (unsigned long *)area; ++ const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash)); ++ size_t diff, i; ++ ++ diff = (void *)ptr - area; ++ if (unlikely(size < diff + sizeof(hash))) ++ return hash; ++ ++ size = ALIGN_DOWN(size - diff, sizeof(hash)); + + for (i = 0; i < size / sizeof(hash); i++) { + /* Rotate by odd number of bits and XOR. */ +diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c +index 7cc1d29334ee..2c3b89a65317 100644 +--- a/arch/mips/kernel/setup.c ++++ b/arch/mips/kernel/setup.c +@@ -847,7 +847,17 @@ static void __init arch_mem_init(char **cmdline_p) + BOOTMEM_DEFAULT); + #endif + device_tree_init(); ++ ++ /* ++ * In order to reduce the possibility of kernel panic when failed to ++ * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate ++ * low memory as small as possible before plat_swiotlb_setup(), so ++ * make sparse_init() using top-down allocation. ++ */ ++ memblock_set_bottom_up(false); + sparse_init(); ++ memblock_set_bottom_up(true); ++ + plat_swiotlb_setup(); + + dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); +diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c +index 416d53f587e7..6e3671752775 100644 +--- a/arch/mips/kernel/smp-bmips.c ++++ b/arch/mips/kernel/smp-bmips.c +@@ -236,6 +236,8 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle) + */ + static void bmips_init_secondary(void) + { ++ bmips_cpu_setup(); ++ + switch (current_cpu_type()) { + case CPU_BMIPS4350: + case CPU_BMIPS4380: +diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c +index a7f81261c781..b15ee1258668 100644 +--- a/arch/mips/kernel/time.c ++++ b/arch/mips/kernel/time.c +@@ -22,12 +22,77 @@ + #include + #include + #include ++#include ++#include + + #include + #include + #include + #include + ++#ifdef CONFIG_CPU_FREQ ++ ++static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref); ++static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq); ++static unsigned long glb_lpj_ref; ++static unsigned long glb_lpj_ref_freq; ++ ++static int cpufreq_callback(struct notifier_block *nb, ++ unsigned long val, void *data) ++{ ++ int cpu; ++ struct cpufreq_freqs *freq = data; ++ ++ /* ++ * Skip lpj numbers adjustment if the CPU-freq transition is safe for ++ * the loops delay. (Is this possible?) ++ */ ++ if (freq->flags & CPUFREQ_CONST_LOOPS) ++ return NOTIFY_OK; ++ ++ /* Save the initial values of the lpjes for future scaling. 
*/ ++ if (!glb_lpj_ref) { ++ glb_lpj_ref = boot_cpu_data.udelay_val; ++ glb_lpj_ref_freq = freq->old; ++ ++ for_each_online_cpu(cpu) { ++ per_cpu(pcp_lpj_ref, cpu) = ++ cpu_data[cpu].udelay_val; ++ per_cpu(pcp_lpj_ref_freq, cpu) = freq->old; ++ } ++ } ++ ++ cpu = freq->cpu; ++ /* ++ * Adjust global lpj variable and per-CPU udelay_val number in ++ * accordance with the new CPU frequency. ++ */ ++ if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || ++ (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) { ++ loops_per_jiffy = cpufreq_scale(glb_lpj_ref, ++ glb_lpj_ref_freq, ++ freq->new); ++ ++ cpu_data[cpu].udelay_val = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu), ++ per_cpu(pcp_lpj_ref_freq, cpu), freq->new); ++ } ++ ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block cpufreq_notifier = { ++ .notifier_call = cpufreq_callback, ++}; ++ ++static int __init register_cpufreq_notifier(void) ++{ ++ return cpufreq_register_notifier(&cpufreq_notifier, ++ CPUFREQ_TRANSITION_NOTIFIER); ++} ++core_initcall(register_cpufreq_notifier); ++ ++#endif /* CONFIG_CPU_FREQ */ ++ + /* + * forward reference + */ +diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c +index cf3eb61fad12..68da7613874a 100644 +--- a/arch/mips/kernel/topology.c ++++ b/arch/mips/kernel/topology.c +@@ -19,7 +19,7 @@ static int __init topology_init(void) + for_each_present_cpu(i) { + struct cpu *c = &per_cpu(cpu_devices, i); + +- c->hotpluggable = 1; ++ c->hotpluggable = !!i; + ret = register_cpu(c, i); + if (ret) + printk(KERN_WARNING "topology_init: register_cpu %d " +diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c +index 8e0749665934..5f717473d08e 100644 +--- a/arch/mips/kernel/traps.c ++++ b/arch/mips/kernel/traps.c +@@ -2137,6 +2137,7 @@ static void configure_status(void) + + change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, + status_set); ++ back_to_back_c0_hazard(); + } + + unsigned int hwrena; +diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S +index 2d965d91fee4..23c5509f3b51 100644 +--- a/arch/mips/kernel/vmlinux.lds.S ++++ b/arch/mips/kernel/vmlinux.lds.S +@@ -49,7 +49,7 @@ SECTIONS + /* . = 0xa800000000300000; */ + . = 0xffffffff80300000; + #endif +- . = VMLINUX_LOAD_ADDRESS; ++ . 
= LINKER_LOAD_ADDRESS; + /* read-only */ + _text = .; /* Text and read-only data */ + .text : { +@@ -92,6 +92,7 @@ SECTIONS + + INIT_TASK_DATA(THREAD_SIZE) + NOSAVE_DATA ++ PAGE_ALIGNED_DATA(PAGE_SIZE) + CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) + READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) + DATA_DATA +diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c +index 7c6f75c2aa4d..e64f678ca12c 100644 +--- a/arch/mips/lantiq/irq.c ++++ b/arch/mips/lantiq/irq.c +@@ -245,7 +245,7 @@ static void ltq_hw_irqdispatch(int module) + do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module)); + + /* if this is a EBU irq, we need to ack it or get a deadlock */ +- if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) ++ if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0) + ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10, + LTQ_EBU_PCC_ISTAT); + } +diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c +index 5530070e0d05..57497a26e79c 100644 +--- a/arch/mips/lib/mips-atomic.c ++++ b/arch/mips/lib/mips-atomic.c +@@ -37,7 +37,7 @@ + */ + notrace void arch_local_irq_disable(void) + { +- preempt_disable(); ++ preempt_disable_notrace(); + + __asm__ __volatile__( + " .set push \n" +@@ -53,7 +53,7 @@ notrace void arch_local_irq_disable(void) + : /* no inputs */ + : "memory"); + +- preempt_enable(); ++ preempt_enable_notrace(); + } + EXPORT_SYMBOL(arch_local_irq_disable); + +@@ -61,7 +61,7 @@ notrace unsigned long arch_local_irq_save(void) + { + unsigned long flags; + +- preempt_disable(); ++ preempt_disable_notrace(); + + __asm__ __volatile__( + " .set push \n" +@@ -78,7 +78,7 @@ notrace unsigned long arch_local_irq_save(void) + : /* no inputs */ + : "memory"); + +- preempt_enable(); ++ preempt_enable_notrace(); + + return flags; + } +@@ -88,7 +88,7 @@ notrace void arch_local_irq_restore(unsigned long flags) + { + unsigned long __tmp1; + +- preempt_disable(); ++ preempt_disable_notrace(); + + __asm__ __volatile__( + " .set push \n" +@@ -106,7 +106,7 @@ notrace void arch_local_irq_restore(unsigned long flags) + : "0" (flags) + : "memory"); + +- preempt_enable(); ++ preempt_enable_notrace(); + } + EXPORT_SYMBOL(arch_local_irq_restore); + +diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c +index 0ff379f0cc4a..b9dea4ce290c 100644 +--- a/arch/mips/mm/c-r4k.c ++++ b/arch/mips/mm/c-r4k.c +@@ -1630,7 +1630,7 @@ static int probe_scache(void) + return 1; + } + +-static void __init loongson2_sc_init(void) ++static void loongson2_sc_init(void) + { + struct cpuinfo_mips *c = ¤t_cpu_data; + +@@ -1746,7 +1746,11 @@ static void setup_scache(void) + printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n", + scache_size >> 10, + way_string[c->scache.ways], c->scache.linesz); ++ ++ if (current_cpu_type() == CPU_BMIPS5000) ++ c->options |= MIPS_CPU_INCLUSIVE_CACHES; + } ++ + #else + if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT)) + panic("Dunno how to handle MIPS32 / MIPS64 second level cache"); +diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c +index 0596505770db..11985399c469 100644 +--- a/arch/mips/mm/tlb-r4k.c ++++ b/arch/mips/mm/tlb-r4k.c +@@ -424,6 +424,7 @@ int has_transparent_hugepage(void) + } + return mask == PM_HUGE_MASK; + } ++EXPORT_SYMBOL(has_transparent_hugepage); + + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c +index 516e1233d771..0e51db7ac172 100644 +--- a/arch/mips/mti-malta/malta-platform.c ++++ 
b/arch/mips/mti-malta/malta-platform.c +@@ -48,7 +48,8 @@ static struct plat_serial8250_port uart8250_data[] = { + .mapbase = 0x1f000900, /* The CBUS UART */ + .irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2, + .uartclk = 3686400, /* Twice the usual clk! */ +- .iotype = UPIO_MEM32, ++ .iotype = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ? ++ UPIO_MEM32BE : UPIO_MEM32, + .flags = CBUS_UART_FLAGS, + .regshift = 3, + }, +diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c +index 2d6886f09ba3..009b840ee5ef 100644 +--- a/arch/mips/pci/pci-legacy.c ++++ b/arch/mips/pci/pci-legacy.c +@@ -158,8 +158,13 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node) + res = hose->mem_resource; + break; + } +- if (res != NULL) +- of_pci_range_to_resource(&range, node, res); ++ if (res != NULL) { ++ res->name = node->full_name; ++ res->flags = range.flags; ++ res->start = range.cpu_addr; ++ res->end = range.cpu_addr + range.size - 1; ++ res->parent = res->child = res->sibling = NULL; ++ } + } + } + +diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c +index 0aa67a2d0ae6..6b7226830354 100644 +--- a/arch/mips/ralink/of.c ++++ b/arch/mips/ralink/of.c +@@ -10,6 +10,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -27,6 +28,7 @@ + + __iomem void *rt_sysc_membase; + __iomem void *rt_memc_membase; ++EXPORT_SYMBOL_GPL(rt_sysc_membase); + + __iomem void *plat_of_remap_node(const char *node) + { +diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c +index f9407e170476..c6af7047eb0d 100644 +--- a/arch/mips/sni/a20r.c ++++ b/arch/mips/sni/a20r.c +@@ -143,7 +143,10 @@ static struct platform_device sc26xx_pdev = { + }, + }; + +-static u32 a20r_ack_hwint(void) ++/* ++ * Trigger chipset to update CPU's CAUSE IP field ++ */ ++static u32 a20r_update_cause_ip(void) + { + u32 status = read_c0_status(); + +@@ -205,12 +208,14 @@ static void a20r_hwint(void) + int irq; + + clear_c0_status(IE_IRQ0); +- status = a20r_ack_hwint(); ++ status = a20r_update_cause_ip(); + cause = read_c0_cause(); + + irq = ffs(((cause & status) >> 8) & 0xf8); + if (likely(irq > 0)) + do_IRQ(SNI_A20R_IRQ_BASE + irq - 1); ++ ++ a20r_update_cause_ip(); + set_c0_status(IE_IRQ0); + } + +diff --git a/arch/mips/vdso/genvdso.c b/arch/mips/vdso/genvdso.c +index 530a36f465ce..afcc86726448 100644 +--- a/arch/mips/vdso/genvdso.c ++++ b/arch/mips/vdso/genvdso.c +@@ -126,6 +126,7 @@ static void *map_vdso(const char *path, size_t *_size) + if (fstat(fd, &stat) != 0) { + fprintf(stderr, "%s: Failed to stat '%s': %s\n", program_name, + path, strerror(errno)); ++ close(fd); + return NULL; + } + +@@ -134,6 +135,7 @@ static void *map_vdso(const char *path, size_t *_size) + if (addr == MAP_FAILED) { + fprintf(stderr, "%s: Failed to map '%s': %s\n", program_name, + path, strerror(errno)); ++ close(fd); + return NULL; + } + +@@ -143,6 +145,7 @@ static void *map_vdso(const char *path, size_t *_size) + if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) { + fprintf(stderr, "%s: '%s' is not an ELF file\n", program_name, + path); ++ close(fd); + return NULL; + } + +@@ -154,6 +157,7 @@ static void *map_vdso(const char *path, size_t *_size) + default: + fprintf(stderr, "%s: '%s' has invalid ELF class\n", + program_name, path); ++ close(fd); + return NULL; + } + +@@ -165,6 +169,7 @@ static void *map_vdso(const char *path, size_t *_size) + default: + fprintf(stderr, "%s: '%s' has invalid ELF data order\n", + program_name, path); ++ close(fd); + return NULL; + } + +@@ -172,15 +177,18 @@ static void *map_vdso(const char 
*path, size_t *_size) + fprintf(stderr, + "%s: '%s' has invalid ELF machine (expected EM_MIPS)\n", + program_name, path); ++ close(fd); + return NULL; + } else if (swap_uint16(ehdr->e_type) != ET_DYN) { + fprintf(stderr, + "%s: '%s' has invalid ELF type (expected ET_DYN)\n", + program_name, path); ++ close(fd); + return NULL; + } + + *_size = stat.st_size; ++ close(fd); + return addr; + } + +@@ -284,10 +292,12 @@ int main(int argc, char **argv) + /* Calculate and write symbol offsets to */ + if (!get_symbols(dbg_vdso_path, dbg_vdso)) { + unlink(out_path); ++ fclose(out_file); + return EXIT_FAILURE; + } + + fprintf(out_file, "};\n"); ++ fclose(out_file); + + return EXIT_SUCCESS; + } +diff --git a/arch/mips/vdso/vdso.h b/arch/mips/vdso/vdso.h +index cfb1be441dec..921589b45bc2 100644 +--- a/arch/mips/vdso/vdso.h ++++ b/arch/mips/vdso/vdso.h +@@ -81,7 +81,7 @@ static inline const union mips_vdso_data *get_vdso_data(void) + + static inline void __iomem *get_gic(const union mips_vdso_data *data) + { +- return (void __iomem *)data - PAGE_SIZE; ++ return (void __iomem *)((unsigned long)data & PAGE_MASK) - PAGE_SIZE; + } + + #endif /* CONFIG_CLKSRC_MIPS_GIC */ +diff --git a/arch/openrisc/include/asm/barrier.h b/arch/openrisc/include/asm/barrier.h +new file mode 100644 +index 000000000000..7538294721be +--- /dev/null ++++ b/arch/openrisc/include/asm/barrier.h +@@ -0,0 +1,9 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef __ASM_BARRIER_H ++#define __ASM_BARRIER_H ++ ++#define mb() asm volatile ("l.msync" ::: "memory") ++ ++#include ++ ++#endif /* __ASM_BARRIER_H */ +diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S +index c17e8451d997..3fbe420f49c4 100644 +--- a/arch/openrisc/kernel/entry.S ++++ b/arch/openrisc/kernel/entry.S +@@ -1092,13 +1092,13 @@ ENTRY(__sys_clone) + l.movhi r29,hi(sys_clone) + l.ori r29,r29,lo(sys_clone) + l.j _fork_save_extra_regs_and_call +- l.addi r7,r1,0 ++ l.nop + + ENTRY(__sys_fork) + l.movhi r29,hi(sys_fork) + l.ori r29,r29,lo(sys_fork) + l.j _fork_save_extra_regs_and_call +- l.addi r3,r1,0 ++ l.nop + + ENTRY(sys_rt_sigreturn) + l.j _sys_rt_sigreturn +diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c +index b4ed8b36e078..e5f5b69a7b7b 100644 +--- a/arch/openrisc/kernel/setup.c ++++ b/arch/openrisc/kernel/setup.c +@@ -278,6 +278,8 @@ void calibrate_delay(void) + pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n", + loops_per_jiffy / (500000 / HZ), + (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy); ++ ++ of_node_put(cpu); + } + + void __init setup_arch(char **cmdline_p) +diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h +index 5394b9c5f914..1616428a5f95 100644 +--- a/arch/parisc/include/asm/atomic.h ++++ b/arch/parisc/include/asm/atomic.h +@@ -255,6 +255,8 @@ atomic64_set(atomic64_t *v, s64 i) + _atomic_spin_unlock_irqrestore(v, flags); + } + ++#define atomic64_set_release(v, i) atomic64_set((v), (i)) ++ + static __inline__ s64 + atomic64_read(const atomic64_t *v) + { +diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h +index 90253bdc2ee5..8dc6d198039d 100644 +--- a/arch/parisc/include/asm/cmpxchg.h ++++ b/arch/parisc/include/asm/cmpxchg.h +@@ -59,6 +59,7 @@ extern void __cmpxchg_called_with_bad_pointer(void); + extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, + unsigned int new_); + extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_); ++extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_); + + /* don't 
worry...optimizer will get rid of most of this */ + static inline unsigned long +@@ -70,6 +71,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) + #endif + case 4: return __cmpxchg_u32((unsigned int *)ptr, + (unsigned int)old, (unsigned int)new_); ++ case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff); + } + __cmpxchg_called_with_bad_pointer(); + return old; +diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c +index 8e45b0a97abf..3284a7adb0a3 100644 +--- a/arch/parisc/lib/bitops.c ++++ b/arch/parisc/lib/bitops.c +@@ -78,3 +78,15 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign + _atomic_spin_unlock_irqrestore(ptr, flags); + return (unsigned long)prev; + } ++ ++u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new) ++{ ++ unsigned long flags; ++ u8 prev; ++ ++ _atomic_spin_lock_irqsave(ptr, flags); ++ if ((prev = *ptr) == old) ++ *ptr = new; ++ _atomic_spin_unlock_irqrestore(ptr, flags); ++ return prev; ++} +diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig +index a4632af7f64e..2351ad8657c7 100644 +--- a/arch/powerpc/Kconfig ++++ b/arch/powerpc/Kconfig +@@ -623,7 +623,7 @@ config PPC_64K_PAGES + + config PPC_256K_PAGES + bool "256k page size" +- depends on 44x && !STDBINUTILS ++ depends on 44x && !STDBINUTILS && !PPC_47x + help + Make the page size 256k. + +diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug +index 63292f64b25a..f66326339bc8 100644 +--- a/arch/powerpc/Kconfig.debug ++++ b/arch/powerpc/Kconfig.debug +@@ -348,6 +348,7 @@ config PPC_EARLY_DEBUG_CPM_ADDR + config FAIL_IOMMU + bool "Fault-injection capability for IOMMU" + depends on FAULT_INJECTION ++ depends on PCI || IBMVIO + help + Provide fault-injection capability for IOMMU. Each device can + be selectively enabled via the fail_iommu property. +diff --git a/arch/powerpc/boot/devtree.c b/arch/powerpc/boot/devtree.c +index a7e21a35c03a..27c84b82b588 100644 +--- a/arch/powerpc/boot/devtree.c ++++ b/arch/powerpc/boot/devtree.c +@@ -17,6 +17,7 @@ + #include "string.h" + #include "stdio.h" + #include "ops.h" ++#include "of.h" + + void dt_fixup_memory(u64 start, u64 size) + { +@@ -27,21 +28,25 @@ void dt_fixup_memory(u64 start, u64 size) + root = finddevice("/"); + if (getprop(root, "#address-cells", &naddr, sizeof(naddr)) < 0) + naddr = 2; ++ else ++ naddr = be32_to_cpu(naddr); + if (naddr < 1 || naddr > 2) + fatal("Can't cope with #address-cells == %d in /\n\r", naddr); + + if (getprop(root, "#size-cells", &nsize, sizeof(nsize)) < 0) + nsize = 1; ++ else ++ nsize = be32_to_cpu(nsize); + if (nsize < 1 || nsize > 2) + fatal("Can't cope with #size-cells == %d in /\n\r", nsize); + + i = 0; + if (naddr == 2) +- memreg[i++] = start >> 32; +- memreg[i++] = start & 0xffffffff; ++ memreg[i++] = cpu_to_be32(start >> 32); ++ memreg[i++] = cpu_to_be32(start & 0xffffffff); + if (nsize == 2) +- memreg[i++] = size >> 32; +- memreg[i++] = size & 0xffffffff; ++ memreg[i++] = cpu_to_be32(size >> 32); ++ memreg[i++] = cpu_to_be32(size & 0xffffffff); + + memory = finddevice("/memory"); + if (! 
memory) { +@@ -49,9 +54,9 @@ void dt_fixup_memory(u64 start, u64 size) + setprop_str(memory, "device_type", "memory"); + } + +- printf("Memory <- <0x%x", memreg[0]); ++ printf("Memory <- <0x%x", be32_to_cpu(memreg[0])); + for (i = 1; i < (naddr + nsize); i++) +- printf(" 0x%x", memreg[i]); ++ printf(" 0x%x", be32_to_cpu(memreg[i])); + printf("> (%ldMB)\n\r", (unsigned long)(size >> 20)); + + setprop(memory, "reg", memreg, (naddr + nsize)*sizeof(u32)); +@@ -69,10 +74,10 @@ void dt_fixup_cpu_clocks(u32 cpu, u32 tb, u32 bus) + printf("CPU bus-frequency <- 0x%x (%dMHz)\n\r", bus, MHZ(bus)); + + while ((devp = find_node_by_devtype(devp, "cpu"))) { +- setprop_val(devp, "clock-frequency", cpu); +- setprop_val(devp, "timebase-frequency", tb); ++ setprop_val(devp, "clock-frequency", cpu_to_be32(cpu)); ++ setprop_val(devp, "timebase-frequency", cpu_to_be32(tb)); + if (bus > 0) +- setprop_val(devp, "bus-frequency", bus); ++ setprop_val(devp, "bus-frequency", cpu_to_be32(bus)); + } + + timebase_period_ns = 1000000000 / tb; +@@ -84,7 +89,7 @@ void dt_fixup_clock(const char *path, u32 freq) + + if (devp) { + printf("%s: clock-frequency <- %x (%dMHz)\n\r", path, freq, MHZ(freq)); +- setprop_val(devp, "clock-frequency", freq); ++ setprop_val(devp, "clock-frequency", cpu_to_be32(freq)); + } + } + +@@ -137,8 +142,12 @@ void dt_get_reg_format(void *node, u32 *naddr, u32 *nsize) + { + if (getprop(node, "#address-cells", naddr, 4) != 4) + *naddr = 2; ++ else ++ *naddr = be32_to_cpu(*naddr); + if (getprop(node, "#size-cells", nsize, 4) != 4) + *nsize = 1; ++ else ++ *nsize = be32_to_cpu(*nsize); + } + + static void copy_val(u32 *dest, u32 *src, int naddr) +@@ -167,9 +176,9 @@ static int add_reg(u32 *reg, u32 *add, int naddr) + int i, carry = 0; + + for (i = MAX_ADDR_CELLS - 1; i >= MAX_ADDR_CELLS - naddr; i--) { +- u64 tmp = (u64)reg[i] + add[i] + carry; ++ u64 tmp = (u64)be32_to_cpu(reg[i]) + be32_to_cpu(add[i]) + carry; + carry = tmp >> 32; +- reg[i] = (u32)tmp; ++ reg[i] = cpu_to_be32((u32)tmp); + } + + return !carry; +@@ -184,18 +193,18 @@ static int compare_reg(u32 *reg, u32 *range, u32 *rangesize) + u32 end; + + for (i = 0; i < MAX_ADDR_CELLS; i++) { +- if (reg[i] < range[i]) ++ if (be32_to_cpu(reg[i]) < be32_to_cpu(range[i])) + return 0; +- if (reg[i] > range[i]) ++ if (be32_to_cpu(reg[i]) > be32_to_cpu(range[i])) + break; + } + + for (i = 0; i < MAX_ADDR_CELLS; i++) { +- end = range[i] + rangesize[i]; ++ end = be32_to_cpu(range[i]) + be32_to_cpu(rangesize[i]); + +- if (reg[i] < end) ++ if (be32_to_cpu(reg[i]) < end) + break; +- if (reg[i] > end) ++ if (be32_to_cpu(reg[i]) > end) + return 0; + } + +@@ -244,7 +253,6 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr, + return 0; + + dt_get_reg_format(parent, &naddr, &nsize); +- + if (nsize > 2) + return 0; + +@@ -256,10 +264,10 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr, + + copy_val(last_addr, prop_buf + offset, naddr); + +- ret_size = prop_buf[offset + naddr]; ++ ret_size = be32_to_cpu(prop_buf[offset + naddr]); + if (nsize == 2) { + ret_size <<= 32; +- ret_size |= prop_buf[offset + naddr + 1]; ++ ret_size |= be32_to_cpu(prop_buf[offset + naddr + 1]); + } + + for (;;) { +@@ -282,7 +290,6 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr, + + offset = find_range(last_addr, prop_buf, prev_naddr, + naddr, prev_nsize, buflen / 4); +- + if (offset < 0) + return 0; + +@@ -300,8 +307,7 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr, + if (naddr > 2) 
+ return 0; + +- ret_addr = ((u64)last_addr[2] << 32) | last_addr[3]; +- ++ ret_addr = ((u64)be32_to_cpu(last_addr[2]) << 32) | be32_to_cpu(last_addr[3]); + if (sizeof(void *) == 4 && + (ret_addr >= 0x100000000ULL || ret_size > 0x100000000ULL || + ret_addr + ret_size > 0x100000000ULL)) +@@ -354,11 +360,14 @@ int dt_is_compatible(void *node, const char *compat) + int dt_get_virtual_reg(void *node, void **addr, int nres) + { + unsigned long xaddr; +- int n; ++ int n, i; + + n = getprop(node, "virtual-reg", addr, nres * 4); +- if (n > 0) ++ if (n > 0) { ++ for (i = 0; i < n/4; i ++) ++ ((u32 *)addr)[i] = be32_to_cpu(((u32 *)addr)[i]); + return n / 4; ++ } + + for (n = 0; n < nres; n++) { + if (!dt_xlate_reg(node, n, &xaddr, NULL)) +diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi +index af12ead88c5f..404f570ebe23 100644 +--- a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi ++++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi +@@ -122,7 +122,15 @@ + }; + + /include/ "pq3-i2c-0.dtsi" ++ i2c@3000 { ++ fsl,i2c-erratum-a004447; ++ }; ++ + /include/ "pq3-i2c-1.dtsi" ++ i2c@3100 { ++ fsl,i2c-erratum-a004447; ++ }; ++ + /include/ "pq3-duart-0.dtsi" + /include/ "pq3-espi-0.dtsi" + spi0: spi@7000 { +diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi +index 51e975d7631a..8921f17fca42 100644 +--- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi ++++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi +@@ -389,7 +389,23 @@ + }; + + /include/ "qoriq-i2c-0.dtsi" ++ i2c@118000 { ++ fsl,i2c-erratum-a004447; ++ }; ++ ++ i2c@118100 { ++ fsl,i2c-erratum-a004447; ++ }; ++ + /include/ "qoriq-i2c-1.dtsi" ++ i2c@119000 { ++ fsl,i2c-erratum-a004447; ++ }; ++ ++ i2c@119100 { ++ fsl,i2c-erratum-a004447; ++ }; ++ + /include/ "qoriq-duart-0.dtsi" + /include/ "qoriq-duart-1.dtsi" + /include/ "qoriq-gpio-0.dtsi" +diff --git a/arch/powerpc/boot/ns16550.c b/arch/powerpc/boot/ns16550.c +index 8c9ead94be06..cea34a20085c 100644 +--- a/arch/powerpc/boot/ns16550.c ++++ b/arch/powerpc/boot/ns16550.c +@@ -14,6 +14,7 @@ + #include "stdio.h" + #include "io.h" + #include "ops.h" ++#include "of.h" + + #define UART_DLL 0 /* Out: Divisor Latch Low */ + #define UART_DLM 1 /* Out: Divisor Latch High */ +@@ -57,16 +58,20 @@ int ns16550_console_init(void *devp, struct serial_console_data *scdp) + int n; + u32 reg_offset; + +- if (dt_get_virtual_reg(devp, (void **)®_base, 1) < 1) ++ if (dt_get_virtual_reg(devp, (void **)®_base, 1) < 1) { ++ printf("virt reg parse fail...\r\n"); + return -1; ++ } + + n = getprop(devp, "reg-offset", ®_offset, sizeof(reg_offset)); + if (n == sizeof(reg_offset)) +- reg_base += reg_offset; ++ reg_base += be32_to_cpu(reg_offset); + + n = getprop(devp, "reg-shift", ®_shift, sizeof(reg_shift)); + if (n != sizeof(reg_shift)) + reg_shift = 0; ++ else ++ reg_shift = be32_to_cpu(reg_shift); + + scdp->open = ns16550_open; + scdp->putc = ns16550_putc; +diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig +index 76f4edd441d3..07bc4f038249 100644 +--- a/arch/powerpc/configs/pasemi_defconfig ++++ b/arch/powerpc/configs/pasemi_defconfig +@@ -115,7 +115,6 @@ CONFIG_FB_NVIDIA=y + CONFIG_FB_NVIDIA_I2C=y + CONFIG_FB_RADEON=y + # CONFIG_LCD_CLASS_DEVICE is not set +-CONFIG_VGACON_SOFT_SCROLLBACK=y + CONFIG_LOGO=y + CONFIG_SOUND=y + CONFIG_SND=y +diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig +index 8fbf49801233..1c6815bcc616 100644 +--- 
a/arch/powerpc/configs/ppc6xx_defconfig ++++ b/arch/powerpc/configs/ppc6xx_defconfig +@@ -796,7 +796,6 @@ CONFIG_FB_TRIDENT=m + CONFIG_FB_SM501=m + CONFIG_FB_IBM_GXT4500=y + CONFIG_LCD_PLATFORM=m +-CONFIG_VGACON_SOFT_SCROLLBACK=y + CONFIG_FRAMEBUFFER_CONSOLE=y + CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y + CONFIG_LOGO=y +diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h +index 80024c4f2093..0dbdf88162b7 100644 +--- a/arch/powerpc/include/asm/barrier.h ++++ b/arch/powerpc/include/asm/barrier.h +@@ -41,6 +41,8 @@ + # define SMPWMB eieio + #endif + ++/* clang defines this macro for a builtin, which will not work with runtime patching */ ++#undef __lwsync + #define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") + #define dma_rmb() __lwsync() + #define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") +diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h +index 6b8b2d57fdc8..e588028922a8 100644 +--- a/arch/powerpc/include/asm/book3s/32/pgtable.h ++++ b/arch/powerpc/include/asm/book3s/32/pgtable.h +@@ -411,9 +411,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, + if (pte_val(*ptep) & _PAGE_HASHPTE) + flush_hash_entry(mm, ptep, addr); + __asm__ __volatile__("\ +- stw%U0%X0 %2,%0\n\ ++ stw%X0 %2,%0\n\ + eieio\n\ +- stw%U0%X0 %L2,%1" ++ stw%X1 %L2,%1" + : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) + : "r" (pte) : "memory"); + +diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h +new file mode 100644 +index 000000000000..cce8e7497d72 +--- /dev/null ++++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h +@@ -0,0 +1,23 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H ++#define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H ++#include ++ ++DECLARE_STATIC_KEY_FALSE(uaccess_flush_key); ++ ++/* Prototype for function defined in exceptions-64s.S */ ++void do_uaccess_flush(void); ++ ++static __always_inline void allow_user_access(void __user *to, const void __user *from, ++ unsigned long size) ++{ ++} ++ ++static inline void prevent_user_access(void __user *to, const void __user *from, ++ unsigned long size) ++{ ++ if (static_branch_unlikely(&uaccess_flush_key)) ++ do_uaccess_flush(); ++} ++ ++#endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */ +diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h +index 9fd77f8794a0..315758c84187 100644 +--- a/arch/powerpc/include/asm/book3s/64/pgtable.h ++++ b/arch/powerpc/include/asm/book3s/64/pgtable.h +@@ -754,10 +754,25 @@ extern struct page *pgd_page(pgd_t pgd); + #define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS) + #define pgd_page_vaddr(pgd) __va(pgd_val(pgd) & ~PGD_MASKED_BITS) + +-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1)) +-#define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1)) +-#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1)) +-#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1)) ++static inline unsigned long pgd_index(unsigned long address) ++{ ++ return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1); ++} ++ ++static inline unsigned long pud_index(unsigned long address) ++{ ++ return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); ++} ++ ++static inline unsigned long pmd_index(unsigned long address) ++{ ++ return (address >> PMD_SHIFT) & (PTRS_PER_PMD 
- 1); ++} ++ ++static inline unsigned long pte_index(unsigned long address) ++{ ++ return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); ++} + + /* + * Find an entry in a page-table-directory. We combine the address region +diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h +index ab934f8232bd..985fa005c1df 100644 +--- a/arch/powerpc/include/asm/code-patching.h ++++ b/arch/powerpc/include/asm/code-patching.h +@@ -46,7 +46,7 @@ void __patch_exception(int exc, unsigned long addr); + #endif + + #define OP_RT_RA_MASK 0xffff0000UL +-#define LIS_R2 0x3c020000UL ++#define LIS_R2 0x3c400000UL + #define ADDIS_R2_R12 0x3c4c0000UL + #define ADDI_R2_R2 0x38420000UL + +diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h +index 6e834caa3720..7b10b3ef7739 100644 +--- a/arch/powerpc/include/asm/cpu_has_feature.h ++++ b/arch/powerpc/include/asm/cpu_has_feature.h +@@ -6,7 +6,7 @@ + #include + #include + +-static inline bool early_cpu_has_feature(unsigned long feature) ++static __always_inline bool early_cpu_has_feature(unsigned long feature) + { + return !!((CPU_FTRS_ALWAYS & feature) || + (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature)); +@@ -45,7 +45,7 @@ static __always_inline bool cpu_has_feature(unsigned long feature) + return static_branch_likely(&cpu_feature_keys[i]); + } + #else +-static inline bool cpu_has_feature(unsigned long feature) ++static __always_inline bool cpu_has_feature(unsigned long feature) + { + return early_cpu_has_feature(feature); + } +diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h +index cf51aea47510..090edefe125d 100644 +--- a/arch/powerpc/include/asm/cputable.h ++++ b/arch/powerpc/include/asm/cputable.h +@@ -419,7 +419,6 @@ enum { + CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ + CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \ + CPU_FTR_CELL_TB_BUG | CPU_FTR_SMT) +-#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) + + /* 64-bit CPUs */ + #define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ +@@ -510,8 +509,6 @@ enum { + CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX | + CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_E300C2 | + CPU_FTRS_CLASSIC32 | +-#else +- CPU_FTRS_GENERIC_32 | + #endif + #ifdef CONFIG_8xx + CPU_FTRS_8XX | +@@ -562,8 +559,6 @@ enum { + CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX & + CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_E300C2 & + CPU_FTRS_CLASSIC32 & +-#else +- CPU_FTRS_GENERIC_32 & + #endif + #ifdef CONFIG_8xx + CPU_FTRS_8XX & +diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h +index 4a2beef74277..86fdda16bb73 100644 +--- a/arch/powerpc/include/asm/dcr-native.h ++++ b/arch/powerpc/include/asm/dcr-native.h +@@ -65,8 +65,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val) + #define mfdcr(rn) \ + ({unsigned int rval; \ + if (__builtin_constant_p(rn) && rn < 1024) \ +- asm volatile("mfdcr %0," __stringify(rn) \ +- : "=r" (rval)); \ ++ asm volatile("mfdcr %0, %1" : "=r" (rval) \ ++ : "n" (rn)); \ + else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \ + rval = mfdcrx(rn); \ + else \ +@@ -76,8 +76,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val) + #define mtdcr(rn, v) \ + do { \ + if (__builtin_constant_p(rn) && rn < 1024) \ +- asm volatile("mtdcr " __stringify(rn) ",%0" \ +- : : "r" (v)); \ ++ asm volatile("mtdcr %0, %1" \ ++ : : "n" (rn), "r" (v)); \ + else if 
(likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \ + mtdcrx(rn, v); \ + else \ +diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h +index e2200100828d..6ffec5b18a6d 100644 +--- a/arch/powerpc/include/asm/exception-64s.h ++++ b/arch/powerpc/include/asm/exception-64s.h +@@ -66,11 +66,18 @@ + nop; \ + nop + ++#define ENTRY_FLUSH_SLOT \ ++ ENTRY_FLUSH_FIXUP_SECTION; \ ++ nop; \ ++ nop; \ ++ nop; ++ + /* + * r10 must be free to use, r13 must be paca + */ + #define INTERRUPT_TO_KERNEL \ +- STF_ENTRY_BARRIER_SLOT ++ STF_ENTRY_BARRIER_SLOT; \ ++ ENTRY_FLUSH_SLOT + + /* + * Macros for annotating the expected destination of (h)rfid +@@ -563,6 +570,10 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) + EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_HV, vec); \ + EXCEPTION_PROLOG_PSERIES_1(label, EXC_HV) + ++#define MASKABLE_RELON_EXCEPTION_PSERIES_OOL(vec, label) \ ++ EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_PR, vec); \ ++ EXCEPTION_PROLOG_PSERIES_1(label, EXC_STD) ++ + /* + * Our exception common code can be passed various "additions" + * to specify the behaviour of interrupts, whether to kick the +diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h +index 175128e19025..a8e7ca27fb54 100644 +--- a/arch/powerpc/include/asm/feature-fixups.h ++++ b/arch/powerpc/include/asm/feature-fixups.h +@@ -205,6 +205,22 @@ void setup_feature_keys(void); + FTR_ENTRY_OFFSET 955b-956b; \ + .popsection; + ++#define UACCESS_FLUSH_FIXUP_SECTION \ ++959: \ ++ .pushsection __uaccess_flush_fixup,"a"; \ ++ .align 2; \ ++960: \ ++ FTR_ENTRY_OFFSET 959b-960b; \ ++ .popsection; ++ ++#define ENTRY_FLUSH_FIXUP_SECTION \ ++957: \ ++ .pushsection __entry_flush_fixup,"a"; \ ++ .align 2; \ ++958: \ ++ FTR_ENTRY_OFFSET 957b-958b; \ ++ .popsection; ++ + #define RFI_FLUSH_FIXUP_SECTION \ + 951: \ + .pushsection __rfi_flush_fixup,"a"; \ +@@ -236,8 +252,11 @@ void setup_feature_keys(void); + #ifndef __ASSEMBLY__ + + extern long stf_barrier_fallback; ++extern long entry_flush_fallback; + extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup; + extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup; ++extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup; ++extern long __start___entry_flush_fixup, __stop___entry_flush_fixup; + extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; + extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup; + extern long __start__btb_flush_fixup, __stop__btb_flush_fixup; +diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h +index b73ab8a7ebc3..10746519b351 100644 +--- a/arch/powerpc/include/asm/futex.h ++++ b/arch/powerpc/include/asm/futex.h +@@ -36,6 +36,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, + { + int oldval = 0, ret; + ++ allow_write_to_user(uaddr, sizeof(*uaddr)); + pagefault_disable(); + + switch (op) { +@@ -62,6 +63,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, + + *oval = oldval; + ++ prevent_write_to_user(uaddr, sizeof(*uaddr)); + return ret; + } + +@@ -75,6 +77,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + ++ allow_write_to_user(uaddr, sizeof(*uaddr)); + __asm__ __volatile__ ( + PPC_ATOMIC_ENTRY_BARRIER + "1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\ +@@ -97,6 +100,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user 
*uaddr, + : "cc", "memory"); + + *uval = prev; ++ prevent_write_to_user(uaddr, sizeof(*uaddr)); + return ret; + } + +diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h +index 2c9759bdb63b..063d64c1c9e8 100644 +--- a/arch/powerpc/include/asm/kprobes.h ++++ b/arch/powerpc/include/asm/kprobes.h +@@ -29,6 +29,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -60,7 +61,7 @@ typedef ppc_opcode_t kprobe_opcode_t; + #define kprobe_lookup_name(name, addr) \ + { \ + char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN]; \ +- char *modsym; \ ++ const char *modsym; \ + bool dot_appended = false; \ + if ((modsym = strchr(name, ':')) != NULL) { \ + modsym++; \ +diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h +new file mode 100644 +index 000000000000..f0f8e36ad71f +--- /dev/null ++++ b/arch/powerpc/include/asm/kup.h +@@ -0,0 +1,40 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _ASM_POWERPC_KUP_H_ ++#define _ASM_POWERPC_KUP_H_ ++ ++#ifndef __ASSEMBLY__ ++ ++#include ++ ++#ifdef CONFIG_PPC_BOOK3S_64 ++#include ++#else ++static inline void allow_user_access(void __user *to, const void __user *from, ++ unsigned long size) { } ++static inline void prevent_user_access(void __user *to, const void __user *from, ++ unsigned long size) { } ++#endif /* CONFIG_PPC_BOOK3S_64 */ ++ ++static inline void allow_read_from_user(const void __user *from, unsigned long size) ++{ ++ allow_user_access(NULL, from, size); ++} ++ ++static inline void allow_write_to_user(void __user *to, unsigned long size) ++{ ++ allow_user_access(to, NULL, size); ++} ++ ++static inline void prevent_read_from_user(const void __user *from, unsigned long size) ++{ ++ prevent_user_access(NULL, from, size); ++} ++ ++static inline void prevent_write_to_user(void __user *to, unsigned long size) ++{ ++ prevent_user_access(to, NULL, size); ++} ++ ++#endif /* !__ASSEMBLY__ */ ++ ++#endif /* _ASM_POWERPC_KUP_H_ */ +diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h +index 1263c22d60d8..330fe178c0c5 100644 +--- a/arch/powerpc/include/asm/nohash/pgtable.h ++++ b/arch/powerpc/include/asm/nohash/pgtable.h +@@ -155,9 +155,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, + flush_hash_entry(mm, ptep, addr); + #endif + __asm__ __volatile__("\ +- stw%U0%X0 %2,%0\n\ ++ stw%X0 %2,%0\n\ + eieio\n\ +- stw%U0%X0 %L2,%1" ++ stw%X1 %L2,%1" + : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) + : "r" (pte) : "memory"); + +diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h +index 2cedefddba37..61c78205a1d3 100644 +--- a/arch/powerpc/include/asm/percpu.h ++++ b/arch/powerpc/include/asm/percpu.h +@@ -9,8 +9,6 @@ + + #ifdef CONFIG_SMP + +-#include +- + #define __my_cpu_offset local_paca->data_offset + + #endif /* CONFIG_SMP */ +@@ -18,4 +16,6 @@ + + #include + ++#include ++ + #endif /* _ASM_POWERPC_PERCPU_H_ */ +diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h +index a19f831a4cc9..83a6d99e9994 100644 +--- a/arch/powerpc/include/asm/ps3.h ++++ b/arch/powerpc/include/asm/ps3.h +@@ -83,6 +83,7 @@ struct ps3_dma_region_ops; + * @bus_addr: The 'translated' bus address of the region. + * @len: The length in bytes of the region. + * @offset: The offset from the start of memory of the region. ++ * @dma_mask: Device dma_mask. + * @ioid: The IOID of the device who owns this region + * @chunk_list: Opaque variable used by the ioc page manager. 
+ * @region_ops: struct ps3_dma_region_ops - dma region operations +@@ -97,6 +98,7 @@ struct ps3_dma_region { + enum ps3_dma_region_type region_type; + unsigned long len; + unsigned long offset; ++ u64 dma_mask; + + /* driver variables (set by ps3_dma_region_create) */ + unsigned long bus_addr; +diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h +index 26aeeaad3267..a36ef27155bc 100644 +--- a/arch/powerpc/include/asm/reg.h ++++ b/arch/powerpc/include/asm/reg.h +@@ -683,7 +683,7 @@ + #define THRM1_TIN (1 << 31) + #define THRM1_TIV (1 << 30) + #define THRM1_THRES(x) ((x&0x7f)<<23) +-#define THRM3_SITV(x) ((x&0x3fff)<<1) ++#define THRM3_SITV(x) ((x & 0x1fff) << 1) + #define THRM1_TID (1<<2) + #define THRM1_TIE (1<<1) + #define THRM1_V (1<<0) +diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h +index ccf44c135389..3b45a64e491e 100644 +--- a/arch/powerpc/include/asm/security_features.h ++++ b/arch/powerpc/include/asm/security_features.h +@@ -84,12 +84,19 @@ static inline bool security_ftr_enabled(unsigned long feature) + // Software required to flush link stack on context switch + #define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull + ++// The L1-D cache should be flushed when entering the kernel ++#define SEC_FTR_L1D_FLUSH_ENTRY 0x0000000000004000ull ++ ++// The L1-D cache should be flushed after user accesses from the kernel ++#define SEC_FTR_L1D_FLUSH_UACCESS 0x0000000000008000ull + + // Features enabled by default + #define SEC_FTR_DEFAULT \ + (SEC_FTR_L1D_FLUSH_HV | \ + SEC_FTR_L1D_FLUSH_PR | \ + SEC_FTR_BNDS_CHK_SPEC_BAR | \ ++ SEC_FTR_L1D_FLUSH_ENTRY | \ ++ SEC_FTR_L1D_FLUSH_UACCESS | \ + SEC_FTR_FAVOUR_SECURITY) + + #endif /* _ASM_POWERPC_SECURITY_FEATURES_H */ +diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h +index 862ebce3ae54..944c9eb0cdaf 100644 +--- a/arch/powerpc/include/asm/setup.h ++++ b/arch/powerpc/include/asm/setup.h +@@ -50,12 +50,16 @@ enum l1d_flush_type { + }; + + void setup_rfi_flush(enum l1d_flush_type, bool enable); ++void setup_entry_flush(bool enable); ++void setup_uaccess_flush(bool enable); + void do_rfi_flush_fixups(enum l1d_flush_type types); + #ifdef CONFIG_PPC_BARRIER_NOSPEC + void setup_barrier_nospec(void); + #else + static inline void setup_barrier_nospec(void) { }; + #endif ++void do_uaccess_flush_fixups(enum l1d_flush_type types); ++void do_entry_flush_fixups(enum l1d_flush_type types); + void do_barrier_nospec_fixups(bool enable); + extern bool barrier_nospec_enabled; + +diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h +index da852153c1f8..fde865a4e2cb 100644 +--- a/arch/powerpc/include/asm/uaccess.h ++++ b/arch/powerpc/include/asm/uaccess.h +@@ -9,6 +9,7 @@ + #include + #include + #include ++#include + + #define VERIFY_READ 0 + #define VERIFY_WRITE 1 +@@ -105,9 +106,14 @@ struct exception_table_entry { + __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) + + #define __get_user(x, ptr) \ +- __get_user_nocheck((x), (ptr), sizeof(*(ptr))) ++ __get_user_nocheck((x), (ptr), sizeof(*(ptr)), true) + #define __put_user(x, ptr) \ +- __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) ++ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true) ++ ++#define __get_user_allowed(x, ptr) \ ++ __get_user_nocheck((x), (ptr), sizeof(*(ptr)), false) ++#define __put_user_allowed(x, ptr) \ ++ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false) + + #define 
__get_user_inatomic(x, ptr) \ + __get_user_nosleep((x), (ptr), sizeof(*(ptr))) +@@ -161,7 +167,7 @@ extern long __put_user_bad(void); + : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err)) + #endif /* __powerpc64__ */ + +-#define __put_user_size(x, ptr, size, retval) \ ++#define __put_user_size_allowed(x, ptr, size, retval) \ + do { \ + retval = 0; \ + switch (size) { \ +@@ -173,14 +179,28 @@ do { \ + } \ + } while (0) + +-#define __put_user_nocheck(x, ptr, size) \ ++#define __put_user_size(x, ptr, size, retval) \ ++do { \ ++ allow_write_to_user(ptr, size); \ ++ __put_user_size_allowed(x, ptr, size, retval); \ ++ prevent_write_to_user(ptr, size); \ ++} while (0) ++ ++#define __put_user_nocheck(x, ptr, size, do_allow) \ + ({ \ + long __pu_err; \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ ++ __typeof__(*(ptr)) __pu_val = (x); \ ++ __typeof__(size) __pu_size = (size); \ ++ \ + if (!is_kernel_addr((unsigned long)__pu_addr)) \ + might_fault(); \ +- __chk_user_ptr(ptr); \ +- __put_user_size((x), __pu_addr, (size), __pu_err); \ ++ __chk_user_ptr(__pu_addr); \ ++ if (do_allow) \ ++ __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \ ++ else \ ++ __put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err); \ ++ \ + __pu_err; \ + }) + +@@ -188,9 +208,13 @@ do { \ + ({ \ + long __pu_err = -EFAULT; \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ ++ __typeof__(*(ptr)) __pu_val = (x); \ ++ __typeof__(size) __pu_size = (size); \ ++ \ + might_fault(); \ +- if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ +- __put_user_size((x), __pu_addr, (size), __pu_err); \ ++ if (access_ok(VERIFY_WRITE, __pu_addr, __pu_size)) \ ++ __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \ ++ \ + __pu_err; \ + }) + +@@ -198,8 +222,12 @@ do { \ + ({ \ + long __pu_err; \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ +- __chk_user_ptr(ptr); \ +- __put_user_size((x), __pu_addr, (size), __pu_err); \ ++ __typeof__(*(ptr)) __pu_val = (x); \ ++ __typeof__(size) __pu_size = (size); \ ++ \ ++ __chk_user_ptr(__pu_addr); \ ++ __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \ ++ \ + __pu_err; \ + }) + +@@ -246,7 +274,7 @@ extern long __get_user_bad(void); + : "b" (addr), "i" (-EFAULT), "0" (err)) + #endif /* __powerpc64__ */ + +-#define __get_user_size(x, ptr, size, retval) \ ++#define __get_user_size_allowed(x, ptr, size, retval) \ + do { \ + retval = 0; \ + __chk_user_ptr(ptr); \ +@@ -261,17 +289,30 @@ do { \ + } \ + } while (0) + +-#define __get_user_nocheck(x, ptr, size) \ ++#define __get_user_size(x, ptr, size, retval) \ ++do { \ ++ allow_read_from_user(ptr, size); \ ++ __get_user_size_allowed(x, ptr, size, retval); \ ++ prevent_read_from_user(ptr, size); \ ++} while (0) ++ ++#define __get_user_nocheck(x, ptr, size, do_allow) \ + ({ \ + long __gu_err; \ + unsigned long __gu_val; \ + __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ +- __chk_user_ptr(ptr); \ ++ __typeof__(size) __gu_size = (size); \ ++ \ ++ __chk_user_ptr(__gu_addr); \ + if (!is_kernel_addr((unsigned long)__gu_addr)) \ + might_fault(); \ + barrier_nospec(); \ +- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ ++ if (do_allow) \ ++ __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \ ++ else \ ++ __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \ + (x) = (__typeof__(*(ptr)))__gu_val; \ ++ \ + __gu_err; \ + }) + +@@ -280,12 +321,15 @@ do { \ + long __gu_err = -EFAULT; \ + unsigned long __gu_val = 0; \ + __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ ++ __typeof__(size) __gu_size = (size); \ 
++ \ + might_fault(); \ +- if (access_ok(VERIFY_READ, __gu_addr, (size))) { \ ++ if (access_ok(VERIFY_READ, __gu_addr, __gu_size)) { \ + barrier_nospec(); \ +- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ ++ __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \ + } \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ ++ \ + __gu_err; \ + }) + +@@ -294,10 +338,13 @@ do { \ + long __gu_err; \ + unsigned long __gu_val; \ + __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ +- __chk_user_ptr(ptr); \ ++ __typeof__(size) __gu_size = (size); \ ++ \ ++ __chk_user_ptr(__gu_addr); \ + barrier_nospec(); \ +- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ ++ __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ ++ \ + __gu_err; \ + }) + +@@ -312,9 +359,14 @@ extern unsigned long __copy_tofrom_user(void __user *to, + static inline unsigned long copy_from_user(void *to, + const void __user *from, unsigned long n) + { ++ unsigned long ret; ++ + if (likely(access_ok(VERIFY_READ, from, n))) { + check_object_size(to, n, false); +- return __copy_tofrom_user((__force void __user *)to, from, n); ++ allow_user_access(to, from, n); ++ ret = __copy_tofrom_user((__force void __user *)to, from, n); ++ prevent_user_access(to, from, n); ++ return ret; + } + memset(to, 0, n); + return n; +@@ -347,8 +399,9 @@ extern unsigned long copy_in_user(void __user *to, const void __user *from, + static inline unsigned long __copy_from_user_inatomic(void *to, + const void __user *from, unsigned long n) + { ++ unsigned long ret; + if (__builtin_constant_p(n) && (n <= 8)) { +- unsigned long ret = 1; ++ ret = 1; + + switch (n) { + case 1: +@@ -375,27 +428,32 @@ static inline unsigned long __copy_from_user_inatomic(void *to, + check_object_size(to, n, false); + + barrier_nospec(); +- return __copy_tofrom_user((__force void __user *)to, from, n); ++ allow_read_from_user(from, n); ++ ret = __copy_tofrom_user((__force void __user *)to, from, n); ++ prevent_read_from_user(from, n); ++ return ret; + } + + static inline unsigned long __copy_to_user_inatomic(void __user *to, + const void *from, unsigned long n) + { ++ unsigned long ret; ++ + if (__builtin_constant_p(n) && (n <= 8)) { +- unsigned long ret = 1; ++ ret = 1; + + switch (n) { + case 1: +- __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret); ++ __put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret); + break; + case 2: +- __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret); ++ __put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret); + break; + case 4: +- __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret); ++ __put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret); + break; + case 8: +- __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret); ++ __put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret); + break; + } + if (ret == 0) +@@ -403,8 +461,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to, + } + + check_object_size(from, n, true); +- +- return __copy_tofrom_user(to, (__force const void __user *)from, n); ++ allow_write_to_user(to, n); ++ ret = __copy_tofrom_user(to, (__force const void __user *)from, n); ++ prevent_write_to_user(to, n); ++ return ret; + } + + static inline unsigned long __copy_from_user(void *to, +@@ -421,20 +481,39 @@ static inline unsigned long __copy_to_user(void __user *to, + return __copy_to_user_inatomic(to, from, size); + } + +-extern unsigned long __clear_user(void __user *addr, unsigned long size); 
++unsigned long __arch_clear_user(void __user *addr, unsigned long size); + + static inline unsigned long clear_user(void __user *addr, unsigned long size) + { ++ unsigned long ret = size; + might_fault(); +- if (likely(access_ok(VERIFY_WRITE, addr, size))) +- return __clear_user(addr, size); +- return size; ++ if (likely(access_ok(VERIFY_WRITE, addr, size))) { ++ allow_write_to_user(addr, size); ++ ret = __arch_clear_user(addr, size); ++ prevent_write_to_user(addr, size); ++ } ++ return ret; ++} ++ ++static inline unsigned long __clear_user(void __user *addr, unsigned long size) ++{ ++ return clear_user(addr, size); + } + + extern long strncpy_from_user(char *dst, const char __user *src, long count); + extern __must_check long strlen_user(const char __user *str); + extern __must_check long strnlen_user(const char __user *str, long n); + ++ ++#define user_access_begin() do { } while (0) ++#define user_access_end() prevent_user_access(NULL, NULL, ~0ul) ++ ++#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0) ++#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e) ++#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e) ++#define unsafe_copy_to_user(d, s, l, e) \ ++ unsafe_op_wrap(__copy_to_user_inatomic(d, s, l), e) ++ + #endif /* __ASSEMBLY__ */ + #endif /* __KERNEL__ */ + +diff --git a/arch/powerpc/include/uapi/asm/errno.h b/arch/powerpc/include/uapi/asm/errno.h +index e8b6b5f7de7c..5e8f42ff797f 100644 +--- a/arch/powerpc/include/uapi/asm/errno.h ++++ b/arch/powerpc/include/uapi/asm/errno.h +@@ -1,6 +1,7 @@ + #ifndef _ASM_POWERPC_ERRNO_H + #define _ASM_POWERPC_ERRNO_H + ++#undef EDEADLOCK + #include + + #undef EDEADLOCK +diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c +index fb7cbaa37658..611e09add911 100644 +--- a/arch/powerpc/kernel/dma-iommu.c ++++ b/arch/powerpc/kernel/dma-iommu.c +@@ -99,7 +99,8 @@ static u64 dma_iommu_get_required_mask(struct device *dev) + if (!tbl) + return 0; + +- mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1); ++ mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) + ++ tbl->it_page_shift - 1); + mask += mask - 1; + + return mask; +diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c +index a7f229e59892..90d1f2bfb007 100644 +--- a/arch/powerpc/kernel/eeh.c ++++ b/arch/powerpc/kernel/eeh.c +@@ -366,14 +366,11 @@ static inline unsigned long eeh_token_to_phys(unsigned long token) + pa = pte_pfn(*ptep); + + /* On radix we can do hugepage mappings for io, so handle that */ +- if (hugepage_shift) { +- pa <<= hugepage_shift; +- pa |= token & ((1ul << hugepage_shift) - 1); +- } else { +- pa <<= PAGE_SHIFT; +- pa |= token & (PAGE_SIZE - 1); +- } ++ if (!hugepage_shift) ++ hugepage_shift = PAGE_SHIFT; + ++ pa <<= PAGE_SHIFT; ++ pa |= token & ((1ul << hugepage_shift) - 1); + return pa; + } + +diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S +index 0c8b966e8070..a1c22989a2f2 100644 +--- a/arch/powerpc/kernel/exceptions-64s.S ++++ b/arch/powerpc/kernel/exceptions-64s.S +@@ -487,7 +487,7 @@ EXC_COMMON_BEGIN(unrecover_mce) + b 1b + + +-EXC_REAL(data_access, 0x300, 0x380) ++EXC_REAL_OOL(data_access, 0x300, 0x380) + EXC_VIRT(data_access, 0x4300, 0x4380, 0x300) + TRAMP_KVM_SKIP(PACA_EXGEN, 0x300) + +@@ -519,6 +519,10 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) + EXC_REAL_BEGIN(data_access_slb, 0x380, 0x400) + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXSLB) ++ b tramp_data_access_slb 
++EXC_REAL_END(data_access_slb, 0x380, 0x400) ++ ++TRAMP_REAL_BEGIN(tramp_data_access_slb) + EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380) + std r3,PACA_EXSLB+EX_R3(r13) + mfspr r3,SPRN_DAR +@@ -537,7 +541,6 @@ EXC_REAL_BEGIN(data_access_slb, 0x380, 0x400) + mtctr r10 + bctr + #endif +-EXC_REAL_END(data_access_slb, 0x380, 0x400) + + EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x4400) + SET_SCRATCH0(r13) +@@ -564,7 +567,7 @@ EXC_VIRT_END(data_access_slb, 0x4380, 0x4400) + TRAMP_KVM_SKIP(PACA_EXSLB, 0x380) + + +-EXC_REAL(instruction_access, 0x400, 0x480) ++EXC_REAL_OOL(instruction_access, 0x400, 0x480) + EXC_VIRT(instruction_access, 0x4400, 0x4480, 0x400) + TRAMP_KVM(PACA_EXGEN, 0x400) + +@@ -587,6 +590,10 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) + EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x500) + SET_SCRATCH0(r13) + EXCEPTION_PROLOG_0(PACA_EXSLB) ++ b tramp_instruction_access_slb ++EXC_REAL_END(instruction_access_slb, 0x480, 0x500) ++ ++TRAMP_REAL_BEGIN(tramp_instruction_access_slb) + EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480) + std r3,PACA_EXSLB+EX_R3(r13) + mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ +@@ -600,7 +607,6 @@ EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x500) + mtctr r10 + bctr + #endif +-EXC_REAL_END(instruction_access_slb, 0x480, 0x500) + + EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x4500) + SET_SCRATCH0(r13) +@@ -851,13 +857,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) + + + EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x980) +-EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x4980, 0x900) ++EXC_VIRT_OOL_MASKABLE(decrementer, 0x4900, 0x4980, 0x900) + TRAMP_KVM(PACA_EXGEN, 0x900) + EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt) + + +-EXC_REAL_HV(hdecrementer, 0x980, 0xa00) +-EXC_VIRT_HV(hdecrementer, 0x4980, 0x4a00, 0x980) ++EXC_REAL_OOL_HV(hdecrementer, 0x980, 0xa00) ++EXC_VIRT_OOL_HV(hdecrementer, 0x4980, 0x4a00, 0x980) + TRAMP_KVM_HV(PACA_EXGEN, 0x980) + EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt) + +@@ -1371,6 +1377,48 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback) + .endr + blr + ++/* Clobbers r10, r11, ctr */ ++.macro L1D_DISPLACEMENT_FLUSH ++ ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) ++ ld r11,PACA_L1D_FLUSH_SIZE(r13) ++ srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ ++ mtctr r11 ++ DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ ++ ++ /* order ld/st prior to dcbt stop all streams with flushing */ ++ sync ++ ++ /* ++ * The load adresses are at staggered offsets within cachelines, ++ * which suits some pipelines better (on others it should not ++ * hurt). 
++ */ ++1: ++ ld r11,(0x80 + 8)*0(r10) ++ ld r11,(0x80 + 8)*1(r10) ++ ld r11,(0x80 + 8)*2(r10) ++ ld r11,(0x80 + 8)*3(r10) ++ ld r11,(0x80 + 8)*4(r10) ++ ld r11,(0x80 + 8)*5(r10) ++ ld r11,(0x80 + 8)*6(r10) ++ ld r11,(0x80 + 8)*7(r10) ++ addi r10,r10,0x80*8 ++ bdnz 1b ++.endm ++ ++USE_TEXT_SECTION() ++ ++_GLOBAL(do_uaccess_flush) ++ UACCESS_FLUSH_FIXUP_SECTION ++ nop ++ nop ++ nop ++ blr ++ L1D_DISPLACEMENT_FLUSH ++ blr ++_ASM_NOKPROBE_SYMBOL(do_uaccess_flush) ++EXPORT_SYMBOL(do_uaccess_flush) ++ + /* + * Real mode exceptions actually use this too, but alternate + * instruction code patches (which end up in the common .text area) +@@ -1626,32 +1674,7 @@ rfi_flush_fallback: + std r10,PACA_EXRFI+EX_R10(r13) + std r11,PACA_EXRFI+EX_R11(r13) + mfctr r9 +- ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) +- ld r11,PACA_L1D_FLUSH_SIZE(r13) +- srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ +- mtctr r11 +- DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ +- +- /* order ld/st prior to dcbt stop all streams with flushing */ +- sync +- +- /* +- * The load adresses are at staggered offsets within cachelines, +- * which suits some pipelines better (on others it should not +- * hurt). +- */ +-1: +- ld r11,(0x80 + 8)*0(r10) +- ld r11,(0x80 + 8)*1(r10) +- ld r11,(0x80 + 8)*2(r10) +- ld r11,(0x80 + 8)*3(r10) +- ld r11,(0x80 + 8)*4(r10) +- ld r11,(0x80 + 8)*5(r10) +- ld r11,(0x80 + 8)*6(r10) +- ld r11,(0x80 + 8)*7(r10) +- addi r10,r10,0x80*8 +- bdnz 1b +- ++ L1D_DISPLACEMENT_FLUSH + mtctr r9 + ld r9,PACA_EXRFI+EX_R9(r13) + ld r10,PACA_EXRFI+EX_R10(r13) +@@ -1667,32 +1690,7 @@ hrfi_flush_fallback: + std r10,PACA_EXRFI+EX_R10(r13) + std r11,PACA_EXRFI+EX_R11(r13) + mfctr r9 +- ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) +- ld r11,PACA_L1D_FLUSH_SIZE(r13) +- srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ +- mtctr r11 +- DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ +- +- /* order ld/st prior to dcbt stop all streams with flushing */ +- sync +- +- /* +- * The load adresses are at staggered offsets within cachelines, +- * which suits some pipelines better (on others it should not +- * hurt). +- */ +-1: +- ld r11,(0x80 + 8)*0(r10) +- ld r11,(0x80 + 8)*1(r10) +- ld r11,(0x80 + 8)*2(r10) +- ld r11,(0x80 + 8)*3(r10) +- ld r11,(0x80 + 8)*4(r10) +- ld r11,(0x80 + 8)*5(r10) +- ld r11,(0x80 + 8)*6(r10) +- ld r11,(0x80 + 8)*7(r10) +- addi r10,r10,0x80*8 +- bdnz 1b +- ++ L1D_DISPLACEMENT_FLUSH + mtctr r9 + ld r9,PACA_EXRFI+EX_R9(r13) + ld r10,PACA_EXRFI+EX_R10(r13) +@@ -1700,6 +1698,20 @@ hrfi_flush_fallback: + GET_SCRATCH0(r13); + hrfid + ++ .globl entry_flush_fallback ++entry_flush_fallback: ++ std r9,PACA_EXRFI+EX_R9(r13) ++ std r10,PACA_EXRFI+EX_R10(r13) ++ std r11,PACA_EXRFI+EX_R11(r13) ++ mfctr r9 ++ L1D_DISPLACEMENT_FLUSH ++ mtctr r9 ++ ld r9,PACA_EXRFI+EX_R9(r13) ++ ld r10,PACA_EXRFI+EX_R10(r13) ++ ld r11,PACA_EXRFI+EX_R11(r13) ++ blr ++ ++ + /* + * Called from arch_local_irq_enable when an interrupt needs + * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate +diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S +index 2274be535dda..3801b32b1642 100644 +--- a/arch/powerpc/kernel/head_8xx.S ++++ b/arch/powerpc/kernel/head_8xx.S +@@ -359,11 +359,9 @@ InstructionTLBMiss: + /* Load the MI_TWC with the attributes for this "segment." 
*/ + MTSPR_CPU6(SPRN_MI_TWC, r11, r3) /* Set segment attributes */ + +-#ifdef CONFIG_SWAP +- rlwinm r11, r10, 32-5, _PAGE_PRESENT ++ rlwinm r11, r10, 32-11, _PAGE_PRESENT + and r11, r11, r10 + rlwimi r10, r11, 0, _PAGE_PRESENT +-#endif + li r11, RPN_PATTERN + /* The Linux PTE won't go exactly into the MMU TLB. + * Software indicator bits 20-23 and 28 must be clear. +@@ -443,11 +441,9 @@ _ENTRY(DTLBMiss_jmp) + * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5)); + * r10 = (r10 & ~PRESENT) | r11; + */ +-#ifdef CONFIG_SWAP +- rlwinm r11, r10, 32-5, _PAGE_PRESENT ++ rlwinm r11, r10, 32-11, _PAGE_PRESENT + and r11, r11, r10 + rlwimi r10, r11, 0, _PAGE_PRESENT +-#endif + /* The Linux PTE won't go exactly into the MMU TLB. + * Software indicator bits 22 and 28 must be clear. + * Software indicator bits 24, 25, 26, and 27 must be +diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c +index 9bfdd2510fd5..2cf900d16527 100644 +--- a/arch/powerpc/kernel/iommu.c ++++ b/arch/powerpc/kernel/iommu.c +@@ -1021,7 +1021,7 @@ int iommu_take_ownership(struct iommu_table *tbl) + + spin_lock_irqsave(&tbl->large_pool.lock, flags); + for (i = 0; i < tbl->nr_pools; i++) +- spin_lock(&tbl->pools[i].lock); ++ spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); + + if (tbl->it_offset == 0) + clear_bit(0, tbl->it_map); +@@ -1050,7 +1050,7 @@ void iommu_release_ownership(struct iommu_table *tbl) + + spin_lock_irqsave(&tbl->large_pool.lock, flags); + for (i = 0; i < tbl->nr_pools; i++) +- spin_lock(&tbl->pools[i].lock); ++ spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); + + memset(tbl->it_map, 0, sz); + +diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c +index 9dafd7af39b8..cb4d6cd949fc 100644 +--- a/arch/powerpc/kernel/machine_kexec.c ++++ b/arch/powerpc/kernel/machine_kexec.c +@@ -113,11 +113,12 @@ void machine_kexec(struct kimage *image) + + void __init reserve_crashkernel(void) + { +- unsigned long long crash_size, crash_base; ++ unsigned long long crash_size, crash_base, total_mem_sz; + int ret; + ++ total_mem_sz = memory_limit ? 
memory_limit : memblock_phys_mem_size(); + /* use common parsing */ +- ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), ++ ret = parse_crashkernel(boot_command_line, total_mem_sz, + &crash_size, &crash_base); + if (ret == 0 && crash_size > 0) { + crashk_res.start = crash_base; +@@ -176,6 +177,7 @@ void __init reserve_crashkernel(void) + /* Crash kernel trumps memory limit */ + if (memory_limit && memory_limit <= crashk_res.end) { + memory_limit = crashk_res.end + 1; ++ total_mem_sz = memory_limit; + printk("Adjusted memory limit for crashkernel, now 0x%llx\n", + memory_limit); + } +@@ -184,7 +186,7 @@ void __init reserve_crashkernel(void) + "for crashkernel (System RAM: %ldMB)\n", + (unsigned long)(crash_size >> 20), + (unsigned long)(crashk_res.start >> 20), +- (unsigned long)(memblock_phys_mem_size() >> 20)); ++ (unsigned long)(total_mem_sz >> 20)); + + if (!memblock_is_region_memory(crashk_res.start, crash_size) || + memblock_reserve(crashk_res.start, crash_size)) { +diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c +index b868f07c4246..11b4ecec04ee 100644 +--- a/arch/powerpc/kernel/prom.c ++++ b/arch/powerpc/kernel/prom.c +@@ -262,7 +262,7 @@ static struct feature_property { + }; + + #if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU) +-static inline void identical_pvr_fixup(unsigned long node) ++static __init void identical_pvr_fixup(unsigned long node) + { + unsigned int pvr; + const char *model = of_get_flat_dt_prop(node, "model", NULL); +diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c +index fdba10695208..56089034d401 100644 +--- a/arch/powerpc/kernel/setup_64.c ++++ b/arch/powerpc/kernel/setup_64.c +@@ -685,7 +685,13 @@ early_initcall(disable_hardlockup_detector); + static enum l1d_flush_type enabled_flush_types; + static void *l1d_flush_fallback_area; + static bool no_rfi_flush; ++static bool no_entry_flush; ++static bool no_uaccess_flush; + bool rfi_flush; ++bool entry_flush; ++bool uaccess_flush; ++DEFINE_STATIC_KEY_FALSE(uaccess_flush_key); ++EXPORT_SYMBOL(uaccess_flush_key); + + static int __init handle_no_rfi_flush(char *p) + { +@@ -695,6 +701,22 @@ static int __init handle_no_rfi_flush(char *p) + } + early_param("no_rfi_flush", handle_no_rfi_flush); + ++static int __init handle_no_entry_flush(char *p) ++{ ++ pr_info("entry-flush: disabled on command line."); ++ no_entry_flush = true; ++ return 0; ++} ++early_param("no_entry_flush", handle_no_entry_flush); ++ ++static int __init handle_no_uaccess_flush(char *p) ++{ ++ pr_info("uaccess-flush: disabled on command line."); ++ no_uaccess_flush = true; ++ return 0; ++} ++early_param("no_uaccess_flush", handle_no_uaccess_flush); ++ + /* + * The RFI flush is not KPTI, but because users will see doco that says to use + * nopti we hijack that option here to also disable the RFI flush. 
+@@ -726,6 +748,32 @@ void rfi_flush_enable(bool enable) + rfi_flush = enable; + } + ++void entry_flush_enable(bool enable) ++{ ++ if (enable) { ++ do_entry_flush_fixups(enabled_flush_types); ++ on_each_cpu(do_nothing, NULL, 1); ++ } else { ++ do_entry_flush_fixups(L1D_FLUSH_NONE); ++ } ++ ++ entry_flush = enable; ++} ++ ++void uaccess_flush_enable(bool enable) ++{ ++ if (enable) { ++ do_uaccess_flush_fixups(enabled_flush_types); ++ static_branch_enable(&uaccess_flush_key); ++ on_each_cpu(do_nothing, NULL, 1); ++ } else { ++ static_branch_disable(&uaccess_flush_key); ++ do_uaccess_flush_fixups(L1D_FLUSH_NONE); ++ } ++ ++ uaccess_flush = enable; ++} ++ + static void __ref init_fallback_flush(void) + { + u64 l1d_size, limit; +@@ -771,6 +819,24 @@ void setup_rfi_flush(enum l1d_flush_type types, bool enable) + rfi_flush_enable(enable); + } + ++void setup_entry_flush(bool enable) ++{ ++ if (cpu_mitigations_off()) ++ return; ++ ++ if (!no_entry_flush) ++ entry_flush_enable(enable); ++} ++ ++void setup_uaccess_flush(bool enable) ++{ ++ if (cpu_mitigations_off()) ++ return; ++ ++ if (!no_uaccess_flush) ++ uaccess_flush_enable(enable); ++} ++ + #ifdef CONFIG_DEBUG_FS + static int rfi_flush_set(void *data, u64 val) + { +@@ -798,9 +864,63 @@ static int rfi_flush_get(void *data, u64 *val) + + DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n"); + ++static int entry_flush_set(void *data, u64 val) ++{ ++ bool enable; ++ ++ if (val == 1) ++ enable = true; ++ else if (val == 0) ++ enable = false; ++ else ++ return -EINVAL; ++ ++ /* Only do anything if we're changing state */ ++ if (enable != entry_flush) ++ entry_flush_enable(enable); ++ ++ return 0; ++} ++ ++static int entry_flush_get(void *data, u64 *val) ++{ ++ *val = entry_flush ? 1 : 0; ++ return 0; ++} ++ ++DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n"); ++ ++static int uaccess_flush_set(void *data, u64 val) ++{ ++ bool enable; ++ ++ if (val == 1) ++ enable = true; ++ else if (val == 0) ++ enable = false; ++ else ++ return -EINVAL; ++ ++ /* Only do anything if we're changing state */ ++ if (enable != uaccess_flush) ++ uaccess_flush_enable(enable); ++ ++ return 0; ++} ++ ++static int uaccess_flush_get(void *data, u64 *val) ++{ ++ *val = uaccess_flush ? 1 : 0; ++ return 0; ++} ++ ++DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n"); ++ + static __init int rfi_flush_debugfs_init(void) + { + debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush); ++ debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush); ++ debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush); + return 0; + } + device_initcall(rfi_flush_debugfs_init); +diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c +index c4f1d1f7bae0..6949a360c584 100644 +--- a/arch/powerpc/kernel/sysfs.c ++++ b/arch/powerpc/kernel/sysfs.c +@@ -28,29 +28,27 @@ + + static DEFINE_PER_CPU(struct cpu, cpu_devices); + +-/* +- * SMT snooze delay stuff, 64-bit only for now +- */ +- + #ifdef CONFIG_PPC64 + +-/* Time in microseconds we delay before sleeping in the idle loop */ +-static DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 }; ++/* ++ * Snooze delay has not been hooked up since 3fa8cad82b94 ("powerpc/pseries/cpuidle: ++ * smt-snooze-delay cleanup.") and has been broken even longer. As was foretold in ++ * 2014: ++ * ++ * "ppc64_util currently utilises it. 
Once we fix ppc64_util, propose to clean ++ * up the kernel code." ++ * ++ * powerpc-utils stopped using it as of 1.3.8. At some point in the future this ++ * code should be removed. ++ */ + + static ssize_t store_smt_snooze_delay(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) + { +- struct cpu *cpu = container_of(dev, struct cpu, dev); +- ssize_t ret; +- long snooze; +- +- ret = sscanf(buf, "%ld", &snooze); +- if (ret != 1) +- return -EINVAL; +- +- per_cpu(smt_snooze_delay, cpu->dev.id) = snooze; ++ pr_warn_once("%s (%d) stored to unsupported smt_snooze_delay, which has no effect.\n", ++ current->comm, current->pid); + return count; + } + +@@ -58,9 +56,9 @@ static ssize_t show_smt_snooze_delay(struct device *dev, + struct device_attribute *attr, + char *buf) + { +- struct cpu *cpu = container_of(dev, struct cpu, dev); +- +- return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id)); ++ pr_warn_once("%s (%d) read from unsupported smt_snooze_delay\n", ++ current->comm, current->pid); ++ return sprintf(buf, "100\n"); + } + + static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay, +@@ -68,16 +66,10 @@ static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay, + + static int __init setup_smt_snooze_delay(char *str) + { +- unsigned int cpu; +- long snooze; +- + if (!cpu_has_feature(CPU_FTR_SMT)) + return 1; + +- snooze = simple_strtol(str, NULL, 10); +- for_each_possible_cpu(cpu) +- per_cpu(smt_snooze_delay, cpu) = snooze; +- ++ pr_warn("smt-snooze-delay command line option has no effect\n"); + return 1; + } + __setup("smt-snooze-delay=", setup_smt_snooze_delay); +diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c +index a753b72efbc0..70c9d134a9d4 100644 +--- a/arch/powerpc/kernel/tau_6xx.c ++++ b/arch/powerpc/kernel/tau_6xx.c +@@ -37,8 +37,6 @@ static struct tau_temp + + struct timer_list tau_timer; + +-#undef DEBUG +- + /* TODO: put these in a /proc interface, with some sanity checks, and maybe + * dynamic adjustment to minimize # of interrupts */ + /* configurable values for step size and how much to expand the window when +@@ -71,47 +69,33 @@ void set_thresholds(unsigned long cpu) + + void TAUupdate(int cpu) + { +- unsigned thrm; +- +-#ifdef DEBUG +- printk("TAUupdate "); +-#endif ++ u32 thrm; ++ u32 bits = THRM1_TIV | THRM1_TIN | THRM1_V; + + /* if both thresholds are crossed, the step_sizes cancel out + * and the window winds up getting expanded twice. */ +- if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */ +- if(thrm & THRM1_TIN){ /* crossed low threshold */ +- if (tau[cpu].low >= step_size){ +- tau[cpu].low -= step_size; +- tau[cpu].high -= (step_size - window_expand); +- } +- tau[cpu].grew = 1; +-#ifdef DEBUG +- printk("low threshold crossed "); +-#endif ++ thrm = mfspr(SPRN_THRM1); ++ if ((thrm & bits) == bits) { ++ mtspr(SPRN_THRM1, 0); ++ ++ if (tau[cpu].low >= step_size) { ++ tau[cpu].low -= step_size; ++ tau[cpu].high -= (step_size - window_expand); + } ++ tau[cpu].grew = 1; ++ pr_debug("%s: low threshold crossed\n", __func__); + } +- if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? 
*/ +- if(thrm & THRM1_TIN){ /* crossed high threshold */ +- if (tau[cpu].high <= 127-step_size){ +- tau[cpu].low += (step_size - window_expand); +- tau[cpu].high += step_size; +- } +- tau[cpu].grew = 1; +-#ifdef DEBUG +- printk("high threshold crossed "); +-#endif ++ thrm = mfspr(SPRN_THRM2); ++ if ((thrm & bits) == bits) { ++ mtspr(SPRN_THRM2, 0); ++ ++ if (tau[cpu].high <= 127 - step_size) { ++ tau[cpu].low += (step_size - window_expand); ++ tau[cpu].high += step_size; + } ++ tau[cpu].grew = 1; ++ pr_debug("%s: high threshold crossed\n", __func__); + } +- +-#ifdef DEBUG +- printk("grew = %d\n", tau[cpu].grew); +-#endif +- +-#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */ +- set_thresholds(cpu); +-#endif +- + } + + #ifdef CONFIG_TAU_INT +@@ -136,18 +120,18 @@ void TAUException(struct pt_regs * regs) + static void tau_timeout(void * info) + { + int cpu; +- unsigned long flags; + int size; + int shrink; + +- /* disabling interrupts *should* be okay */ +- local_irq_save(flags); + cpu = smp_processor_id(); + + #ifndef CONFIG_TAU_INT + TAUupdate(cpu); + #endif + ++ /* Stop thermal sensor comparisons and interrupts */ ++ mtspr(SPRN_THRM3, 0); ++ + size = tau[cpu].high - tau[cpu].low; + if (size > min_window && ! tau[cpu].grew) { + /* do an exponential shrink of half the amount currently over size */ +@@ -169,22 +153,12 @@ static void tau_timeout(void * info) + + set_thresholds(cpu); + +- /* +- * Do the enable every time, since otherwise a bunch of (relatively) +- * complex sleep code needs to be added. One mtspr every time +- * tau_timeout is called is probably not a big deal. +- * +- * Enable thermal sensor and set up sample interval timer +- * need 20 us to do the compare.. until a nice 'cpu_speed' function +- * call is implemented, just assume a 500 mhz clock. It doesn't really +- * matter if we take too long for a compare since it's all interrupt +- * driven anyway. +- * +- * use a extra long time.. (60 us @ 500 mhz) ++ /* Restart thermal sensor comparisons and interrupts. ++ * The "PowerPC 740 and PowerPC 750 Microprocessor Datasheet" ++ * recommends that "the maximum value be set in THRM3 under all ++ * conditions." + */ +- mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E); +- +- local_irq_restore(flags); ++ mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E); + } + + static void tau_timeout_smp(unsigned long unused) +diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c +index 4111d30badfa..d24aea160352 100644 +--- a/arch/powerpc/kernel/vdso.c ++++ b/arch/powerpc/kernel/vdso.c +@@ -704,7 +704,7 @@ int vdso_getcpu_init(void) + node = cpu_to_node(cpu); + WARN_ON_ONCE(node > 0xffff); + +- val = (cpu & 0xfff) | ((node & 0xffff) << 16); ++ val = (cpu & 0xffff) | ((node & 0xffff) << 16); + mtspr(SPRN_SPRG_VDSO_WRITE, val); + get_paca()->sprg_vdso = val; + +diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S +index c20510497c49..5d450c74f6f6 100644 +--- a/arch/powerpc/kernel/vmlinux.lds.S ++++ b/arch/powerpc/kernel/vmlinux.lds.S +@@ -140,6 +140,20 @@ SECTIONS + __stop___stf_entry_barrier_fixup = .; + } + ++ . = ALIGN(8); ++ __uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) { ++ __start___uaccess_flush_fixup = .; ++ *(__uaccess_flush_fixup) ++ __stop___uaccess_flush_fixup = .; ++ } ++ ++ . = ALIGN(8); ++ __entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) { ++ __start___entry_flush_fixup = .; ++ *(__entry_flush_fixup) ++ __stop___entry_flush_fixup = .; ++ } ++ + . 
= ALIGN(8); + __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) { + __start___stf_exit_barrier_fixup = .; +diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c +index b1b2273d1f6d..308744830f55 100644 +--- a/arch/powerpc/kvm/book3s_rtas.c ++++ b/arch/powerpc/kvm/book3s_rtas.c +@@ -230,6 +230,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) + * value so we can restore it on the way out. + */ + orig_rets = args.rets; ++ if (be32_to_cpu(args.nargs) >= ARRAY_SIZE(args.args)) { ++ /* ++ * Don't overflow our args array: ensure there is room for ++ * at least rets[0] (even if the call specifies 0 nret). ++ * ++ * Each handler must then check for the correct nargs and nret ++ * values, but they may always return failure in rets[0]. ++ */ ++ rc = -EINVAL; ++ goto fail; ++ } + args.rets = &args.args[be32_to_cpu(args.nargs)]; + + mutex_lock(&vcpu->kvm->arch.rtas_token_lock); +@@ -257,9 +268,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) + fail: + /* + * We only get here if the guest has called RTAS with a bogus +- * args pointer. That means we can't get to the args, and so we +- * can't fail the RTAS call. So fail right out to userspace, +- * which should kill the guest. ++ * args pointer or nargs/nret values that would overflow the ++ * array. That means we can't get to the args, and so we can't ++ * fail the RTAS call. So fail right out to userspace, which ++ * should kill the guest. ++ * ++ * SLOF should actually pass the hcall return value from the ++ * rtas handler call in r3, so enter_rtas could be modified to ++ * return a failure indication in r3 and we could return such ++ * errors to the guest rather than failing to host userspace. ++ * However old guests that don't test for failure could then ++ * continue silently after errors, so for now we won't do this. 
+ */ + return rc; + } +diff --git a/arch/powerpc/lib/checksum_wrappers.c b/arch/powerpc/lib/checksum_wrappers.c +index 08e3a3356c40..11b58949eb62 100644 +--- a/arch/powerpc/lib/checksum_wrappers.c ++++ b/arch/powerpc/lib/checksum_wrappers.c +@@ -29,6 +29,7 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst, + unsigned int csum; + + might_sleep(); ++ allow_read_from_user(src, len); + + *err_ptr = 0; + +@@ -60,6 +61,7 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst, + } + + out: ++ prevent_read_from_user(src, len); + return (__force __wsum)csum; + } + EXPORT_SYMBOL(csum_and_copy_from_user); +@@ -70,6 +72,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, + unsigned int csum; + + might_sleep(); ++ allow_write_to_user(dst, len); + + *err_ptr = 0; + +@@ -97,6 +100,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, + } + + out: ++ prevent_write_to_user(dst, len); + return (__force __wsum)csum; + } + EXPORT_SYMBOL(csum_and_copy_to_user); +diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c +index e6ed0ec94bc8..777a90e251cc 100644 +--- a/arch/powerpc/lib/feature-fixups.c ++++ b/arch/powerpc/lib/feature-fixups.c +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -232,6 +233,124 @@ void do_stf_barrier_fixups(enum stf_barrier_type types) + do_stf_exit_barrier_fixups(types); + } + ++void do_uaccess_flush_fixups(enum l1d_flush_type types) ++{ ++ unsigned int instrs[4], *dest; ++ long *start, *end; ++ int i; ++ ++ start = PTRRELOC(&__start___uaccess_flush_fixup); ++ end = PTRRELOC(&__stop___uaccess_flush_fixup); ++ ++ instrs[0] = 0x60000000; /* nop */ ++ instrs[1] = 0x60000000; /* nop */ ++ instrs[2] = 0x60000000; /* nop */ ++ instrs[3] = 0x4e800020; /* blr */ ++ ++ i = 0; ++ if (types == L1D_FLUSH_FALLBACK) { ++ instrs[3] = 0x60000000; /* nop */ ++ /* fallthrough to fallback flush */ ++ } ++ ++ if (types & L1D_FLUSH_ORI) { ++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ ++ instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/ ++ } ++ ++ if (types & L1D_FLUSH_MTTRIG) ++ instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */ ++ ++ for (i = 0; start < end; start++, i++) { ++ dest = (void *)start + *start; ++ ++ pr_devel("patching dest %lx\n", (unsigned long)dest); ++ ++ patch_instruction(dest, instrs[0]); ++ ++ patch_instruction((dest + 1), instrs[1]); ++ patch_instruction((dest + 2), instrs[2]); ++ patch_instruction((dest + 3), instrs[3]); ++ } ++ ++ printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i, ++ (types == L1D_FLUSH_NONE) ? "no" : ++ (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" : ++ (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG) ++ ? "ori+mttrig type" ++ : "ori type" : ++ (types & L1D_FLUSH_MTTRIG) ? 
"mttrig type" ++ : "unknown"); ++} ++ ++static int __do_entry_flush_fixups(void *data) ++{ ++ enum l1d_flush_type types = *(enum l1d_flush_type *)data; ++ unsigned int instrs[3], *dest; ++ long *start, *end; ++ int i; ++ ++ start = PTRRELOC(&__start___entry_flush_fixup); ++ end = PTRRELOC(&__stop___entry_flush_fixup); ++ ++ instrs[0] = 0x60000000; /* nop */ ++ instrs[1] = 0x60000000; /* nop */ ++ instrs[2] = 0x60000000; /* nop */ ++ ++ i = 0; ++ if (types == L1D_FLUSH_FALLBACK) { ++ instrs[i++] = 0x7d4802a6; /* mflr r10 */ ++ instrs[i++] = 0x60000000; /* branch patched below */ ++ instrs[i++] = 0x7d4803a6; /* mtlr r10 */ ++ } ++ ++ if (types & L1D_FLUSH_ORI) { ++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ ++ instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/ ++ } ++ ++ if (types & L1D_FLUSH_MTTRIG) ++ instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */ ++ ++ for (i = 0; start < end; start++, i++) { ++ dest = (void *)start + *start; ++ ++ pr_devel("patching dest %lx\n", (unsigned long)dest); ++ ++ patch_instruction(dest, instrs[0]); ++ ++ if (types == L1D_FLUSH_FALLBACK) ++ patch_branch((dest + 1), (unsigned long)&entry_flush_fallback, ++ BRANCH_SET_LINK); ++ else ++ patch_instruction((dest + 1), instrs[1]); ++ ++ patch_instruction((dest + 2), instrs[2]); ++ } ++ ++ printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i, ++ (types == L1D_FLUSH_NONE) ? "no" : ++ (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" : ++ (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG) ++ ? "ori+mttrig type" ++ : "ori type" : ++ (types & L1D_FLUSH_MTTRIG) ? "mttrig type" ++ : "unknown"); ++ ++ return 0; ++} ++ ++void do_entry_flush_fixups(enum l1d_flush_type types) ++{ ++ /* ++ * The call to the fallback flush can not be safely patched in/out while ++ * other CPUs are executing it. So call __do_entry_flush_fixups() on one ++ * CPU while all other CPUs spin in the stop machine core with interrupts ++ * hard disabled. ++ */ ++ stop_machine(__do_entry_flush_fixups, &types, NULL); ++} ++ + void do_rfi_flush_fixups(enum l1d_flush_type types) + { + unsigned int instrs[3], *dest; +diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S +index d13e07603519..4e85411d4a7e 100644 +--- a/arch/powerpc/lib/string.S ++++ b/arch/powerpc/lib/string.S +@@ -89,7 +89,7 @@ _GLOBAL(memchr) + EXPORT_SYMBOL(memchr) + + #ifdef CONFIG_PPC32 +-_GLOBAL(__clear_user) ++_GLOBAL(__arch_clear_user) + addi r6,r3,-4 + li r3,0 + li r5,0 +@@ -130,5 +130,5 @@ _GLOBAL(__clear_user) + PPC_LONG 1b,91b + PPC_LONG 8b,92b + .text +-EXPORT_SYMBOL(__clear_user) ++EXPORT_SYMBOL(__arch_clear_user) + #endif +diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S +index 11e6372537fd..347029f65edb 100644 +--- a/arch/powerpc/lib/string_64.S ++++ b/arch/powerpc/lib/string_64.S +@@ -28,7 +28,7 @@ PPC64_CACHES: + .section ".text" + + /** +- * __clear_user: - Zero a block of memory in user space, with less checking. ++ * __arch_clear_user: - Zero a block of memory in user space, with less checking. + * @to: Destination address, in user space. + * @n: Number of bytes to zero. 
+ * +@@ -78,7 +78,7 @@ err3; stb r0,0(r3) + mr r3,r4 + blr + +-_GLOBAL_TOC(__clear_user) ++_GLOBAL_TOC(__arch_clear_user) + cmpdi r4,32 + neg r6,r3 + li r0,0 +@@ -201,4 +201,4 @@ err1; dcbz 0,r3 + cmpdi r4,32 + blt .Lshort_clear + b .Lmedium_clear +-EXPORT_SYMBOL(__clear_user) ++EXPORT_SYMBOL(__arch_clear_user) +diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c +index 2791f568bdb2..3e4fb430ae45 100644 +--- a/arch/powerpc/mm/fault.c ++++ b/arch/powerpc/mm/fault.c +@@ -192,6 +192,9 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) + return MM_FAULT_CONTINUE; + } + ++// This comes from 64-bit struct rt_sigframe + __SIGNAL_FRAMESIZE ++#define SIGFRAME_MAX_SIZE (4096 + 128) ++ + /* + * For 600- and 800-family processors, the error_code parameter is DSISR + * for a data fault, SRR1 for an instruction fault. For 400-family processors +@@ -341,7 +344,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, + /* + * N.B. The POWER/Open ABI allows programs to access up to + * 288 bytes below the stack pointer. +- * The kernel signal delivery code writes up to about 1.5kB ++ * The kernel signal delivery code writes up to about 4kB + * below the stack pointer (r1) before decrementing it. + * The exec code can write slightly over 640kB to the stack + * before setting the user r1. Thus we allow the stack to +@@ -365,7 +368,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, + * between the last mapped region and the stack will + * expand the stack rather than segfaulting. + */ +- if (address + 2048 < uregs->gpr[1] && !store_update_sp) ++ if (address + SIGFRAME_MAX_SIZE < uregs->gpr[1] && !store_update_sp) + goto bad_area; + } + if (expand_stack(vma, address)) +diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c +index ba49ae6625f1..f2d8f35c181f 100644 +--- a/arch/powerpc/perf/core-book3s.c ++++ b/arch/powerpc/perf/core-book3s.c +@@ -2010,7 +2010,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val, + left += period; + if (left <= 0) + left = period; +- record = siar_valid(regs); ++ ++ /* ++ * If address is not requested in the sample via ++ * PERF_SAMPLE_IP, just record that sample irrespective ++ * of SIAR valid check. ++ */ ++ if (event->attr.sample_type & PERF_SAMPLE_IP) ++ record = siar_valid(regs); ++ else ++ record = 1; ++ + event->hw.last_period = event->hw.sample_period; + } + if (left < 0x80000000LL) +@@ -2022,6 +2032,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val, + local64_set(&event->hw.period_left, left); + perf_event_update_userpage(event); + ++ /* ++ * Due to hardware limitation, sometimes SIAR could sample a kernel ++ * address even when freeze on supervisor state (kernel) is set in ++ * MMCR2. Check attr.exclude_kernel and address to drop the sample in ++ * these cases. ++ */ ++ if (event->attr.exclude_kernel && ++ (event->attr.sample_type & PERF_SAMPLE_IP) && ++ is_kernel_addr(mfspr(SPRN_SIAR))) ++ record = 0; ++ + /* + * Finally record data if requested. 
+ */ +@@ -2042,6 +2063,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val, + + if (perf_event_overflow(event, &data, regs)) + power_pmu_stop(event, 0); ++ } else if (period) { ++ /* Account for interrupt in case of invalid SIAR */ ++ if (perf_event_account_interrupt(event)) ++ power_pmu_stop(event, 0); + } + } + +diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c +index 991c6a517ddc..2456522583c2 100644 +--- a/arch/powerpc/perf/hv-24x7.c ++++ b/arch/powerpc/perf/hv-24x7.c +@@ -1306,16 +1306,6 @@ static void h_24x7_event_read(struct perf_event *event) + h24x7hw = &get_cpu_var(hv_24x7_hw); + h24x7hw->events[i] = event; + put_cpu_var(h24x7hw); +- /* +- * Clear the event count so we can compute the _change_ +- * in the 24x7 raw counter value at the end of the txn. +- * +- * Note that we could alternatively read the 24x7 value +- * now and save its value in event->hw.prev_count. But +- * that would require issuing a hcall, which would then +- * defeat the purpose of using the txn interface. +- */ +- local64_set(&event->count, 0); + } + + put_cpu_var(hv_24x7_reqb); +diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h +index acd17648cd18..5ea24d16a74a 100644 +--- a/arch/powerpc/perf/hv-gpci-requests.h ++++ b/arch/powerpc/perf/hv-gpci-requests.h +@@ -94,7 +94,7 @@ REQUEST(__field(0, 8, partition_id) + + #define REQUEST_NAME system_performance_capabilities + #define REQUEST_NUM 0x40 +-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff" ++#define REQUEST_IDX_KIND "starting_index=0xffffffff" + #include I(REQUEST_BEGIN) + REQUEST(__field(0, 1, perf_collect_privileged) + __field(0x1, 1, capability_mask) +@@ -222,7 +222,7 @@ REQUEST(__field(0, 2, partition_id) + + #define REQUEST_NAME system_hypervisor_times + #define REQUEST_NUM 0xF0 +-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff" ++#define REQUEST_IDX_KIND "starting_index=0xffffffff" + #include I(REQUEST_BEGIN) + REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors) + __count(0x8, 8, time_spent_processing_virtual_processor_timers) +@@ -233,7 +233,7 @@ REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors) + + #define REQUEST_NAME system_tlbie_count_and_time + #define REQUEST_NUM 0xF4 +-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff" ++#define REQUEST_IDX_KIND "starting_index=0xffffffff" + #include I(REQUEST_BEGIN) + REQUEST(__count(0, 8, tlbie_instructions_issued) + /* +diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c +index 6143c99f3ec5..2d3557406424 100644 +--- a/arch/powerpc/perf/isa207-common.c ++++ b/arch/powerpc/perf/isa207-common.c +@@ -51,6 +51,15 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) + + mask |= CNST_PMC_MASK(pmc); + value |= CNST_PMC_VAL(pmc); ++ ++ /* ++ * PMC5 and PMC6 are used to count cycles and instructions and ++ * they do not support most of the constraint bits. Add a check ++ * to exclude PMC5/6 from most of the constraints except for ++ * EBB/BHRB. 
++ */ ++ if (pmc >= 5) ++ goto ebb_bhrb; + } + + if (pmc <= 4) { +@@ -111,6 +120,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) + value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT); + } + ++ebb_bhrb: + if (!pmc && ebb) + /* EBB events must specify the PMC */ + return -1; +@@ -129,8 +139,8 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) + * EBB events are pinned & exclusive, so this should never actually + * hit, but we leave it as a fallback in case. + */ +- mask |= CNST_EBB_VAL(ebb); +- value |= CNST_EBB_MASK; ++ mask |= CNST_EBB_MASK; ++ value |= CNST_EBB_VAL(ebb); + + *maskp = mask; + *valp = value; +diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S +index 08ab6fefcf7a..5f44e9223413 100644 +--- a/arch/powerpc/platforms/52xx/lite5200_sleep.S ++++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S +@@ -180,7 +180,7 @@ sram_code: + udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */ + mullw r12, r12, r11 + mftb r13 /* start */ +- addi r12, r13, r12 /* end */ ++ add r12, r13, r12 /* end */ + 1: + mftb r13 /* current */ + cmp cr0, r13, r12 +diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig +index fbdae8377b71..a7ba4c61d8e9 100644 +--- a/arch/powerpc/platforms/Kconfig ++++ b/arch/powerpc/platforms/Kconfig +@@ -242,7 +242,7 @@ config TAU + temp is actually what /proc/cpuinfo says it is. + + config TAU_INT +- bool "Interrupt driven TAU driver (DANGEROUS)" ++ bool "Interrupt driven TAU driver (EXPERIMENTAL)" + depends on TAU + ---help--- + The TAU supports an interrupt driven mode which causes an interrupt +@@ -250,12 +250,7 @@ config TAU_INT + to get notified the temp has exceeded a range. With this option off, + a timer is used to re-check the temperature periodically. + +- However, on some cpus it appears that the TAU interrupt hardware +- is buggy and can cause a situation which would lead unexplained hard +- lockups. +- +- Unless you are extending the TAU driver, or enjoy kernel/hardware +- debugging, leave this option off. ++ If in doubt, say N here. 
+ + config TAU_AVERAGE + bool "Average high and low temp" +diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig +index d9088f0b8fcc..621be4343330 100644 +--- a/arch/powerpc/platforms/cell/Kconfig ++++ b/arch/powerpc/platforms/cell/Kconfig +@@ -45,6 +45,7 @@ config SPU_FS + tristate "SPU file system" + default m + depends on PPC_CELL ++ depends on COREDUMP + select SPU_BASE + select MEMORY_HOTPLUG + help +diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c +index 06254467e4dd..f12b00a056cb 100644 +--- a/arch/powerpc/platforms/cell/spufs/file.c ++++ b/arch/powerpc/platforms/cell/spufs/file.c +@@ -2044,8 +2044,9 @@ static ssize_t __spufs_mbox_info_read(struct spu_context *ctx, + static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { +- int ret; + struct spu_context *ctx = file->private_data; ++ u32 stat, data; ++ int ret; + + if (!access_ok(VERIFY_WRITE, buf, len)) + return -EFAULT; +@@ -2054,11 +2055,16 @@ static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_mbox_info_read(ctx, buf, len, pos); ++ stat = ctx->csa.prob.mb_stat_R; ++ data = ctx->csa.prob.pu_mb_R; + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ /* EOF if there's no entry in the mbox */ ++ if (!(stat & 0x0000ff)) ++ return 0; ++ ++ return simple_read_from_buffer(buf, len, pos, &data, sizeof(data)); + } + + static const struct file_operations spufs_mbox_info_fops = { +@@ -2085,6 +2091,7 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { + struct spu_context *ctx = file->private_data; ++ u32 stat, data; + int ret; + + if (!access_ok(VERIFY_WRITE, buf, len)) +@@ -2094,11 +2101,16 @@ static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_ibox_info_read(ctx, buf, len, pos); ++ stat = ctx->csa.prob.mb_stat_R; ++ data = ctx->csa.priv2.puint_mb_R; + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ /* EOF if there's no entry in the ibox */ ++ if (!(stat & 0xff0000)) ++ return 0; ++ ++ return simple_read_from_buffer(buf, len, pos, &data, sizeof(data)); + } + + static const struct file_operations spufs_ibox_info_fops = { +@@ -2107,6 +2119,11 @@ static const struct file_operations spufs_ibox_info_fops = { + .llseek = generic_file_llseek, + }; + ++static size_t spufs_wbox_info_cnt(struct spu_context *ctx) ++{ ++ return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32); ++} ++ + static ssize_t __spufs_wbox_info_read(struct spu_context *ctx, + char __user *buf, size_t len, loff_t *pos) + { +@@ -2115,7 +2132,7 @@ static ssize_t __spufs_wbox_info_read(struct spu_context *ctx, + u32 wbox_stat; + + wbox_stat = ctx->csa.prob.mb_stat_R; +- cnt = 4 - ((wbox_stat & 0x00ff00) >> 8); ++ cnt = spufs_wbox_info_cnt(ctx); + for (i = 0; i < cnt; i++) { + data[i] = ctx->csa.spu_mailbox_data[i]; + } +@@ -2128,7 +2145,8 @@ static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { + struct spu_context *ctx = file->private_data; +- int ret; ++ u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)]; ++ int ret, count; + + if (!access_ok(VERIFY_WRITE, buf, len)) + return -EFAULT; +@@ -2137,11 +2155,13 @@ static ssize_t spufs_wbox_info_read(struct file *file, char 
__user *buf, + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_wbox_info_read(ctx, buf, len, pos); ++ count = spufs_wbox_info_cnt(ctx); ++ memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data)); + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ return simple_read_from_buffer(buf, len, pos, &data, ++ count * sizeof(u32)); + } + + static const struct file_operations spufs_wbox_info_fops = { +@@ -2150,27 +2170,33 @@ static const struct file_operations spufs_wbox_info_fops = { + .llseek = generic_file_llseek, + }; + +-static ssize_t __spufs_dma_info_read(struct spu_context *ctx, +- char __user *buf, size_t len, loff_t *pos) ++static void spufs_get_dma_info(struct spu_context *ctx, ++ struct spu_dma_info *info) + { +- struct spu_dma_info info; +- struct mfc_cq_sr *qp, *spuqp; + int i; + +- info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; +- info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; +- info.dma_info_status = ctx->csa.spu_chnldata_RW[24]; +- info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; +- info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; ++ info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; ++ info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; ++ info->dma_info_status = ctx->csa.spu_chnldata_RW[24]; ++ info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; ++ info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; + for (i = 0; i < 16; i++) { +- qp = &info.dma_info_command_data[i]; +- spuqp = &ctx->csa.priv2.spuq[i]; ++ struct mfc_cq_sr *qp = &info->dma_info_command_data[i]; ++ struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i]; + + qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW; + qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW; + qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW; + qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW; + } ++} ++ ++static ssize_t __spufs_dma_info_read(struct spu_context *ctx, ++ char __user *buf, size_t len, loff_t *pos) ++{ ++ struct spu_dma_info info; ++ ++ spufs_get_dma_info(ctx, &info); + + return simple_read_from_buffer(buf, len, pos, &info, + sizeof info); +@@ -2180,6 +2206,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { + struct spu_context *ctx = file->private_data; ++ struct spu_dma_info info; + int ret; + + if (!access_ok(VERIFY_WRITE, buf, len)) +@@ -2189,11 +2216,12 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_dma_info_read(ctx, buf, len, pos); ++ spufs_get_dma_info(ctx, &info); + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ return simple_read_from_buffer(buf, len, pos, &info, ++ sizeof(info)); + } + + static const struct file_operations spufs_dma_info_fops = { +@@ -2202,13 +2230,31 @@ static const struct file_operations spufs_dma_info_fops = { + .llseek = no_llseek, + }; + ++static void spufs_get_proxydma_info(struct spu_context *ctx, ++ struct spu_proxydma_info *info) ++{ ++ int i; ++ ++ info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW; ++ info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; ++ info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; ++ ++ for (i = 0; i < 8; i++) { ++ struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i]; ++ struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i]; ++ ++ qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; ++ qp->mfc_cq_data1_RW = 
puqp->mfc_cq_data1_RW; ++ qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; ++ qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; ++ } ++} ++ + static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, + char __user *buf, size_t len, loff_t *pos) + { + struct spu_proxydma_info info; +- struct mfc_cq_sr *qp, *puqp; + int ret = sizeof info; +- int i; + + if (len < ret) + return -EINVAL; +@@ -2216,18 +2262,7 @@ static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, + if (!access_ok(VERIFY_WRITE, buf, len)) + return -EFAULT; + +- info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW; +- info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; +- info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; +- for (i = 0; i < 8; i++) { +- qp = &info.proxydma_info_command_data[i]; +- puqp = &ctx->csa.priv2.puq[i]; +- +- qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; +- qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; +- qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; +- qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; +- } ++ spufs_get_proxydma_info(ctx, &info); + + return simple_read_from_buffer(buf, len, pos, &info, + sizeof info); +@@ -2237,17 +2272,19 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) + { + struct spu_context *ctx = file->private_data; ++ struct spu_proxydma_info info; + int ret; + + ret = spu_acquire_saved(ctx); + if (ret) + return ret; + spin_lock(&ctx->csa.register_lock); +- ret = __spufs_proxydma_info_read(ctx, buf, len, pos); ++ spufs_get_proxydma_info(ctx, &info); + spin_unlock(&ctx->csa.register_lock); + spu_release_saved(ctx); + +- return ret; ++ return simple_read_from_buffer(buf, len, pos, &info, ++ sizeof(info)); + } + + static const struct file_operations spufs_proxydma_info_fops = { +diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c +index 4c827826c05e..1a8b6e276a11 100644 +--- a/arch/powerpc/platforms/powernv/opal-dump.c ++++ b/arch/powerpc/platforms/powernv/opal-dump.c +@@ -319,15 +319,14 @@ static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj, + return count; + } + +-static struct dump_obj *create_dump_obj(uint32_t id, size_t size, +- uint32_t type) ++static void create_dump_obj(uint32_t id, size_t size, uint32_t type) + { + struct dump_obj *dump; + int rc; + + dump = kzalloc(sizeof(*dump), GFP_KERNEL); + if (!dump) +- return NULL; ++ return; + + dump->kobj.kset = dump_kset; + +@@ -347,34 +346,51 @@ static struct dump_obj *create_dump_obj(uint32_t id, size_t size, + rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id); + if (rc) { + kobject_put(&dump->kobj); +- return NULL; ++ return; + } + ++ /* ++ * As soon as the sysfs file for this dump is created/activated there is ++ * a chance the opal_errd daemon (or any userspace) might read and ++ * acknowledge the dump before kobject_uevent() is called. If that ++ * happens then there is a potential race between ++ * dump_ack_store->kobject_put() and kobject_uevent() which leads to a ++ * use-after-free of a kernfs object resulting in a kernel crash. ++ * ++ * To avoid that, we need to take a reference on behalf of the bin file, ++ * so that our reference remains valid while we call kobject_uevent(). ++ * We then drop our reference before exiting the function, leaving the ++ * bin file to drop the last reference (if it hasn't already). 
++ */ ++ ++ /* Take a reference for the bin file */ ++ kobject_get(&dump->kobj); + rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr); +- if (rc) { ++ if (rc == 0) { ++ kobject_uevent(&dump->kobj, KOBJ_ADD); ++ ++ pr_info("%s: New platform dump. ID = 0x%x Size %u\n", ++ __func__, dump->id, dump->size); ++ } else { ++ /* Drop reference count taken for bin file */ + kobject_put(&dump->kobj); +- return NULL; + } + +- pr_info("%s: New platform dump. ID = 0x%x Size %u\n", +- __func__, dump->id, dump->size); +- +- kobject_uevent(&dump->kobj, KOBJ_ADD); +- +- return dump; ++ /* Drop our reference */ ++ kobject_put(&dump->kobj); ++ return; + } + + static irqreturn_t process_dump(int irq, void *data) + { + int rc; + uint32_t dump_id, dump_size, dump_type; +- struct dump_obj *dump; + char name[22]; + struct kobject *kobj; + + rc = dump_read_info(&dump_id, &dump_size, &dump_type); + if (rc != OPAL_SUCCESS) +- return rc; ++ return IRQ_HANDLED; + + sprintf(name, "0x%x-0x%x", dump_type, dump_id); + +@@ -386,12 +402,10 @@ static irqreturn_t process_dump(int irq, void *data) + if (kobj) { + /* Drop reference added by kset_find_obj() */ + kobject_put(kobj); +- return 0; ++ return IRQ_HANDLED; + } + +- dump = create_dump_obj(dump_id, dump_size, dump_type); +- if (!dump) +- return -1; ++ create_dump_obj(dump_id, dump_size, dump_type); + + return IRQ_HANDLED; + } +diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c +index f2344cbd2f46..3595f3cfefa3 100644 +--- a/arch/powerpc/platforms/powernv/opal-elog.c ++++ b/arch/powerpc/platforms/powernv/opal-elog.c +@@ -183,14 +183,14 @@ static ssize_t raw_attr_read(struct file *filep, struct kobject *kobj, + return count; + } + +-static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type) ++static void create_elog_obj(uint64_t id, size_t size, uint64_t type) + { + struct elog_obj *elog; + int rc; + + elog = kzalloc(sizeof(*elog), GFP_KERNEL); + if (!elog) +- return NULL; ++ return; + + elog->kobj.kset = elog_kset; + +@@ -223,18 +223,37 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type) + rc = kobject_add(&elog->kobj, NULL, "0x%llx", id); + if (rc) { + kobject_put(&elog->kobj); +- return NULL; ++ return; + } + ++ /* ++ * As soon as the sysfs file for this elog is created/activated there is ++ * a chance the opal_errd daemon (or any userspace) might read and ++ * acknowledge the elog before kobject_uevent() is called. If that ++ * happens then there is a potential race between ++ * elog_ack_store->kobject_put() and kobject_uevent() which leads to a ++ * use-after-free of a kernfs object resulting in a kernel crash. ++ * ++ * To avoid that, we need to take a reference on behalf of the bin file, ++ * so that our reference remains valid while we call kobject_uevent(). ++ * We then drop our reference before exiting the function, leaving the ++ * bin file to drop the last reference (if it hasn't already). 
++ */ ++ ++ /* Take a reference for the bin file */ ++ kobject_get(&elog->kobj); + rc = sysfs_create_bin_file(&elog->kobj, &elog->raw_attr); +- if (rc) { ++ if (rc == 0) { ++ kobject_uevent(&elog->kobj, KOBJ_ADD); ++ } else { ++ /* Drop the reference taken for the bin file */ + kobject_put(&elog->kobj); +- return NULL; + } + +- kobject_uevent(&elog->kobj, KOBJ_ADD); ++ /* Drop our reference */ ++ kobject_put(&elog->kobj); + +- return elog; ++ return; + } + + static irqreturn_t elog_event(int irq, void *data) +diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c +index 365e2b620201..b77d5eed9520 100644 +--- a/arch/powerpc/platforms/powernv/setup.c ++++ b/arch/powerpc/platforms/powernv/setup.c +@@ -124,12 +124,27 @@ static void pnv_setup_rfi_flush(void) + type = L1D_FLUSH_ORI; + } + ++ /* ++ * 4.9 doesn't support Power9 bare metal, so we don't need to flush ++ * here - the flushes fix a P9 specific vulnerability. ++ */ ++ security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY); ++ security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS); ++ + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \ + (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \ + security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV)); + + setup_rfi_flush(type, enable); + setup_count_cache_flush(); ++ ++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && ++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY); ++ setup_entry_flush(enable); ++ ++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && ++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS); ++ setup_uaccess_flush(enable); + } + + static void __init pnv_setup_arch(void) +diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c +index eec0e8d0454d..7e0f5fa0452b 100644 +--- a/arch/powerpc/platforms/powernv/smp.c ++++ b/arch/powerpc/platforms/powernv/smp.c +@@ -41,7 +41,7 @@ + #include + #define DBG(fmt...) udbg_printf(fmt) + #else +-#define DBG(fmt...) ++#define DBG(fmt...) 
do { } while (0) + #endif + + static void pnv_smp_setup_cpu(int cpu) +diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c +index b0f34663b1ae..76cbf1be9962 100644 +--- a/arch/powerpc/platforms/ps3/mm.c ++++ b/arch/powerpc/platforms/ps3/mm.c +@@ -18,6 +18,7 @@ + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + ++#include + #include + #include + #include +@@ -212,13 +213,14 @@ void ps3_mm_vas_destroy(void) + { + int result; + +- DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id); +- + if (map.vas_id) { + result = lv1_select_virtual_address_space(0); +- BUG_ON(result); +- result = lv1_destruct_virtual_address_space(map.vas_id); +- BUG_ON(result); ++ result += lv1_destruct_virtual_address_space(map.vas_id); ++ ++ if (result) { ++ lv1_panic(0); ++ } ++ + map.vas_id = 0; + } + } +@@ -316,19 +318,20 @@ static void ps3_mm_region_destroy(struct mem_region *r) + int result; + + if (!r->destroy) { +- pr_info("%s:%d: Not destroying high region: %llxh %llxh\n", +- __func__, __LINE__, r->base, r->size); + return; + } + +- DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base); +- + if (r->base) { + result = lv1_release_memory(r->base); +- BUG_ON(result); ++ ++ if (result) { ++ lv1_panic(0); ++ } ++ + r->size = r->base = r->offset = 0; + map.total = map.rm.size; + } ++ + ps3_mm_set_repository_highmem(NULL); + } + +@@ -1130,6 +1133,7 @@ int ps3_dma_region_init(struct ps3_system_bus_device *dev, + enum ps3_dma_region_type region_type, void *addr, unsigned long len) + { + unsigned long lpar_addr; ++ int result; + + lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0; + +@@ -1141,6 +1145,16 @@ int ps3_dma_region_init(struct ps3_system_bus_device *dev, + r->offset -= map.r1.offset; + r->len = len ? 
len : _ALIGN_UP(map.total, 1 << r->page_size); + ++ dev->core.dma_mask = &r->dma_mask; ++ ++ result = dma_set_mask_and_coherent(&dev->core, DMA_BIT_MASK(32)); ++ ++ if (result < 0) { ++ dev_err(&dev->core, "%s:%d: dma_set_mask_and_coherent failed: %d\n", ++ __func__, __LINE__, result); ++ return result; ++ } ++ + switch (dev->dev_type) { + case PS3_DEVICE_TYPE_SB: + r->region_ops = (USE_DYNAMIC_DMA) +diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c +index 5abb8e2239a5..647dbd8514c4 100644 +--- a/arch/powerpc/platforms/pseries/dlpar.c ++++ b/arch/powerpc/platforms/pseries/dlpar.c +@@ -139,7 +139,6 @@ void dlpar_free_cc_nodes(struct device_node *dn) + #define NEXT_PROPERTY 3 + #define PREV_PARENT 4 + #define MORE_MEMORY 5 +-#define CALL_AGAIN -2 + #define ERR_CFG_USE -9003 + + struct device_node *dlpar_configure_connector(__be32 drc_index, +@@ -181,6 +180,9 @@ struct device_node *dlpar_configure_connector(__be32 drc_index, + + spin_unlock(&rtas_data_buf_lock); + ++ if (rtas_busy_delay(rc)) ++ continue; ++ + switch (rc) { + case COMPLETE: + break; +@@ -233,9 +235,6 @@ struct device_node *dlpar_configure_connector(__be32 drc_index, + parent_path = last_dn->parent->full_name; + break; + +- case CALL_AGAIN: +- break; +- + case MORE_MEMORY: + case ERR_CFG_USE: + default: +diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c +index 7a2beedb9740..a7d9dd029850 100644 +--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c ++++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c +@@ -92,9 +92,6 @@ static void rtas_stop_self(void) + + BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE); + +- printk("cpu %u (hwid %u) Ready to die...\n", +- smp_processor_id(), hard_smp_processor_id()); +- + rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL); + + panic("Alas, I survived.\n"); +diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c +index eee45b9220e0..1fd0b684bf5f 100644 +--- a/arch/powerpc/platforms/pseries/hotplug-memory.c ++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c +@@ -29,7 +29,7 @@ static bool rtas_hp_event; + unsigned long pseries_memory_block_size(void) + { + struct device_node *np; +- unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE; ++ u64 memblock_size = MIN_MEMORY_BLOCK_SIZE; + struct resource r; + + np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); +diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c +index 547fd13e4f8e..35d035d68dce 100644 +--- a/arch/powerpc/platforms/pseries/pci_dlpar.c ++++ b/arch/powerpc/platforms/pseries/pci_dlpar.c +@@ -66,6 +66,7 @@ EXPORT_SYMBOL_GPL(init_phb_dynamic); + int remove_phb_dynamic(struct pci_controller *phb) + { + struct pci_bus *b = phb->bus; ++ struct pci_host_bridge *host_bridge = to_pci_host_bridge(b->bridge); + struct resource *res; + int rc, i; + +@@ -92,7 +93,8 @@ int remove_phb_dynamic(struct pci_controller *phb) + /* Remove the PCI bus and unregister the bridge device from sysfs */ + phb->bus = NULL; + pci_remove_bus(b); +- device_unregister(b->bridge); ++ host_bridge->bus = NULL; ++ device_unregister(&host_bridge->dev); + + /* Now release the IO resource */ + if (res->flags & IORESOURCE_IO) +diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c +index 8799d8a83d56..3d6b372fab3f 100644 +--- a/arch/powerpc/platforms/pseries/ras.c ++++ b/arch/powerpc/platforms/pseries/ras.c +@@ -101,7 
+101,6 @@ static void handle_system_shutdown(char event_modifier) + case EPOW_SHUTDOWN_ON_UPS: + pr_emerg("Loss of system power detected. System is running on" + " UPS/battery. Check RTAS error log for details\n"); +- orderly_poweroff(true); + break; + + case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS: +@@ -311,10 +310,11 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id) + /* + * Some versions of FWNMI place the buffer inside the 4kB page starting at + * 0x7000. Other versions place it inside the rtas buffer. We check both. ++ * Minimum size of the buffer is 16 bytes. + */ + #define VALID_FWNMI_BUFFER(A) \ +- ((((A) >= 0x7000) && ((A) < 0x7ff0)) || \ +- (((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16)))) ++ ((((A) >= 0x7000) && ((A) <= 0x8000 - 16)) || \ ++ (((A) >= rtas.base) && ((A) <= (rtas.base + rtas.size - 16)))) + + /* + * Get the error information for errors coming through the +diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c +index 31ca557af60b..262b8c5e1b9d 100644 +--- a/arch/powerpc/platforms/pseries/rng.c ++++ b/arch/powerpc/platforms/pseries/rng.c +@@ -40,6 +40,7 @@ static __init int rng_init(void) + + ppc_md.get_random_seed = pseries_get_random_long; + ++ of_node_put(dn); + return 0; + } + machine_subsys_initcall(pseries, rng_init); +diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c +index 30782859d898..bb7471138862 100644 +--- a/arch/powerpc/platforms/pseries/setup.c ++++ b/arch/powerpc/platforms/pseries/setup.c +@@ -535,6 +535,14 @@ void pseries_setup_rfi_flush(void) + + setup_rfi_flush(types, enable); + setup_count_cache_flush(); ++ ++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && ++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY); ++ setup_entry_flush(enable); ++ ++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && ++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS); ++ setup_uaccess_flush(enable); + } + + static void __init pSeries_setup_arch(void) +diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c +index e76aefae2aa2..0a0e0c8256f6 100644 +--- a/arch/powerpc/platforms/pseries/suspend.c ++++ b/arch/powerpc/platforms/pseries/suspend.c +@@ -224,7 +224,6 @@ static struct bus_type suspend_subsys = { + + static const struct platform_suspend_ops pseries_suspend_ops = { + .valid = suspend_valid_only_mem, +- .begin = pseries_suspend_begin, + .prepare_late = pseries_prepare_late, + .enter = pseries_suspend_enter, + }; +diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c +index 47fb336741d4..e26552708a28 100644 +--- a/arch/powerpc/sysdev/mpic_msgr.c ++++ b/arch/powerpc/sysdev/mpic_msgr.c +@@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev) + + /* IO map the message register block. 
*/ + of_address_to_resource(np, 0, &rsrc); +- msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc)); ++ msgr_block_addr = devm_ioremap(&dev->dev, rsrc.start, resource_size(&rsrc)); + if (!msgr_block_addr) { + dev_err(&dev->dev, "Failed to iomap MPIC message registers"); + return -EFAULT; +diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c +index e7fa26c4ff73..d3a0322ee327 100644 +--- a/arch/powerpc/sysdev/xics/icp-hv.c ++++ b/arch/powerpc/sysdev/xics/icp-hv.c +@@ -179,6 +179,7 @@ int icp_hv_init(void) + + icp_ops = &icp_hv_ops; + ++ of_node_put(np); + return 0; + } + +diff --git a/arch/powerpc/xmon/nonstdio.c b/arch/powerpc/xmon/nonstdio.c +index d00123421e00..eefe1b94e0aa 100644 +--- a/arch/powerpc/xmon/nonstdio.c ++++ b/arch/powerpc/xmon/nonstdio.c +@@ -182,7 +182,7 @@ void xmon_printf(const char *format, ...) + + if (n && rc == 0) { + /* No udbg hooks, fallback to printk() - dangerous */ +- printk("%s", xmon_outbuf); ++ pr_cont("%s", xmon_outbuf); + } + } + +diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig +index 1c4a595e8224..1dff52abc786 100644 +--- a/arch/s390/Kconfig ++++ b/arch/s390/Kconfig +@@ -833,7 +833,7 @@ config CMM_IUCV + config APPLDATA_BASE + def_bool n + prompt "Linux - VM Monitor Stream, base infrastructure" +- depends on PROC_FS ++ depends on PROC_SYSCTL + help + This provides a kernel interface for creating and updating z/VM APPLDATA + monitor records. The monitor records are updated at certain time +diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h +index 836c56290499..6dd874d5ba7b 100644 +--- a/arch/s390/include/asm/ftrace.h ++++ b/arch/s390/include/asm/ftrace.h +@@ -19,6 +19,7 @@ void ftrace_caller(void); + + extern char ftrace_graph_caller_end; + extern unsigned long ftrace_plt; ++extern void *ftrace_func; + + struct dyn_arch_ftrace { }; + +diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h +index 5792590d0e7c..0b3c26e6930d 100644 +--- a/arch/s390/include/asm/kvm_host.h ++++ b/arch/s390/include/asm/kvm_host.h +@@ -32,12 +32,12 @@ + #define KVM_USER_MEM_SLOTS 32 + + /* +- * These seem to be used for allocating ->chip in the routing table, +- * which we don't use. 4096 is an out-of-thin-air value. If we need +- * to look at ->chip later on, we'll need to revisit this. ++ * These seem to be used for allocating ->chip in the routing table, which we ++ * don't use. 1 is as small as we can get to reduce the needed memory. If we ++ * need to look at ->chip later on, we'll need to revisit this. 
+ */ + #define KVM_NR_IRQCHIPS 1 +-#define KVM_IRQCHIP_NUM_PINS 4096 ++#define KVM_IRQCHIP_NUM_PINS 1 + #define KVM_HALT_POLL_NS_DEFAULT 80000 + + /* s390-specific vcpu->requests bit members */ +diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h +index 90240dfef76a..5889c1ed84c4 100644 +--- a/arch/s390/include/asm/percpu.h ++++ b/arch/s390/include/asm/percpu.h +@@ -28,7 +28,7 @@ + typedef typeof(pcp) pcp_op_T__; \ + pcp_op_T__ old__, new__, prev__; \ + pcp_op_T__ *ptr__; \ +- preempt_disable(); \ ++ preempt_disable_notrace(); \ + ptr__ = raw_cpu_ptr(&(pcp)); \ + prev__ = *ptr__; \ + do { \ +@@ -36,7 +36,7 @@ + new__ = old__ op (val); \ + prev__ = cmpxchg(ptr__, old__, new__); \ + } while (prev__ != old__); \ +- preempt_enable(); \ ++ preempt_enable_notrace(); \ + new__; \ + }) + +@@ -67,7 +67,7 @@ + typedef typeof(pcp) pcp_op_T__; \ + pcp_op_T__ val__ = (val); \ + pcp_op_T__ old__, *ptr__; \ +- preempt_disable(); \ ++ preempt_disable_notrace(); \ + ptr__ = raw_cpu_ptr(&(pcp)); \ + if (__builtin_constant_p(val__) && \ + ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \ +@@ -83,7 +83,7 @@ + : [val__] "d" (val__) \ + : "cc"); \ + } \ +- preempt_enable(); \ ++ preempt_enable_notrace(); \ + } + + #define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int) +@@ -94,14 +94,14 @@ + typedef typeof(pcp) pcp_op_T__; \ + pcp_op_T__ val__ = (val); \ + pcp_op_T__ old__, *ptr__; \ +- preempt_disable(); \ ++ preempt_disable_notrace(); \ + ptr__ = raw_cpu_ptr(&(pcp)); \ + asm volatile( \ + op " %[old__],%[val__],%[ptr__]\n" \ + : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ + : [val__] "d" (val__) \ + : "cc"); \ +- preempt_enable(); \ ++ preempt_enable_notrace(); \ + old__ + val__; \ + }) + +@@ -113,14 +113,14 @@ + typedef typeof(pcp) pcp_op_T__; \ + pcp_op_T__ val__ = (val); \ + pcp_op_T__ old__, *ptr__; \ +- preempt_disable(); \ ++ preempt_disable_notrace(); \ + ptr__ = raw_cpu_ptr(&(pcp)); \ + asm volatile( \ + op " %[old__],%[val__],%[ptr__]\n" \ + : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ + : [val__] "d" (val__) \ + : "cc"); \ +- preempt_enable(); \ ++ preempt_enable_notrace(); \ + } + + #define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan") +@@ -135,10 +135,10 @@ + typedef typeof(pcp) pcp_op_T__; \ + pcp_op_T__ ret__; \ + pcp_op_T__ *ptr__; \ +- preempt_disable(); \ ++ preempt_disable_notrace(); \ + ptr__ = raw_cpu_ptr(&(pcp)); \ + ret__ = cmpxchg(ptr__, oval, nval); \ +- preempt_enable(); \ ++ preempt_enable_notrace(); \ + ret__; \ + }) + +@@ -151,10 +151,10 @@ + ({ \ + typeof(pcp) *ptr__; \ + typeof(pcp) ret__; \ +- preempt_disable(); \ ++ preempt_disable_notrace(); \ + ptr__ = raw_cpu_ptr(&(pcp)); \ + ret__ = xchg(ptr__, nval); \ +- preempt_enable(); \ ++ preempt_enable_notrace(); \ + ret__; \ + }) + +@@ -170,11 +170,11 @@ + typeof(pcp1) *p1__; \ + typeof(pcp2) *p2__; \ + int ret__; \ +- preempt_disable(); \ ++ preempt_disable_notrace(); \ + p1__ = raw_cpu_ptr(&(pcp1)); \ + p2__ = raw_cpu_ptr(&(pcp2)); \ + ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \ +- preempt_enable(); \ ++ preempt_enable_notrace(); \ + ret__; \ + }) + +diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h +index 6bc941be6921..166fbd74e316 100644 +--- a/arch/s390/include/asm/syscall.h ++++ b/arch/s390/include/asm/syscall.h +@@ -41,7 +41,17 @@ static inline void syscall_rollback(struct task_struct *task, + static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) + { +- return 
IS_ERR_VALUE(regs->gprs[2]) ? regs->gprs[2] : 0; ++ unsigned long error = regs->gprs[2]; ++#ifdef CONFIG_COMPAT ++ if (test_tsk_thread_flag(task, TIF_31BIT)) { ++ /* ++ * Sign-extend the value so (int)-EFOO becomes (long)-EFOO ++ * and will match correctly in comparisons. ++ */ ++ error = (long)(int)error; ++ } ++#endif ++ return IS_ERR_VALUE(error) ? error : 0; + } + + static inline long syscall_get_return_value(struct task_struct *task, +diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c +index 7f48e568ac64..540912666740 100644 +--- a/arch/s390/kernel/cpcmd.c ++++ b/arch/s390/kernel/cpcmd.c +@@ -37,10 +37,12 @@ static int diag8_noresponse(int cmdlen) + + static int diag8_response(int cmdlen, char *response, int *rlen) + { ++ unsigned long _cmdlen = cmdlen | 0x40000000L; ++ unsigned long _rlen = *rlen; + register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf; + register unsigned long reg3 asm ("3") = (addr_t) response; +- register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L; +- register unsigned long reg5 asm ("5") = *rlen; ++ register unsigned long reg4 asm ("4") = _cmdlen; ++ register unsigned long reg5 asm ("5") = _rlen; + + asm volatile( + " sam31\n" +diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c +index f9dca1aed9a4..17b3e82415f8 100644 +--- a/arch/s390/kernel/dis.c ++++ b/arch/s390/kernel/dis.c +@@ -2026,7 +2026,7 @@ void show_code(struct pt_regs *regs) + + void print_fn_code(unsigned char *code, unsigned long len) + { +- char buffer[64], *ptr; ++ char buffer[128], *ptr; + int opsize, i; + + while (len) { +diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c +index a651c2bc94ef..f862cc27fe98 100644 +--- a/arch/s390/kernel/early.c ++++ b/arch/s390/kernel/early.c +@@ -288,6 +288,8 @@ static noinline __init void setup_lowcore_early(void) + psw_t psw; + + psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA; ++ if (IS_ENABLED(CONFIG_KASAN)) ++ psw.mask |= PSW_MASK_DAT; + psw.addr = (unsigned long) s390_base_ext_handler; + S390_lowcore.external_new_psw = psw; + psw.addr = (unsigned long) s390_base_pgm_handler; +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S +index 771cfd2e1e6d..708b8ee604d0 100644 +--- a/arch/s390/kernel/entry.S ++++ b/arch/s390/kernel/entry.S +@@ -902,6 +902,7 @@ ENTRY(ext_int_handler) + * Load idle PSW. The second "half" of this function is in .Lcleanup_idle. 
+ */ + ENTRY(psw_idle) ++ stg %r14,(__SF_GPRS+8*8)(%r15) + stg %r3,__SF_EMPTY(%r15) + larl %r1,.Lpsw_idle_lpsw+4 + stg %r1,__SF_EMPTY+8(%r15) +diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c +index 60a8a4e207ed..2ed98bd07415 100644 +--- a/arch/s390/kernel/ftrace.c ++++ b/arch/s390/kernel/ftrace.c +@@ -55,6 +55,7 @@ + * > brasl %r0,ftrace_caller # offset 0 + */ + ++void *ftrace_func __read_mostly = ftrace_stub; + unsigned long ftrace_plt; + + static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn) +@@ -164,6 +165,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) + + int ftrace_update_ftrace_func(ftrace_func_t func) + { ++ ftrace_func = func; + return 0; + } + +diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S +index e9df35249f9f..26f4e758c412 100644 +--- a/arch/s390/kernel/mcount.S ++++ b/arch/s390/kernel/mcount.S +@@ -59,13 +59,13 @@ ENTRY(ftrace_caller) + #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES + aghik %r2,%r0,-MCOUNT_INSN_SIZE + lgrl %r4,function_trace_op +- lgrl %r1,ftrace_trace_function ++ lgrl %r1,ftrace_func + #else + lgr %r2,%r0 + aghi %r2,-MCOUNT_INSN_SIZE + larl %r4,function_trace_op + lg %r4,0(%r4) +- larl %r1,ftrace_trace_function ++ larl %r1,ftrace_func + lg %r1,0(%r1) + #endif + lgr %r3,%r14 +diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c +index c62eb09b2ba7..427b70397fcd 100644 +--- a/arch/s390/kernel/perf_cpum_sf.c ++++ b/arch/s390/kernel/perf_cpum_sf.c +@@ -1663,4 +1663,4 @@ static int __init init_cpum_sampling_pmu(void) + return err; + } + arch_initcall(init_cpum_sampling_pmu); +-core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640); ++core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0644); +diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c +index fc2974b929c3..ee757d6f585e 100644 +--- a/arch/s390/kernel/ptrace.c ++++ b/arch/s390/kernel/ptrace.c +@@ -308,6 +308,25 @@ static inline void __poke_user_per(struct task_struct *child, + child->thread.per_user.end = data; + } + ++static void fixup_int_code(struct task_struct *child, addr_t data) ++{ ++ struct pt_regs *regs = task_pt_regs(child); ++ int ilc = regs->int_code >> 16; ++ u16 insn; ++ ++ if (ilc > 6) ++ return; ++ ++ if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16), ++ &insn, sizeof(insn), FOLL_FORCE) != sizeof(insn)) ++ return; ++ ++ /* double check that tracee stopped on svc instruction */ ++ if ((insn >> 8) != 0xa) ++ return; ++ ++ regs->int_code = 0x20000 | (data & 0xffff); ++} + /* + * Write a word to the user area of a process at location addr. This + * operation does have an additional problem compared to peek_user. 
+@@ -319,7 +338,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) + struct user *dummy = NULL; + addr_t offset; + ++ + if (addr < (addr_t) &dummy->regs.acrs) { ++ struct pt_regs *regs = task_pt_regs(child); + /* + * psw and gprs are stored on the stack + */ +@@ -337,7 +358,11 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) + /* Invalid addressing mode bits */ + return -EINVAL; + } +- *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; ++ ++ if (test_pt_regs_flag(regs, PIF_SYSCALL) && ++ addr == offsetof(struct user, regs.gprs[2])) ++ fixup_int_code(child, data); ++ *(addr_t *)((addr_t) ®s->psw + addr) = data; + + } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { + /* +@@ -703,6 +728,10 @@ static int __poke_user_compat(struct task_struct *child, + regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | + (__u64)(tmp & PSW32_ADDR_AMODE); + } else { ++ ++ if (test_pt_regs_flag(regs, PIF_SYSCALL) && ++ addr == offsetof(struct compat_user, regs.gprs[2])) ++ fixup_int_code(child, data); + /* gpr 0-15 */ + *(__u32*)((addr_t) ®s->psw + addr*2 + 4) = tmp; + } +diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c +index a559908d180e..9939879f5f25 100644 +--- a/arch/s390/kernel/setup.c ++++ b/arch/s390/kernel/setup.c +@@ -137,7 +137,7 @@ static void __init set_preferred_console(void) + else if (CONSOLE_IS_3270) + add_preferred_console("tty3270", 0, NULL); + else if (CONSOLE_IS_VT220) +- add_preferred_console("ttyS", 1, NULL); ++ add_preferred_console("ttysclp", 0, NULL); + else if (CONSOLE_IS_HVC) + add_preferred_console("hvc", 0, NULL); + } +@@ -529,7 +529,7 @@ static struct notifier_block kdump_mem_nb = { + /* + * Make sure that the area behind memory_end is protected + */ +-static void reserve_memory_end(void) ++static void __init reserve_memory_end(void) + { + #ifdef CONFIG_CRASH_DUMP + if (ipl_info.type == IPL_TYPE_FCP_DUMP && +@@ -547,7 +547,7 @@ static void reserve_memory_end(void) + /* + * Make sure that oldmem, where the dump is stored, is protected + */ +-static void reserve_oldmem(void) ++static void __init reserve_oldmem(void) + { + #ifdef CONFIG_CRASH_DUMP + if (OLDMEM_BASE) +@@ -559,7 +559,7 @@ static void reserve_oldmem(void) + /* + * Make sure that oldmem, where the dump is stored, is protected + */ +-static void remove_oldmem(void) ++static void __init remove_oldmem(void) + { + #ifdef CONFIG_CRASH_DUMP + if (OLDMEM_BASE) +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c +index cba8e56cd63d..54eb8fe95212 100644 +--- a/arch/s390/kernel/smp.c ++++ b/arch/s390/kernel/smp.c +@@ -727,7 +727,7 @@ static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail, + static int __smp_rescan_cpus(struct sclp_core_info *info, bool early) + { + struct sclp_core_entry *core; +- cpumask_t avail; ++ static cpumask_t avail; + bool configured; + u16 core_id; + int nr, i; +diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c +index 4a0c5bce3552..8a22def411c5 100644 +--- a/arch/s390/mm/hugetlbpage.c ++++ b/arch/s390/mm/hugetlbpage.c +@@ -111,7 +111,7 @@ static inline pte_t __rste_to_pte(unsigned long rste) + _PAGE_YOUNG); + #ifdef CONFIG_MEM_SOFT_DIRTY + pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY, +- _PAGE_DIRTY); ++ _PAGE_SOFT_DIRTY); + #endif + } else + pte_val(pte) = _PAGE_INVALID; +diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c +index 9b15a1dc6628..ed58ebab96cd 100644 +--- a/arch/s390/net/bpf_jit_comp.c ++++ b/arch/s390/net/bpf_jit_comp.c 
+@@ -116,7 +116,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) + { + u32 r1 = reg2hex[b1]; + +- if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15) ++ if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1]) + jit->seen_reg[r1] = 1; + } + +diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c +index f1147caebacf..af69fb7fef7c 100644 +--- a/arch/sh/boards/mach-landisk/setup.c ++++ b/arch/sh/boards/mach-landisk/setup.c +@@ -85,6 +85,9 @@ device_initcall(landisk_devices_setup); + + static void __init landisk_setup(char **cmdline_p) + { ++ /* I/O port identity mapping */ ++ __set_io_port_base(0); ++ + /* LED ON */ + __raw_writeb(__raw_readb(PA_LED) | 0x03, PA_LED); + +diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig +index 78bc97b1d027..ac834e9e0e0a 100644 +--- a/arch/sh/drivers/dma/Kconfig ++++ b/arch/sh/drivers/dma/Kconfig +@@ -62,8 +62,7 @@ config PVR2_DMA + + config G2_DMA + tristate "G2 Bus DMA support" +- depends on SH_DREAMCAST +- select SH_DMA_API ++ depends on SH_DREAMCAST && SH_DMA_API + help + This enables support for the DMA controller for the Dreamcast's + G2 bus. Drivers that want this will generally enable this on +diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S +index 28cc61216b64..ed5b758c650d 100644 +--- a/arch/sh/kernel/entry-common.S ++++ b/arch/sh/kernel/entry-common.S +@@ -203,7 +203,7 @@ syscall_trace_entry: + mov.l @(OFF_R7,r15), r7 ! arg3 + mov.l @(OFF_R3,r15), r3 ! syscall_nr + ! +- mov.l 2f, r10 ! Number of syscalls ++ mov.l 6f, r10 ! Number of syscalls + cmp/hs r10, r3 + bf syscall_call + mov #-ENOSYS, r0 +@@ -357,7 +357,7 @@ ENTRY(system_call) + tst r9, r8 + bf syscall_trace_entry + ! +- mov.l 2f, r8 ! Number of syscalls ++ mov.l 6f, r8 ! Number of syscalls + cmp/hs r8, r3 + bt syscall_badsys + ! 
+@@ -396,7 +396,7 @@ syscall_exit: + #if !defined(CONFIG_CPU_SH2) + 1: .long TRA + #endif +-2: .long NR_syscalls ++6: .long NR_syscalls + 3: .long sys_call_table + 7: .long do_syscall_trace_enter + 8: .long do_syscall_trace_leave +diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig +index cef42d4be292..f6d9c44b32df 100644 +--- a/arch/sparc/Kconfig ++++ b/arch/sparc/Kconfig +@@ -562,7 +562,7 @@ config COMPAT + bool + depends on SPARC64 + default y +- select COMPAT_BINFMT_ELF ++ select COMPAT_BINFMT_ELF if BINFMT_ELF + select HAVE_UID16 + select ARCH_WANT_OLD_COMPAT_IPC + select COMPAT_OLD_SIGACTION +diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c +index a331fdc11a2c..2f4316c14266 100644 +--- a/arch/sparc/kernel/ptrace_32.c ++++ b/arch/sparc/kernel/ptrace_32.c +@@ -45,82 +45,79 @@ enum sparc_regset { + REGSET_FP, + }; + ++static int regwindow32_get(struct task_struct *target, ++ const struct pt_regs *regs, ++ u32 *uregs) ++{ ++ unsigned long reg_window = regs->u_regs[UREG_I6]; ++ int size = 16 * sizeof(u32); ++ ++ if (target == current) { ++ if (copy_from_user(uregs, (void __user *)reg_window, size)) ++ return -EFAULT; ++ } else { ++ if (access_process_vm(target, reg_window, uregs, size, ++ FOLL_FORCE) != size) ++ return -EFAULT; ++ } ++ return 0; ++} ++ ++static int regwindow32_set(struct task_struct *target, ++ const struct pt_regs *regs, ++ u32 *uregs) ++{ ++ unsigned long reg_window = regs->u_regs[UREG_I6]; ++ int size = 16 * sizeof(u32); ++ ++ if (target == current) { ++ if (copy_to_user((void __user *)reg_window, uregs, size)) ++ return -EFAULT; ++ } else { ++ if (access_process_vm(target, reg_window, uregs, size, ++ FOLL_FORCE | FOLL_WRITE) != size) ++ return -EFAULT; ++ } ++ return 0; ++} ++ + static int genregs32_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) + { + const struct pt_regs *regs = target->thread.kregs; +- unsigned long __user *reg_window; +- unsigned long *k = kbuf; +- unsigned long __user *u = ubuf; +- unsigned long reg; ++ u32 uregs[16]; ++ int ret; + + if (target == current) + flush_user_windows(); + +- pos /= sizeof(reg); +- count /= sizeof(reg); +- +- if (kbuf) { +- for (; count > 0 && pos < 16; count--) +- *k++ = regs->u_regs[pos++]; +- +- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; +- reg_window -= 16; +- for (; count > 0 && pos < 32; count--) { +- if (get_user(*k++, ®_window[pos++])) +- return -EFAULT; +- } +- } else { +- for (; count > 0 && pos < 16; count--) { +- if (put_user(regs->u_regs[pos++], u++)) +- return -EFAULT; +- } +- +- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; +- reg_window -= 16; +- for (; count > 0 && pos < 32; count--) { +- if (get_user(reg, ®_window[pos++]) || +- put_user(reg, u++)) +- return -EFAULT; +- } +- } +- while (count > 0) { +- switch (pos) { +- case 32: /* PSR */ +- reg = regs->psr; +- break; +- case 33: /* PC */ +- reg = regs->pc; +- break; +- case 34: /* NPC */ +- reg = regs->npc; +- break; +- case 35: /* Y */ +- reg = regs->y; +- break; +- case 36: /* WIM */ +- case 37: /* TBR */ +- reg = 0; +- break; +- default: +- goto finish; +- } ++ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, ++ regs->u_regs, ++ 0, 16 * sizeof(u32)); ++ if (ret || !count) ++ return ret; + +- if (kbuf) +- *k++ = reg; +- else if (put_user(reg, u++)) ++ if (pos < 32 * sizeof(u32)) { ++ if (regwindow32_get(target, regs, uregs)) + return -EFAULT; +- pos++; +- count--; ++ ret = 
user_regset_copyout(&pos, &count, &kbuf, &ubuf, ++ uregs, ++ 16 * sizeof(u32), 32 * sizeof(u32)); ++ if (ret || !count) ++ return ret; + } +-finish: +- pos *= sizeof(reg); +- count *= sizeof(reg); + +- return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, +- 38 * sizeof(reg), -1); ++ uregs[0] = regs->psr; ++ uregs[1] = regs->pc; ++ uregs[2] = regs->npc; ++ uregs[3] = regs->y; ++ uregs[4] = 0; /* WIM */ ++ uregs[5] = 0; /* TBR */ ++ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, ++ uregs, ++ 32 * sizeof(u32), 38 * sizeof(u32)); + } + + static int genregs32_set(struct task_struct *target, +@@ -129,82 +126,58 @@ static int genregs32_set(struct task_struct *target, + const void *kbuf, const void __user *ubuf) + { + struct pt_regs *regs = target->thread.kregs; +- unsigned long __user *reg_window; +- const unsigned long *k = kbuf; +- const unsigned long __user *u = ubuf; +- unsigned long reg; ++ u32 uregs[16]; ++ u32 psr; ++ int ret; + + if (target == current) + flush_user_windows(); + +- pos /= sizeof(reg); +- count /= sizeof(reg); +- +- if (kbuf) { +- for (; count > 0 && pos < 16; count--) +- regs->u_regs[pos++] = *k++; +- +- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; +- reg_window -= 16; +- for (; count > 0 && pos < 32; count--) { +- if (put_user(*k++, ®_window[pos++])) +- return -EFAULT; +- } +- } else { +- for (; count > 0 && pos < 16; count--) { +- if (get_user(reg, u++)) +- return -EFAULT; +- regs->u_regs[pos++] = reg; +- } +- +- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6]; +- reg_window -= 16; +- for (; count > 0 && pos < 32; count--) { +- if (get_user(reg, u++) || +- put_user(reg, ®_window[pos++])) +- return -EFAULT; +- } +- } +- while (count > 0) { +- unsigned long psr; ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ regs->u_regs, ++ 0, 16 * sizeof(u32)); ++ if (ret || !count) ++ return ret; + +- if (kbuf) +- reg = *k++; +- else if (get_user(reg, u++)) ++ if (pos < 32 * sizeof(u32)) { ++ if (regwindow32_get(target, regs, uregs)) + return -EFAULT; +- +- switch (pos) { +- case 32: /* PSR */ +- psr = regs->psr; +- psr &= ~(PSR_ICC | PSR_SYSCALL); +- psr |= (reg & (PSR_ICC | PSR_SYSCALL)); +- regs->psr = psr; +- break; +- case 33: /* PC */ +- regs->pc = reg; +- break; +- case 34: /* NPC */ +- regs->npc = reg; +- break; +- case 35: /* Y */ +- regs->y = reg; +- break; +- case 36: /* WIM */ +- case 37: /* TBR */ +- break; +- default: +- goto finish; +- } +- +- pos++; +- count--; ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ uregs, ++ 16 * sizeof(u32), 32 * sizeof(u32)); ++ if (ret) ++ return ret; ++ if (regwindow32_set(target, regs, uregs)) ++ return -EFAULT; ++ if (!count) ++ return 0; + } +-finish: +- pos *= sizeof(reg); +- count *= sizeof(reg); +- ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ &psr, ++ 32 * sizeof(u32), 33 * sizeof(u32)); ++ if (ret) ++ return ret; ++ regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) | ++ (psr & (PSR_ICC | PSR_SYSCALL)); ++ if (!count) ++ return 0; ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ ®s->pc, ++ 33 * sizeof(u32), 34 * sizeof(u32)); ++ if (ret || !count) ++ return ret; ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ ®s->npc, ++ 34 * sizeof(u32), 35 * sizeof(u32)); ++ if (ret || !count) ++ return ret; ++ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ++ ®s->y, ++ 35 * sizeof(u32), 36 * sizeof(u32)); ++ if (ret || !count) ++ return ret; + return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, +- 38 * sizeof(reg), -1); ++ 36 * 
sizeof(u32), 38 * sizeof(u32)); + } + + static int fpregs32_get(struct task_struct *target, +diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c +index 7037ca3b4328..0a89a0546077 100644 +--- a/arch/sparc/kernel/ptrace_64.c ++++ b/arch/sparc/kernel/ptrace_64.c +@@ -533,19 +533,13 @@ static int genregs32_get(struct task_struct *target, + for (; count > 0 && pos < 32; count--) { + if (access_process_vm(target, + (unsigned long) +- ®_window[pos], ++ ®_window[pos++], + ®, sizeof(reg), + FOLL_FORCE) + != sizeof(reg)) + return -EFAULT; +- if (access_process_vm(target, +- (unsigned long) u, +- ®, sizeof(reg), +- FOLL_FORCE | FOLL_WRITE) +- != sizeof(reg)) ++ if (put_user(reg, u++)) + return -EFAULT; +- pos++; +- u++; + } + } + } +@@ -645,12 +639,7 @@ static int genregs32_set(struct task_struct *target, + } + } else { + for (; count > 0 && pos < 32; count--) { +- if (access_process_vm(target, +- (unsigned long) +- u, +- ®, sizeof(reg), +- FOLL_FORCE) +- != sizeof(reg)) ++ if (get_user(reg, u++)) + return -EFAULT; + if (access_process_vm(target, + (unsigned long) +diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c +index ca7cb8e57ab0..b81cdd53d090 100644 +--- a/arch/sparc/kernel/smp_64.c ++++ b/arch/sparc/kernel/smp_64.c +@@ -1034,38 +1034,9 @@ void smp_fetch_global_pmu(void) + * are flush_tlb_*() routines, and these run after flush_cache_*() + * which performs the flushw. + * +- * The SMP TLB coherency scheme we use works as follows: +- * +- * 1) mm->cpu_vm_mask is a bit mask of which cpus an address +- * space has (potentially) executed on, this is the heuristic +- * we use to avoid doing cross calls. +- * +- * Also, for flushing from kswapd and also for clones, we +- * use cpu_vm_mask as the list of cpus to make run the TLB. +- * +- * 2) TLB context numbers are shared globally across all processors +- * in the system, this allows us to play several games to avoid +- * cross calls. +- * +- * One invariant is that when a cpu switches to a process, and +- * that processes tsk->active_mm->cpu_vm_mask does not have the +- * current cpu's bit set, that tlb context is flushed locally. +- * +- * If the address space is non-shared (ie. mm->count == 1) we avoid +- * cross calls when we want to flush the currently running process's +- * tlb state. This is done by clearing all cpu bits except the current +- * processor's in current->mm->cpu_vm_mask and performing the +- * flush locally only. This will force any subsequent cpus which run +- * this task to flush the context from the local tlb if the process +- * migrates to another cpu (again). +- * +- * 3) For shared address spaces (threads) and swapping we bite the +- * bullet for most cases and perform the cross call (but only to +- * the cpus listed in cpu_vm_mask). +- * +- * The performance gain from "optimizing" away the cross call for threads is +- * questionable (in theory the big win for threads is the massive sharing of +- * address space state across processors). ++ * mm->cpu_vm_mask is a bit mask of which cpus an address ++ * space has (potentially) executed on, this is the heuristic ++ * we use to limit cross calls. 
+ */ + + /* This currently is only used by the hugetlb arch pre-fault +@@ -1075,18 +1046,13 @@ void smp_fetch_global_pmu(void) + void smp_flush_tlb_mm(struct mm_struct *mm) + { + u32 ctx = CTX_HWBITS(mm->context); +- int cpu = get_cpu(); + +- if (atomic_read(&mm->mm_users) == 1) { +- cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); +- goto local_flush_and_out; +- } ++ get_cpu(); + + smp_cross_call_masked(&xcall_flush_tlb_mm, + ctx, 0, 0, + mm_cpumask(mm)); + +-local_flush_and_out: + __flush_tlb_mm(ctx, SECONDARY_CONTEXT); + + put_cpu(); +@@ -1109,17 +1075,15 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long + { + u32 ctx = CTX_HWBITS(mm->context); + struct tlb_pending_info info; +- int cpu = get_cpu(); ++ ++ get_cpu(); + + info.ctx = ctx; + info.nr = nr; + info.vaddrs = vaddrs; + +- if (mm == current->mm && atomic_read(&mm->mm_users) == 1) +- cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); +- else +- smp_call_function_many(mm_cpumask(mm), tlb_pending_func, +- &info, 1); ++ smp_call_function_many(mm_cpumask(mm), tlb_pending_func, ++ &info, 1); + + __flush_tlb_pending(ctx, nr, vaddrs); + +@@ -1129,14 +1093,13 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long + void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) + { + unsigned long context = CTX_HWBITS(mm->context); +- int cpu = get_cpu(); + +- if (mm == current->mm && atomic_read(&mm->mm_users) == 1) +- cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); +- else +- smp_cross_call_masked(&xcall_flush_tlb_page, +- context, vaddr, 0, +- mm_cpumask(mm)); ++ get_cpu(); ++ ++ smp_cross_call_masked(&xcall_flush_tlb_page, ++ context, vaddr, 0, ++ mm_cpumask(mm)); ++ + __flush_tlb_page(context, vaddr); + + put_cpu(); +diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S +index bb539b42b088..992db8a0f7c9 100644 +--- a/arch/sparc/lib/memset.S ++++ b/arch/sparc/lib/memset.S +@@ -140,6 +140,7 @@ __bzero: + ZERO_LAST_BLOCKS(%o0, 0x48, %g2) + ZERO_LAST_BLOCKS(%o0, 0x08, %g2) + 13: ++ EXT(12b, 13b, 21f) + be 8f + andcc %o1, 4, %g0 + +diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c +index 3fd7c3efdb18..feb7f5ab4084 100644 +--- a/arch/um/drivers/chan_user.c ++++ b/arch/um/drivers/chan_user.c +@@ -256,7 +256,8 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out, + goto out_close; + } + +- if (os_set_fd_block(*fd_out, 0)) { ++ err = os_set_fd_block(*fd_out, 0); ++ if (err) { + printk(UM_KERN_ERR "winch_tramp: failed to set thread_fd " + "non-blocking.\n"); + goto out_close; +diff --git a/arch/um/drivers/slip_user.c b/arch/um/drivers/slip_user.c +index 0d6b66c64a81..76d155631c5d 100644 +--- a/arch/um/drivers/slip_user.c ++++ b/arch/um/drivers/slip_user.c +@@ -145,7 +145,8 @@ static int slip_open(void *data) + } + sfd = err; + +- if (set_up_tty(sfd)) ++ err = set_up_tty(sfd); ++ if (err) + goto out_close2; + + pri->slave = sfd; +diff --git a/arch/um/drivers/xterm.c b/arch/um/drivers/xterm.c +index 20e30be44795..e3b422ebce09 100644 +--- a/arch/um/drivers/xterm.c ++++ b/arch/um/drivers/xterm.c +@@ -18,6 +18,7 @@ + struct xterm_chan { + int pid; + int helper_pid; ++ int chan_fd; + char *title; + int device; + int raw; +@@ -33,6 +34,7 @@ static void *xterm_init(char *str, int device, const struct chan_opts *opts) + return NULL; + *data = ((struct xterm_chan) { .pid = -1, + .helper_pid = -1, ++ .chan_fd = -1, + .device = device, + .title = opts->xterm_title, + .raw = opts->raw } ); +@@ -149,6 +151,7 @@ static int xterm_open(int input, int 
output, int primary, void *d, + goto out_kill; + } + ++ data->chan_fd = fd; + new = xterm_fd(fd, &data->helper_pid); + if (new < 0) { + err = new; +@@ -206,6 +209,8 @@ static void xterm_close(int fd, void *d) + os_kill_process(data->helper_pid, 0); + data->helper_pid = -1; + ++ if (data->chan_fd != -1) ++ os_close_file(data->chan_fd); + os_close_file(fd); + } + +diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S +index 4fdbcf958cd5..558e5258dfff 100644 +--- a/arch/um/kernel/dyn.lds.S ++++ b/arch/um/kernel/dyn.lds.S +@@ -6,6 +6,12 @@ OUTPUT_ARCH(ELF_ARCH) + ENTRY(_start) + jiffies = jiffies_64; + ++VERSION { ++ { ++ local: *; ++ }; ++} ++ + SECTIONS + { + PROVIDE (__executable_start = START); +diff --git a/arch/um/kernel/sigio.c b/arch/um/kernel/sigio.c +index b5e0cbb34382..476ded92affa 100644 +--- a/arch/um/kernel/sigio.c ++++ b/arch/um/kernel/sigio.c +@@ -36,14 +36,14 @@ int write_sigio_irq(int fd) + } + + /* These are called from os-Linux/sigio.c to protect its pollfds arrays. */ +-static DEFINE_SPINLOCK(sigio_spinlock); ++static DEFINE_MUTEX(sigio_mutex); + + void sigio_lock(void) + { +- spin_lock(&sigio_spinlock); ++ mutex_lock(&sigio_mutex); + } + + void sigio_unlock(void) + { +- spin_unlock(&sigio_spinlock); ++ mutex_unlock(&sigio_mutex); + } +diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S +index 1840f55ed042..f544b8c13c2e 100644 +--- a/arch/um/kernel/uml.lds.S ++++ b/arch/um/kernel/uml.lds.S +@@ -6,6 +6,12 @@ OUTPUT_ARCH(ELF_ARCH) + ENTRY(_start) + jiffies = jiffies_64; + ++VERSION { ++ { ++ local: *; ++ }; ++} ++ + SECTIONS + { + /* This must contain the right address - not quite the default ELF one.*/ +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index aa22b0d1609e..a99f5509d255 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -495,6 +495,7 @@ config X86_UV + depends on X86_EXTENDED_PLATFORM + depends on NUMA + depends on EFI ++ depends on KEXEC_CORE + depends on X86_X2APIC + depends on PCI + ---help--- +diff --git a/arch/x86/Makefile b/arch/x86/Makefile +index 47be4f5db0fb..b6fa9286b4b0 100644 +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -34,12 +34,13 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -D__KERNEL__ \ + -DDISABLE_BRANCH_PROFILING \ + -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \ + -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ +- -mno-mmx -mno-sse ++ -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none) + + REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding) + REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector) + REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member) + REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4)) ++REALMODE_CFLAGS += $(CLANG_FLAGS) + export REALMODE_CFLAGS + + # BITS is used as extension for files which are available in a 32 bit +@@ -61,6 +62,9 @@ endif + KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow + KBUILD_CFLAGS += $(call cc-option,-mno-avx,) + ++# Intel CET isn't enabled in the kernel ++KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) ++ + ifeq ($(CONFIG_X86_32),y) + BITS := 32 + UTS_MACHINE := i386 +diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile +index 3b16935b22bc..d1df7d2e31b1 100644 +--- a/arch/x86/boot/Makefile ++++ b/arch/x86/boot/Makefile +@@ -87,7 +87,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE + + SETUP_OBJS = $(addprefix $(obj)/,$(setup-y)) + +-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) 
[ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p' ++sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [a-zA-Z] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p' + + quiet_cmd_zoffset = ZOFFSET $@ + cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@ +diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile +index 89b163351e64..7be7acd6a540 100644 +--- a/arch/x86/boot/compressed/Makefile ++++ b/arch/x86/boot/compressed/Makefile +@@ -35,6 +35,8 @@ KBUILD_CFLAGS += -mno-mmx -mno-sse + KBUILD_CFLAGS += $(call cc-option,-ffreestanding) + KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) + KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) ++# Disable relocation relaxation in case the link is not PIE. ++KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no) + + KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ + GCOV_PROFILE := n +diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S +index 7532f6f53677..93f41b4f05ce 100644 +--- a/arch/x86/boot/compressed/head_32.S ++++ b/arch/x86/boot/compressed/head_32.S +@@ -48,16 +48,17 @@ + * Position Independent Executable (PIE) so that linker won't optimize + * R_386_GOT32X relocation to its fixed symbol address. Older + * linkers generate R_386_32 relocations against locally defined symbols, +- * _bss, _ebss, _got and _egot, in PIE. It isn't wrong, just less ++ * _bss, _ebss, _got, _egot and _end, in PIE. It isn't wrong, just less + * optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle + * R_386_32 relocations when relocating the kernel. 
To generate +- * R_386_RELATIVE relocations, we mark _bss, _ebss, _got and _egot as ++ * R_386_RELATIVE relocations, we mark _bss, _ebss, _got, _egot and _end as + * hidden: + */ + .hidden _bss + .hidden _ebss + .hidden _got + .hidden _egot ++ .hidden _end + + __HEAD + ENTRY(startup_32) +diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S +index 3fac2d133e4e..d096bcfcb3f6 100644 +--- a/arch/x86/boot/compressed/head_64.S ++++ b/arch/x86/boot/compressed/head_64.S +@@ -40,6 +40,7 @@ + .hidden _ebss + .hidden _got + .hidden _egot ++ .hidden _end + + __HEAD + .code32 +diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig +index 5fa6ee2c2dde..8b5d3580ae72 100644 +--- a/arch/x86/configs/i386_defconfig ++++ b/arch/x86/configs/i386_defconfig +@@ -217,7 +217,6 @@ CONFIG_FB_MODE_HELPERS=y + CONFIG_FB_TILEBLITTING=y + CONFIG_FB_EFI=y + # CONFIG_LCD_CLASS_DEVICE is not set +-CONFIG_VGACON_SOFT_SCROLLBACK=y + CONFIG_LOGO=y + # CONFIG_LOGO_LINUX_MONO is not set + # CONFIG_LOGO_LINUX_VGA16 is not set +@@ -247,6 +246,7 @@ CONFIG_USB_HIDDEV=y + CONFIG_USB=y + CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + CONFIG_USB_MON=y ++CONFIG_USB_XHCI_HCD=y + CONFIG_USB_EHCI_HCD=y + CONFIG_USB_EHCI_TT_NEWSCHED=y + CONFIG_USB_OHCI_HCD=y +diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig +index 63dae3d5bd63..79dee74cb32f 100644 +--- a/arch/x86/configs/x86_64_cuttlefish_defconfig ++++ b/arch/x86/configs/x86_64_cuttlefish_defconfig +@@ -20,6 +20,7 @@ CONFIG_CGROUP_CPUACCT=y + CONFIG_CGROUP_SCHEDTUNE=y + CONFIG_MEMCG=y + CONFIG_MEMCG_SWAP=y ++CONFIG_BLK_CGROUP=y + CONFIG_CGROUP_SCHED=y + CONFIG_RT_GROUP_SCHED=y + CONFIG_CGROUP_BPF=y +@@ -54,7 +55,6 @@ CONFIG_PREEMPT=y + # CONFIG_MICROCODE is not set + CONFIG_X86_MSR=y + CONFIG_X86_CPUID=y +-CONFIG_KSM=y + CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 + CONFIG_ZSMALLOC=y + # CONFIG_MTRR is not set +@@ -66,7 +66,7 @@ CONFIG_PHYSICAL_START=0x200000 + CONFIG_RANDOMIZE_BASE=y + CONFIG_PHYSICAL_ALIGN=0x1000000 + CONFIG_CMDLINE_BOOL=y +-CONFIG_CMDLINE="console=ttyS0 reboot=p nopti" ++CONFIG_CMDLINE="nopti" + CONFIG_PM_AUTOSLEEP=y + CONFIG_PM_WAKELOCKS=y + CONFIG_PM_WAKELOCKS_LIMIT=0 +diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig +index 7ef4a099defc..ef835d41cdf0 100644 +--- a/arch/x86/configs/x86_64_defconfig ++++ b/arch/x86/configs/x86_64_defconfig +@@ -212,7 +212,6 @@ CONFIG_FB_MODE_HELPERS=y + CONFIG_FB_TILEBLITTING=y + CONFIG_FB_EFI=y + # CONFIG_LCD_CLASS_DEVICE is not set +-CONFIG_VGACON_SOFT_SCROLLBACK=y + CONFIG_LOGO=y + # CONFIG_LOGO_LINUX_MONO is not set + # CONFIG_LOGO_LINUX_VGA16 is not set +@@ -242,6 +241,7 @@ CONFIG_USB_HIDDEV=y + CONFIG_USB=y + CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + CONFIG_USB_MON=y ++CONFIG_USB_XHCI_HCD=y + CONFIG_USB_EHCI_HCD=y + CONFIG_USB_EHCI_TT_NEWSCHED=y + CONFIG_USB_OHCI_HCD=y +diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S +index 657984f10953..cfb1f34fea02 100644 +--- a/arch/x86/entry/entry_64.S ++++ b/arch/x86/entry/entry_64.S +@@ -58,7 +58,7 @@ ENDPROC(native_usergs_sysret64) + + .macro TRACE_IRQS_IRETQ + #ifdef CONFIG_TRACE_IRQFLAGS +- bt $9, EFLAGS(%rsp) /* interrupts off? */ ++ btl $9, EFLAGS(%rsp) /* interrupts off? 
*/ + jnc 1f + TRACE_IRQS_ON + 1: +diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c +index 5f72b473f3ed..f6e57bebbc6b 100644 +--- a/arch/x86/events/amd/ibs.c ++++ b/arch/x86/events/amd/ibs.c +@@ -88,6 +88,7 @@ struct perf_ibs { + u64 max_period; + unsigned long offset_mask[1]; + int offset_max; ++ unsigned int fetch_count_reset_broken : 1; + struct cpu_perf_ibs __percpu *pcpu; + + struct attribute **format_attrs; +@@ -345,11 +346,15 @@ static u64 get_ibs_op_count(u64 config) + { + u64 count = 0; + ++ /* ++ * If the internal 27-bit counter rolled over, the count is MaxCnt ++ * and the lower 7 bits of CurCnt are randomized. ++ * Otherwise CurCnt has the full 27-bit current counter value. ++ */ + if (config & IBS_OP_VAL) +- count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */ +- +- if (ibs_caps & IBS_CAPS_RDWROPCNT) +- count += (config & IBS_OP_CUR_CNT) >> 32; ++ count = (config & IBS_OP_MAX_CNT) << 4; ++ else if (ibs_caps & IBS_CAPS_RDWROPCNT) ++ count = (config & IBS_OP_CUR_CNT) >> 32; + + return count; + } +@@ -374,7 +379,12 @@ perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event, + static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs, + struct hw_perf_event *hwc, u64 config) + { +- wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask); ++ u64 tmp = hwc->config | config; ++ ++ if (perf_ibs->fetch_count_reset_broken) ++ wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask); ++ ++ wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask); + } + + /* +@@ -636,18 +646,24 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) + perf_ibs->offset_max, + offset + 1); + } while (offset < offset_max); ++ /* ++ * Read IbsBrTarget, IbsOpData4, and IbsExtdCtl separately ++ * depending on their availability. ++ * Can't add to offset_max as they are staggered ++ */ + if (event->attr.sample_type & PERF_SAMPLE_RAW) { +- /* +- * Read IbsBrTarget and IbsOpData4 separately +- * depending on their availability. +- * Can't add to offset_max as they are staggered +- */ +- if (ibs_caps & IBS_CAPS_BRNTRGT) { +- rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++); +- size++; ++ if (perf_ibs == &perf_ibs_op) { ++ if (ibs_caps & IBS_CAPS_BRNTRGT) { ++ rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++); ++ size++; ++ } ++ if (ibs_caps & IBS_CAPS_OPDATA4) { ++ rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++); ++ size++; ++ } + } +- if (ibs_caps & IBS_CAPS_OPDATA4) { +- rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++); ++ if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) { ++ rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++); + size++; + } + } +@@ -743,6 +759,13 @@ static __init void perf_event_ibs_init(void) + { + struct attribute **attr = ibs_op_format_attrs; + ++ /* ++ * Some chips fail to reset the fetch count when it is written; instead ++ * they need a 0-1 transition of IbsFetchEn. 
++ */ ++ if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18) ++ perf_ibs_fetch.fetch_count_reset_broken = 1; ++ + perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch"); + + if (ibs_caps & IBS_CAPS_OPCNT) { +diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c +index b28200dea715..2bbf3fba8097 100644 +--- a/arch/x86/events/amd/iommu.c ++++ b/arch/x86/events/amd/iommu.c +@@ -80,12 +80,12 @@ static struct attribute_group amd_iommu_format_group = { + * sysfs events attributes + *---------------------------------------------*/ + struct amd_iommu_event_desc { +- struct kobj_attribute attr; ++ struct device_attribute attr; + const char *event; + }; + +-static ssize_t _iommu_event_show(struct kobject *kobj, +- struct kobj_attribute *attr, char *buf) ++static ssize_t _iommu_event_show(struct device *dev, ++ struct device_attribute *attr, char *buf) + { + struct amd_iommu_event_desc *event = + container_of(attr, struct amd_iommu_event_desc, attr); +diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c +index 72d09340c24d..88ba013d08d4 100644 +--- a/arch/x86/events/intel/cstate.c ++++ b/arch/x86/events/intel/cstate.c +@@ -98,14 +98,14 @@ + MODULE_LICENSE("GPL"); + + #define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format) \ +-static ssize_t __cstate_##_var##_show(struct kobject *kobj, \ +- struct kobj_attribute *attr, \ ++static ssize_t __cstate_##_var##_show(struct device *dev, \ ++ struct device_attribute *attr, \ + char *page) \ + { \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ + } \ +-static struct kobj_attribute format_attr_##_var = \ ++static struct device_attribute format_attr_##_var = \ + __ATTR(_name, 0444, __cstate_##_var##_show, NULL) + + static ssize_t cstate_get_attr_cpumask(struct device *dev, +diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c +index f562ddbeb20c..f39838d78976 100644 +--- a/arch/x86/events/intel/ds.c ++++ b/arch/x86/events/intel/ds.c +@@ -1473,7 +1473,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) + */ + if (!pebs_status && cpuc->pebs_enabled && + !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1))) +- pebs_status = cpuc->pebs_enabled; ++ pebs_status = p->status = cpuc->pebs_enabled; + + bit = find_first_bit((unsigned long *)&pebs_status, + x86_pmu.max_pebs_events); +diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c +index df60b58691e7..1808c57ce161 100644 +--- a/arch/x86/events/intel/pt.c ++++ b/arch/x86/events/intel/pt.c +@@ -1117,7 +1117,7 @@ static int pt_event_addr_filters_validate(struct list_head *filters) + if (!filter->range || !filter->size) + return -EOPNOTSUPP; + +- if (!filter->inode) { ++ if (!filter->path.dentry) { + if (!valid_kernel_ip(filter->offset)) + return -EINVAL; + +@@ -1144,7 +1144,7 @@ static void pt_event_addr_filters_sync(struct perf_event *event) + return; + + list_for_each_entry(filter, &head->list, entry) { +- if (filter->inode && !offs[range]) { ++ if (filter->path.dentry && !offs[range]) { + msr_a = msr_b = 0; + } else { + /* apply the offset */ +diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c +index 4c1b7ea18541..38dae3d1391b 100644 +--- a/arch/x86/events/intel/rapl.c ++++ b/arch/x86/events/intel/rapl.c +@@ -115,18 +115,6 @@ static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = { + * any other bit is reserved + */ + #define RAPL_EVENT_MASK 0xFFULL +- +-#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format) \ +-static ssize_t __rapl_##_var##_show(struct 
kobject *kobj, \ +- struct kobj_attribute *attr, \ +- char *page) \ +-{ \ +- BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ +- return sprintf(page, _format "\n"); \ +-} \ +-static struct kobj_attribute format_attr_##_var = \ +- __ATTR(_name, 0444, __rapl_##_var##_show, NULL) +- + #define RAPL_CNTR_WIDTH 32 + + #define RAPL_EVENT_ATTR_STR(_name, v, str) \ +@@ -548,7 +536,7 @@ static struct attribute_group rapl_pmu_events_group = { + .attrs = NULL, /* patched at runtime */ + }; + +-DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7"); ++PMU_FORMAT_ATTR(event, "config:0-7"); + static struct attribute *rapl_formats_attr[] = { + &format_attr_event.attr, + NULL, +diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c +index 4f365267b12f..9f572bf6c621 100644 +--- a/arch/x86/events/intel/uncore.c ++++ b/arch/x86/events/intel/uncore.c +@@ -90,8 +90,8 @@ struct pci2phy_map *__find_pci2phy_map(int segment) + return map; + } + +-ssize_t uncore_event_show(struct kobject *kobj, +- struct kobj_attribute *attr, char *buf) ++ssize_t uncore_event_show(struct device *dev, ++ struct device_attribute *attr, char *buf) + { + struct uncore_event_desc *event = + container_of(attr, struct uncore_event_desc, attr); +diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h +index ad986c1e29bc..f699783114ee 100644 +--- a/arch/x86/events/intel/uncore.h ++++ b/arch/x86/events/intel/uncore.h +@@ -124,7 +124,7 @@ struct intel_uncore_box { + #define UNCORE_BOX_FLAG_CTL_OFFS8 1 /* event config registers are 8-byte apart */ + + struct uncore_event_desc { +- struct kobj_attribute attr; ++ struct device_attribute attr; + const char *config; + }; + +@@ -136,8 +136,8 @@ struct pci2phy_map { + + struct pci2phy_map *__find_pci2phy_map(int segment); + +-ssize_t uncore_event_show(struct kobject *kobj, +- struct kobj_attribute *attr, char *buf); ++ssize_t uncore_event_show(struct device *dev, ++ struct device_attribute *attr, char *buf); + + #define INTEL_UNCORE_EVENT_DESC(_name, _config) \ + { \ +@@ -146,14 +146,14 @@ ssize_t uncore_event_show(struct kobject *kobj, + } + + #define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \ +-static ssize_t __uncore_##_var##_show(struct kobject *kobj, \ +- struct kobj_attribute *attr, \ ++static ssize_t __uncore_##_var##_show(struct device *dev, \ ++ struct device_attribute *attr, \ + char *page) \ + { \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ + } \ +-static struct kobj_attribute format_attr_##_var = \ ++static struct device_attribute format_attr_##_var = \ + __ATTR(_name, 0444, __uncore_##_var##_show, NULL) + + static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box) +diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h +index c42c9d50c8ee..8095c7169e8a 100644 +--- a/arch/x86/events/perf_event.h ++++ b/arch/x86/events/perf_event.h +@@ -771,9 +771,10 @@ void x86_pmu_stop(struct perf_event *event, int flags); + + static inline void x86_pmu_disable_event(struct perf_event *event) + { ++ u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask); + struct hw_perf_event *hwc = &event->hw; + +- wrmsrl(hwc->config_base, hwc->config); ++ wrmsrl(hwc->config_base, hwc->config & ~disable_mask); + } + + void x86_pmu_enable_event(struct perf_event *event); +diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h +index f39fd349cef6..a6d034257b7b 100644 +--- a/arch/x86/include/asm/apic.h ++++ b/arch/x86/include/asm/apic.h +@@ -176,16 +176,6 @@ static inline 
void lapic_update_tsc_freq(void) { } + #endif /* !CONFIG_X86_LOCAL_APIC */ + + #ifdef CONFIG_X86_X2APIC +-/* +- * Make previous memory operations globally visible before +- * sending the IPI through x2apic wrmsr. We need a serializing instruction or +- * mfence for this. +- */ +-static inline void x2apic_wrmsr_fence(void) +-{ +- asm volatile("mfence" : : : "memory"); +-} +- + static inline void native_apic_msr_write(u32 reg, u32 v) + { + if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR || +diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h +index 5b0579abb398..3ac991d81e74 100644 +--- a/arch/x86/include/asm/archrandom.h ++++ b/arch/x86/include/asm/archrandom.h +@@ -45,7 +45,7 @@ static inline bool rdrand_long(unsigned long *v) + bool ok; + unsigned int retry = RDRAND_RETRY_LOOPS; + do { +- asm volatile(RDRAND_LONG "\n\t" ++ asm volatile(RDRAND_LONG + CC_SET(c) + : CC_OUT(c) (ok), "=a" (*v)); + if (ok) +@@ -59,7 +59,7 @@ static inline bool rdrand_int(unsigned int *v) + bool ok; + unsigned int retry = RDRAND_RETRY_LOOPS; + do { +- asm volatile(RDRAND_INT "\n\t" ++ asm volatile(RDRAND_INT + CC_SET(c) + : CC_OUT(c) (ok), "=a" (*v)); + if (ok) +@@ -71,7 +71,7 @@ static inline bool rdrand_int(unsigned int *v) + static inline bool rdseed_long(unsigned long *v) + { + bool ok; +- asm volatile(RDSEED_LONG "\n\t" ++ asm volatile(RDSEED_LONG + CC_SET(c) + : CC_OUT(c) (ok), "=a" (*v)); + return ok; +@@ -80,7 +80,7 @@ static inline bool rdseed_long(unsigned long *v) + static inline bool rdseed_int(unsigned int *v) + { + bool ok; +- asm volatile(RDSEED_INT "\n\t" ++ asm volatile(RDSEED_INT + CC_SET(c) + : CC_OUT(c) (ok), "=a" (*v)); + return ok; +diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h +index a0f450b21d67..89b75edf24af 100644 +--- a/arch/x86/include/asm/barrier.h ++++ b/arch/x86/include/asm/barrier.h +@@ -110,4 +110,22 @@ do { \ + + #include + ++/* ++ * Make previous memory operations globally visible before ++ * a WRMSR. ++ * ++ * MFENCE makes writes visible, but only affects load/store ++ * instructions. WRMSR is unfortunately not a load/store ++ * instruction and is unaffected by MFENCE. The LFENCE ensures ++ * that the WRMSR is not reordered. ++ * ++ * Most WRMSRs are full serializing instructions themselves and ++ * do not require this barrier. This is only required for the ++ * IA32_TSC_DEADLINE and X2APIC MSRs. 
++ */ ++static inline void weak_wrmsr_fence(void) ++{ ++ asm volatile("mfence; lfence" : : : "memory"); ++} ++ + #endif /* _ASM_X86_BARRIER_H */ +diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h +index 68557f52b961..fb402d4c4508 100644 +--- a/arch/x86/include/asm/bitops.h ++++ b/arch/x86/include/asm/bitops.h +@@ -77,7 +77,7 @@ set_bit(long nr, volatile unsigned long *addr) + : "iq" ((u8)CONST_MASK(nr)) + : "memory"); + } else { +- asm volatile(LOCK_PREFIX "bts %1,%0" ++ asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0" + : BITOP_ADDR(addr) : "Ir" (nr) : "memory"); + } + } +@@ -93,7 +93,7 @@ set_bit(long nr, volatile unsigned long *addr) + */ + static __always_inline void __set_bit(long nr, volatile unsigned long *addr) + { +- asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory"); ++ asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory"); + } + + /** +@@ -114,7 +114,7 @@ clear_bit(long nr, volatile unsigned long *addr) + : CONST_MASK_ADDR(nr, addr) + : "iq" ((u8)~CONST_MASK(nr))); + } else { +- asm volatile(LOCK_PREFIX "btr %1,%0" ++ asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0" + : BITOP_ADDR(addr) + : "Ir" (nr)); + } +@@ -136,7 +136,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad + + static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) + { +- asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); ++ asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr)); + } + + /* +@@ -168,7 +168,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long * + */ + static __always_inline void __change_bit(long nr, volatile unsigned long *addr) + { +- asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); ++ asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr)); + } + + /** +@@ -187,7 +187,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr) + : CONST_MASK_ADDR(nr, addr) + : "iq" ((u8)CONST_MASK(nr))); + } else { +- asm volatile(LOCK_PREFIX "btc %1,%0" ++ asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0" + : BITOP_ADDR(addr) + : "Ir" (nr)); + } +@@ -203,7 +203,8 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr) + */ + static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr) + { +- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c); ++ GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), ++ *addr, "Ir", nr, "%0", c); + } + + /** +@@ -232,7 +233,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * + { + bool oldbit; + +- asm("bts %2,%1\n\t" ++ asm(__ASM_SIZE(bts) " %2,%1" + CC_SET(c) + : CC_OUT(c) (oldbit), ADDR + : "Ir" (nr)); +@@ -249,7 +250,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * + */ + static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) + { +- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c); ++ GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), ++ *addr, "Ir", nr, "%0", c); + } + + /** +@@ -272,7 +274,7 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long + { + bool oldbit; + +- asm volatile("btr %2,%1\n\t" ++ asm volatile(__ASM_SIZE(btr) " %2,%1" + CC_SET(c) + : CC_OUT(c) (oldbit), ADDR + : "Ir" (nr)); +@@ -284,7 +286,7 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon + { + bool oldbit; + +- asm volatile("btc %2,%1\n\t" ++ asm volatile(__ASM_SIZE(btc) " %2,%1" + CC_SET(c) + : CC_OUT(c) (oldbit), ADDR + : 
"Ir" (nr) : "memory"); +@@ -302,7 +304,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon + */ + static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr) + { +- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c); ++ GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), ++ *addr, "Ir", nr, "%0", c); + } + + static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr) +@@ -315,7 +318,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l + { + bool oldbit; + +- asm volatile("bt %2,%1\n\t" ++ asm volatile(__ASM_SIZE(bt) " %2,%1" + CC_SET(c) + : CC_OUT(c) (oldbit) + : "m" (*(unsigned long *)addr), "Ir" (nr)); +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index 2cd5d12a842c..8ceb7a8a249c 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -273,6 +273,7 @@ + #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ + #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ + #define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ ++#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */ + #define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ + #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ + #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */ +@@ -312,7 +313,6 @@ + #define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */ + #define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */ + +- + /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ + #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ + #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ +diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h +index 21d6fa27b4a9..793c04cba0de 100644 +--- a/arch/x86/include/asm/fpu/internal.h ++++ b/arch/x86/include/asm/fpu/internal.h +@@ -94,6 +94,7 @@ static inline void fpstate_init_fxstate(struct fxregs_state *fx) + } + extern void fpstate_sanitize_xstate(struct fpu *fpu); + ++/* Returns 0 or the negated trap number, which results in -EFAULT for #PF */ + #define user_insn(insn, output, input...) \ + ({ \ + int err; \ +@@ -101,14 +102,14 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu); + might_fault(); \ + \ + asm volatile(ASM_STAC "\n" \ +- "1:" #insn "\n\t" \ ++ "1: " #insn "\n" \ + "2: " ASM_CLAC "\n" \ + ".section .fixup,\"ax\"\n" \ +- "3: movl $-1,%[err]\n" \ ++ "3: negl %%eax\n" \ + " jmp 2b\n" \ + ".previous\n" \ +- _ASM_EXTABLE(1b, 3b) \ +- : [err] "=r" (err), output \ ++ _ASM_EXTABLE_FAULT(1b, 3b) \ ++ : [err] "=a" (err), output \ + : "0"(0), input); \ + err; \ + }) +@@ -220,6 +221,14 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu) + } + } + ++static inline void fxsave(struct fxregs_state *fx) ++{ ++ if (IS_ENABLED(CONFIG_X86_32)) ++ asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx)); ++ else ++ asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx)); ++} ++ + /* These macros all use (%edi)/(%rdi) as the single memory argument. 
*/ + #define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27" + #define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37" +@@ -227,16 +236,20 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu) + #define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f" + #define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f" + ++/* ++ * After this @err contains 0 on success or the negated trap number when ++ * the operation raises an exception. For faults this results in -EFAULT. ++ */ + #define XSTATE_OP(op, st, lmask, hmask, err) \ + asm volatile("1:" op "\n\t" \ + "xor %[err], %[err]\n" \ + "2:\n\t" \ + ".pushsection .fixup,\"ax\"\n\t" \ +- "3: movl $-2,%[err]\n\t" \ ++ "3: negl %%eax\n\t" \ + "jmp 2b\n\t" \ + ".popsection\n\t" \ +- _ASM_EXTABLE(1b, 3b) \ +- : [err] "=r" (err) \ ++ _ASM_EXTABLE_FAULT(1b, 3b) \ ++ : [err] "=a" (err) \ + : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ + : "memory") + +@@ -289,28 +302,6 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu) + : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ + : "memory") + +-/* +- * This function is called only during boot time when x86 caps are not set +- * up and alternative can not be used yet. +- */ +-static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate) +-{ +- u64 mask = -1; +- u32 lmask = mask; +- u32 hmask = mask >> 32; +- int err; +- +- WARN_ON(system_state != SYSTEM_BOOTING); +- +- if (static_cpu_has(X86_FEATURE_XSAVES)) +- XSTATE_OP(XSAVES, xstate, lmask, hmask, err); +- else +- XSTATE_OP(XSAVE, xstate, lmask, hmask, err); +- +- /* We should never fault when copying to a kernel buffer: */ +- WARN_ON_FPU(err); +-} +- + /* + * This function is called only during boot time when x86 caps are not set + * up and alternative can not be used yet. +diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h +index c2c01f84df75..3e0e18d376d2 100644 +--- a/arch/x86/include/asm/insn.h ++++ b/arch/x86/include/asm/insn.h +@@ -208,6 +208,21 @@ static inline int insn_offset_immediate(struct insn *insn) + return insn_offset_displacement(insn) + insn->displacement.nbytes; + } + ++/** ++ * for_each_insn_prefix() -- Iterate prefixes in the instruction ++ * @insn: Pointer to struct insn. ++ * @idx: Index storage. ++ * @prefix: Prefix byte. ++ * ++ * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix ++ * and the index is stored in @idx (note that this @idx is just for a cursor, ++ * do not change it.) ++ * Since prefixes.nbytes can be bigger than 4 if some prefixes ++ * are repeated, it cannot be used for looping over the prefixes. 
++ */ ++#define for_each_insn_prefix(insn, idx, prefix) \ ++ for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++) ++ + #define POP_SS_OPCODE 0x1f + #define MOV_SREG_OPCODE 0x8e + +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h +index b12b0a50ad1f..1fdea3c334e7 100644 +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -356,6 +356,7 @@ + #define MSR_AMD64_IBSOP_REG_MASK ((1UL< + ++struct task_struct; ++ + /* misc architecture specific prototypes */ + + void syscall_init(void); +diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h +index 14824fc78f7e..509b9f3307e4 100644 +--- a/arch/x86/include/asm/svm.h ++++ b/arch/x86/include/asm/svm.h +@@ -113,6 +113,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area { + #define V_IGN_TPR_SHIFT 20 + #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT) + ++#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK) ++ + #define V_INTR_MASKING_SHIFT 24 + #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT) + +diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h +index 34191cf1594b..a5eb6474b3e2 100644 +--- a/arch/x86/include/asm/thread_info.h ++++ b/arch/x86/include/asm/thread_info.h +@@ -223,10 +223,31 @@ static inline int arch_within_stack_frames(const void * const stack, + + #endif + ++/* ++ * Thread-synchronous status. ++ * ++ * This is different from the flags in that nobody else ++ * ever touches our thread-synchronous status, so we don't ++ * have to worry about atomic accesses. ++ */ ++#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ ++ ++#ifndef __ASSEMBLY__ + #ifdef CONFIG_COMPAT + #define TS_I386_REGS_POKED 0x0004 /* regs poked by 32-bit ptracer */ ++#define TS_COMPAT_RESTART 0x0008 ++ ++#define arch_set_restart_data arch_set_restart_data ++ ++static inline void arch_set_restart_data(struct restart_block *restart) ++{ ++ struct thread_info *ti = current_thread_info(); ++ if (ti->status & TS_COMPAT) ++ ti->status |= TS_COMPAT_RESTART; ++ else ++ ti->status &= ~TS_COMPAT_RESTART; ++} + #endif +-#ifndef __ASSEMBLY__ + + #ifdef CONFIG_X86_32 + #define in_ia32_syscall() true +diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h +index f5ca15622dc9..2bfa4deb8cae 100644 +--- a/arch/x86/include/asm/tlbflush.h ++++ b/arch/x86/include/asm/tlbflush.h +@@ -245,12 +245,15 @@ static inline void __native_flush_tlb_single(unsigned long addr) + * ASID. But, userspace flushes are probably much more + * important performance-wise. + * +- * Make sure to do only a single invpcid when KAISER is +- * disabled and we have only a single ASID. ++ * In the KAISER disabled case, do an INVLPG to make sure ++ * the mapping is flushed in case it is a global one. 
+ */ +- if (kaiser_enabled) ++ if (kaiser_enabled) { + invpcid_flush_one(X86_CR3_PCID_ASID_USER, addr); +- invpcid_flush_one(X86_CR3_PCID_ASID_KERN, addr); ++ invpcid_flush_one(X86_CR3_PCID_ASID_KERN, addr); ++ } else { ++ asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); ++ } + } + + static inline void __flush_tlb_all(void) +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c +index 722a76b88bcc..107a9eff587b 100644 +--- a/arch/x86/kernel/apic/apic.c ++++ b/arch/x86/kernel/apic/apic.c +@@ -42,6 +42,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -476,6 +477,9 @@ static int lapic_next_deadline(unsigned long delta, + { + u64 tsc; + ++ /* This MSR is special and need a special fence: */ ++ weak_wrmsr_fence(); ++ + tsc = rdtsc(); + wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR)); + return 0; +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c +index 3401b28f1312..f398612e8a81 100644 +--- a/arch/x86/kernel/apic/io_apic.c ++++ b/arch/x86/kernel/apic/io_apic.c +@@ -1042,6 +1042,16 @@ static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin, + if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) { + irq = mp_irqs[idx].srcbusirq; + legacy = mp_is_legacy_irq(irq); ++ /* ++ * IRQ2 is unusable for historical reasons on systems which ++ * have a legacy PIC. See the comment vs. IRQ2 further down. ++ * ++ * If this gets removed at some point then the related code ++ * in lapic_assign_system_vectors() needs to be adjusted as ++ * well. ++ */ ++ if (legacy && irq == PIC_CASCADE_IR) ++ return -EINVAL; + } + + mutex_lock(&ioapic_mutex); +diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c +index 200af5ae9662..ca64c150d1c5 100644 +--- a/arch/x86/kernel/apic/x2apic_cluster.c ++++ b/arch/x86/kernel/apic/x2apic_cluster.c +@@ -27,7 +27,8 @@ static void x2apic_send_IPI(int cpu, int vector) + { + u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu); + +- x2apic_wrmsr_fence(); ++ /* x2apic MSRs are special and need a special fence: */ ++ weak_wrmsr_fence(); + __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL); + } + +@@ -40,7 +41,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) + unsigned long flags; + u32 dest; + +- x2apic_wrmsr_fence(); ++ /* x2apic MSRs are special and need a special fence: */ ++ weak_wrmsr_fence(); + + local_irq_save(flags); + +diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c +index ff111f05a314..8889420ea7c6 100644 +--- a/arch/x86/kernel/apic/x2apic_phys.c ++++ b/arch/x86/kernel/apic/x2apic_phys.c +@@ -40,7 +40,8 @@ static void x2apic_send_IPI(int cpu, int vector) + { + u32 dest = per_cpu(x86_cpu_to_apicid, cpu); + +- x2apic_wrmsr_fence(); ++ /* x2apic MSRs are special and need a special fence: */ ++ weak_wrmsr_fence(); + __x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL); + } + +@@ -51,7 +52,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) + unsigned long this_cpu; + unsigned long flags; + +- x2apic_wrmsr_fence(); ++ /* x2apic MSRs are special and need a special fence: */ ++ weak_wrmsr_fence(); + + local_irq_save(flags); + +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 5ef0a2b34261..a884bb7e7b01 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -60,7 +60,7 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS; + u64 __ro_after_init 
x86_amd_ls_cfg_base; + u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; + +-/* Control conditional STIPB in switch_to() */ ++/* Control conditional STIBP in switch_to() */ + DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp); + /* Control conditional IBPB in switch_mm() */ + DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); +@@ -580,7 +580,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline); + static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = + SPECTRE_V2_NONE; + +-static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init = ++static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = ++ SPECTRE_V2_USER_NONE; ++static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = + SPECTRE_V2_USER_NONE; + + #ifdef RETPOLINE +@@ -632,10 +634,11 @@ enum spectre_v2_user_cmd { + }; + + static const char * const spectre_v2_user_strings[] = { +- [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", +- [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", +- [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", +- [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", ++ [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", ++ [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", ++ [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection", ++ [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", ++ [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", + }; + + static const struct { +@@ -729,11 +732,13 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) + if (boot_cpu_has(X86_FEATURE_IBPB)) { + setup_force_cpu_cap(X86_FEATURE_USE_IBPB); + ++ spectre_v2_user_ibpb = mode; + switch (cmd) { + case SPECTRE_V2_USER_CMD_FORCE: + case SPECTRE_V2_USER_CMD_PRCTL_IBPB: + case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: + static_branch_enable(&switch_mm_always_ibpb); ++ spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; + break; + case SPECTRE_V2_USER_CMD_PRCTL: + case SPECTRE_V2_USER_CMD_AUTO: +@@ -749,21 +754,32 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) + "always-on" : "conditional"); + } + +- /* If enhanced IBRS is enabled no STIPB required */ +- if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) ++ /* ++ * If enhanced IBRS is enabled or SMT impossible, STIBP is not ++ * required. ++ */ ++ if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + return; + + /* +- * If SMT is not possible or STIBP is not available clear the STIPB +- * mode. ++ * At this point, an STIBP mode other than "off" has been set. ++ * If STIBP support is not being forced, check if STIBP always-on ++ * is preferred. ++ */ ++ if (mode != SPECTRE_V2_USER_STRICT && ++ boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) ++ mode = SPECTRE_V2_USER_STRICT_PREFERRED; ++ ++ /* ++ * If STIBP is not available, clear the STIBP mode. 
+ */ +- if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP)) ++ if (!boot_cpu_has(X86_FEATURE_STIBP)) + mode = SPECTRE_V2_USER_NONE; ++ ++ spectre_v2_user_stibp = mode; ++ + set_mode: +- spectre_v2_user = mode; +- /* Only print the STIBP mode when SMT possible */ +- if (smt_possible) +- pr_info("%s\n", spectre_v2_user_strings[mode]); ++ pr_info("%s\n", spectre_v2_user_strings[mode]); + } + + static const char * const spectre_v2_strings[] = { +@@ -1003,10 +1019,11 @@ void arch_smt_update(void) + { + mutex_lock(&spec_ctrl_mutex); + +- switch (spectre_v2_user) { ++ switch (spectre_v2_user_stibp) { + case SPECTRE_V2_USER_NONE: + break; + case SPECTRE_V2_USER_STRICT: ++ case SPECTRE_V2_USER_STRICT_PREFERRED: + update_stibp_strict(); + break; + case SPECTRE_V2_USER_PRCTL: +@@ -1231,18 +1248,41 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) + return 0; + } + ++static bool is_spec_ib_user_controlled(void) ++{ ++ return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || ++ spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP; ++} ++ + static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) + { + switch (ctrl) { + case PR_SPEC_ENABLE: +- if (spectre_v2_user == SPECTRE_V2_USER_NONE) ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && ++ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) + return 0; ++ + /* +- * Indirect branch speculation is always disabled in strict +- * mode. ++ * With strict mode for both IBPB and STIBP, the instruction ++ * code paths avoid checking this task flag and instead, ++ * unconditionally run the instruction. However, STIBP and IBPB ++ * are independent and either can be set to conditionally ++ * enabled regardless of the mode of the other. ++ * ++ * If either is set to conditional, allow the task flag to be ++ * updated, unless it was force-disabled by a previous prctl ++ * call. Currently, this is possible on an AMD CPU which has the ++ * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the ++ * kernel is booted with 'spectre_v2_user=seccomp', then ++ * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and ++ * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED. + */ +- if (spectre_v2_user == SPECTRE_V2_USER_STRICT) ++ if (!is_spec_ib_user_controlled() || ++ task_spec_ib_force_disable(task)) + return -EPERM; ++ + task_clear_spec_ib_disable(task); + task_update_spec_tif(task); + break; +@@ -1252,10 +1292,13 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) + * Indirect branch speculation is always allowed when + * mitigation is force disabled. 
+ */ +- if (spectre_v2_user == SPECTRE_V2_USER_NONE) ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && ++ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) + return -EPERM; +- if (spectre_v2_user == SPECTRE_V2_USER_STRICT) ++ ++ if (!is_spec_ib_user_controlled()) + return 0; ++ + task_set_spec_ib_disable(task); + if (ctrl == PR_SPEC_FORCE_DISABLE) + task_set_spec_ib_force_disable(task); +@@ -1285,7 +1328,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task) + { + if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) + ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); +- if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP) ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) + ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); + } + #endif +@@ -1314,21 +1358,21 @@ static int ib_prctl_get(struct task_struct *task) + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + return PR_SPEC_NOT_AFFECTED; + +- switch (spectre_v2_user) { +- case SPECTRE_V2_USER_NONE: ++ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && ++ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) + return PR_SPEC_ENABLE; +- case SPECTRE_V2_USER_PRCTL: +- case SPECTRE_V2_USER_SECCOMP: ++ else if (is_spec_ib_user_controlled()) { + if (task_spec_ib_force_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; + if (task_spec_ib_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_DISABLE; + return PR_SPEC_PRCTL | PR_SPEC_ENABLE; +- case SPECTRE_V2_USER_STRICT: ++ } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) + return PR_SPEC_DISABLE; +- default: ++ else + return PR_SPEC_NOT_AFFECTED; +- } + } + + int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) +@@ -1569,11 +1613,13 @@ static char *stibp_state(void) + if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + return ""; + +- switch (spectre_v2_user) { ++ switch (spectre_v2_user_stibp) { + case SPECTRE_V2_USER_NONE: + return ", STIBP: disabled"; + case SPECTRE_V2_USER_STRICT: + return ", STIBP: forced"; ++ case SPECTRE_V2_USER_STRICT_PREFERRED: ++ return ", STIBP: always-on"; + case SPECTRE_V2_USER_PRCTL: + case SPECTRE_V2_USER_SECCOMP: + if (static_key_enabled(&switch_to_cond_stibp)) +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index b16b6176738b..ff3253b9a879 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -849,6 +849,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c) + else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) + c->x86_phys_bits = 36; + #endif ++ c->x86_cache_bits = c->x86_phys_bits; + + if (c->extended_cpuid_level >= 0x8000000a) + c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a); +@@ -888,7 +889,6 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) + } + } + #endif +- c->x86_cache_bits = c->x86_phys_bits; + } + + #define NO_SPECULATION BIT(0) +diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c +index 1308abfc4758..0939151a42a9 100644 +--- a/arch/x86/kernel/cpu/microcode/intel.c ++++ b/arch/x86/kernel/cpu/microcode/intel.c +@@ -147,51 +147,6 @@ load_microcode(struct mc_saved_data *mcs, unsigned long *mc_ptrs, + } + } + +-/* +- * Given CPU signature and a microcode patch, this function finds if the +- * microcode patch has matching family and model with the CPU. 
+- */ +-static enum ucode_state +-matching_model_microcode(struct microcode_header_intel *mc_header, +- unsigned long sig) +-{ +- unsigned int fam, model; +- unsigned int fam_ucode, model_ucode; +- struct extended_sigtable *ext_header; +- unsigned long total_size = get_totalsize(mc_header); +- unsigned long data_size = get_datasize(mc_header); +- int ext_sigcount, i; +- struct extended_signature *ext_sig; +- +- fam = x86_family(sig); +- model = x86_model(sig); +- +- fam_ucode = x86_family(mc_header->sig); +- model_ucode = x86_model(mc_header->sig); +- +- if (fam == fam_ucode && model == model_ucode) +- return UCODE_OK; +- +- /* Look for ext. headers: */ +- if (total_size <= data_size + MC_HEADER_SIZE) +- return UCODE_NFOUND; +- +- ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE; +- ext_sig = (void *)ext_header + EXT_HEADER_SIZE; +- ext_sigcount = ext_header->count; +- +- for (i = 0; i < ext_sigcount; i++) { +- fam_ucode = x86_family(ext_sig->sig); +- model_ucode = x86_model(ext_sig->sig); +- +- if (fam == fam_ucode && model == model_ucode) +- return UCODE_OK; +- +- ext_sig++; +- } +- return UCODE_NFOUND; +-} +- + static int + save_microcode(struct mc_saved_data *mcs, + struct microcode_intel **mc_saved_src, +@@ -332,7 +287,8 @@ get_matching_model_microcode(unsigned long start, void *data, size_t size, + * the platform, we need to find and save microcode patches + * with the same family and model as the BSP. + */ +- if (matching_model_microcode(mc_header, uci->cpu_sig.sig) != UCODE_OK) { ++ if (!find_matching_signature(mc_header, uci->cpu_sig.sig, ++ uci->cpu_sig.pf)) { + ucode_ptr += mc_size; + continue; + } +diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c +index e12ee86906c6..9436f3452049 100644 +--- a/arch/x86/kernel/cpu/mtrr/generic.c ++++ b/arch/x86/kernel/cpu/mtrr/generic.c +@@ -166,9 +166,6 @@ static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end, + *repeat = 0; + *uniform = 1; + +- /* Make end inclusive instead of exclusive */ +- end--; +- + prev_match = MTRR_TYPE_INVALID; + for (i = 0; i < num_var_ranges; ++i) { + unsigned short start_state, end_state, inclusive; +@@ -260,6 +257,9 @@ u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform) + int repeat; + u64 partial_end; + ++ /* Make end inclusive instead of exclusive */ ++ end--; ++ + if (!mtrr_state_set) + return MTRR_TYPE_INVALID; + +diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c +index 650830e39e3a..b3ac57f0e068 100644 +--- a/arch/x86/kernel/crash.c ++++ b/arch/x86/kernel/crash.c +@@ -23,6 +23,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -564,7 +565,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params) + struct crash_memmap_data cmd; + struct crash_mem *cmem; + +- cmem = vzalloc(sizeof(struct crash_mem)); ++ cmem = vzalloc(struct_size(cmem, ranges, 1)); + if (!cmem) + return -ENOMEM; + +diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c +index 7052d9a65fe9..e1c9e94fcce6 100644 +--- a/arch/x86/kernel/fpu/regset.c ++++ b/arch/x86/kernel/fpu/regset.c +@@ -123,7 +123,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset, + /* + * A whole standard-format XSAVE buffer is needed: + */ +- if ((pos != 0) || (count < fpu_user_xstate_size)) ++ if (pos != 0 || count != fpu_user_xstate_size) + return -EFAULT; + + xsave = &fpu->state.xsave; +diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c +index 769831d9fd11..07b0ebd49576 
100644 +--- a/arch/x86/kernel/fpu/signal.c ++++ b/arch/x86/kernel/fpu/signal.c +@@ -276,15 +276,23 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) + return 0; + } + +- if (!access_ok(VERIFY_READ, buf, size)) ++ if (!access_ok(VERIFY_READ, buf, size)) { ++ fpu__clear(fpu); + return -EACCES; ++ } + + fpu__activate_curr(fpu); + +- if (!static_cpu_has(X86_FEATURE_FPU)) +- return fpregs_soft_set(current, NULL, +- 0, sizeof(struct user_i387_ia32_struct), +- NULL, buf) != 0; ++ if (!static_cpu_has(X86_FEATURE_FPU)) { ++ int ret = fpregs_soft_set(current, NULL, 0, ++ sizeof(struct user_i387_ia32_struct), ++ NULL, buf); ++ ++ if (ret) ++ fpu__clear(fpu); ++ ++ return ret != 0; ++ } + + if (use_xsave()) { + struct _fpx_sw_bytes fx_sw_user; +diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c +index e9d7f461b7fa..02ad98ec5149 100644 +--- a/arch/x86/kernel/fpu/xstate.c ++++ b/arch/x86/kernel/fpu/xstate.c +@@ -407,6 +407,24 @@ static void __init print_xstate_offset_size(void) + } + } + ++/* ++ * All supported features have either init state all zeros or are ++ * handled in setup_init_fpu() individually. This is an explicit ++ * feature list and does not use XFEATURE_MASK*SUPPORTED to catch ++ * newly added supported features at build time and make people ++ * actually look at the init state for the new feature. ++ */ ++#define XFEATURES_INIT_FPSTATE_HANDLED \ ++ (XFEATURE_MASK_FP | \ ++ XFEATURE_MASK_SSE | \ ++ XFEATURE_MASK_YMM | \ ++ XFEATURE_MASK_OPMASK | \ ++ XFEATURE_MASK_ZMM_Hi256 | \ ++ XFEATURE_MASK_Hi16_ZMM | \ ++ XFEATURE_MASK_PKRU | \ ++ XFEATURE_MASK_BNDREGS | \ ++ XFEATURE_MASK_BNDCSR) ++ + /* + * setup the xstate image representing the init state + */ +@@ -414,6 +432,8 @@ static void __init setup_init_fpu_buf(void) + { + static int on_boot_cpu __initdata = 1; + ++ BUILD_BUG_ON(XCNTXT_MASK != XFEATURES_INIT_FPSTATE_HANDLED); ++ + WARN_ON_FPU(!on_boot_cpu); + on_boot_cpu = 0; + +@@ -432,10 +452,22 @@ static void __init setup_init_fpu_buf(void) + copy_kernel_to_xregs_booting(&init_fpstate.xsave); + + /* +- * Dump the init state again. This is to identify the init state +- * of any feature which is not represented by all zero's. ++ * All components are now in init state. Read the state back so ++ * that init_fpstate contains all non-zero init state. This only ++ * works with XSAVE, but not with XSAVEOPT and XSAVES because ++ * those use the init optimization which skips writing data for ++ * components in init state. ++ * ++ * XSAVE could be used, but that would require to reshuffle the ++ * data when XSAVES is available because XSAVES uses xstate ++ * compaction. But doing so is a pointless exercise because most ++ * components have an all zeros init state except for the legacy ++ * ones (FP and SSE). Those can be saved with FXSAVE into the ++ * legacy area. Adding new features requires to ensure that init ++ * state is all zeroes or if not to add the necessary handling ++ * here. + */ +- copy_xregs_to_kernel_booting(&init_fpstate.xsave); ++ fxsave(&init_fpstate.fxsave); + } + + static int xfeature_uncompacted_offset(int xfeature_nr) +@@ -871,8 +903,6 @@ const void *get_xsave_field_ptr(int xsave_state) + + #ifdef CONFIG_ARCH_HAS_PKEYS + +-#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2) +-#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1) + /* + * This will go out and modify PKRU register to set the access + * rights for @pkey to @init_val. 
+@@ -891,6 +921,13 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, + if (!boot_cpu_has(X86_FEATURE_OSPKE)) + return -EINVAL; + ++ /* ++ * This code should only be called with valid 'pkey' ++ * values originating from in-kernel users. Complain ++ * if a bad value is observed. ++ */ ++ WARN_ON_ONCE(pkey >= arch_max_pkey()); ++ + /* Set the bits we need in PKRU: */ + if (init_val & PKEY_DISABLE_ACCESS) + new_pkru_bits |= PKRU_AD_BIT; +diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c +index 26d5451b6b42..32e459ecff7a 100644 +--- a/arch/x86/kernel/i8259.c ++++ b/arch/x86/kernel/i8259.c +@@ -205,7 +205,7 @@ static void mask_and_ack_8259A(struct irq_data *data) + * lets ACK and report it. [once per IRQ] + */ + if (!(spurious_irq_mask & irqmask)) { +- printk(KERN_DEBUG ++ printk_deferred(KERN_DEBUG + "spurious 8259A interrupt: IRQ%d.\n", irq); + spurious_irq_mask |= irqmask; + } +diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c +index 167ecc270ca5..316c05b8b728 100644 +--- a/arch/x86/kernel/kexec-bzimage64.c ++++ b/arch/x86/kernel/kexec-bzimage64.c +@@ -211,8 +211,7 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params, + params->hdr.hardware_subarch = boot_params.hdr.hardware_subarch; + + /* Copying screen_info will do? */ +- memcpy(¶ms->screen_info, &boot_params.screen_info, +- sizeof(struct screen_info)); ++ memcpy(¶ms->screen_info, &screen_info, sizeof(struct screen_info)); + + /* Fill in memsize later */ + params->screen_info.ext_mem_k = 0; +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c +index dcd6df5943d6..3da08881a010 100644 +--- a/arch/x86/kernel/kprobes/core.c ++++ b/arch/x86/kernel/kprobes/core.c +@@ -754,6 +754,13 @@ __visible __used void *trampoline_handler(struct pt_regs *regs) + void *frame_pointer; + bool skipped = false; + ++ /* ++ * Set a dummy kprobe for avoiding kretprobe recursion. ++ * Since kretprobe never run in kprobe handler, kprobe must not ++ * be running at this point. ++ */ ++ kprobe_busy_begin(); ++ + INIT_HLIST_HEAD(&empty_rp); + kretprobe_hash_lock(current, &head, &flags); + /* fixup registers */ +@@ -829,10 +836,9 @@ __visible __used void *trampoline_handler(struct pt_regs *regs) + orig_ret_address = (unsigned long)ri->ret_addr; + if (ri->rp && ri->rp->handler) { + __this_cpu_write(current_kprobe, &ri->rp->kp); +- get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; + ri->ret_addr = correct_ret_addr; + ri->rp->handler(ri, regs); +- __this_cpu_write(current_kprobe, NULL); ++ __this_cpu_write(current_kprobe, &kprobe_busy); + } + + recycle_rp_inst(ri, &empty_rp); +@@ -848,6 +854,8 @@ __visible __used void *trampoline_handler(struct pt_regs *regs) + + kretprobe_hash_unlock(current, &flags); + ++ kprobe_busy_end(); ++ + hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { + hlist_del(&ri->hlist); + kfree(ri); +@@ -1010,6 +1018,11 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr) + * So clear it by resetting the current kprobe: + */ + regs->flags &= ~X86_EFLAGS_TF; ++ /* ++ * Since the single step (trap) has been cancelled, ++ * we need to restore BTF here. 
++ */ ++ restore_btf(); + + /* + * If the TF flag was set before the kprobe hit, +diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c +index 19977d2f97fb..3c09ca384199 100644 +--- a/arch/x86/kernel/module.c ++++ b/arch/x86/kernel/module.c +@@ -125,6 +125,7 @@ int apply_relocate(Elf32_Shdr *sechdrs, + *location += sym->st_value; + break; + case R_386_PC32: ++ case R_386_PLT32: + /* Add the value, subtract its position */ + *location += sym->st_value - (uint32_t)location; + break; +diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c +index f1f3c471438f..fcc5dfb2cbd4 100644 +--- a/arch/x86/kernel/process.c ++++ b/arch/x86/kernel/process.c +@@ -324,28 +324,20 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp, + u64 msr = x86_spec_ctrl_base; + bool updmsr = false; + +- /* +- * If TIF_SSBD is different, select the proper mitigation +- * method. Note that if SSBD mitigation is disabled or permanentely +- * enabled this branch can't be taken because nothing can set +- * TIF_SSBD. +- */ +- if (tif_diff & _TIF_SSBD) { +- if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { ++ /* Handle change of TIF_SSBD depending on the mitigation method. */ ++ if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { ++ if (tif_diff & _TIF_SSBD) + amd_set_ssb_virt_state(tifn); +- } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { ++ } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { ++ if (tif_diff & _TIF_SSBD) + amd_set_core_ssb_state(tifn); +- } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || +- static_cpu_has(X86_FEATURE_AMD_SSBD)) { +- msr |= ssbd_tif_to_spec_ctrl(tifn); +- updmsr = true; +- } ++ } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || ++ static_cpu_has(X86_FEATURE_AMD_SSBD)) { ++ updmsr |= !!(tif_diff & _TIF_SSBD); ++ msr |= ssbd_tif_to_spec_ctrl(tifn); + } + +- /* +- * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled, +- * otherwise avoid the MSR write. +- */ ++ /* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */ + if (IS_ENABLED(CONFIG_SMP) && + static_branch_unlikely(&switch_to_cond_stibp)) { + updmsr |= !!(tif_diff & _TIF_SPEC_IB); +diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h +index 898e97cf6629..320ab978fb1f 100644 +--- a/arch/x86/kernel/process.h ++++ b/arch/x86/kernel/process.h +@@ -19,7 +19,7 @@ static inline void switch_to_extra(struct task_struct *prev, + if (IS_ENABLED(CONFIG_SMP)) { + /* + * Avoid __switch_to_xtra() invocation when conditional +- * STIPB is disabled and the only different bit is ++ * STIBP is disabled and the only different bit is + * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not + * in the TIF_WORK_CTXSW masks. 
+ */ +diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c +index c55b11fe8e9f..75a1fd8b0e90 100644 +--- a/arch/x86/kernel/reboot.c ++++ b/arch/x86/kernel/reboot.c +@@ -198,6 +198,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"), + }, + }, ++ { /* Handle problems with rebooting on Apple MacBook6,1 */ ++ .callback = set_pci_reboot, ++ .ident = "Apple MacBook6,1", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"), ++ }, ++ }, + { /* Handle problems with rebooting on Apple MacBookPro5 */ + .callback = set_pci_reboot, + .ident = "Apple MacBookPro5", +@@ -470,6 +478,15 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { + }, + }, + ++ { /* PCIe Wifi card isn't detected after reboot otherwise */ ++ .callback = set_pci_reboot, ++ .ident = "Zotac ZBOX CI327 nano", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "NA"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "ZBOX-CI327NANO-GS-01"), ++ }, ++ }, ++ + /* Sony */ + { /* Handle problems with rebooting on Sony VGN-Z540N */ + .callback = set_bios_reboot, +@@ -531,29 +548,20 @@ static void emergency_vmx_disable_all(void) + local_irq_disable(); + + /* +- * We need to disable VMX on all CPUs before rebooting, otherwise +- * we risk hanging up the machine, because the CPU ignore INIT +- * signals when VMX is enabled. +- * +- * We can't take any locks and we may be on an inconsistent +- * state, so we use NMIs as IPIs to tell the other CPUs to disable +- * VMX and halt. ++ * Disable VMX on all CPUs before rebooting, otherwise we risk hanging ++ * the machine, because the CPU blocks INIT when it's in VMX root. + * +- * For safety, we will avoid running the nmi_shootdown_cpus() +- * stuff unnecessarily, but we don't have a way to check +- * if other CPUs have VMX enabled. So we will call it only if the +- * CPU we are running on has VMX enabled. ++ * We can't take any locks and we may be on an inconsistent state, so ++ * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt. + * +- * We will miss cases where VMX is not enabled on all CPUs. This +- * shouldn't do much harm because KVM always enable VMX on all +- * CPUs anyway. But we can miss it on the small window where KVM +- * is still enabling VMX. ++ * Do the NMI shootdown even if VMX if off on _this_ CPU, as that ++ * doesn't prevent a different CPU from being in VMX root operation. + */ +- if (cpu_has_vmx() && cpu_vmx_enabled()) { +- /* Disable VMX on this CPU. */ +- cpu_vmxoff(); ++ if (cpu_has_vmx()) { ++ /* Safely force _this_ CPU out of VMX root operation. */ ++ __cpu_emergency_vmxoff(); + +- /* Halt and disable VMX on the other CPUs */ ++ /* Halt and exit VMX root operation on the other CPUs. */ + nmi_shootdown_cpus(vmxoff_nmi); + + } +diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c +index ca010dfb9682..4050d5092c86 100644 +--- a/arch/x86/kernel/signal.c ++++ b/arch/x86/kernel/signal.c +@@ -767,30 +767,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) + + static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs) + { +- /* +- * This function is fundamentally broken as currently +- * implemented. +- * +- * The idea is that we want to trigger a call to the +- * restart_block() syscall and that we want in_ia32_syscall(), +- * in_x32_syscall(), etc. to match whatever they were in the +- * syscall being restarted. 
We assume that the syscall +- * instruction at (regs->ip - 2) matches whatever syscall +- * instruction we used to enter in the first place. +- * +- * The problem is that we can get here when ptrace pokes +- * syscall-like values into regs even if we're not in a syscall +- * at all. +- * +- * For now, we maintain historical behavior and guess based on +- * stored state. We could do better by saving the actual +- * syscall arch in restart_block or (with caveats on x32) by +- * checking if regs->ip points to 'int $0x80'. The current +- * behavior is incorrect if a tracer has a different bitness +- * than the tracee. +- */ + #ifdef CONFIG_IA32_EMULATION +- if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED)) ++ if (current_thread_info()->status & TS_COMPAT_RESTART) + return __NR_ia32_restart_syscall; + #endif + #ifdef CONFIG_X86_X32_ABI +diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c +index 89d1190b9d94..5e5de7a7f38d 100644 +--- a/arch/x86/kernel/time.c ++++ b/arch/x86/kernel/time.c +@@ -23,10 +23,6 @@ + #include + #include + +-#ifdef CONFIG_X86_64 +-__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES; +-#endif +- + unsigned long profile_pc(struct pt_regs *regs) + { + unsigned long pc = instruction_pointer(regs); +diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c +index 73391c1bd2a9..52bb7413f352 100644 +--- a/arch/x86/kernel/uprobes.c ++++ b/arch/x86/kernel/uprobes.c +@@ -268,10 +268,11 @@ static volatile u32 good_2byte_insns[256 / 32] = { + + static bool is_prefix_bad(struct insn *insn) + { ++ insn_byte_t p; + int i; + +- for (i = 0; i < insn->prefixes.nbytes; i++) { +- switch (insn->prefixes.bytes[i]) { ++ for_each_insn_prefix(insn, i, p) { ++ switch (p) { + case 0x26: /* INAT_PFX_ES */ + case 0x2E: /* INAT_PFX_CS */ + case 0x36: /* INAT_PFX_DS */ +@@ -711,6 +712,7 @@ static const struct uprobe_xol_ops branch_xol_ops = { + static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) + { + u8 opc1 = OPCODE1(insn); ++ insn_byte_t p; + int i; + + switch (opc1) { +@@ -741,8 +743,8 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) + * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix. + * No one uses these insns, reject any branch insns with such prefix. + */ +- for (i = 0; i < insn->prefixes.nbytes; i++) { +- if (insn->prefixes.bytes[i] == 0x66) ++ for_each_insn_prefix(insn, i, p) { ++ if (p == 0x66) + return -ENOTSUPP; + } + +diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S +index 55f04875293f..a1082dc61bb9 100644 +--- a/arch/x86/kernel/vmlinux.lds.S ++++ b/arch/x86/kernel/vmlinux.lds.S +@@ -34,13 +34,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT) + #ifdef CONFIG_X86_32 + OUTPUT_ARCH(i386) + ENTRY(phys_startup_32) +-jiffies = jiffies_64; + #else + OUTPUT_ARCH(i386:x86-64) + ENTRY(phys_startup_64) +-jiffies_64 = jiffies; + #endif + ++jiffies = jiffies_64; ++ + #if defined(CONFIG_X86_64) + /* + * On 64-bit, align RODATA to 2MB so we retain large page mappings for +@@ -329,7 +329,8 @@ SECTIONS + .bss : AT(ADDR(.bss) - LOAD_OFFSET) { + __bss_start = .; + *(.bss..page_aligned) +- *(.bss) ++ . = ALIGN(PAGE_SIZE); ++ *(BSS_MAIN) + . 
= ALIGN(PAGE_SIZE); + __bss_stop = .; + } +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c +index 63c3ff9e74d4..a6f8600672d7 100644 +--- a/arch/x86/kvm/cpuid.c ++++ b/arch/x86/kvm/cpuid.c +@@ -633,8 +633,14 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, + unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U); + unsigned phys_as = entry->eax & 0xff; + +- if (!g_phys_as) ++ /* ++ * Use bare metal's MAXPHADDR if the CPU doesn't report guest ++ * MAXPHYADDR separately, or if TDP (NPT) is disabled, as the ++ * guest version "applies only to guests using nested paging". ++ */ ++ if (!g_phys_as || !tdp_enabled) + g_phys_as = phys_as; ++ + entry->eax = g_phys_as | (virt_as << 8); + entry->edx = 0; + /* +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index da3cd734dee1..2e5553091f90 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -3536,7 +3536,7 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt) + u64 tsc_aux = 0; + + if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux)) +- return emulate_gp(ctxt, 0); ++ return emulate_ud(ctxt); + ctxt->dst.val = tsc_aux; + return X86EMUL_CONTINUE; + } +@@ -3934,6 +3934,12 @@ static int em_clflush(struct x86_emulate_ctxt *ctxt) + return X86EMUL_CONTINUE; + } + ++static int em_clflushopt(struct x86_emulate_ctxt *ctxt) ++{ ++ /* emulating clflushopt regardless of cpuid */ ++ return X86EMUL_CONTINUE; ++} ++ + static int em_movsxd(struct x86_emulate_ctxt *ctxt) + { + ctxt->dst.val = (s32) ctxt->src.val; +@@ -4423,7 +4429,7 @@ static const struct opcode group11[] = { + }; + + static const struct gprefix pfx_0f_ae_7 = { +- I(SrcMem | ByteOp, em_clflush), N, N, N, ++ I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N, + }; + + static const struct group_dual group15 = { { +diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c +index aa34b16e62c2..a069d0dd3ded 100644 +--- a/arch/x86/kvm/ioapic.c ++++ b/arch/x86/kvm/ioapic.c +@@ -96,7 +96,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, + static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic) + { + ioapic->rtc_status.pending_eoi = 0; +- bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID); ++ bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID + 1); + } + + static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic); +diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h +index 1cc6e54436db..2f3df43489f2 100644 +--- a/arch/x86/kvm/ioapic.h ++++ b/arch/x86/kvm/ioapic.h +@@ -42,13 +42,13 @@ struct kvm_vcpu; + + struct dest_map { + /* vcpu bitmap where IRQ has been sent */ +- DECLARE_BITMAP(map, KVM_MAX_VCPU_ID); ++ DECLARE_BITMAP(map, KVM_MAX_VCPU_ID + 1); + + /* + * Vector sent to a given vcpu, only valid when + * the vcpu's bit in map is set + */ +- u8 vectors[KVM_MAX_VCPU_ID]; ++ u8 vectors[KVM_MAX_VCPU_ID + 1]; + }; + + +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index 3988e26af3b5..bfed29a4c2ce 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -1756,7 +1756,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data) + { + struct kvm_lapic *apic = vcpu->arch.apic; + +- if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) || ++ if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) || + apic_lvtt_period(apic)) + return; + +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c +index 3a281a2decde..c16d24ad8356 100644 +--- a/arch/x86/kvm/mmu.c ++++ b/arch/x86/kvm/mmu.c +@@ -3849,7 +3849,7 @@ 
__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, + nonleaf_bit8_rsvd | rsvd_bits(7, 7) | + rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd | +- nonleaf_bit8_rsvd | gbpages_bit_rsvd | ++ gbpages_bit_rsvd | + rsvd_bits(maxphyaddr, 51); + rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | + rsvd_bits(maxphyaddr, 51); +@@ -3927,7 +3927,16 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, + void + reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) + { +- bool uses_nx = context->nx || context->base_role.smep_andnot_wp; ++ /* ++ * KVM uses NX when TDP is disabled to handle a variety of scenarios, ++ * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and ++ * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0. ++ * The iTLB multi-hit workaround can be toggled at any time, so assume ++ * NX can be used by any non-nested shadow MMU to avoid having to reset ++ * MMU contexts. Note, KVM forces EFER.NX=1 when TDP is disabled. ++ */ ++ bool uses_nx = context->nx || !tdp_enabled || ++ context->base_role.smep_andnot_wp; + + /* + * Passing "true" to the last argument is okay; it adds a check +@@ -5321,6 +5330,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) + cond_resched_lock(&kvm->mmu_lock); + } + } ++ kvm_mmu_commit_zap_page(kvm, &invalid_list); + + spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, rcu_idx); +diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h +index 756b14ecc957..df1076b0eabf 100644 +--- a/arch/x86/kvm/mmutrace.h ++++ b/arch/x86/kvm/mmutrace.h +@@ -336,7 +336,7 @@ TRACE_EVENT( + /* These depend on page entry type, so compute them now. */ + __field(bool, r) + __field(bool, x) +- __field(u8, u) ++ __field(signed char, u) + ), + + TP_fast_assign( +diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c +index 84ae4dd261ca..cafdaabf062f 100644 +--- a/arch/x86/kvm/pmu_intel.c ++++ b/arch/x86/kvm/pmu_intel.c +@@ -29,7 +29,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = { + [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES }, + [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, + [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES }, +- [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES }, ++ [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES }, + }; + + /* mapping between fixed pmc index and intel_arch_events array */ +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index 1079228e4fef..03fdeab057d2 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -2734,7 +2734,7 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr + dst->iopm_base_pa = from->iopm_base_pa; + dst->msrpm_base_pa = from->msrpm_base_pa; + dst->tsc_offset = from->tsc_offset; +- dst->asid = from->asid; ++ /* asid not copied, it is handled manually for svm->vmcb. 
*/ + dst->tlb_ctl = from->tlb_ctl; + dst->int_ctl = from->int_ctl; + dst->int_vector = from->int_vector; +@@ -3048,7 +3048,11 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) + svm->nested.intercept = nested_vmcb->control.intercept; + + svm_flush_tlb(&svm->vcpu); +- svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK; ++ svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl & ++ (V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK); ++ ++ svm->vmcb->control.int_ctl |= V_INTR_MASKING_MASK; ++ + if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK) + svm->vcpu.arch.hflags |= HF_VINTR_MASK; + else +@@ -3412,7 +3416,7 @@ static int cr_interception(struct vcpu_svm *svm) + err = 0; + if (cr >= 16) { /* mov to cr */ + cr -= 16; +- val = kvm_register_read(&svm->vcpu, reg); ++ val = kvm_register_readl(&svm->vcpu, reg); + switch (cr) { + case 0: + if (!check_selective_cr0_intercepted(svm, val)) +@@ -3457,7 +3461,7 @@ static int cr_interception(struct vcpu_svm *svm) + kvm_queue_exception(&svm->vcpu, UD_VECTOR); + return 1; + } +- kvm_register_write(&svm->vcpu, reg, val); ++ kvm_register_writel(&svm->vcpu, reg, val); + } + kvm_complete_insn_gp(&svm->vcpu, err); + +@@ -3489,13 +3493,13 @@ static int dr_interception(struct vcpu_svm *svm) + if (dr >= 16) { /* mov to DRn */ + if (!kvm_require_dr(&svm->vcpu, dr - 16)) + return 1; +- val = kvm_register_read(&svm->vcpu, reg); ++ val = kvm_register_readl(&svm->vcpu, reg); + kvm_set_dr(&svm->vcpu, dr - 16, val); + } else { + if (!kvm_require_dr(&svm->vcpu, dr)) + return 1; + kvm_get_dr(&svm->vcpu, dr, &val); +- kvm_register_write(&svm->vcpu, reg, val); ++ kvm_register_writel(&svm->vcpu, reg, val); + } + + skip_emulated_instruction(&svm->vcpu); +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 003c5a599f6d..da44308d1b55 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -8207,7 +8207,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) + return true; + } + +- switch (exit_reason) { ++ switch ((u16)exit_reason) { + case EXIT_REASON_EXCEPTION_NMI: + if (is_nmi(intr_info)) + return false; +@@ -8606,6 +8606,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) + (exit_reason != EXIT_REASON_EXCEPTION_NMI && + exit_reason != EXIT_REASON_EPT_VIOLATION && + exit_reason != EXIT_REASON_PML_FULL && ++ exit_reason != EXIT_REASON_APIC_ACCESS && + exit_reason != EXIT_REASON_TASK_SWITCH)) { + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 0f66f7dd8938..c959720c7593 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -97,6 +97,7 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE); + + static void update_cr8_intercept(struct kvm_vcpu *vcpu); + static void process_nmi(struct kvm_vcpu *vcpu); ++static void process_smi(struct kvm_vcpu *vcpu); + static void enter_smm(struct kvm_vcpu *vcpu); + static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); + +@@ -2304,7 +2305,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + return kvm_mtrr_set_msr(vcpu, msr, data); + case MSR_IA32_APICBASE: + return kvm_set_apic_base(vcpu, msr_info); +- case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: ++ case APIC_BASE_MSR ... 
APIC_BASE_MSR + 0xff: + return kvm_x2apic_msr_write(vcpu, msr, data); + case MSR_IA32_TSCDEADLINE: + kvm_set_lapic_tscdeadline_msr(vcpu, data); +@@ -2576,7 +2577,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + case MSR_IA32_APICBASE: + msr_info->data = kvm_get_apic_base(vcpu); + break; +- case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: ++ case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: + return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); + break; + case MSR_IA32_TSCDEADLINE: +@@ -3199,6 +3200,10 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, + struct kvm_vcpu_events *events) + { + process_nmi(vcpu); ++ ++ if (kvm_check_request(KVM_REQ_SMI, vcpu)) ++ process_smi(vcpu); ++ + events->exception.injected = + vcpu->arch.exception.pending && + !kvm_exception_is_soft(vcpu->arch.exception.nr); +@@ -4263,10 +4268,13 @@ long kvm_arch_vm_ioctl(struct file *filp, + r = -EFAULT; + if (copy_from_user(&u.ps, argp, sizeof u.ps)) + goto out; ++ mutex_lock(&kvm->lock); + r = -ENXIO; + if (!kvm->arch.vpit) +- goto out; ++ goto set_pit_out; + r = kvm_vm_ioctl_set_pit(kvm, &u.ps); ++set_pit_out: ++ mutex_unlock(&kvm->lock); + break; + } + case KVM_GET_PIT2: { +@@ -4286,10 +4294,13 @@ long kvm_arch_vm_ioctl(struct file *filp, + r = -EFAULT; + if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) + goto out; ++ mutex_lock(&kvm->lock); + r = -ENXIO; + if (!kvm->arch.vpit) +- goto out; ++ goto set_pit2_out; + r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); ++set_pit2_out: ++ mutex_unlock(&kvm->lock); + break; + } + case KVM_REINJECT_CONTROL: { +@@ -6257,6 +6268,7 @@ void kvm_arch_exit(void) + cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE); + #ifdef CONFIG_X86_64 + pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); ++ cancel_work_sync(&pvclock_gtod_work); + #endif + kvm_x86_ops = NULL; + kvm_mmu_module_exit(); +@@ -7032,6 +7044,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) + set_debugreg(vcpu->arch.eff_db[3], 3); + set_debugreg(vcpu->arch.dr6, 6); + vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; ++ } else if (unlikely(hw_breakpoint_active())) { ++ set_debugreg(0, 7); + } + + kvm_x86_ops->run(vcpu); +diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c +index ce68b6a9d7d1..e28c106a7c0c 100644 +--- a/arch/x86/lib/msr-smp.c ++++ b/arch/x86/lib/msr-smp.c +@@ -239,7 +239,7 @@ static void __wrmsr_safe_regs_on_cpu(void *info) + rv->err = wrmsr_safe_regs(rv->regs); + } + +-int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs) ++int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]) + { + int err; + struct msr_regs_info rv; +@@ -252,7 +252,7 @@ int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs) + } + EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu); + +-int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs) ++int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]) + { + int err; + struct msr_regs_info rv; +diff --git a/arch/x86/math-emu/wm_sqrt.S b/arch/x86/math-emu/wm_sqrt.S +index d258f59564e1..3b40c98bbbd4 100644 +--- a/arch/x86/math-emu/wm_sqrt.S ++++ b/arch/x86/math-emu/wm_sqrt.S +@@ -208,7 +208,7 @@ sqrt_stage_2_finish: + + #ifdef PARANOID + /* It should be possible to get here only if the arg is ffff....ffff */ +- cmp $0xffffffff,FPU_fsqrt_arg_1 ++ cmpl $0xffffffff,FPU_fsqrt_arg_1 + jnz sqrt_stage_2_error + #endif /* PARANOID */ + +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c +index ce092a62fc5d..bc2455c2fcab 100644 +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c +@@ -110,8 +110,6 @@ __ref void 
*alloc_low_pages(unsigned int num) + } else { + pfn = pgt_buf_end; + pgt_buf_end += num; +- printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n", +- pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1); + } + + for (i = 0; i < num; i++) { +diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c +index 08e0380414a9..b68b102f6774 100644 +--- a/arch/x86/mm/pgtable.c ++++ b/arch/x86/mm/pgtable.c +@@ -697,6 +697,8 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr) + } + + free_page((unsigned long)pmd_sv); ++ ++ pgtable_pmd_page_dtor(virt_to_page(pmd)); + free_page((unsigned long)pmd); + + return 1; +diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c +index eb5734112cb4..1d1434f9c5a6 100644 +--- a/arch/x86/net/bpf_jit_comp.c ++++ b/arch/x86/net/bpf_jit_comp.c +@@ -1082,7 +1082,16 @@ xadd: if (is_imm8(insn->off)) + } + + if (image) { +- if (unlikely(proglen + ilen > oldproglen)) { ++ /* ++ * When populating the image, assert that: ++ * ++ * i) We do not write beyond the allocated space, and ++ * ii) addrs[i] did not change from the prior run, in order ++ * to validate assumptions made for computing branch ++ * displacements. ++ */ ++ if (unlikely(proglen + ilen > oldproglen || ++ proglen + ilen != addrs[i])) { + pr_err("bpf_jit_compile fatal error\n"); + return -EFAULT; + } +diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c +index 62950ef7f84e..68e86d7cc94d 100644 +--- a/arch/x86/pci/fixup.c ++++ b/arch/x86/pci/fixup.c +@@ -571,6 +571,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar); + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar); ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar); + + /* + * Device [1022:7914] +diff --git a/arch/x86/tools/chkobjdump.awk b/arch/x86/tools/chkobjdump.awk +index fd1ab80be0de..a4cf678cf5c8 100644 +--- a/arch/x86/tools/chkobjdump.awk ++++ b/arch/x86/tools/chkobjdump.awk +@@ -10,6 +10,7 @@ BEGIN { + + /^GNU objdump/ { + verstr = "" ++ gsub(/\(.*\)/, ""); + for (i = 3; i <= NF; i++) + if (match($(i), "^[0-9]")) { + verstr = $(i); +diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c +index 5b6c8486a0be..d1c3f82c7882 100644 +--- a/arch/x86/tools/relocs.c ++++ b/arch/x86/tools/relocs.c +@@ -839,9 +839,11 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, + case R_386_PC32: + case R_386_PC16: + case R_386_PC8: ++ case R_386_PLT32: + /* +- * NONE can be ignored and PC relative relocations don't +- * need to be adjusted. ++ * NONE can be ignored and PC relative relocations don't need ++ * to be adjusted. Because sym must be defined, R_386_PLT32 can ++ * be treated the same way as R_386_PC32. + */ + break; + +@@ -882,9 +884,11 @@ static int do_reloc_real(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, + case R_386_PC32: + case R_386_PC16: + case R_386_PC8: ++ case R_386_PLT32: + /* +- * NONE can be ignored and PC relative relocations don't +- * need to be adjusted. ++ * NONE can be ignored and PC relative relocations don't need ++ * to be adjusted. Because sym must be defined, R_386_PLT32 can ++ * be treated the same way as R_386_PC32. 
+ */ + break; + +diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c +index 37129db76d33..d6ed664c1e39 100644 +--- a/arch/x86/xen/p2m.c ++++ b/arch/x86/xen/p2m.c +@@ -723,9 +723,12 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, + + for (i = 0; i < count; i++) { + unsigned long mfn, pfn; ++ struct gnttab_unmap_grant_ref unmap[2]; ++ int rc; + + /* Do not add to override if the map failed. */ +- if (map_ops[i].status) ++ if (map_ops[i].status != GNTST_okay || ++ (kmap_ops && kmap_ops[i].status != GNTST_okay)) + continue; + + if (map_ops[i].flags & GNTMAP_contains_pte) { +@@ -739,10 +742,46 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, + + WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned"); + +- if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) { +- ret = -ENOMEM; +- goto out; ++ if (likely(set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) ++ continue; ++ ++ /* ++ * Signal an error for this slot. This in turn requires ++ * immediate unmapping. ++ */ ++ map_ops[i].status = GNTST_general_error; ++ unmap[0].host_addr = map_ops[i].host_addr, ++ unmap[0].handle = map_ops[i].handle; ++ map_ops[i].handle = ~0; ++ if (map_ops[i].flags & GNTMAP_device_map) ++ unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr; ++ else ++ unmap[0].dev_bus_addr = 0; ++ ++ if (kmap_ops) { ++ kmap_ops[i].status = GNTST_general_error; ++ unmap[1].host_addr = kmap_ops[i].host_addr, ++ unmap[1].handle = kmap_ops[i].handle; ++ kmap_ops[i].handle = ~0; ++ if (kmap_ops[i].flags & GNTMAP_device_map) ++ unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr; ++ else ++ unmap[1].dev_bus_addr = 0; + } ++ ++ /* ++ * Pre-populate both status fields, to be recognizable in ++ * the log message below. ++ */ ++ unmap[0].status = 1; ++ unmap[1].status = 1; ++ ++ rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, ++ unmap, 1 + !!kmap_ops); ++ if (rc || unmap[0].status != GNTST_okay || ++ unmap[1].status != GNTST_okay) ++ pr_err_once("gnttab unmap failed: rc=%d st0=%d st1=%d\n", ++ rc, unmap[0].status, unmap[1].status); + } + + out: +@@ -763,17 +802,15 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, + unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); + unsigned long pfn = page_to_pfn(pages[i]); + +- if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) { ++ if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT)) ++ set_phys_to_machine(pfn, INVALID_P2M_ENTRY); ++ else + ret = -EINVAL; +- goto out; +- } +- +- set_phys_to_machine(pfn, INVALID_P2M_ENTRY); + } + if (kunmap_ops) + ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, +- kunmap_ops, count); +-out: ++ kunmap_ops, count) ?: ret; ++ + return ret; + } + EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping); +diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c +index 8d2c6f071dcc..44bf8a22c97b 100644 +--- a/arch/x86/xen/spinlock.c ++++ b/arch/x86/xen/spinlock.c +@@ -98,10 +98,20 @@ void xen_init_lock_cpu(int cpu) + + void xen_uninit_lock_cpu(int cpu) + { ++ int irq; ++ + if (!xen_pvspin) + return; + +- unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL); ++ /* ++ * When booting the kernel with 'mitigations=auto,nosmt', the secondary ++ * CPUs are not activated, and lock_kicker_irq is not initialized. 
++ */ ++ irq = per_cpu(lock_kicker_irq, cpu); ++ if (irq == -1) ++ return; ++ ++ unbind_from_irqhandler(irq, NULL); + per_cpu(lock_kicker_irq, cpu) = -1; + kfree(per_cpu(irq_name, cpu)); + per_cpu(irq_name, cpu) = NULL; +diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c +index 0fecc8a2c0b5..f6dd8e148be8 100644 +--- a/arch/xtensa/kernel/perf_event.c ++++ b/arch/xtensa/kernel/perf_event.c +@@ -404,7 +404,7 @@ static struct pmu xtensa_pmu = { + .read = xtensa_pmu_read, + }; + +-static int xtensa_pmu_setup(int cpu) ++static int xtensa_pmu_setup(unsigned int cpu) + { + unsigned i; + +diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c +index b9beae798d72..8679fa306206 100644 +--- a/arch/xtensa/kernel/setup.c ++++ b/arch/xtensa/kernel/setup.c +@@ -830,7 +830,8 @@ c_start(struct seq_file *f, loff_t *pos) + static void * + c_next(struct seq_file *f, void *v, loff_t *pos) + { +- return NULL; ++ ++*pos; ++ return c_start(f, pos); + } + + static void +diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c +index 9210b9cc4ec9..455c6ec4086c 100644 +--- a/arch/xtensa/kernel/xtensa_ksyms.c ++++ b/arch/xtensa/kernel/xtensa_ksyms.c +@@ -82,13 +82,13 @@ void __xtensa_libgcc_window_spill(void) + } + EXPORT_SYMBOL(__xtensa_libgcc_window_spill); + +-unsigned long __sync_fetch_and_and_4(unsigned long *p, unsigned long v) ++unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v) + { + BUG(); + } + EXPORT_SYMBOL(__sync_fetch_and_and_4); + +-unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v) ++unsigned int __sync_fetch_and_or_4(volatile void *p, unsigned int v) + { + BUG(); + } +diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c +index 3c75c4e597da..7aaedeebb35d 100644 +--- a/arch/xtensa/mm/cache.c ++++ b/arch/xtensa/mm/cache.c +@@ -74,8 +74,10 @@ static inline void kmap_invalidate_coherent(struct page *page, + kvaddr = TLBTEMP_BASE_1 + + (page_to_phys(page) & DCACHE_ALIAS_MASK); + ++ preempt_disable(); + __invalidate_dcache_page_alias(kvaddr, + page_to_phys(page)); ++ preempt_enable(); + } + } + } +@@ -160,6 +162,7 @@ void flush_dcache_page(struct page *page) + if (!alias && !mapping) + return; + ++ preempt_disable(); + virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK); + __flush_invalidate_dcache_page_alias(virt, phys); + +@@ -170,6 +173,7 @@ void flush_dcache_page(struct page *page) + + if (mapping) + __invalidate_icache_page_alias(virt, phys); ++ preempt_enable(); + } + + /* There shouldn't be an entry in the cache for this page anymore. 
*/ +@@ -203,8 +207,10 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address, + unsigned long phys = page_to_phys(pfn_to_page(pfn)); + unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK); + ++ preempt_disable(); + __flush_invalidate_dcache_page_alias(virt, phys); + __invalidate_icache_page_alias(virt, phys); ++ preempt_enable(); + } + EXPORT_SYMBOL(local_flush_cache_page); + +@@ -231,11 +237,13 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) + unsigned long phys = page_to_phys(page); + unsigned long tmp; + ++ preempt_disable(); + tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK); + __flush_invalidate_dcache_page_alias(tmp, phys); + tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK); + __flush_invalidate_dcache_page_alias(tmp, phys); + __invalidate_icache_page_alias(tmp, phys); ++ preempt_enable(); + + clear_bit(PG_arch_1, &page->flags); + } +@@ -269,7 +277,9 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + + if (alias) { + unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); ++ preempt_disable(); + __flush_invalidate_dcache_page_alias(t, phys); ++ preempt_enable(); + } + + /* Copy data */ +@@ -284,9 +294,11 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + if (alias) { + unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); + ++ preempt_disable(); + __flush_invalidate_dcache_range((unsigned long) dst, len); + if ((vma->vm_flags & VM_EXEC) != 0) + __invalidate_icache_page_alias(t, phys); ++ preempt_enable(); + + } else if ((vma->vm_flags & VM_EXEC) != 0) { + __flush_dcache_range((unsigned long)dst,len); +@@ -308,7 +320,9 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page, + + if (alias) { + unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK); ++ preempt_disable(); + __flush_invalidate_dcache_page_alias(t, phys); ++ preempt_enable(); + } + + memcpy(dst, src, len); +diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c +index ede04cca30dd..82fb5102d824 100644 +--- a/arch/xtensa/platforms/iss/simdisk.c ++++ b/arch/xtensa/platforms/iss/simdisk.c +@@ -21,7 +21,6 @@ + #include + + #define SIMDISK_MAJOR 240 +-#define SECTOR_SHIFT 9 + #define SIMDISK_MINORS 1 + #define MAX_SIMDISK_COUNT 10 + +diff --git a/block/blk-mq.c b/block/blk-mq.c +index 098279a84549..76b1acad2da1 100644 +--- a/block/blk-mq.c ++++ b/block/blk-mq.c +@@ -2309,6 +2309,10 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) + + list_for_each_entry(q, &set->tag_list, tag_set_list) + blk_mq_freeze_queue(q); ++ /* ++ * Sync with blk_mq_queue_tag_busy_iter. ++ */ ++ synchronize_rcu(); + + set->nr_hw_queues = nr_hw_queues; + list_for_each_entry(q, &set->tag_list, tag_set_list) { +@@ -2324,10 +2328,6 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) + + list_for_each_entry(q, &set->tag_list, tag_set_list) + blk_mq_unfreeze_queue(q); +- /* +- * Sync with blk_mq_queue_tag_busy_iter. 
+- */ +- synchronize_rcu(); + } + EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); + +diff --git a/block/blk-settings.c b/block/blk-settings.c +index 724febdf2282..088a5084b52f 100755 +--- a/block/blk-settings.c ++++ b/block/blk-settings.c +@@ -495,6 +495,14 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) + } + EXPORT_SYMBOL(blk_queue_stack_limits); + ++static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs) ++{ ++ sectors = round_down(sectors, lbs >> SECTOR_SHIFT); ++ if (sectors < PAGE_SIZE >> SECTOR_SHIFT) ++ sectors = PAGE_SIZE >> SECTOR_SHIFT; ++ return sectors; ++} ++ + /** + * blk_stack_limits - adjust queue_limits for stacked devices + * @t: the stacking driver limits (top device) +@@ -607,6 +615,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, + ret = -1; + } + ++ t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size); ++ t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size); ++ t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size); ++ + /* Discard alignment and granularity */ + if (b->discard_granularity) { + alignment = queue_limit_discard_alignment(b, start); +diff --git a/block/genhd.c b/block/genhd.c +index 6ad0fd0dfd2d..7829807814c4 100644 +--- a/block/genhd.c ++++ b/block/genhd.c +@@ -159,14 +159,17 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter) + part = rcu_dereference(ptbl->part[piter->idx]); + if (!part) + continue; ++ get_device(part_to_dev(part)); ++ piter->part = part; + if (!part_nr_sects_read(part) && + !(piter->flags & DISK_PITER_INCL_EMPTY) && + !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 && +- piter->idx == 0)) ++ piter->idx == 0)) { ++ put_device(part_to_dev(part)); ++ piter->part = NULL; + continue; ++ } + +- get_device(part_to_dev(part)); +- piter->part = part; + piter->idx += inc; + break; + } +diff --git a/build.config.aarch64 b/build.config.aarch64 +index 523bbc0449f7..357db0220511 100644 +--- a/build.config.aarch64 ++++ b/build.config.aarch64 +@@ -5,6 +5,7 @@ CROSS_COMPILE=aarch64-linux-androidkernel- + LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin + + FILES=" ++arch/arm64/boot/Image + arch/arm64/boot/Image.gz + vmlinux + System.map +diff --git a/crypto/af_alg.c b/crypto/af_alg.c +index cf3975ee4fd8..9fcd7f5adf74 100644 +--- a/crypto/af_alg.c ++++ b/crypto/af_alg.c +@@ -121,8 +121,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private) + + int af_alg_release(struct socket *sock) + { +- if (sock->sk) ++ if (sock->sk) { + sock_put(sock->sk); ++ sock->sk = NULL; ++ } + return 0; + } + EXPORT_SYMBOL_GPL(af_alg_release); +@@ -130,21 +132,15 @@ EXPORT_SYMBOL_GPL(af_alg_release); + void af_alg_release_parent(struct sock *sk) + { + struct alg_sock *ask = alg_sk(sk); +- unsigned int nokey = ask->nokey_refcnt; +- bool last = nokey && !ask->refcnt; ++ unsigned int nokey = atomic_read(&ask->nokey_refcnt); + + sk = ask->parent; + ask = alg_sk(sk); + +- local_bh_disable(); +- bh_lock_sock(sk); +- ask->nokey_refcnt -= nokey; +- if (!last) +- last = !--ask->refcnt; +- bh_unlock_sock(sk); +- local_bh_enable(); ++ if (nokey) ++ atomic_dec(&ask->nokey_refcnt); + +- if (last) ++ if (atomic_dec_and_test(&ask->refcnt)) + sock_put(sk); + } + EXPORT_SYMBOL_GPL(af_alg_release_parent); +@@ -189,7 +185,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + + err = -EBUSY; + 
lock_sock(sk); +- if (ask->refcnt | ask->nokey_refcnt) ++ if (atomic_read(&ask->refcnt)) + goto unlock; + + swap(ask->type, type); +@@ -238,7 +234,7 @@ static int alg_setsockopt(struct socket *sock, int level, int optname, + int err = -EBUSY; + + lock_sock(sk); +- if (ask->refcnt) ++ if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt)) + goto unlock; + + type = ask->type; +@@ -305,12 +301,14 @@ int af_alg_accept(struct sock *sk, struct socket *newsock) + + sk2->sk_family = PF_ALG; + +- if (nokey || !ask->refcnt++) ++ if (atomic_inc_return_relaxed(&ask->refcnt) == 1) + sock_hold(sk); +- ask->nokey_refcnt += nokey; ++ if (nokey) { ++ atomic_inc(&ask->nokey_refcnt); ++ atomic_set(&alg_sk(sk2)->nokey_refcnt, 1); ++ } + alg_sk(sk2)->parent = sk; + alg_sk(sk2)->type = type; +- alg_sk(sk2)->nokey_refcnt = nokey; + + newsock->ops = type->ops; + newsock->state = SS_CONNECTED; +diff --git a/crypto/algboss.c b/crypto/algboss.c +index 6e39d9c05b98..5cbc588555ca 100644 +--- a/crypto/algboss.c ++++ b/crypto/algboss.c +@@ -194,8 +194,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) + if (IS_ERR(thread)) + goto err_put_larval; + +- wait_for_completion_interruptible(&larval->completion); +- + return NOTIFY_STOP; + + err_put_larval: +diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c +index 6c11537ca404..ba818a738f9a 100644 +--- a/crypto/algif_aead.c ++++ b/crypto/algif_aead.c +@@ -455,7 +455,7 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg, + memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm)); + aead_request_set_tfm(req, tfm); + aead_request_set_ad(req, ctx->aead_assoclen); +- aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, ++ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, + aead_async_cb, req); + used -= ctx->aead_assoclen; + +@@ -747,7 +747,7 @@ static int aead_check_key(struct socket *sock) + struct alg_sock *ask = alg_sk(sk); + + lock_sock(sk); +- if (ask->refcnt) ++ if (!atomic_read(&ask->nokey_refcnt)) + goto unlock_child; + + psk = ask->parent; +@@ -759,11 +759,8 @@ static int aead_check_key(struct socket *sock) + if (!tfm->has_key) + goto unlock; + +- if (!pask->refcnt++) +- sock_hold(psk); +- +- ask->refcnt = 1; +- sock_put(psk); ++ atomic_dec(&pask->nokey_refcnt); ++ atomic_set(&ask->nokey_refcnt, 0); + + err = 0; + +@@ -928,7 +925,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) + ask->private = ctx; + + aead_request_set_tfm(&ctx->aead_req, aead); +- aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, ++ aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, + af_alg_complete, &ctx->completion); + + sk->sk_destruct = aead_sock_destruct; +diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c +index 731b5fb8567b..7ae4f79cc839 100644 +--- a/crypto/algif_hash.c ++++ b/crypto/algif_hash.c +@@ -308,7 +308,7 @@ static int hash_check_key(struct socket *sock) + struct alg_sock *ask = alg_sk(sk); + + lock_sock(sk); +- if (ask->refcnt) ++ if (!atomic_read(&ask->nokey_refcnt)) + goto unlock_child; + + psk = ask->parent; +@@ -320,11 +320,8 @@ static int hash_check_key(struct socket *sock) + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + goto unlock; + +- if (!pask->refcnt++) +- sock_hold(psk); +- +- ask->refcnt = 1; +- sock_put(psk); ++ atomic_dec(&pask->nokey_refcnt); ++ atomic_set(&ask->nokey_refcnt, 0); + + err = 0; + +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c +index b28f45aca2ef..931a777a4ec4 100644 +--- a/crypto/algif_skcipher.c ++++ 
b/crypto/algif_skcipher.c +@@ -774,7 +774,7 @@ static int skcipher_check_key(struct socket *sock) + struct alg_sock *ask = alg_sk(sk); + + lock_sock(sk); +- if (ask->refcnt) ++ if (!atomic_read(&ask->nokey_refcnt)) + goto unlock_child; + + psk = ask->parent; +@@ -786,11 +786,8 @@ static int skcipher_check_key(struct socket *sock) + if (!tfm->has_key) + goto unlock; + +- if (!pask->refcnt++) +- sock_hold(psk); +- +- ask->refcnt = 1; +- sock_put(psk); ++ atomic_dec(&pask->nokey_refcnt); ++ atomic_set(&ask->nokey_refcnt, 0); + + err = 0; + +diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c +index 3cd8a2414e60..de43ffb53840 100644 +--- a/crypto/ecdh_helper.c ++++ b/crypto/ecdh_helper.c +@@ -71,6 +71,9 @@ int crypto_ecdh_decode_key(const char *buf, unsigned int len, + if (secret.type != CRYPTO_KPP_SECRET_TYPE_ECDH) + return -EINVAL; + ++ if (unlikely(len < secret.len)) ++ return -EINVAL; ++ + ptr = ecdh_unpack_data(¶ms->curve_id, ptr, sizeof(params->curve_id)); + ptr = ecdh_unpack_data(¶ms->key_size, ptr, sizeof(params->key_size)); + if (secret.len != crypto_ecdh_key_len(params)) +diff --git a/crypto/shash.c b/crypto/shash.c +index a1c7609578ea..7eebf3cde7b7 100644 +--- a/crypto/shash.c ++++ b/crypto/shash.c +@@ -24,12 +24,24 @@ + + static const struct crypto_type crypto_shash_type; + +-int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, +- unsigned int keylen) ++static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, ++ unsigned int keylen) + { + return -ENOSYS; + } +-EXPORT_SYMBOL_GPL(shash_no_setkey); ++ ++/* ++ * Check whether an shash algorithm has a setkey function. ++ * ++ * For CFI compatibility, this must not be an inline function. This is because ++ * when CFI is enabled, modules won't get the same address for shash_no_setkey ++ * (if it were exported, which inlining would require) as the core kernel will. 
++ */ ++bool crypto_shash_alg_has_setkey(struct shash_alg *alg) ++{ ++ return alg->setkey != shash_no_setkey; ++} ++EXPORT_SYMBOL_GPL(crypto_shash_alg_has_setkey); + + static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen) +diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile +index 4c5678cfa9c4..c466d7bc861a 100644 +--- a/drivers/acpi/Makefile ++++ b/drivers/acpi/Makefile +@@ -7,6 +7,11 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT + # + # ACPI Boot-Time Table Parsing + # ++ifeq ($(CONFIG_ACPI_CUSTOM_DSDT),y) ++tables.o: $(src)/../../include/$(subst $\",,$(CONFIG_ACPI_CUSTOM_DSDT_FILE)) ; ++ ++endif ++ + obj-$(CONFIG_ACPI) += tables.o + obj-$(CONFIG_X86) += blacklist.o + +diff --git a/drivers/acpi/acpi_amba.c b/drivers/acpi/acpi_amba.c +index 7f77c071709a..eb09ee71ceb2 100644 +--- a/drivers/acpi/acpi_amba.c ++++ b/drivers/acpi/acpi_amba.c +@@ -70,6 +70,7 @@ static int amba_handler_attach(struct acpi_device *adev, + case IORESOURCE_MEM: + if (!address_found) { + dev->res = *rentry->res; ++ dev->res.name = dev_name(&dev->dev); + address_found = true; + } + break; +diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c +index 146a77fb762d..11bd2d3da886 100644 +--- a/drivers/acpi/acpi_configfs.c ++++ b/drivers/acpi/acpi_configfs.c +@@ -251,7 +251,12 @@ static int __init acpi_configfs_init(void) + + acpi_table_group = configfs_register_default_group(root, "table", + &acpi_tables_type); +- return PTR_ERR_OR_ZERO(acpi_table_group); ++ if (IS_ERR(acpi_table_group)) { ++ configfs_unregister_subsystem(&acpi_configfs); ++ return PTR_ERR(acpi_table_group); ++ } ++ ++ return 0; + } + module_init(acpi_configfs_init); + +diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c +index dee86925a9a1..1cc38ca54817 100644 +--- a/drivers/acpi/acpi_dbg.c ++++ b/drivers/acpi/acpi_dbg.c +@@ -757,6 +757,9 @@ int __init acpi_aml_init(void) + goto err_exit; + } + ++ if (acpi_disabled) ++ return -ENODEV; ++ + /* Initialize AML IO interface */ + mutex_init(&acpi_aml_io.lock); + init_waitqueue_head(&acpi_aml_io.wait); +diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c +index b3842ffc19ba..46d201fc7ecc 100644 +--- a/drivers/acpi/acpi_extlog.c ++++ b/drivers/acpi/acpi_extlog.c +@@ -223,9 +223,9 @@ static int __init extlog_init(void) + u64 cap; + int rc; + +- rdmsrl(MSR_IA32_MCG_CAP, cap); +- +- if (!(cap & MCG_ELOG_P) || !extlog_get_l1addr()) ++ if (rdmsrl_safe(MSR_IA32_MCG_CAP, &cap) || ++ !(cap & MCG_ELOG_P) || ++ !extlog_get_l1addr()) + return -ENODEV; + + if (get_edac_report_status() == EDAC_REPORTING_FORCE) { +diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c +index 67d97c0090a2..5d72baf60ac8 100644 +--- a/drivers/acpi/acpi_pnp.c ++++ b/drivers/acpi/acpi_pnp.c +@@ -320,6 +320,9 @@ static bool matching_id(const char *idstr, const char *list_id) + { + int i; + ++ if (strlen(idstr) != strlen(list_id)) ++ return false; ++ + if (memcmp(idstr, list_id, 3)) + return false; + +diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c +index aed8d3459220..c2c391d5c5a1 100644 +--- a/drivers/acpi/acpica/exprep.c ++++ b/drivers/acpi/acpica/exprep.c +@@ -507,10 +507,6 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) + (u8)access_byte_width; + } + } +- /* An additional reference for the container */ +- +- acpi_ut_add_reference(obj_desc->field.region_obj); +- + ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, + "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n", + 
obj_desc->field.start_field_bit_offset, +diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c +index 529d6c38ea7c..03a2282ceb9c 100644 +--- a/drivers/acpi/acpica/utdelete.c ++++ b/drivers/acpi/acpica/utdelete.c +@@ -591,11 +591,6 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) + next_object = object->buffer_field.buffer_obj; + break; + +- case ACPI_TYPE_LOCAL_REGION_FIELD: +- +- next_object = object->field.region_obj; +- break; +- + case ACPI_TYPE_LOCAL_BANK_FIELD: + + next_object = object->bank_field.bank_obj; +@@ -636,6 +631,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) + } + break; + ++ case ACPI_TYPE_LOCAL_REGION_FIELD: + case ACPI_TYPE_REGION: + default: + +diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c +index 6b2c9d68d810..1c13e5fe10d9 100644 +--- a/drivers/acpi/bus.c ++++ b/drivers/acpi/bus.c +@@ -1184,6 +1184,7 @@ static int __init acpi_init(void) + init_acpi_device_notify(); + result = acpi_bus_init(); + if (result) { ++ kobject_put(acpi_kobj); + disable_acpi(); + return result; + } +diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c +index 9ec4618df533..318bdfb8703c 100644 +--- a/drivers/acpi/cppc_acpi.c ++++ b/drivers/acpi/cppc_acpi.c +@@ -793,8 +793,10 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) + + ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj, + "acpi_cppc"); +- if (ret) ++ if (ret) { ++ kobject_put(&cpc_ptr->kobj); + goto out_free; ++ } + + kfree(output.pointer); + return 0; +diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c +index 435bd0ffc8c0..ea4c7c93a920 100644 +--- a/drivers/acpi/custom_method.c ++++ b/drivers/acpi/custom_method.c +@@ -37,6 +37,8 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, + sizeof(struct acpi_table_header))) + return -EFAULT; + uncopied_bytes = max_size = table.length; ++ /* make sure the buf is not allocated */ ++ kfree(buf); + buf = kzalloc(max_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; +@@ -50,6 +52,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, + (*ppos + count < count) || + (count > uncopied_bytes)) { + kfree(buf); ++ buf = NULL; + return -EINVAL; + } + +@@ -71,7 +74,6 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, + add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE); + } + +- kfree(buf); + return count; + } + +diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c +index 245bcdb44c64..442aac84ab88 100644 +--- a/drivers/acpi/device_pm.c ++++ b/drivers/acpi/device_pm.c +@@ -171,7 +171,7 @@ int acpi_device_set_power(struct acpi_device *device, int state) + * possibly drop references to the power resources in use. + */ + state = ACPI_STATE_D3_HOT; +- /* If _PR3 is not available, use D3hot as the target state. */ ++ /* If D3cold is not supported, use D3hot as the target state. 
*/ + if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid) + target_state = state; + } else if (!device->power.states[state].flags.valid) { +diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c +index 98b513d049f6..152ba55fd908 100644 +--- a/drivers/acpi/device_sysfs.c ++++ b/drivers/acpi/device_sysfs.c +@@ -259,20 +259,12 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev, + if (add_uevent_var(env, "MODALIAS=")) + return -ENOMEM; + +- len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], +- sizeof(env->buf) - env->buflen); +- if (len < 0) +- return len; +- +- env->buflen += len; +- if (!adev->data.of_compatible) +- return 0; +- +- if (len > 0 && add_uevent_var(env, "MODALIAS=")) +- return -ENOMEM; +- +- len = create_of_modalias(adev, &env->buf[env->buflen - 1], +- sizeof(env->buf) - env->buflen); ++ if (adev->data.of_compatible) ++ len = create_of_modalias(adev, &env->buf[env->buflen - 1], ++ sizeof(env->buf) - env->buflen); ++ else ++ len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], ++ sizeof(env->buf) - env->buflen); + if (len < 0) + return len; + +@@ -460,7 +452,7 @@ static ssize_t description_show(struct device *dev, + (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer, + acpi_dev->pnp.str_obj->buffer.length, + UTF16_LITTLE_ENDIAN, buf, +- PAGE_SIZE); ++ PAGE_SIZE - 1); + + buf[result++] = '\n'; + +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c +index 7b665aaa6a57..077cd0bc78ae 100644 +--- a/drivers/acpi/ec.c ++++ b/drivers/acpi/ec.c +@@ -1049,29 +1049,21 @@ void acpi_ec_unblock_transactions(void) + /* -------------------------------------------------------------------------- + Event Management + -------------------------------------------------------------------------- */ +-static struct acpi_ec_query_handler * +-acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler) +-{ +- if (handler) +- kref_get(&handler->kref); +- return handler; +-} +- + static struct acpi_ec_query_handler * + acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value) + { + struct acpi_ec_query_handler *handler; +- bool found = false; + + mutex_lock(&ec->mutex); + list_for_each_entry(handler, &ec->list, node) { + if (value == handler->query_bit) { +- found = true; +- break; ++ kref_get(&handler->kref); ++ mutex_unlock(&ec->mutex); ++ return handler; + } + } + mutex_unlock(&ec->mutex); +- return found ? acpi_ec_get_query_handler(handler) : NULL; ++ return NULL; + } + + static void acpi_ec_query_handler_release(struct kref *kref) +diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c +index 46f060356a22..73116acd391d 100644 +--- a/drivers/acpi/evged.c ++++ b/drivers/acpi/evged.c +@@ -82,6 +82,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares, + struct resource r; + struct acpi_resource_irq *p = &ares->data.irq; + struct acpi_resource_extended_irq *pext = &ares->data.extended_irq; ++ char ev_name[5]; ++ u8 trigger; + + if (ares->type == ACPI_RESOURCE_TYPE_END_TAG) + return AE_OK; +@@ -90,14 +92,28 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares, + dev_err(dev, "unable to parse IRQ resource\n"); + return AE_ERROR; + } +- if (ares->type == ACPI_RESOURCE_TYPE_IRQ) ++ if (ares->type == ACPI_RESOURCE_TYPE_IRQ) { + gsi = p->interrupts[0]; +- else ++ trigger = p->triggering; ++ } else { + gsi = pext->interrupts[0]; ++ trigger = pext->triggering; ++ } + + irq = r.start; + +- if (ACPI_FAILURE(acpi_get_handle(handle, "_EVT", &evt_handle))) { ++ switch (gsi) { ++ case 0 ... 
255: ++ sprintf(ev_name, "_%c%02X", ++ trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi); ++ ++ if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) ++ break; ++ /* fall through */ ++ default: ++ if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle))) ++ break; ++ + dev_err(dev, "cannot locate _EVT method\n"); + return AE_ERROR; + } +diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h +index b012e94b7d9f..56c429ea6aaf 100644 +--- a/drivers/acpi/internal.h ++++ b/drivers/acpi/internal.h +@@ -18,6 +18,8 @@ + #ifndef _ACPI_INTERNAL_H_ + #define _ACPI_INTERNAL_H_ + ++#include ++ + #define PREFIX "ACPI: " + + int early_acpi_osi_init(void); +@@ -97,9 +99,11 @@ void acpi_scan_table_handler(u32 event, void *table, void *context); + + extern struct list_head acpi_bus_id_list; + ++#define ACPI_MAX_DEVICE_INSTANCES 4096 ++ + struct acpi_device_bus_id { +- char bus_id[15]; +- unsigned int instance_no; ++ const char *bus_id; ++ struct ida instance_ida; + struct list_head node; + }; + +diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c +index 31a07609f7a2..4dddf579560f 100644 +--- a/drivers/acpi/nfit/core.c ++++ b/drivers/acpi/nfit/core.c +@@ -1219,7 +1219,7 @@ static ssize_t format1_show(struct device *dev, + le16_to_cpu(nfit_dcr->dcr->code)); + break; + } +- if (rc != ENXIO) ++ if (rc != -ENXIO) + break; + } + mutex_unlock(&acpi_desc->init_mutex); +@@ -2258,6 +2258,9 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, + struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; + struct nd_mapping_desc *mapping; + ++ /* range index 0 == unmapped in SPA or invalid-SPA */ ++ if (memdev->range_index == 0 || spa->range_index == 0) ++ continue; + if (memdev->range_index != spa->range_index) + continue; + if (count >= ND_MAX_MAPPINGS) { +diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c +index 2237d3f24f0e..8242e16f57c6 100644 +--- a/drivers/acpi/processor_idle.c ++++ b/drivers/acpi/processor_idle.c +@@ -29,6 +29,7 @@ + #include + #include + #include /* need_resched() */ ++#include + #include + #include + #include +@@ -538,10 +539,37 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, + return; + } + ++static int acpi_cst_latency_cmp(const void *a, const void *b) ++{ ++ const struct acpi_processor_cx *x = a, *y = b; ++ ++ if (!(x->valid && y->valid)) ++ return 0; ++ if (x->latency > y->latency) ++ return 1; ++ if (x->latency < y->latency) ++ return -1; ++ return 0; ++} ++static void acpi_cst_latency_swap(void *a, void *b, int n) ++{ ++ struct acpi_processor_cx *x = a, *y = b; ++ u32 tmp; ++ ++ if (!(x->valid && y->valid)) ++ return; ++ tmp = x->latency; ++ x->latency = y->latency; ++ y->latency = tmp; ++} ++ + static int acpi_processor_power_verify(struct acpi_processor *pr) + { + unsigned int i; + unsigned int working = 0; ++ unsigned int last_latency = 0; ++ unsigned int last_type = 0; ++ bool buggy_latency = false; + + pr->power.timer_broadcast_on_state = INT_MAX; + +@@ -565,12 +593,24 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) + } + if (!cx->valid) + continue; ++ if (cx->type >= last_type && cx->latency < last_latency) ++ buggy_latency = true; ++ last_latency = cx->latency; ++ last_type = cx->type; + + lapic_timer_check_state(i, pr, cx); + tsc_check_state(cx->type); + working++; + } + ++ if (buggy_latency) { ++ pr_notice("FW issue: working around C-state latencies out of order\n"); ++ sort(&pr->power.states[1], max_cstate, ++ sizeof(struct acpi_processor_cx), ++ 
acpi_cst_latency_cmp, ++ acpi_cst_latency_swap); ++ } ++ + lapic_timer_propagate_broadcast(pr); + + return (working); +diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c +index 56241eb341f4..76cc11b3b49f 100644 +--- a/drivers/acpi/resource.c ++++ b/drivers/acpi/resource.c +@@ -532,7 +532,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares, + ret = c->preproc(ares, c->preproc_data); + if (ret < 0) { + c->error = ret; +- return AE_CTRL_TERMINATE; ++ return AE_ABORT_METHOD; + } else if (ret > 0) { + return AE_OK; + } +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c +index 0792ec5a9efc..89ce7b14a166 100644 +--- a/drivers/acpi/scan.c ++++ b/drivers/acpi/scan.c +@@ -481,10 +481,10 @@ static void acpi_device_del(struct acpi_device *device) + list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) + if (!strcmp(acpi_device_bus_id->bus_id, + acpi_device_hid(device))) { +- if (acpi_device_bus_id->instance_no > 0) +- acpi_device_bus_id->instance_no--; +- else { ++ ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no); ++ if (ida_is_empty(&acpi_device_bus_id->instance_ida)) { + list_del(&acpi_device_bus_id->node); ++ kfree_const(acpi_device_bus_id->bus_id); + kfree(acpi_device_bus_id); + } + break; +@@ -584,6 +584,8 @@ static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device, + if (!device) + return -EINVAL; + ++ *device = NULL; ++ + status = acpi_get_data_full(handle, acpi_scan_drop_device, + (void **)device, callback); + if (ACPI_FAILURE(status) || !*device) { +@@ -619,12 +621,38 @@ void acpi_bus_put_acpi_device(struct acpi_device *adev) + put_device(&adev->dev); + } + ++static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id) ++{ ++ struct acpi_device_bus_id *acpi_device_bus_id; ++ ++ /* Find suitable bus_id and instance number in acpi_bus_id_list. 
*/ ++ list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) { ++ if (!strcmp(acpi_device_bus_id->bus_id, dev_id)) ++ return acpi_device_bus_id; ++ } ++ return NULL; ++} ++ ++static int acpi_device_set_name(struct acpi_device *device, ++ struct acpi_device_bus_id *acpi_device_bus_id) ++{ ++ struct ida *instance_ida = &acpi_device_bus_id->instance_ida; ++ int result; ++ ++ result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL); ++ if (result < 0) ++ return result; ++ ++ device->pnp.instance_no = result; ++ dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result); ++ return 0; ++} ++ + int acpi_device_add(struct acpi_device *device, + void (*release)(struct device *)) + { ++ struct acpi_device_bus_id *acpi_device_bus_id; + int result; +- struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id; +- int found = 0; + + if (device->handle) { + acpi_status status; +@@ -650,34 +678,39 @@ int acpi_device_add(struct acpi_device *device, + INIT_LIST_HEAD(&device->del_list); + mutex_init(&device->physical_node_lock); + +- new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL); +- if (!new_bus_id) { +- pr_err(PREFIX "Memory allocation error\n"); +- result = -ENOMEM; +- goto err_detach; +- } +- + mutex_lock(&acpi_device_lock); +- /* +- * Find suitable bus_id and instance number in acpi_bus_id_list +- * If failed, create one and link it into acpi_bus_id_list +- */ +- list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) { +- if (!strcmp(acpi_device_bus_id->bus_id, +- acpi_device_hid(device))) { +- acpi_device_bus_id->instance_no++; +- found = 1; +- kfree(new_bus_id); +- break; ++ ++ acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device)); ++ if (acpi_device_bus_id) { ++ result = acpi_device_set_name(device, acpi_device_bus_id); ++ if (result) ++ goto err_unlock; ++ } else { ++ acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id), ++ GFP_KERNEL); ++ if (!acpi_device_bus_id) { ++ result = -ENOMEM; ++ goto err_unlock; + } +- } +- if (!found) { +- acpi_device_bus_id = new_bus_id; +- strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device)); +- acpi_device_bus_id->instance_no = 0; ++ acpi_device_bus_id->bus_id = ++ kstrdup_const(acpi_device_hid(device), GFP_KERNEL); ++ if (!acpi_device_bus_id->bus_id) { ++ kfree(acpi_device_bus_id); ++ result = -ENOMEM; ++ goto err_unlock; ++ } ++ ++ ida_init(&acpi_device_bus_id->instance_ida); ++ ++ result = acpi_device_set_name(device, acpi_device_bus_id); ++ if (result) { ++ kfree_const(acpi_device_bus_id->bus_id); ++ kfree(acpi_device_bus_id); ++ goto err_unlock; ++ } ++ + list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list); + } +- dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no); + + if (device->parent) + list_add_tail(&device->node, &device->parent->children); +@@ -708,9 +741,10 @@ int acpi_device_add(struct acpi_device *device, + if (device->parent) + list_del(&device->node); + list_del(&device->wakeup_list); ++ ++ err_unlock: + mutex_unlock(&acpi_device_lock); + +- err_detach: + acpi_detach_data(device->handle, acpi_scan_drop_device); + return result; + } +@@ -927,12 +961,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state) + + if (buffer.length && package + && package->type == ACPI_TYPE_PACKAGE +- && package->package.count) { +- int err = acpi_extract_power_resources(package, 0, +- &ps->resources); +- if (!err) +- device->power.flags.power_resources = 1; +- } ++ && 
package->package.count) ++ acpi_extract_power_resources(package, 0, &ps->resources); ++ + ACPI_FREE(buffer.pointer); + } + +@@ -979,14 +1010,27 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) + acpi_bus_init_power_state(device, i); + + INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources); +- if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) +- device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; + +- /* Set defaults for D0 and D3hot states (always valid) */ ++ /* Set the defaults for D0 and D3hot (always supported). */ + device->power.states[ACPI_STATE_D0].flags.valid = 1; + device->power.states[ACPI_STATE_D0].power = 100; + device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1; + ++ /* ++ * Use power resources only if the D0 list of them is populated, because ++ * some platforms may provide _PR3 only to indicate D3cold support and ++ * in those cases the power resources list returned by it may be bogus. ++ */ ++ if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) { ++ device->power.flags.power_resources = 1; ++ /* ++ * D3cold is supported if the D3hot list of power resources is ++ * not empty. ++ */ ++ if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) ++ device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; ++ } ++ + if (acpi_bus_init_power(device)) + device->flags.power_manageable = 0; + } +diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c +index cf2a17bdccc1..a3efff9cc241 100644 +--- a/drivers/acpi/sysfs.c ++++ b/drivers/acpi/sysfs.c +@@ -845,13 +845,13 @@ static void __exit interrupt_stats_exit(void) + } + + static ssize_t +-acpi_show_profile(struct device *dev, struct device_attribute *attr, ++acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) + { + return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile); + } + +-static const struct device_attribute pm_profile_attr = ++static const struct kobj_attribute pm_profile_attr = + __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL); + + static ssize_t hotplug_enabled_show(struct kobject *kobj, +@@ -900,8 +900,10 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug, + + error = kobject_init_and_add(&hotplug->kobj, + &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name); +- if (error) ++ if (error) { ++ kobject_put(&hotplug->kobj); + goto err_out; ++ } + + kobject_uevent(&hotplug->kobj, KOBJ_ADD); + return; +diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c +index 35e8fbca10ad..c53c88b53163 100644 +--- a/drivers/acpi/thermal.c ++++ b/drivers/acpi/thermal.c +@@ -188,6 +188,8 @@ struct acpi_thermal { + int tz_enabled; + int kelvin_offset; + struct work_struct thermal_check_work; ++ struct mutex thermal_check_lock; ++ atomic_t thermal_check_count; + }; + + /* -------------------------------------------------------------------------- +@@ -513,17 +515,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz) + return 0; + } + +-static void acpi_thermal_check(void *data) +-{ +- struct acpi_thermal *tz = data; +- +- if (!tz->tz_enabled) +- return; +- +- thermal_zone_device_update(tz->thermal_zone, +- THERMAL_EVENT_UNSPECIFIED); +-} +- + /* sys I/F for generic thermal sysfs support */ + + static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp) +@@ -557,6 +548,8 @@ static int thermal_get_mode(struct thermal_zone_device *thermal, + return 0; + } + ++static void acpi_thermal_check_fn(struct work_struct *work); ++ + static int thermal_set_mode(struct 
thermal_zone_device *thermal, + enum thermal_device_mode mode) + { +@@ -582,7 +575,7 @@ static int thermal_set_mode(struct thermal_zone_device *thermal, + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "%s kernel ACPI thermal control\n", + tz->tz_enabled ? "Enable" : "Disable")); +- acpi_thermal_check(tz); ++ acpi_thermal_check_fn(&tz->thermal_check_work); + } + return 0; + } +@@ -951,6 +944,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz) + Driver Interface + -------------------------------------------------------------------------- */ + ++static void acpi_queue_thermal_check(struct acpi_thermal *tz) ++{ ++ if (!work_pending(&tz->thermal_check_work)) ++ queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work); ++} ++ + static void acpi_thermal_notify(struct acpi_device *device, u32 event) + { + struct acpi_thermal *tz = acpi_driver_data(device); +@@ -961,17 +960,17 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event) + + switch (event) { + case ACPI_THERMAL_NOTIFY_TEMPERATURE: +- acpi_thermal_check(tz); ++ acpi_queue_thermal_check(tz); + break; + case ACPI_THERMAL_NOTIFY_THRESHOLDS: + acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS); +- acpi_thermal_check(tz); ++ acpi_queue_thermal_check(tz); + acpi_bus_generate_netlink_event(device->pnp.device_class, + dev_name(&device->dev), event, 0); + break; + case ACPI_THERMAL_NOTIFY_DEVICES: + acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES); +- acpi_thermal_check(tz); ++ acpi_queue_thermal_check(tz); + acpi_bus_generate_netlink_event(device->pnp.device_class, + dev_name(&device->dev), event, 0); + break; +@@ -1071,7 +1070,27 @@ static void acpi_thermal_check_fn(struct work_struct *work) + { + struct acpi_thermal *tz = container_of(work, struct acpi_thermal, + thermal_check_work); +- acpi_thermal_check(tz); ++ ++ if (!tz->tz_enabled) ++ return; ++ /* ++ * In general, it is not sufficient to check the pending bit, because ++ * subsequent instances of this function may be queued after one of them ++ * has started running (e.g. if _TMP sleeps). Avoid bailing out if just ++ * one of them is running, though, because it may have done the actual ++ * check some time ago, so allow at least one of them to block on the ++ * mutex while another one is running the update. 
++ */ ++ if (!atomic_add_unless(&tz->thermal_check_count, -1, 1)) ++ return; ++ ++ mutex_lock(&tz->thermal_check_lock); ++ ++ thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED); ++ ++ atomic_inc(&tz->thermal_check_count); ++ ++ mutex_unlock(&tz->thermal_check_lock); + } + + static int acpi_thermal_add(struct acpi_device *device) +@@ -1103,6 +1122,8 @@ static int acpi_thermal_add(struct acpi_device *device) + if (result) + goto free_memory; + ++ atomic_set(&tz->thermal_check_count, 3); ++ mutex_init(&tz->thermal_check_lock); + INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn); + + pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device), +@@ -1168,7 +1189,7 @@ static int acpi_thermal_resume(struct device *dev) + tz->state.active |= tz->trips.active[i].flags.enabled; + } + +- queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work); ++ acpi_queue_thermal_check(tz); + + return AE_OK; + } +diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c +index 25f02f5fe0fd..ec2f77a47150 100644 +--- a/drivers/acpi/video_detect.c ++++ b/drivers/acpi/video_detect.c +@@ -268,6 +268,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "530U4E/540U4E"), + }, + }, ++ /* https://bugs.launchpad.net/bugs/1894667 */ ++ { ++ .callback = video_detect_force_video, ++ .ident = "HP 635 Notebook", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP 635 Notebook PC"), ++ }, ++ }, + + /* Non win8 machines which need native backlight nevertheless */ + { +diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c +index 93888ccb4e26..5bc8d588d146 100644 +--- a/drivers/amba/bus.c ++++ b/drivers/amba/bus.c +@@ -280,10 +280,11 @@ static int amba_remove(struct device *dev) + { + struct amba_device *pcdev = to_amba_device(dev); + struct amba_driver *drv = to_amba_driver(dev->driver); +- int ret; ++ int ret = 0; + + pm_runtime_get_sync(dev); +- ret = drv->remove(pcdev); ++ if (drv->remove) ++ ret = drv->remove(pcdev); + pm_runtime_put_noidle(dev); + + /* Undo the runtime PM settings in amba_probe() */ +@@ -300,7 +301,9 @@ static int amba_remove(struct device *dev) + static void amba_shutdown(struct device *dev) + { + struct amba_driver *drv = to_amba_driver(dev->driver); +- drv->shutdown(to_amba_device(dev)); ++ ++ if (drv->shutdown) ++ drv->shutdown(to_amba_device(dev)); + } + + /** +@@ -313,12 +316,13 @@ static void amba_shutdown(struct device *dev) + */ + int amba_driver_register(struct amba_driver *drv) + { +- drv->drv.bus = &amba_bustype; ++ if (!drv->probe) ++ return -EINVAL; + +-#define SETFN(fn) if (drv->fn) drv->drv.fn = amba_##fn +- SETFN(probe); +- SETFN(remove); +- SETFN(shutdown); ++ drv->drv.bus = &amba_bustype; ++ drv->drv.probe = amba_probe; ++ drv->drv.remove = amba_remove; ++ drv->drv.shutdown = amba_shutdown; + + return driver_register(&drv->drv); + } +diff --git a/drivers/android/binder.c b/drivers/android/binder.c +index 51bef2482149..14351aeddcd2 100644 +--- a/drivers/android/binder.c ++++ b/drivers/android/binder.c +@@ -3116,6 +3116,12 @@ static void binder_transaction(struct binder_proc *proc, + goto err_dead_binder; + } + e->to_node = target_node->debug_id; ++ if (WARN_ON(proc == target_proc)) { ++ return_error = BR_FAILED_REPLY; ++ return_error_param = -EINVAL; ++ return_error_line = __LINE__; ++ goto err_invalid_target_handle; ++ } + if (security_binder_transaction(proc->tsk, + target_proc->tsk) < 0) { + return_error = BR_FAILED_REPLY; +@@ -3694,10 +3700,17 @@ static 
int binder_thread_write(struct binder_proc *proc, + struct binder_node *ctx_mgr_node; + mutex_lock(&context->context_mgr_node_lock); + ctx_mgr_node = context->binder_context_mgr_node; +- if (ctx_mgr_node) ++ if (ctx_mgr_node) { ++ if (ctx_mgr_node->proc == proc) { ++ binder_user_error("%d:%d context manager tried to acquire desc 0\n", ++ proc->pid, thread->pid); ++ mutex_unlock(&context->context_mgr_node_lock); ++ return -EINVAL; ++ } + ret = binder_inc_ref_for_node( + proc, ctx_mgr_node, + strong, NULL, &rdata); ++ } + mutex_unlock(&context->context_mgr_node_lock); + } + if (ret) +@@ -4293,7 +4306,7 @@ static int binder_thread_read(struct binder_proc *proc, + e->cmd = BR_OK; + ptr += sizeof(uint32_t); + +- binder_stat_br(proc, thread, cmd); ++ binder_stat_br(proc, thread, e->cmd); + } break; + case BINDER_WORK_TRANSACTION_COMPLETE: { + binder_inner_proc_unlock(proc); +diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c +index ed6a30cd681a..98581ae397c1 100644 +--- a/drivers/ata/acard-ahci.c ++++ b/drivers/ata/acard-ahci.c +@@ -72,7 +72,7 @@ struct acard_sg { + __le32 size; /* bit 31 (EOT) max==0x10000 (64k) */ + }; + +-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc); ++static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc); + static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc); + static int acard_ahci_port_start(struct ata_port *ap); + static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); +@@ -257,7 +257,7 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) + return si; + } + +-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc) ++static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc) + { + struct ata_port *ap = qc->ap; + struct ahci_port_priv *pp = ap->private_data; +@@ -295,6 +295,8 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc) + opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; + + ahci_fill_cmd_slot(pp, qc->tag, opts); ++ ++ return AC_ERR_OK; + } + + static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc) +diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c +index f50a76ad63e4..8354f2de37c3 100644 +--- a/drivers/ata/ahci_brcm.c ++++ b/drivers/ata/ahci_brcm.c +@@ -285,6 +285,10 @@ static int brcm_ahci_resume(struct device *dev) + if (ret) + return ret; + ++ ret = ahci_platform_enable_regulators(hpriv); ++ if (ret) ++ goto out_disable_clks; ++ + brcm_sata_init(priv); + brcm_sata_phys_enable(priv); + brcm_sata_alpm_init(hpriv); +@@ -314,6 +318,8 @@ static int brcm_ahci_resume(struct device *dev) + ahci_platform_disable_phys(hpriv); + out_disable_phys: + brcm_sata_phys_disable(priv); ++ ahci_platform_disable_regulators(hpriv); ++out_disable_clks: + ahci_platform_disable_clks(hpriv); + return ret; + } +@@ -377,6 +383,10 @@ static int brcm_ahci_probe(struct platform_device *pdev) + if (ret) + goto out_reset; + ++ ret = ahci_platform_enable_regulators(hpriv); ++ if (ret) ++ goto out_disable_clks; ++ + /* Must be first so as to configure endianness including that + * of the standard AHCI register space. 
+ */ +@@ -386,7 +396,7 @@ static int brcm_ahci_probe(struct platform_device *pdev) + priv->port_mask = brcm_ahci_get_portmask(hpriv, priv); + if (!priv->port_mask) { + ret = -ENODEV; +- goto out_disable_clks; ++ goto out_disable_regulators; + } + + /* Must be done before ahci_platform_enable_phys() */ +@@ -417,6 +427,8 @@ static int brcm_ahci_probe(struct platform_device *pdev) + ahci_platform_disable_phys(hpriv); + out_disable_phys: + brcm_sata_phys_disable(priv); ++out_disable_regulators: ++ ahci_platform_disable_regulators(hpriv); + out_disable_clks: + ahci_platform_disable_clks(hpriv); + out_reset: +diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c +index b26437430163..98b4f0d898d6 100644 +--- a/drivers/ata/ahci_sunxi.c ++++ b/drivers/ata/ahci_sunxi.c +@@ -165,7 +165,7 @@ static void ahci_sunxi_start_engine(struct ata_port *ap) + } + + static const struct ata_port_info ahci_sunxi_port_info = { +- .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ, ++ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ | ATA_FLAG_NO_DIPM, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_platform_ops, +diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c +index 1610fff19bb3..984260902d0b 100644 +--- a/drivers/ata/libahci.c ++++ b/drivers/ata/libahci.c +@@ -73,7 +73,7 @@ static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); + static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); + static int ahci_port_start(struct ata_port *ap); + static void ahci_port_stop(struct ata_port *ap); +-static void ahci_qc_prep(struct ata_queued_cmd *qc); ++static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc); + static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc); + static void ahci_freeze(struct ata_port *ap); + static void ahci_thaw(struct ata_port *ap); +@@ -1607,7 +1607,7 @@ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc) + return sata_pmp_qc_defer_cmd_switch(qc); + } + +-static void ahci_qc_prep(struct ata_queued_cmd *qc) ++static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc) + { + struct ata_port *ap = qc->ap; + struct ahci_port_priv *pp = ap->private_data; +@@ -1643,6 +1643,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc) + opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; + + ahci_fill_cmd_slot(pp, qc->tag, opts); ++ ++ return AC_ERR_OK; + } + + static void ahci_fbs_dec_intr(struct ata_port *ap) +diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c +index 0b80502bc1c5..bfa2e5eec263 100644 +--- a/drivers/ata/libahci_platform.c ++++ b/drivers/ata/libahci_platform.c +@@ -518,11 +518,13 @@ int ahci_platform_init_host(struct platform_device *pdev, + int i, irq, n_ports, rc; + + irq = platform_get_irq(pdev, 0); +- if (irq <= 0) { ++ if (irq < 0) { + if (irq != -EPROBE_DEFER) + dev_err(dev, "no irq\n"); + return irq; + } ++ if (!irq) ++ return -EINVAL; + + hpriv->irq = irq; + +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c +index ba0cffbd0bb6..228a4cfb0e7d 100644 +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -57,7 +57,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -4372,9 +4371,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { + /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ + { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, + +- /* Some Sandisk SSDs lock up hard with NCQ enabled. 
Reported on +- SD7SN6S256G and SD8SN8U256G */ +- { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, }, ++ /* Sandisk SD7/8/9s lock up hard on large trims */ ++ { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M, }, + + /* devices which puke on READ_NATIVE_MAX */ + { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, +@@ -4897,7 +4895,10 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc) + return ATA_DEFER_LINK; + } + +-void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } ++enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc) ++{ ++ return AC_ERR_OK; ++} + + /** + * ata_sg_init - Associate command with scatter-gather table. +@@ -5315,7 +5316,9 @@ void ata_qc_issue(struct ata_queued_cmd *qc) + return; + } + +- ap->ops->qc_prep(qc); ++ qc->err_mask |= ap->ops->qc_prep(qc); ++ if (unlikely(qc->err_mask)) ++ goto err; + trace_ata_qc_issue(qc); + qc->err_mask |= ap->ops->qc_issue(qc); + if (unlikely(qc->err_mask)) +@@ -6410,7 +6413,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) + /* perform each probe asynchronously */ + for (i = 0; i < host->n_ports; i++) { + struct ata_port *ap = host->ports[i]; +- async_schedule(async_port_probe, ap); ++ ap->cookie = async_schedule(async_port_probe, ap); + } + + return 0; +@@ -6550,11 +6553,11 @@ void ata_host_detach(struct ata_host *host) + { + int i; + +- /* Ensure ata_port probe has completed */ +- async_synchronize_full(); +- +- for (i = 0; i < host->n_ports; i++) ++ for (i = 0; i < host->n_ports; i++) { ++ /* Ensure ata_port probe has completed */ ++ async_synchronize_cookie(host->ports[i]->cookie + 1); + ata_port_detach(host->ports[i]); ++ } + + /* the host is dead now, dissociate ACPI */ + ata_acpi_dissociate(host); +diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c +index c4f2b563c9f0..76ba83e245c2 100644 +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -2314,6 +2314,7 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf) + + static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) + { ++ struct ata_device *dev = args->dev; + u16 min_io_sectors; + + rbuf[1] = 0xb0; +@@ -2339,7 +2340,12 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) + * with the unmap bit set. + */ + if (ata_id_has_trim(args->id)) { +- put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]); ++ u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM; ++ ++ if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M) ++ max_blocks = 128 << (20 - SECTOR_SHIFT); ++ ++ put_unaligned_be64(max_blocks, &rbuf[36]); + put_unaligned_be32(1, &rbuf[28]); + } + +@@ -3967,12 +3973,13 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) + { + struct scsi_cmnd *scmd = qc->scsicmd; + const u8 *cdb = scmd->cmnd; +- const u8 *p; + u8 pg, spg; + unsigned six_byte, pg_len, hdr_len, bd_len; + int len; + u16 fp = (u16)-1; + u8 bp = 0xff; ++ u8 buffer[64]; ++ const u8 *p = buffer; + + VPRINTK("ENTER\n"); + +@@ -4006,12 +4013,14 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) + if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len) + goto invalid_param_len; + +- p = page_address(sg_page(scsi_sglist(scmd))); +- + /* Move past header and block descriptors. 
*/ + if (len < hdr_len) + goto invalid_param_len; + ++ if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd), ++ buffer, sizeof(buffer))) ++ goto invalid_param_len; ++ + if (six_byte) + bd_len = p[3]; + else +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c +index 0e2bc5b9a78c..0edd83cae0fd 100644 +--- a/drivers/ata/libata-sff.c ++++ b/drivers/ata/libata-sff.c +@@ -2742,12 +2742,14 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc) + * LOCKING: + * spin_lock_irqsave(host lock) + */ +-void ata_bmdma_qc_prep(struct ata_queued_cmd *qc) ++enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc) + { + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) +- return; ++ return AC_ERR_OK; + + ata_bmdma_fill_sg(qc); ++ ++ return AC_ERR_OK; + } + EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); + +@@ -2760,12 +2762,14 @@ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); + * LOCKING: + * spin_lock_irqsave(host lock) + */ +-void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) ++enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) + { + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) +- return; ++ return AC_ERR_OK; + + ata_bmdma_fill_sg_dumb(qc); ++ ++ return AC_ERR_OK; + } + EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); + +diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c +index b4d54771c9fe..623199fab8fe 100644 +--- a/drivers/ata/pata_arasan_cf.c ++++ b/drivers/ata/pata_arasan_cf.c +@@ -819,12 +819,19 @@ static int arasan_cf_probe(struct platform_device *pdev) + else + quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */ + +- /* if irq is 0, support only PIO */ +- acdev->irq = platform_get_irq(pdev, 0); +- if (acdev->irq) ++ /* ++ * If there's an error getting IRQ (or we do get IRQ0), ++ * support only PIO ++ */ ++ ret = platform_get_irq(pdev, 0); ++ if (ret > 0) { ++ acdev->irq = ret; + irq_handler = arasan_cf_interrupt; +- else ++ } else if (ret == -EPROBE_DEFER) { ++ return ret; ++ } else { + quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA; ++ } + + acdev->pbase = res->start; + acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start, +diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c +index 634c814cbeda..ebdd2dfabbeb 100644 +--- a/drivers/ata/pata_ep93xx.c ++++ b/drivers/ata/pata_ep93xx.c +@@ -927,7 +927,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev) + /* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) { +- err = -ENXIO; ++ err = irq; + goto err_rel_gpio; + } + +diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c +index abda44183512..fb8d1f68f36f 100644 +--- a/drivers/ata/pata_ixp4xx_cf.c ++++ b/drivers/ata/pata_ixp4xx_cf.c +@@ -169,8 +169,12 @@ static int ixp4xx_pata_probe(struct platform_device *pdev) + return -ENOMEM; + + irq = platform_get_irq(pdev, 0); +- if (irq) ++ if (irq > 0) + irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING); ++ else if (irq < 0) ++ return irq; ++ else ++ return -EINVAL; + + /* Setup expansion bus chip selects */ + *data->cs0_cfg = data->cs0_bits; +diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c +index e347e7acd8ed..d8000bbd1e11 100644 +--- a/drivers/ata/pata_macio.c ++++ b/drivers/ata/pata_macio.c +@@ -507,7 +507,7 @@ static int pata_macio_cable_detect(struct ata_port *ap) + return ATA_CBL_PATA40; + } + +-static void pata_macio_qc_prep(struct ata_queued_cmd *qc) ++static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc) + { + unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE); 
+ struct ata_port *ap = qc->ap; +@@ -520,7 +520,7 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc) + __func__, qc, qc->flags, write, qc->dev->devno); + + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) +- return; ++ return AC_ERR_OK; + + table = (struct dbdma_cmd *) priv->dma_table_cpu; + +@@ -565,6 +565,8 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc) + table->command = cpu_to_le16(DBDMA_STOP); + + dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi); ++ ++ return AC_ERR_OK; + } + + +diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c +index 475a00669427..7e6359e32ab6 100644 +--- a/drivers/ata/pata_octeon_cf.c ++++ b/drivers/ata/pata_octeon_cf.c +@@ -908,10 +908,11 @@ static int octeon_cf_probe(struct platform_device *pdev) + return -EINVAL; + } + +- irq_handler = octeon_cf_interrupt; + i = platform_get_irq(dma_dev, 0); +- if (i > 0) ++ if (i > 0) { + irq = i; ++ irq_handler = octeon_cf_interrupt; ++ } + } + of_node_put(dma_node); + } +diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c +index f6c46e9a4dc0..d7186a503e35 100644 +--- a/drivers/ata/pata_pxa.c ++++ b/drivers/ata/pata_pxa.c +@@ -59,25 +59,27 @@ static void pxa_ata_dma_irq(void *d) + /* + * Prepare taskfile for submission. + */ +-static void pxa_qc_prep(struct ata_queued_cmd *qc) ++static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc) + { + struct pata_pxa_data *pd = qc->ap->private_data; + struct dma_async_tx_descriptor *tx; + enum dma_transfer_direction dir; + + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) +- return; ++ return AC_ERR_OK; + + dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM); + tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir, + DMA_PREP_INTERRUPT); + if (!tx) { + ata_dev_err(qc->dev, "prep_slave_sg() failed\n"); +- return; ++ return AC_ERR_OK; + } + tx->callback = pxa_ata_dma_irq; + tx->callback_param = pd; + pd->dma_cookie = dmaengine_submit(tx); ++ ++ return AC_ERR_OK; + } + + /* +diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c +index c8b6a780a290..76c550e160f6 100644 +--- a/drivers/ata/pata_rb532_cf.c ++++ b/drivers/ata/pata_rb532_cf.c +@@ -120,10 +120,12 @@ static int rb532_pata_driver_probe(struct platform_device *pdev) + } + + irq = platform_get_irq(pdev, 0); +- if (irq <= 0) { ++ if (irq < 0) { + dev_err(&pdev->dev, "no IRQ resource found\n"); +- return -ENOENT; ++ return irq; + } ++ if (!irq) ++ return -EINVAL; + + pdata = dev_get_platdata(&pdev->dev); + if (!pdata) { +diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c +index 64d682c6ee57..11da13bea2c9 100644 +--- a/drivers/ata/pdc_adma.c ++++ b/drivers/ata/pdc_adma.c +@@ -132,7 +132,7 @@ static int adma_ata_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent); + static int adma_port_start(struct ata_port *ap); + static void adma_port_stop(struct ata_port *ap); +-static void adma_qc_prep(struct ata_queued_cmd *qc); ++static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc); + static unsigned int adma_qc_issue(struct ata_queued_cmd *qc); + static int adma_check_atapi_dma(struct ata_queued_cmd *qc); + static void adma_freeze(struct ata_port *ap); +@@ -311,7 +311,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc) + return i; + } + +-static void adma_qc_prep(struct ata_queued_cmd *qc) ++static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc) + { + struct adma_port_priv *pp = qc->ap->private_data; + u8 *buf = pp->pkt; +@@ -322,7 +322,7 @@ static void 
adma_qc_prep(struct ata_queued_cmd *qc) + + adma_enter_reg_mode(qc->ap); + if (qc->tf.protocol != ATA_PROT_DMA) +- return; ++ return AC_ERR_OK; + + buf[i++] = 0; /* Response flags */ + buf[i++] = 0; /* reserved */ +@@ -387,6 +387,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc) + printk("%s\n", obuf); + } + #endif ++ return AC_ERR_OK; + } + + static inline void adma_packet_start(struct ata_queued_cmd *qc) +diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c +index a723ae929783..100b5a3621ef 100644 +--- a/drivers/ata/sata_fsl.c ++++ b/drivers/ata/sata_fsl.c +@@ -513,7 +513,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc, + return num_prde; + } + +-static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) ++static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc) + { + struct ata_port *ap = qc->ap; + struct sata_fsl_port_priv *pp = ap->private_data; +@@ -559,6 +559,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) + + VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n", + desc_info, ttl_dwords, num_prde); ++ ++ return AC_ERR_OK; + } + + static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc) +diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c +index aafb8cc03523..13f339f07f2a 100644 +--- a/drivers/ata/sata_highbank.c ++++ b/drivers/ata/sata_highbank.c +@@ -483,10 +483,12 @@ static int ahci_highbank_probe(struct platform_device *pdev) + } + + irq = platform_get_irq(pdev, 0); +- if (irq <= 0) { ++ if (irq < 0) { + dev_err(dev, "no irq\n"); +- return -EINVAL; ++ return irq; + } ++ if (!irq) ++ return -EINVAL; + + hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); + if (!hpriv) { +diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c +index e81a8217f1ff..349a175f0267 100644 +--- a/drivers/ata/sata_inic162x.c ++++ b/drivers/ata/sata_inic162x.c +@@ -472,7 +472,7 @@ static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc) + prd[-1].flags |= PRD_END; + } + +-static void inic_qc_prep(struct ata_queued_cmd *qc) ++static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc) + { + struct inic_port_priv *pp = qc->ap->private_data; + struct inic_pkt *pkt = pp->pkt; +@@ -532,6 +532,8 @@ static void inic_qc_prep(struct ata_queued_cmd *qc) + inic_fill_sg(prd, qc); + + pp->cpb_tbl[0] = pp->pkt_dma; ++ ++ return AC_ERR_OK; + } + + static unsigned int inic_qc_issue(struct ata_queued_cmd *qc) +diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c +index 2f32782cea6d..ded3a66049d2 100644 +--- a/drivers/ata/sata_mv.c ++++ b/drivers/ata/sata_mv.c +@@ -605,8 +605,8 @@ static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) + static int mv_port_start(struct ata_port *ap); + static void mv_port_stop(struct ata_port *ap); + static int mv_qc_defer(struct ata_queued_cmd *qc); +-static void mv_qc_prep(struct ata_queued_cmd *qc); +-static void mv_qc_prep_iie(struct ata_queued_cmd *qc); ++static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc); ++static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc); + static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); + static int mv_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); +@@ -2044,7 +2044,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc) + * LOCKING: + * Inherited from caller. 
+ */ +-static void mv_qc_prep(struct ata_queued_cmd *qc) ++static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc) + { + struct ata_port *ap = qc->ap; + struct mv_port_priv *pp = ap->private_data; +@@ -2056,15 +2056,15 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) + switch (tf->protocol) { + case ATA_PROT_DMA: + if (tf->command == ATA_CMD_DSM) +- return; ++ return AC_ERR_OK; + /* fall-thru */ + case ATA_PROT_NCQ: + break; /* continue below */ + case ATA_PROT_PIO: + mv_rw_multi_errata_sata24(qc); +- return; ++ return AC_ERR_OK; + default: +- return; ++ return AC_ERR_OK; + } + + /* Fill in command request block +@@ -2111,12 +2111,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) + * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none + * of which are defined/used by Linux. If we get here, this + * driver needs work. +- * +- * FIXME: modify libata to give qc_prep a return value and +- * return error here. + */ +- BUG_ON(tf->command); +- break; ++ ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__, ++ tf->command); ++ return AC_ERR_INVALID; + } + mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); + mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); +@@ -2129,8 +2127,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) + mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ + + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) +- return; ++ return AC_ERR_OK; + mv_fill_sg(qc); ++ ++ return AC_ERR_OK; + } + + /** +@@ -2145,7 +2145,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) + * LOCKING: + * Inherited from caller. + */ +-static void mv_qc_prep_iie(struct ata_queued_cmd *qc) ++static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc) + { + struct ata_port *ap = qc->ap; + struct mv_port_priv *pp = ap->private_data; +@@ -2156,9 +2156,9 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) + + if ((tf->protocol != ATA_PROT_DMA) && + (tf->protocol != ATA_PROT_NCQ)) +- return; ++ return AC_ERR_OK; + if (tf->command == ATA_CMD_DSM) +- return; /* use bmdma for this */ ++ return AC_ERR_OK; /* use bmdma for this */ + + /* Fill in Gen IIE command request block */ + if (!(tf->flags & ATA_TFLAG_WRITE)) +@@ -2199,8 +2199,10 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) + ); + + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) +- return; ++ return AC_ERR_OK; + mv_fill_sg(qc); ++ ++ return AC_ERR_OK; + } + + /** +@@ -4110,6 +4112,10 @@ static int mv_platform_probe(struct platform_device *pdev) + n_ports = mv_platform_data->n_ports; + irq = platform_get_irq(pdev, 0); + } ++ if (irq < 0) ++ return irq; ++ if (!irq) ++ return -EINVAL; + + host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); + hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); +diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c +index 734f563b8d37..bb098c4ae177 100644 +--- a/drivers/ata/sata_nv.c ++++ b/drivers/ata/sata_nv.c +@@ -313,7 +313,7 @@ static void nv_ck804_freeze(struct ata_port *ap); + static void nv_ck804_thaw(struct ata_port *ap); + static int nv_adma_slave_config(struct scsi_device *sdev); + static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); +-static void nv_adma_qc_prep(struct ata_queued_cmd *qc); ++static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc); + static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc); + static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance); + static void nv_adma_irq_clear(struct ata_port *ap); +@@ -335,7 +335,7 @@ static void nv_mcp55_freeze(struct 
ata_port *ap); + static void nv_swncq_error_handler(struct ata_port *ap); + static int nv_swncq_slave_config(struct scsi_device *sdev); + static int nv_swncq_port_start(struct ata_port *ap); +-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc); ++static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc); + static void nv_swncq_fill_sg(struct ata_queued_cmd *qc); + static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc); + static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis); +@@ -1382,7 +1382,7 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc) + return 1; + } + +-static void nv_adma_qc_prep(struct ata_queued_cmd *qc) ++static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc) + { + struct nv_adma_port_priv *pp = qc->ap->private_data; + struct nv_adma_cpb *cpb = &pp->cpb[qc->tag]; +@@ -1394,7 +1394,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc) + (qc->flags & ATA_QCFLAG_DMAMAP)); + nv_adma_register_mode(qc->ap); + ata_bmdma_qc_prep(qc); +- return; ++ return AC_ERR_OK; + } + + cpb->resp_flags = NV_CPB_RESP_DONE; +@@ -1426,6 +1426,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc) + cpb->ctl_flags = ctl_flags; + wmb(); + cpb->resp_flags = 0; ++ ++ return AC_ERR_OK; + } + + static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) +@@ -1989,17 +1991,19 @@ static int nv_swncq_port_start(struct ata_port *ap) + return 0; + } + +-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc) ++static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc) + { + if (qc->tf.protocol != ATA_PROT_NCQ) { + ata_bmdma_qc_prep(qc); +- return; ++ return AC_ERR_OK; + } + + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) +- return; ++ return AC_ERR_OK; + + nv_swncq_fill_sg(qc); ++ ++ return AC_ERR_OK; + } + + static void nv_swncq_fill_sg(struct ata_queued_cmd *qc) +diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c +index 0fa211e2831c..8ad8b376a642 100644 +--- a/drivers/ata/sata_promise.c ++++ b/drivers/ata/sata_promise.c +@@ -155,7 +155,7 @@ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 va + static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); + static int pdc_common_port_start(struct ata_port *ap); + static int pdc_sata_port_start(struct ata_port *ap); +-static void pdc_qc_prep(struct ata_queued_cmd *qc); ++static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc); + static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); + static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); + static int pdc_check_atapi_dma(struct ata_queued_cmd *qc); +@@ -649,7 +649,7 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc) + prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); + } + +-static void pdc_qc_prep(struct ata_queued_cmd *qc) ++static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc) + { + struct pdc_port_priv *pp = qc->ap->private_data; + unsigned int i; +@@ -681,6 +681,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc) + default: + break; + } ++ ++ return AC_ERR_OK; + } + + static int pdc_is_sataii_tx4(unsigned long flags) +diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c +index af987a4f33d1..80ff3bbfc826 100644 +--- a/drivers/ata/sata_qstor.c ++++ b/drivers/ata/sata_qstor.c +@@ -116,7 +116,7 @@ static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); + static int qs_ata_init_one(struct pci_dev *pdev, 
const struct pci_device_id *ent); + static int qs_port_start(struct ata_port *ap); + static void qs_host_stop(struct ata_host *host); +-static void qs_qc_prep(struct ata_queued_cmd *qc); ++static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc); + static unsigned int qs_qc_issue(struct ata_queued_cmd *qc); + static int qs_check_atapi_dma(struct ata_queued_cmd *qc); + static void qs_freeze(struct ata_port *ap); +@@ -276,7 +276,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc) + return si; + } + +-static void qs_qc_prep(struct ata_queued_cmd *qc) ++static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc) + { + struct qs_port_priv *pp = qc->ap->private_data; + u8 dflags = QS_DF_PORD, *buf = pp->pkt; +@@ -288,7 +288,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc) + + qs_enter_reg_mode(qc->ap); + if (qc->tf.protocol != ATA_PROT_DMA) +- return; ++ return AC_ERR_OK; + + nelem = qs_fill_sg(qc); + +@@ -311,6 +311,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc) + + /* frame information structure (FIS) */ + ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]); ++ ++ return AC_ERR_OK; + } + + static inline void qs_packet_start(struct ata_queued_cmd *qc) +diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c +index 07e146b772ea..e62b254c419f 100644 +--- a/drivers/ata/sata_rcar.c ++++ b/drivers/ata/sata_rcar.c +@@ -122,7 +122,7 @@ + /* Descriptor table word 0 bit (when DTA32M = 1) */ + #define SATA_RCAR_DTEND BIT(0) + +-#define SATA_RCAR_DMA_BOUNDARY 0x1FFFFFFEUL ++#define SATA_RCAR_DMA_BOUNDARY 0x1FFFFFFFUL + + /* Gen2 Physical Layer Control Registers */ + #define RCAR_GEN2_PHY_CTL1_REG 0x1704 +@@ -551,12 +551,14 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc) + prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND); + } + +-static void sata_rcar_qc_prep(struct ata_queued_cmd *qc) ++static enum ata_completion_errors sata_rcar_qc_prep(struct ata_queued_cmd *qc) + { + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) +- return; ++ return AC_ERR_OK; + + sata_rcar_bmdma_fill_sg(qc); ++ ++ return AC_ERR_OK; + } + + static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc) +diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c +index 29bcff086bce..73156a301912 100644 +--- a/drivers/ata/sata_sil.c ++++ b/drivers/ata/sata_sil.c +@@ -119,7 +119,7 @@ static void sil_dev_config(struct ata_device *dev); + static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); + static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); + static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed); +-static void sil_qc_prep(struct ata_queued_cmd *qc); ++static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc); + static void sil_bmdma_setup(struct ata_queued_cmd *qc); + static void sil_bmdma_start(struct ata_queued_cmd *qc); + static void sil_bmdma_stop(struct ata_queued_cmd *qc); +@@ -333,12 +333,14 @@ static void sil_fill_sg(struct ata_queued_cmd *qc) + last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT); + } + +-static void sil_qc_prep(struct ata_queued_cmd *qc) ++static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc) + { + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) +- return; ++ return AC_ERR_OK; + + sil_fill_sg(qc); ++ ++ return AC_ERR_OK; + } + + static unsigned char sil_get_device_cache_line(struct pci_dev *pdev) +diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c +index 4b1995e2d044..ffa3bf724054 100644 +--- a/drivers/ata/sata_sil24.c ++++ 
b/drivers/ata/sata_sil24.c +@@ -336,7 +336,7 @@ static void sil24_dev_config(struct ata_device *dev); + static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val); + static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val); + static int sil24_qc_defer(struct ata_queued_cmd *qc); +-static void sil24_qc_prep(struct ata_queued_cmd *qc); ++static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc); + static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); + static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc); + static void sil24_pmp_attach(struct ata_port *ap); +@@ -840,7 +840,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc) + return ata_std_qc_defer(qc); + } + +-static void sil24_qc_prep(struct ata_queued_cmd *qc) ++static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc) + { + struct ata_port *ap = qc->ap; + struct sil24_port_priv *pp = ap->private_data; +@@ -884,6 +884,8 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc) + + if (qc->flags & ATA_QCFLAG_DMAMAP) + sil24_fill_sg(qc, sge); ++ ++ return AC_ERR_OK; + } + + static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc) +diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c +index 48301cb3a316..043953200b52 100644 +--- a/drivers/ata/sata_sx4.c ++++ b/drivers/ata/sata_sx4.c +@@ -218,7 +218,7 @@ static void pdc_error_handler(struct ata_port *ap); + static void pdc_freeze(struct ata_port *ap); + static void pdc_thaw(struct ata_port *ap); + static int pdc_port_start(struct ata_port *ap); +-static void pdc20621_qc_prep(struct ata_queued_cmd *qc); ++static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc); + static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); + static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); + static unsigned int pdc20621_dimm_init(struct ata_host *host); +@@ -546,7 +546,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc) + VPRINTK("ata pkt buf ofs %u, mmio copied\n", i); + } + +-static void pdc20621_qc_prep(struct ata_queued_cmd *qc) ++static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc) + { + switch (qc->tf.protocol) { + case ATA_PROT_DMA: +@@ -558,6 +558,8 @@ static void pdc20621_qc_prep(struct ata_queued_cmd *qc) + default: + break; + } ++ ++ return AC_ERR_OK; + } + + static void __pdc20621_push_hdma(struct ata_queued_cmd *qc, +diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c +index 480fa6ffbc09..04fca6db273e 100644 +--- a/drivers/atm/atmtcp.c ++++ b/drivers/atm/atmtcp.c +@@ -432,9 +432,15 @@ static int atmtcp_remove_persistent(int itf) + return -EMEDIUMTYPE; + } + dev_data = PRIV(dev); +- if (!dev_data->persist) return 0; ++ if (!dev_data->persist) { ++ atm_dev_put(dev); ++ return 0; ++ } + dev_data->persist = 0; +- if (PRIV(dev)->vcc) return 0; ++ if (PRIV(dev)->vcc) { ++ atm_dev_put(dev); ++ return 0; ++ } + kfree(dev_data); + atm_dev_put(dev); + atm_dev_deregister(dev); +diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c +index 88819409e0be..2b7786cd548f 100644 +--- a/drivers/atm/eni.c ++++ b/drivers/atm/eni.c +@@ -2243,7 +2243,7 @@ static int eni_init_one(struct pci_dev *pci_dev, + + rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)); + if (rc < 0) +- goto out; ++ goto err_disable; + + rc = -ENOMEM; + eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL); +@@ -2279,7 +2279,8 @@ static int eni_init_one(struct pci_dev *pci_dev, + return rc; + + err_eni_release: +- 
eni_do_release(dev); ++ dev->phy = NULL; ++ iounmap(ENI_DEV(dev)->ioaddr); + err_unregister: + atm_dev_deregister(dev); + err_free_consistent: +diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c +index 82296fe2ba3b..7cb2b863e653 100644 +--- a/drivers/atm/firestream.c ++++ b/drivers/atm/firestream.c +@@ -1013,6 +1013,7 @@ static int fs_open(struct atm_vcc *atm_vcc) + error = make_rate (pcr, r, &tmc0, NULL); + if (error) { + kfree(tc); ++ kfree(vcc); + return error; + } + } +diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c +index feb023d7eebd..40644670cff2 100644 +--- a/drivers/atm/idt77105.c ++++ b/drivers/atm/idt77105.c +@@ -261,7 +261,7 @@ static int idt77105_start(struct atm_dev *dev) + { + unsigned long flags; + +- if (!(dev->dev_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL))) ++ if (!(dev->phy_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL))) + return -ENOMEM; + PRIV(dev)->dev = dev; + spin_lock_irqsave(&idt77105_priv_lock, flags); +@@ -338,7 +338,7 @@ static int idt77105_stop(struct atm_dev *dev) + else + idt77105_all = walk->next; + dev->phy = NULL; +- dev->dev_data = NULL; ++ dev->phy_data = NULL; + kfree(walk); + break; + } +diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c +index 074616b39f4d..89adb49e435e 100644 +--- a/drivers/atm/idt77252.c ++++ b/drivers/atm/idt77252.c +@@ -3615,7 +3615,7 @@ static int idt77252_init_one(struct pci_dev *pcidev, + + if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) { + printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev)); +- return err; ++ goto err_out_disable_pdev; + } + + card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL); +diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c +index fe47c924dc64..a1427cb9b9ed 100644 +--- a/drivers/atm/iphase.c ++++ b/drivers/atm/iphase.c +@@ -3301,7 +3301,7 @@ static void __exit ia_module_exit(void) + { + pci_unregister_driver(&ia_driver); + +- del_timer(&ia_timer); ++ del_timer_sync(&ia_timer); + } + + module_init(ia_module_init); +diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c +index 445505d9ea07..dec6c68156ee 100644 +--- a/drivers/atm/lanai.c ++++ b/drivers/atm/lanai.c +@@ -2240,6 +2240,7 @@ static int lanai_dev_open(struct atm_dev *atmdev) + conf1_write(lanai); + #endif + iounmap(lanai->base); ++ lanai->base = NULL; + error_pci: + pci_disable_device(lanai->pci); + error: +@@ -2252,6 +2253,8 @@ static int lanai_dev_open(struct atm_dev *atmdev) + static void lanai_dev_close(struct atm_dev *atmdev) + { + struct lanai_dev *lanai = (struct lanai_dev *) atmdev->dev_data; ++ if (lanai->base==NULL) ++ return; + printk(KERN_INFO DEV_LABEL "(itf %d): shutting down interface\n", + lanai->number); + lanai_timed_poll_stop(lanai); +@@ -2561,7 +2564,7 @@ static int lanai_init_one(struct pci_dev *pci, + struct atm_dev *atmdev; + int result; + +- lanai = kmalloc(sizeof(*lanai), GFP_KERNEL); ++ lanai = kzalloc(sizeof(*lanai), GFP_KERNEL); + if (lanai == NULL) { + printk(KERN_ERR DEV_LABEL + ": couldn't allocate dev_data structure!\n"); +diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c +index c7296b583787..8c675c4f6d54 100644 +--- a/drivers/atm/nicstar.c ++++ b/drivers/atm/nicstar.c +@@ -298,7 +298,7 @@ static void __exit nicstar_cleanup(void) + { + XPRINTK("nicstar: nicstar_cleanup() called.\n"); + +- del_timer(&ns_timer); ++ del_timer_sync(&ns_timer); + + pci_unregister_driver(&nicstar_driver); + +@@ -526,6 +526,15 @@ static int ns_init_card(int i, struct pci_dev *pcidev) + /* Set the VPI/VCI MSb 
mask to zero so we can receive OAM cells */ + writel(0x00000000, card->membase + VPM); + ++ card->intcnt = 0; ++ if (request_irq ++ (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) { ++ pr_err("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); ++ error = 9; ++ ns_init_card_error(card, error); ++ return error; ++ } ++ + /* Initialize TSQ */ + card->tsq.org = dma_alloc_coherent(&card->pcidev->dev, + NS_TSQSIZE + NS_TSQ_ALIGNMENT, +@@ -752,15 +761,6 @@ static int ns_init_card(int i, struct pci_dev *pcidev) + + card->efbie = 1; + +- card->intcnt = 0; +- if (request_irq +- (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) { +- printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); +- error = 9; +- ns_init_card_error(card, error); +- return error; +- } +- + /* Register device */ + card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops, + -1, NULL); +@@ -838,10 +838,12 @@ static void ns_init_card_error(ns_dev *card, int error) + dev_kfree_skb_any(hb); + } + if (error >= 12) { +- kfree(card->rsq.org); ++ dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, ++ card->rsq.org, card->rsq.dma); + } + if (error >= 11) { +- kfree(card->tsq.org); ++ dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, ++ card->tsq.org, card->tsq.dma); + } + if (error >= 10) { + free_irq(card->pcidev->irq, card); +@@ -1707,6 +1709,8 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) + + if (push_scqe(card, vc, scq, &scqe, skb) != 0) { + atomic_inc(&vcc->stats->tx_err); ++ dma_unmap_single(&card->pcidev->dev, NS_PRV_DMA(skb), skb->len, ++ DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + return -EIO; + } +diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c +index 5120a96b3a89..b2f4e8df1591 100644 +--- a/drivers/atm/uPD98402.c ++++ b/drivers/atm/uPD98402.c +@@ -210,7 +210,7 @@ static void uPD98402_int(struct atm_dev *dev) + static int uPD98402_start(struct atm_dev *dev) + { + DPRINTK("phy_start\n"); +- if (!(dev->dev_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL))) ++ if (!(dev->phy_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL))) + return -ENOMEM; + spin_lock_init(&PRIV(dev)->lock); + memset(&PRIV(dev)->sonet_stats,0,sizeof(struct k_sonet_stats)); +diff --git a/drivers/base/core.c b/drivers/base/core.c +index ee783abc1c96..5192521a9c33 100644 +--- a/drivers/base/core.c ++++ b/drivers/base/core.c +@@ -715,6 +715,7 @@ void device_initialize(struct device *dev) + device_pm_init(dev); + set_dev_node(dev, -1); + #ifdef CONFIG_GENERIC_MSI_IRQ ++ raw_spin_lock_init(&dev->msi_lock); + INIT_LIST_HEAD(&dev->msi_list); + #endif + } +@@ -2302,9 +2303,10 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) + */ + void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) + { +- if (fwnode) { +- struct fwnode_handle *fn = dev->fwnode; ++ struct device *parent = dev->parent; ++ struct fwnode_handle *fn = dev->fwnode; + ++ if (fwnode) { + if (fwnode_is_primary(fn)) + fn = fn->secondary; + +@@ -2314,8 +2316,13 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) + } + dev->fwnode = fwnode; + } else { +- dev->fwnode = fwnode_is_primary(dev->fwnode) ? 
+- dev->fwnode->secondary : NULL; ++ if (fwnode_is_primary(fn)) { ++ dev->fwnode = fn->secondary; ++ if (!(parent && fn == parent->fwnode)) ++ fn->secondary = NULL; ++ } else { ++ dev->fwnode = NULL; ++ } + } + } + EXPORT_SYMBOL_GPL(set_primary_fwnode); +diff --git a/drivers/base/dd.c b/drivers/base/dd.c +index fc4b7622a36d..c1eceb557a35 100644 +--- a/drivers/base/dd.c ++++ b/drivers/base/dd.c +@@ -364,7 +364,8 @@ static int really_probe(struct device *dev, struct device_driver *drv) + drv->bus->name, __func__, drv->name, dev_name(dev)); + if (!list_empty(&dev->devres_head)) { + dev_crit(dev, "Resources present before probing\n"); +- return -EBUSY; ++ ret = -EBUSY; ++ goto done; + } + + re_probe: +@@ -467,7 +468,7 @@ static int really_probe(struct device *dev, struct device_driver *drv) + ret = 0; + done: + atomic_dec(&probe_count); +- wake_up(&probe_waitqueue); ++ wake_up_all(&probe_waitqueue); + return ret; + } + +diff --git a/drivers/base/platform.c b/drivers/base/platform.c +index bef299ef6227..ec2e4b6bc56f 100644 +--- a/drivers/base/platform.c ++++ b/drivers/base/platform.c +@@ -692,6 +692,8 @@ int __init_or_module __platform_driver_probe(struct platform_driver *drv, + /* temporary section violation during probe() */ + drv->probe = probe; + retval = code = __platform_driver_register(drv, module); ++ if (retval) ++ return retval; + + /* + * Fixup that section violation, being paranoid about code scanning +diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c +index 76373148385c..79d7d7b44809 100644 +--- a/drivers/base/power/main.c ++++ b/drivers/base/power/main.c +@@ -1368,13 +1368,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) + } + + /* +- * If a device configured to wake up the system from sleep states +- * has been suspended at run time and there's a resume request pending +- * for it, this is equivalent to the device signaling wakeup, so the +- * system suspend operation should be aborted. ++ * Wait for possible runtime PM transitions of the device in progress ++ * to complete and if there's a runtime resume request pending for it, ++ * resume it before proceeding with invoking the system-wide suspend ++ * callbacks for it. ++ * ++ * If the system-wide suspend callbacks below change the configuration ++ * of the device, they must disable runtime PM for it or otherwise ++ * ensure that its runtime-resume callbacks will not be confused by that ++ * change in case they are invoked going forward. 
+ */ +- if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) +- pm_wakeup_event(dev, 0); ++ pm_runtime_barrier(dev); + + if (pm_wakeup_pending()) { + pm_get_active_wakeup_sources(suspend_abort, +diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c +index 1559070d6060..05bc4c1b1d61 100644 +--- a/drivers/base/regmap/regmap-debugfs.c ++++ b/drivers/base/regmap/regmap-debugfs.c +@@ -204,6 +204,9 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from, + if (*ppos < 0 || !count) + return -EINVAL; + ++ if (count > (PAGE_SIZE << (MAX_ORDER - 1))) ++ count = PAGE_SIZE << (MAX_ORDER - 1); ++ + buf = kmalloc(count, GFP_KERNEL); + if (!buf) + return -ENOMEM; +@@ -412,6 +415,9 @@ static ssize_t regmap_reg_ranges_read_file(struct file *file, + if (*ppos < 0 || !count) + return -EINVAL; + ++ if (count > (PAGE_SIZE << (MAX_ORDER - 1))) ++ count = PAGE_SIZE << (MAX_ORDER - 1); ++ + buf = kmalloc(count, GFP_KERNEL); + if (!buf) + return -ENOMEM; +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c +index fcb76ca848d3..35e780db86a5 100644 +--- a/drivers/base/regmap/regmap.c ++++ b/drivers/base/regmap/regmap.c +@@ -1239,7 +1239,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data) + + /* If the user didn't specify a name match any */ + if (data) +- return (*r)->name == data; ++ return !strcmp((*r)->name, data); + else + return 1; + } +diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig +index 64d95c9514a0..d3b789814155 100644 +--- a/drivers/block/Kconfig ++++ b/drivers/block/Kconfig +@@ -530,6 +530,7 @@ config BLK_DEV_RBD + config BLK_DEV_RSXX + tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver" + depends on PCI ++ select CRC32 + help + Device driver for IBM's high speed PCIe SSD + storage device: Flash Adapter 900GB Full Height. 
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c +index 7e35574a17df..9d81ac8b4512 100644 +--- a/drivers/block/brd.c ++++ b/drivers/block/brd.c +@@ -25,7 +25,6 @@ + + #include + +-#define SECTOR_SHIFT 9 + #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) + #define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT) + +diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c +index ab62b81c2ca7..dece26f119d4 100644 +--- a/drivers/block/drbd/drbd_bitmap.c ++++ b/drivers/block/drbd/drbd_bitmap.c +@@ -1070,7 +1070,7 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned + .done = 0, + .flags = flags, + .error = 0, +- .kref = { ATOMIC_INIT(2) }, ++ .kref = KREF_INIT(2), + }; + + if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in drbd_bm_aio_ctx_destroy() */ +diff --git a/drivers/block/loop.c b/drivers/block/loop.c +index eb5f703b00df..8ceaec2613f0 100644 +--- a/drivers/block/loop.c ++++ b/drivers/block/loop.c +@@ -222,24 +222,35 @@ static void __loop_update_dio(struct loop_device *lo, bool dio) + blk_mq_unfreeze_queue(lo->lo_queue); + } + ++/** ++ * loop_validate_block_size() - validates the passed in block size ++ * @bsize: size to validate ++ */ + static int +-figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit) ++loop_validate_block_size(unsigned short bsize) ++{ ++ if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize)) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++/** ++ * loop_set_size() - sets device size and notifies userspace ++ * @lo: struct loop_device to set the size for ++ * @size: new size of the loop device ++ * ++ * Callers must validate that the size passed into this function fits into ++ * a sector_t, eg using loop_validate_size() ++ */ ++static void loop_set_size(struct loop_device *lo, loff_t size) + { +- loff_t size = get_size(offset, sizelimit, lo->lo_backing_file); +- sector_t x = (sector_t)size; + struct block_device *bdev = lo->lo_device; + +- if (unlikely((loff_t)x != size)) +- return -EFBIG; +- if (lo->lo_offset != offset) +- lo->lo_offset = offset; +- if (lo->lo_sizelimit != sizelimit) +- lo->lo_sizelimit = sizelimit; +- set_capacity(lo->lo_disk, x); +- bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9); ++ set_capacity(lo->lo_disk, size); ++ bd_set_size(bdev, size << 9); + /* let user-space know about the new size */ + kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); +- return 0; + } + + static inline int +@@ -885,22 +896,123 @@ static int loop_prepare_queue(struct loop_device *lo) + return 0; + } + +-static int loop_set_fd(struct loop_device *lo, fmode_t mode, +- struct block_device *bdev, unsigned int arg) ++static int ++loop_release_xfer(struct loop_device *lo) ++{ ++ int err = 0; ++ struct loop_func_table *xfer = lo->lo_encryption; ++ ++ if (xfer) { ++ if (xfer->release) ++ err = xfer->release(lo); ++ lo->transfer = NULL; ++ lo->lo_encryption = NULL; ++ module_put(xfer->owner); ++ } ++ return err; ++} ++ ++static int ++loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, ++ const struct loop_info64 *i) ++{ ++ int err = 0; ++ ++ if (xfer) { ++ struct module *owner = xfer->owner; ++ ++ if (!try_module_get(owner)) ++ return -EINVAL; ++ if (xfer->init) ++ err = xfer->init(lo, i); ++ if (err) ++ module_put(owner); ++ else ++ lo->lo_encryption = xfer; ++ } ++ return err; ++} ++ ++/** ++ * loop_set_status_from_info - configure device from loop_info ++ * @lo: struct loop_device to configure ++ * @info: struct loop_info64 to configure the device 
with ++ * ++ * Configures the loop device parameters according to the passed ++ * in loop_info64 configuration. ++ */ ++static int ++loop_set_status_from_info(struct loop_device *lo, ++ const struct loop_info64 *info) ++{ ++ int err; ++ struct loop_func_table *xfer; ++ kuid_t uid = current_uid(); ++ ++ if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) ++ return -EINVAL; ++ ++ err = loop_release_xfer(lo); ++ if (err) ++ return err; ++ ++ if (info->lo_encrypt_type) { ++ unsigned int type = info->lo_encrypt_type; ++ ++ if (type >= MAX_LO_CRYPT) ++ return -EINVAL; ++ xfer = xfer_funcs[type]; ++ if (xfer == NULL) ++ return -EINVAL; ++ } else ++ xfer = NULL; ++ ++ err = loop_init_xfer(lo, xfer, info); ++ if (err) ++ return err; ++ ++ lo->lo_offset = info->lo_offset; ++ lo->lo_sizelimit = info->lo_sizelimit; ++ memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); ++ memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); ++ lo->lo_file_name[LO_NAME_SIZE-1] = 0; ++ lo->lo_crypt_name[LO_NAME_SIZE-1] = 0; ++ ++ if (!xfer) ++ xfer = &none_funcs; ++ lo->transfer = xfer->transfer; ++ lo->ioctl = xfer->ioctl; ++ ++ lo->lo_flags = info->lo_flags; ++ ++ lo->lo_encrypt_key_size = info->lo_encrypt_key_size; ++ lo->lo_init[0] = info->lo_init[0]; ++ lo->lo_init[1] = info->lo_init[1]; ++ if (info->lo_encrypt_key_size) { ++ memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, ++ info->lo_encrypt_key_size); ++ lo->lo_key_owner = uid; ++ } ++ ++ return 0; ++} ++ ++static int loop_configure(struct loop_device *lo, fmode_t mode, ++ struct block_device *bdev, ++ const struct loop_config *config) + { + struct file *file; + struct inode *inode; + struct address_space *mapping; +- unsigned lo_blocksize; +- int lo_flags = 0; + int error; + loff_t size; ++ unsigned short bsize; + + /* This is safe, since we have a reference from open(). */ + __module_get(THIS_MODULE); + + error = -EBADF; +- file = fget(arg); ++ file = fget(config->fd); + if (!file) + goto out; + +@@ -915,37 +1027,42 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, + mapping = file->f_mapping; + inode = mapping->host; + +- if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) || +- !file->f_op->write_iter) +- lo_flags |= LO_FLAGS_READ_ONLY; ++ size = get_loop_size(lo, file); + +- lo_blocksize = S_ISBLK(inode->i_mode) ? 
+- inode->i_bdev->bd_block_size : PAGE_SIZE; ++ if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) { ++ error = -EINVAL; ++ goto out_putf; ++ } + +- error = -EFBIG; +- size = get_loop_size(lo, file); +- if ((loff_t)(sector_t)size != size) ++ if (config->block_size) { ++ error = loop_validate_block_size(config->block_size); ++ if (error) ++ goto out_putf; ++ } ++ ++ error = loop_set_status_from_info(lo, &config->info); ++ if (error) + goto out_putf; ++ ++ if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) || ++ !file->f_op->write_iter) ++ lo->lo_flags |= LO_FLAGS_READ_ONLY; ++ + error = loop_prepare_queue(lo); + if (error) + goto out_putf; + + error = 0; + +- set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); ++ set_device_ro(bdev, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0); + +- lo->use_dio = false; +- lo->lo_blocksize = lo_blocksize; ++ lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO; + lo->lo_device = bdev; +- lo->lo_flags = lo_flags; + lo->lo_backing_file = file; +- lo->transfer = NULL; +- lo->ioctl = NULL; +- lo->lo_sizelimit = 0; + lo->old_gfp_mask = mapping_gfp_mask(mapping); + mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); + +- if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) ++ if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) + blk_queue_write_cache(lo->lo_queue, true, false); + + if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev) { +@@ -957,13 +1074,17 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, + } + + loop_update_dio(lo); +- set_capacity(lo->lo_disk, size); +- bd_set_size(bdev, size << 9); + loop_sysfs_init(lo); +- /* let user-space know about the new size */ +- kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); ++ loop_set_size(lo, size); + +- set_blocksize(bdev, lo_blocksize); ++ if (config->block_size) ++ bsize = config->block_size; ++ else ++ bsize = S_ISBLK(inode->i_mode) ? 
++ block_size(inode->i_bdev) : PAGE_SIZE; ++ ++ lo->lo_blocksize = bsize; ++ set_blocksize(bdev, bsize); + + lo->lo_state = Lo_bound; + if (part_shift) +@@ -985,43 +1106,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, + return error; + } + +-static int +-loop_release_xfer(struct loop_device *lo) +-{ +- int err = 0; +- struct loop_func_table *xfer = lo->lo_encryption; +- +- if (xfer) { +- if (xfer->release) +- err = xfer->release(lo); +- lo->transfer = NULL; +- lo->lo_encryption = NULL; +- module_put(xfer->owner); +- } +- return err; +-} +- +-static int +-loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, +- const struct loop_info64 *i) +-{ +- int err = 0; +- +- if (xfer) { +- struct module *owner = xfer->owner; +- +- if (!try_module_get(owner)) +- return -EINVAL; +- if (xfer->init) +- err = xfer->init(lo, i); +- if (err) +- module_put(owner); +- else +- lo->lo_encryption = xfer; +- } +- return err; +-} +- + static int loop_clr_fd(struct loop_device *lo) + { + struct file *filp = lo->lo_backing_file; +@@ -1108,20 +1192,21 @@ static int + loop_set_status(struct loop_device *lo, const struct loop_info64 *info) + { + int err; +- struct loop_func_table *xfer; + kuid_t uid = current_uid(); ++ int prev_lo_flags; ++ bool size_changed = false; + + if (lo->lo_encrypt_key_size && + !uid_eq(lo->lo_key_owner, uid) && + !capable(CAP_SYS_ADMIN)) + return -EPERM; +- if (lo->lo_state != Lo_bound) ++ if (lo->lo_state != Lo_bound) { + return -ENXIO; +- if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) +- return -EINVAL; ++ } + + if (lo->lo_offset != info->lo_offset || + lo->lo_sizelimit != info->lo_sizelimit) { ++ size_changed = true; + sync_blockdev(lo->lo_device); + kill_bdev(lo->lo_device); + } +@@ -1129,79 +1214,43 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) + /* I/O need to be drained during transfer transition */ + blk_mq_freeze_queue(lo->lo_queue); + +- err = loop_release_xfer(lo); +- if (err) ++ if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) { ++ /* If any pages were dirtied after kill_bdev(), try again */ ++ err = -EAGAIN; ++ pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", ++ __func__, lo->lo_number, lo->lo_file_name, ++ lo->lo_device->bd_inode->i_mapping->nrpages); + goto exit; ++ } + +- if (info->lo_encrypt_type) { +- unsigned int type = info->lo_encrypt_type; +- +- if (type >= MAX_LO_CRYPT) { +- err = -EINVAL; +- goto exit; +- } +- xfer = xfer_funcs[type]; +- if (xfer == NULL) { +- err = -EINVAL; +- goto exit; +- } +- } else +- xfer = NULL; ++ prev_lo_flags = lo->lo_flags; + +- err = loop_init_xfer(lo, xfer, info); ++ err = loop_set_status_from_info(lo, info); + if (err) + goto exit; + +- if (lo->lo_offset != info->lo_offset || +- lo->lo_sizelimit != info->lo_sizelimit) { +- /* kill_bdev should have truncated all the pages */ +- if (lo->lo_device->bd_inode->i_mapping->nrpages) { +- err = -EAGAIN; +- pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", +- __func__, lo->lo_number, lo->lo_file_name, +- lo->lo_device->bd_inode->i_mapping->nrpages); +- goto exit; +- } +- if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) { +- err = -EFBIG; +- goto exit; +- } ++ /* Mask out flags that can't be set using LOOP_SET_STATUS. 
*/ ++ lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS; ++ /* For those flags, use the previous values instead */ ++ lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS; ++ /* For flags that can't be cleared, use previous values too */ ++ lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS; ++ if (size_changed) { ++ loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit, ++ lo->lo_backing_file); ++ loop_set_size(lo, new_size); + } + + loop_config_discard(lo); + +- memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); +- memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); +- lo->lo_file_name[LO_NAME_SIZE-1] = 0; +- lo->lo_crypt_name[LO_NAME_SIZE-1] = 0; +- +- if (!xfer) +- xfer = &none_funcs; +- lo->transfer = xfer->transfer; +- lo->ioctl = xfer->ioctl; +- +- if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) != +- (info->lo_flags & LO_FLAGS_AUTOCLEAR)) +- lo->lo_flags ^= LO_FLAGS_AUTOCLEAR; +- +- lo->lo_encrypt_key_size = info->lo_encrypt_key_size; +- lo->lo_init[0] = info->lo_init[0]; +- lo->lo_init[1] = info->lo_init[1]; +- if (info->lo_encrypt_key_size) { +- memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, +- info->lo_encrypt_key_size); +- lo->lo_key_owner = uid; +- } +- + /* update dio if lo_offset or transfer is changed */ + __loop_update_dio(lo, lo->use_dio); + + exit: + blk_mq_unfreeze_queue(lo->lo_queue); + +- if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) && +- !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { +- lo->lo_flags |= LO_FLAGS_PARTSCAN; ++ if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) && ++ !(prev_lo_flags & LO_FLAGS_PARTSCAN)) { + lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; + loop_reread_partitions(lo, lo->lo_device); + } +@@ -1350,10 +1399,15 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) { + + static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev) + { ++ loff_t size; ++ + if (unlikely(lo->lo_state != Lo_bound)) + return -ENXIO; + +- return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit); ++ size = get_loop_size(lo, lo->lo_backing_file); ++ loop_set_size(lo, size); ++ ++ return 0; + } + + static int loop_set_dio(struct loop_device *lo, unsigned long arg) +@@ -1377,8 +1431,9 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg) + if (lo->lo_state != Lo_bound) + return -ENXIO; + +- if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg)) +- return -EINVAL; ++ err = loop_validate_block_size(arg); ++ if (err) ++ return err; + + if (lo->lo_queue->limits.logical_block_size == arg) + return 0; +@@ -1409,13 +1464,34 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) + { + struct loop_device *lo = bdev->bd_disk->private_data; ++ void __user *argp = (void __user *) arg; + int err; + + mutex_lock_nested(&lo->lo_ctl_mutex, 1); + switch (cmd) { +- case LOOP_SET_FD: +- err = loop_set_fd(lo, mode, bdev, arg); ++ case LOOP_SET_FD: { ++ /* ++ * Legacy case - pass in a zeroed out struct loop_config with ++ * only the file descriptor set , which corresponds with the ++ * default parameters we'd have used otherwise. 
++ */ ++ struct loop_config config; ++ ++ memset(&config, 0, sizeof(config)); ++ config.fd = arg; ++ ++ err = loop_configure(lo, mode, bdev, &config); ++ break; ++ } ++ case LOOP_CONFIGURE: { ++ struct loop_config config; ++ ++ if (copy_from_user(&config, argp, sizeof(config))) ++ return -EFAULT; ++ ++ err = loop_configure(lo, mode, bdev, &config); + break; ++ } + case LOOP_CHANGE_FD: + err = loop_change_fd(lo, bdev, arg); + break; +@@ -1428,20 +1504,18 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, + case LOOP_SET_STATUS: + err = -EPERM; + if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) +- err = loop_set_status_old(lo, +- (struct loop_info __user *)arg); ++ err = loop_set_status_old(lo, argp); + break; + case LOOP_GET_STATUS: +- err = loop_get_status_old(lo, (struct loop_info __user *) arg); ++ err = loop_get_status_old(lo, argp); + break; + case LOOP_SET_STATUS64: + err = -EPERM; + if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) +- err = loop_set_status64(lo, +- (struct loop_info64 __user *) arg); ++ err = loop_set_status64(lo, argp); + break; + case LOOP_GET_STATUS64: +- err = loop_get_status64(lo, (struct loop_info64 __user *) arg); ++ err = loop_get_status64(lo, argp); + break; + case LOOP_SET_CAPACITY: + err = -EPERM; +@@ -1609,6 +1683,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, + case LOOP_CLR_FD: + case LOOP_GET_STATUS64: + case LOOP_SET_STATUS64: ++ case LOOP_CONFIGURE: + arg = (unsigned long) compat_ptr(arg); + case LOOP_SET_FD: + case LOOP_CHANGE_FD: +diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c +index 76f33c84ce3d..7ec5e8f0cbe5 100644 +--- a/drivers/block/ps3disk.c ++++ b/drivers/block/ps3disk.c +@@ -464,7 +464,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev) + blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH); + + blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9); +- blk_queue_segment_boundary(queue, -1UL); + blk_queue_dma_alignment(queue, dev->blk_size-1); + blk_queue_logical_block_size(queue, dev->blk_size); + +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c +index 033efb675287..7e8975479f19 100644 +--- a/drivers/block/rbd.c ++++ b/drivers/block/rbd.c +@@ -50,15 +50,6 @@ + + #define RBD_DEBUG /* Activate rbd_assert() calls */ + +-/* +- * The basic unit of block I/O is a sector. It is interpreted in a +- * number of contexts in Linux (blk, bio, genhd), but the default is +- * universally 512 bytes. These symbols are just slightly more +- * meaningful than the bare numbers they represent. +- */ +-#define SECTOR_SHIFT 9 +-#define SECTOR_SIZE (1ULL << SECTOR_SHIFT) +- + /* + * Increment the given counter and return its updated value. + * If the counter is already 0 it will not be incremented. 
+@@ -4623,6 +4614,9 @@ static ssize_t rbd_config_info_show(struct device *dev, + { + struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); + ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ + return sprintf(buf, "%s\n", rbd_dev->config_info); + } + +@@ -4724,6 +4718,9 @@ static ssize_t rbd_image_refresh(struct device *dev, + struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); + int ret; + ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ + ret = rbd_dev_refresh(rbd_dev); + if (ret) + return ret; +@@ -6201,6 +6198,9 @@ static ssize_t do_rbd_add(struct bus_type *bus, + bool read_only; + int rc; + ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + +@@ -6349,6 +6349,9 @@ static ssize_t do_rbd_remove(struct bus_type *bus, + bool force = false; + int ret; + ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ + dev_id = -1; + opt_buf[0] = '\0'; + sscanf(buf, "%d %5s", &dev_id, opt_buf); +diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c +index 6beafaa335c7..4ab96c7f8fd7 100644 +--- a/drivers/block/rsxx/core.c ++++ b/drivers/block/rsxx/core.c +@@ -180,15 +180,17 @@ static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf, + { + struct rsxx_cardinfo *card = file_inode(fp)->i_private; + char *buf; +- ssize_t st; ++ int st; + + buf = kzalloc(cnt, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1); +- if (!st) +- st = copy_to_user(ubuf, buf, cnt); ++ if (!st) { ++ if (copy_to_user(ubuf, buf, cnt)) ++ st = -EFAULT; ++ } + kfree(buf); + if (st) + return st; +@@ -890,6 +892,7 @@ static int rsxx_pci_probe(struct pci_dev *dev, + card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event"); + if (!card->event_wq) { + dev_err(CARD_TO_DEV(card), "Failed card event setup.\n"); ++ st = -ENOMEM; + goto failed_event_handler; + } + +diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c +index f287eec36b28..302260e9002c 100644 +--- a/drivers/block/virtio_blk.c ++++ b/drivers/block/virtio_blk.c +@@ -749,6 +749,7 @@ static int virtblk_probe(struct virtio_device *vdev) + put_disk(vblk->disk); + out_free_vq: + vdev->config->del_vqs(vdev); ++ kfree(vblk->vqs); + out_free_vblk: + kfree(vblk); + out_free_index: +@@ -799,6 +800,8 @@ static int virtblk_freeze(struct virtio_device *vdev) + blk_mq_stop_hw_queues(vblk->disk->queue); + + vdev->config->del_vqs(vdev); ++ kfree(vblk->vqs); ++ + return 0; + } + +diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c +index a700e525535c..1a1ad0fdc039 100644 +--- a/drivers/block/xen-blkback/blkback.c ++++ b/drivers/block/xen-blkback/blkback.c +@@ -183,7 +183,7 @@ static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num) + + #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page))) + +-static int do_block_io_op(struct xen_blkif_ring *ring); ++static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags); + static int dispatch_rw_block_io(struct xen_blkif_ring *ring, + struct blkif_request *req, + struct pending_req *pending_req); +@@ -608,6 +608,8 @@ int xen_blkif_schedule(void *arg) + struct xen_vbd *vbd = &blkif->vbd; + unsigned long timeout; + int ret; ++ bool do_eoi; ++ unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS; + + set_freezable(); + while (!kthread_should_stop()) { +@@ -632,16 +634,23 @@ int xen_blkif_schedule(void *arg) + if (timeout == 0) + goto purge_gnt_list; + ++ do_eoi = ring->waiting_reqs; ++ + ring->waiting_reqs = 0; + 
smp_mb(); /* clear flag *before* checking for work */ + +- ret = do_block_io_op(ring); ++ ret = do_block_io_op(ring, &eoi_flags); + if (ret > 0) + ring->waiting_reqs = 1; + if (ret == -EACCES) + wait_event_interruptible(ring->shutdown_wq, + kthread_should_stop()); + ++ if (do_eoi && !ring->waiting_reqs) { ++ xen_irq_lateeoi(ring->irq, eoi_flags); ++ eoi_flags |= XEN_EOI_FLAG_SPURIOUS; ++ } ++ + purge_gnt_list: + if (blkif->vbd.feature_gnt_persistent && + time_after(jiffies, ring->next_lru)) { +@@ -834,8 +843,11 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring, + pages[i]->page = persistent_gnt->page; + pages[i]->persistent_gnt = persistent_gnt; + } else { +- if (get_free_page(ring, &pages[i]->page)) +- goto out_of_memory; ++ if (get_free_page(ring, &pages[i]->page)) { ++ put_free_pages(ring, pages_to_gnt, segs_to_map); ++ ret = -ENOMEM; ++ goto out; ++ } + addr = vaddr(pages[i]->page); + pages_to_gnt[segs_to_map] = pages[i]->page; + pages[i]->persistent_gnt = NULL; +@@ -851,10 +863,8 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring, + break; + } + +- if (segs_to_map) { ++ if (segs_to_map) + ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map); +- BUG_ON(ret); +- } + + /* + * Now swizzle the MFN in our domain with the MFN from the other domain +@@ -869,7 +879,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring, + pr_debug("invalid buffer -- could not remap it\n"); + put_free_pages(ring, &pages[seg_idx]->page, 1); + pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE; +- ret |= 1; ++ ret |= !ret; + goto next; + } + pages[seg_idx]->handle = map[new_map_idx].handle; +@@ -921,17 +931,18 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring, + } + segs_to_map = 0; + last_map = map_until; +- if (map_until != num) ++ if (!ret && map_until != num) + goto again; + +- return ret; +- +-out_of_memory: +- pr_alert("%s: out of memory\n", __func__); +- put_free_pages(ring, pages_to_gnt, segs_to_map); +- for (i = last_map; i < num; i++) ++out: ++ for (i = last_map; i < num; i++) { ++ /* Don't zap current batch's valid persistent grants. */ ++ if(i >= map_until) ++ pages[i]->persistent_gnt = NULL; + pages[i]->handle = BLKBACK_INVALID_HANDLE; +- return -ENOMEM; ++ } ++ ++ return ret; + } + + static int xen_blkbk_map_seg(struct pending_req *pending_req) +@@ -1117,7 +1128,7 @@ static void end_block_io_op(struct bio *bio) + * and transmute it to the block API to hand it over to the proper block disk. + */ + static int +-__do_block_io_op(struct xen_blkif_ring *ring) ++__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags) + { + union blkif_back_rings *blk_rings = &ring->blk_rings; + struct blkif_request req; +@@ -1140,6 +1151,9 @@ __do_block_io_op(struct xen_blkif_ring *ring) + if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) + break; + ++ /* We've seen a request, so clear spurious eoi flag. 
*/ ++ *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS; ++ + if (kthread_should_stop()) { + more_to_do = 1; + break; +@@ -1198,13 +1212,13 @@ __do_block_io_op(struct xen_blkif_ring *ring) + } + + static int +-do_block_io_op(struct xen_blkif_ring *ring) ++do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags) + { + union blkif_back_rings *blk_rings = &ring->blk_rings; + int more_to_do; + + do { +- more_to_do = __do_block_io_op(ring); ++ more_to_do = __do_block_io_op(ring, eoi_flags); + if (more_to_do) + break; + +diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c +index 1d1f86657967..27c9d7a5b4de 100644 +--- a/drivers/block/xen-blkback/xenbus.c ++++ b/drivers/block/xen-blkback/xenbus.c +@@ -236,9 +236,8 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref, + BUG(); + } + +- err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn, +- xen_blkif_be_int, 0, +- "blkif-backend", ring); ++ err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->domid, ++ evtchn, xen_blkif_be_int, 0, "blkif-backend", ring); + if (err < 0) { + xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring); + ring->blk_rings.common.sring = NULL; +@@ -263,6 +262,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) + + if (ring->xenblkd) { + kthread_stop(ring->xenblkd); ++ ring->xenblkd = NULL; + wake_up(&ring->shutdown_wq); + } + +@@ -646,7 +646,8 @@ static int xen_blkbk_probe(struct xenbus_device *dev, + /* setup back pointer */ + be->blkif->be = be; + +- err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed, ++ err = xenbus_watch_pathfmt(dev, &be->backend_watch, NULL, ++ backend_changed, + "%s/%s", dev->nodename, "physical-device"); + if (err) + goto fail; +diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c +index 853b63108346..63802dc228fe 100644 +--- a/drivers/block/zram/zram_drv.c ++++ b/drivers/block/zram/zram_drv.c +@@ -1103,7 +1103,7 @@ static ssize_t mm_stat_show(struct device *dev, + zram->limit_pages << PAGE_SHIFT, + max_used << PAGE_SHIFT, + (u64)atomic64_read(&zram->stats.same_pages), +- pool_stats.pages_compacted, ++ atomic_long_read(&pool_stats.pages_compacted), + (u64)atomic64_read(&zram->stats.huge_pages)); + up_read(&zram->init_lock); + +diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h +index 29af8d024b47..f2fd46daa760 100644 +--- a/drivers/block/zram/zram_drv.h ++++ b/drivers/block/zram/zram_drv.h +@@ -21,7 +21,6 @@ + + #include "zcomp.h" + +-#define SECTOR_SHIFT 9 + #define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) + #define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT) + #define ZRAM_LOGICAL_BLOCK_SHIFT 12 +diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c +index 81e8e2d36372..c00b7637920d 100644 +--- a/drivers/bluetooth/bluetooth-power.c ++++ b/drivers/bluetooth/bluetooth-power.c +@@ -59,6 +59,7 @@ static int pwr_state; + struct class *bt_class; + static int bt_major; + static int soc_id; ++static bool bt_disabled; + + static int bt_vreg_init(struct bt_power_vreg_data *vreg) + { +@@ -563,6 +564,11 @@ static int bt_power_populate_dt_pinfo(struct platform_device *pdev) + return -ENOMEM; + + if (pdev->dev.of_node) { ++ rc = of_get_named_gpio(pdev->dev.of_node, "qca,bt-disable", 0); ++ if (rc) { ++ bt_disabled = true; ++ return -EINVAL; ++ } + bt_power_pdata->bt_gpio_sys_rst = + of_get_named_gpio(pdev->dev.of_node, + "qca,bt-reset-gpio", 0); +@@ -729,6 +735,9 @@ static long bt_ioctl(struct file *file, unsigned int cmd, 
unsigned long arg) + int ret = 0, pwr_cntrl = 0; + int chipset_version = 0; + ++ if (bt_disabled) ++ return ret; ++ + switch (cmd) { + #ifdef CONFIG_BTFM_SLIM + case BT_CMD_SLIM_TEST: +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 4e3b24a0511f..30c09b9ddbf0 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -2508,6 +2508,11 @@ static int btusb_setup_qca_download_fw(struct hci_dev *hdev, + sent += size; + count -= size; + ++ /* ep2 need time to switch from function acl to function dfu, ++ * so we add 20ms delay here. ++ */ ++ msleep(20); ++ + while (count) { + size = min_t(size_t, count, QCA_DFU_PACKET_LEN); + +diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c +index 1b14256376d2..7c1da45be166 100644 +--- a/drivers/bus/mips_cdmm.c ++++ b/drivers/bus/mips_cdmm.c +@@ -544,10 +544,8 @@ static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus) + dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id); + ++id; + ret = device_register(&dev->dev); +- if (ret) { ++ if (ret) + put_device(&dev->dev); +- kfree(dev); +- } + } + } + +diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c +index 5012e3ad1225..624f74d03a83 100644 +--- a/drivers/bus/omap_l3_noc.c ++++ b/drivers/bus/omap_l3_noc.c +@@ -285,7 +285,7 @@ static int omap_l3_probe(struct platform_device *pdev) + */ + l3->debug_irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler, +- 0x0, "l3-dbg-irq", l3); ++ IRQF_NO_THREAD, "l3-dbg-irq", l3); + if (ret) { + dev_err(l3->dev, "request_irq failed for %d\n", + l3->debug_irq); +@@ -294,7 +294,7 @@ static int omap_l3_probe(struct platform_device *pdev) + + l3->app_irq = platform_get_irq(pdev, 1); + ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler, +- 0x0, "l3-app-irq", l3); ++ IRQF_NO_THREAD, "l3-app-irq", l3); + if (ret) + dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq); + +diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c +index a6444244c411..bfb67aa00bec 100644 +--- a/drivers/bus/qcom-ebi2.c ++++ b/drivers/bus/qcom-ebi2.c +@@ -357,8 +357,10 @@ static int qcom_ebi2_probe(struct platform_device *pdev) + + /* Figure out the chipselect */ + ret = of_property_read_u32(child, "reg", &csindex); +- if (ret) ++ if (ret) { ++ of_node_put(child); + return ret; ++ } + + if (csindex > 5) { + dev_err(dev, +diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c +index 1852d19d0d7b..86110a2abf0f 100644 +--- a/drivers/cdrom/gdrom.c ++++ b/drivers/cdrom/gdrom.c +@@ -773,6 +773,13 @@ static int probe_gdrom_setupqueue(void) + static int probe_gdrom(struct platform_device *devptr) + { + int err; ++ ++ /* ++ * Ensure our "one" device is initialized properly in case of previous ++ * usages of it ++ */ ++ memset(&gd, 0, sizeof(gd)); ++ + /* Start the device */ + if (gdrom_execute_diagnostic() != 1) { + pr_warning("ATA Probe for GDROM failed\n"); +@@ -850,6 +857,8 @@ static int remove_gdrom(struct platform_device *devptr) + if (gdrom_major) + unregister_blkdev(gdrom_major, GDROM_DEV_NAME); + unregister_cdrom(gd.cd_info); ++ kfree(gd.cd_info); ++ kfree(gd.toc); + + return 0; + } +@@ -865,7 +874,7 @@ static struct platform_driver gdrom_driver = { + static int __init init_gdrom(void) + { + int rc; +- gd.toc = NULL; ++ + rc = platform_driver_register(&gdrom_driver); + if (rc) + return rc; +@@ -881,8 +890,6 @@ static void __exit exit_gdrom(void) + { + platform_device_unregister(pd); + platform_driver_unregister(&gdrom_driver); +- kfree(gd.toc); +- 
kfree(gd.cd_info); + } + + module_init(init_gdrom); +diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c +index 58dc1c560d37..7b682aaffc90 100644 +--- a/drivers/char/adsprpc.c ++++ b/drivers/char/adsprpc.c +@@ -274,8 +274,8 @@ struct fastrpc_static_pd { + struct notifier_block pdrnb; + struct notifier_block get_service_nb; + void *pdrhandle; +- int pdrcount; +- int prevpdrcount; ++ uint64_t pdrcount; ++ uint64_t prevpdrcount; + int ispdup; + }; + +@@ -304,10 +304,10 @@ struct fastrpc_channel_ctx { + struct notifier_block nb; + struct kref kref; + int channel; +- int sesscount; +- int ssrcount; ++ uint64_t sesscount; ++ uint64_t ssrcount; + void *handle; +- int prevssrcount; ++ uint64_t prevssrcount; + int issubsystemup; + int vmid; + struct secure_vm rhvm; +@@ -407,7 +407,7 @@ struct fastrpc_file { + int sessionid; + int tgid; + int cid; +- int ssrcount; ++ uint64_t ssrcount; + int pd; + char *spdname; + int file_close; +@@ -1037,6 +1037,12 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size, + struct fastrpc_buf *buf = NULL, *fr = NULL; + struct hlist_node *n; + ++ VERIFY(err, fl->sctx != NULL); ++ if (err) { ++ err = -EBADR; ++ goto bail; ++ } ++ + VERIFY(err, size > 0 && size < MAX_SIZE_LIMIT); + if (err) { + err = -EFAULT; +@@ -2156,6 +2162,10 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, + if (err) + goto bail; + bail: ++ if (ctx && ctx->handle) { ++ glink_rx_done(ctx->handle, ctx->ptr, true); ++ ctx->handle = NULL; ++ } + if (ctx && interrupted == -ERESTARTSYS) + context_save_interrupted(ctx); + else if (ctx) +@@ -2923,18 +2933,18 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl, + mutex_unlock(&fl->fl_map_mutex); + if (err) + goto bail; +- VERIFY(err, map != NULL); +- if (err) { ++ if (map) { ++ VERIFY(err, !fastrpc_munmap_on_dsp(fl, map->raddr, ++ map->phys, map->size, map->flags)); ++ if (err) ++ goto bail; ++ mutex_lock(&fl->fl_map_mutex); ++ fastrpc_mmap_free(map, 0); ++ mutex_unlock(&fl->fl_map_mutex); ++ } else { + err = -EINVAL; + goto bail; + } +- VERIFY(err, !fastrpc_munmap_on_dsp(fl, map->raddr, +- map->phys, map->size, map->flags)); +- if (err) +- goto bail; +- mutex_lock(&fl->fl_map_mutex); +- fastrpc_mmap_free(map, 0); +- mutex_unlock(&fl->fl_map_mutex); + bail: + if (err && map) { + mutex_lock(&fl->fl_map_mutex); +@@ -3068,7 +3078,8 @@ static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan, + int secure, int sharedcb, struct fastrpc_session_ctx **session) + { + struct fastrpc_apps *me = &gfa; +- int idx = 0, err = 0; ++ uint64_t idx = 0; ++ int err = 0; + + if (chan->sesscount) { + for (idx = 0; idx < chan->sesscount; ++idx) { +@@ -3441,14 +3452,14 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, + DEBUGFS_SIZE - len, "|%-9d", + chan->kref.refcount.counter); + len += scnprintf(fileinfo + len, +- DEBUGFS_SIZE - len, "|%-9d", +- chan->sesscount); ++ DEBUGFS_SIZE - len, "|0x%-10x", ++ (unsigned int)chan->sesscount); + len += scnprintf(fileinfo + len, + DEBUGFS_SIZE - len, "|%-14d", + chan->issubsystemup); + len += scnprintf(fileinfo + len, +- DEBUGFS_SIZE - len, "|%-9d", +- chan->ssrcount); ++ DEBUGFS_SIZE - len, "|0x%-9x", ++ (unsigned int)chan->ssrcount); + for (j = 0; j < chan->sesscount; j++) { + sess_used += chan->session[j].used; + } +@@ -3508,7 +3519,8 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s %7s %d\n", "sessionid", ":", fl->sessionid); + len += 
scnprintf(fileinfo + len, DEBUGFS_SIZE - len, +- "%s %8s %d\n", "ssrcount", ":", fl->ssrcount); ++ "%s %8s 0x%x\n", "ssrcount", ":", ++ (unsigned int)fl->ssrcount); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s %8s %d\n", "refcount", ":", fl->refcount); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, +diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h +index 6fcf826321ec..c82a278e2008 100644 +--- a/drivers/char/adsprpc_shared.h ++++ b/drivers/char/adsprpc_shared.h +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2012-2019, 2020 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig +index c528f96ee204..07de755ca30c 100644 +--- a/drivers/char/agp/Kconfig ++++ b/drivers/char/agp/Kconfig +@@ -124,7 +124,7 @@ config AGP_HP_ZX1 + + config AGP_PARISC + tristate "HP Quicksilver AGP support" +- depends on AGP && PARISC && 64BIT ++ depends on AGP && PARISC && 64BIT && IOMMU_SBA + help + This option gives you AGP GART support for the HP Quicksilver + AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000 +diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c +index a7cc5b7be598..667882e996ec 100644 +--- a/drivers/char/agp/intel-gtt.c ++++ b/drivers/char/agp/intel-gtt.c +@@ -303,8 +303,10 @@ static int intel_gtt_setup_scratch_page(void) + if (intel_private.needs_dmar) { + dma_addr = pci_map_page(intel_private.pcidev, page, 0, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); +- if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) ++ if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) { ++ __free_page(page); + return -EINVAL; ++ } + + intel_private.scratch_page_dma = dma_addr; + } else +@@ -845,6 +847,7 @@ void intel_gtt_insert_page(dma_addr_t addr, + unsigned int flags) + { + intel_private.driver->write_entry(addr, pg, flags); ++ readl(intel_private.gtt + pg); + if (intel_private.driver->chipset_flush) + intel_private.driver->chipset_flush(); + } +@@ -870,7 +873,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st, + j++; + } + } +- wmb(); ++ readl(intel_private.gtt + j - 1); + if (intel_private.driver->chipset_flush) + intel_private.driver->chipset_flush(); + } +@@ -1104,6 +1107,7 @@ static void i9xx_cleanup(void) + + static void i9xx_chipset_flush(void) + { ++ wmb(); + if (intel_private.i9xx_flush_page) + writel(1, intel_private.i9xx_flush_page); + } +diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c +index 149d573013ff..f8652afd3147 100644 +--- a/drivers/char/diag/diag_masks.c ++++ b/drivers/char/diag/diag_masks.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2008-2020, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2008-2021, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -58,7 +58,8 @@ static const struct diag_ssid_range_t msg_mask_tbl[] = { + { .ssid_first = MSG_SSID_22, .ssid_last = MSG_SSID_22_LAST }, + { .ssid_first = MSG_SSID_23, .ssid_last = MSG_SSID_23_LAST }, + { .ssid_first = MSG_SSID_24, .ssid_last = MSG_SSID_24_LAST }, +- { .ssid_first = MSG_SSID_25, .ssid_last = MSG_SSID_25_LAST } ++ { .ssid_first = MSG_SSID_25, .ssid_last = MSG_SSID_25_LAST }, ++ { .ssid_first = MSG_SSID_26, .ssid_last = MSG_SSID_26_LAST } + }; + + static int diag_check_update(int md_peripheral, int pid) +diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c +index 393f20fb7d36..26680a06811a 100644 +--- a/drivers/char/diag/diag_memorydevice.c ++++ b/drivers/char/diag/diag_memorydevice.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2014-2018, 2020 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -139,7 +139,7 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx) + { + int i, peripheral, pid = 0; + uint8_t found = 0; +- unsigned long flags; ++ unsigned long flags = 0, flags_sec = 0; + struct diag_md_info *ch = NULL; + struct diag_md_session_t *session_info = NULL; + +@@ -168,6 +168,16 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx) + } + + spin_lock_irqsave(&ch->lock, flags); ++ if (peripheral == APPS_DATA) { ++ spin_lock_irqsave(&driver->diagmem_lock, flags_sec); ++ if (!hdlc_data.allocated && !non_hdlc_data.allocated) { ++ spin_unlock_irqrestore(&driver->diagmem_lock, ++ flags_sec); ++ spin_unlock_irqrestore(&ch->lock, flags); ++ mutex_unlock(&driver->md_session_lock); ++ return -EINVAL; ++ } ++ } + for (i = 0; i < ch->num_tbl_entries && !found; i++) { + if (ch->tbl[i].buf != buf) + continue; +@@ -179,14 +189,16 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx) + ch->tbl[i].len = 0; + ch->tbl[i].ctx = 0; + } +- spin_unlock_irqrestore(&ch->lock, flags); + + if (found) { ++ if (peripheral == APPS_DATA) ++ spin_unlock_irqrestore(&driver->diagmem_lock, ++ flags_sec); ++ spin_unlock_irqrestore(&ch->lock, flags); + mutex_unlock(&driver->md_session_lock); + return -ENOMEM; + } + +- spin_lock_irqsave(&ch->lock, flags); + for (i = 0; i < ch->num_tbl_entries && !found; i++) { + if (ch->tbl[i].len == 0) { + ch->tbl[i].buf = buf; +@@ -196,6 +208,8 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx) + diag_ws_on_read(DIAG_WS_MUX, len); + } + } ++ if (peripheral == APPS_DATA) ++ spin_unlock_irqrestore(&driver->diagmem_lock, flags_sec); + spin_unlock_irqrestore(&ch->lock, flags); + mutex_unlock(&driver->md_session_lock); + +diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c +index 646f247285f0..d3240945a4df 100644 +--- a/drivers/char/diag/diag_usb.c ++++ b/drivers/char/diag/diag_usb.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2014-2016, 2018-2019 The Linux Foundation. All rights reserved. ++/* Copyright (c) 2014-2016, 2018-2020 The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -30,6 +30,7 @@ + #include "diag_mux.h" + #include "diagmem.h" + #include "diag_ipc_logging.h" ++#include + + #define DIAG_USB_STRING_SZ 10 + #define DIAG_USB_MAX_SIZE 16384 +@@ -365,6 +366,7 @@ static void diag_usb_notifier(void *priv, unsigned int event, + usb_info->max_size = usb_diag_request_size(usb_info->hdl); + atomic_set(&usb_info->connected, 1); + pr_info("diag: USB channel %s connected\n", usb_info->name); ++ place_marker("M - Diag port is enumerated"); + queue_work(usb_info->usb_wq, + &usb_info->connect_work); + break; +diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h +index 921fbe89aa95..fbba1c8294b2 100644 +--- a/drivers/char/diag/diagchar.h ++++ b/drivers/char/diag/diagchar.h +@@ -209,6 +209,9 @@ + #define DEFAULT_LOW_WM_VAL 15 + #define DEFAULT_HIGH_WM_VAL 85 + ++#define HDLC_CTXT 1 ++#define NON_HDLC_CTXT 2 ++ + #define TYPE_DATA 0 + #define TYPE_CNTL 1 + #define TYPE_DCI 2 +@@ -610,6 +613,7 @@ struct diagchar_dev { + unsigned int poolsize_dci; + unsigned int poolsize_user; + spinlock_t diagmem_lock; ++ wait_queue_head_t hdlc_wait_q; + /* Buffers for masks */ + struct mutex diag_cntl_mutex; + /* Members for Sending response */ +@@ -713,6 +717,17 @@ extern struct diagchar_dev *driver; + extern int wrap_enabled; + extern uint16_t wrap_count; + ++struct diag_apps_data_t { ++ void *buf; ++ uint32_t len; ++ int ctxt; ++ uint8_t allocated; ++ uint8_t flushed; ++}; ++ ++extern struct diag_apps_data_t hdlc_data; ++extern struct diag_apps_data_t non_hdlc_data; ++ + void diag_get_timestamp(char *time_str); + void check_drain_timer(void); + int diag_get_remote(int remote_info); +diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c +index 2dd7c0530487..027871e509e5 100644 +--- a/drivers/char/diag/diagchar_core.c ++++ b/drivers/char/diag/diagchar_core.c +@@ -154,14 +154,8 @@ static int timer_in_progress; + static int diag_mask_clear_param = 1; + module_param(diag_mask_clear_param, int, 0644); + +-struct diag_apps_data_t { +- void *buf; +- uint32_t len; +- int ctxt; +-}; +- +-static struct diag_apps_data_t hdlc_data; +-static struct diag_apps_data_t non_hdlc_data; ++struct diag_apps_data_t hdlc_data; ++struct diag_apps_data_t non_hdlc_data; + static struct mutex apps_data_mutex; + + #define DIAGPKT_MAX_DELAYED_RSP 0xFFFF +@@ -223,14 +217,25 @@ static void diag_drain_apps_data(struct diag_apps_data_t *data) + if (!data || !data->buf) + return; + ++ spin_lock_irqsave(&driver->diagmem_lock, flags); ++ if (data->flushed) { ++ spin_unlock_irqrestore(&driver->diagmem_lock, flags); ++ return; ++ } ++ data->flushed = 1; ++ spin_unlock_irqrestore(&driver->diagmem_lock, flags); + err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len, + data->ctxt); +- spin_lock_irqsave(&driver->diagmem_lock, flags); +- if (err) ++ ++ if (err) { ++ spin_lock_irqsave(&driver->diagmem_lock, flags); + diagmem_free(driver, data->buf, POOL_TYPE_HDLC); +- data->buf = NULL; +- data->len = 0; +- spin_unlock_irqrestore(&driver->diagmem_lock, flags); ++ data->buf = NULL; ++ data->len = 0; ++ data->allocated = 0; ++ data->flushed = 0; ++ spin_unlock_irqrestore(&driver->diagmem_lock, flags); ++ } + } + + void diag_update_user_client_work_fn(struct work_struct *work) +@@ -305,6 +310,7 @@ static void diag_mempool_init(void) + diagmem_init(driver, POOL_TYPE_DCI); + + spin_lock_init(&driver->diagmem_lock); ++ 
init_waitqueue_head(&driver->hdlc_wait_q); + } + + static void diag_mempool_exit(void) +@@ -558,13 +564,11 @@ static int diag_remove_client_entry(struct file *file) + DIAG_LOG(DIAG_DEBUG_USERSPACE, + "pid %d, not present in client map\n", + diagpriv_data->pid); +- mutex_unlock(&driver->diagchar_mutex); + mutex_unlock(&driver->diag_file_mutex); + return -EINVAL; + } + DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: %s process exit with pid = %d\n", + driver->client_map[i].name, diagpriv_data->pid); +- mutex_unlock(&driver->diagchar_mutex); + /* + * clean up any DCI registrations, if this is a DCI client + * This will specially help in case of ungraceful exit of any DCI client +@@ -1929,11 +1933,13 @@ static int diag_switch_logging(struct diag_logging_mode_param_t *param) + driver->pcie_switch_pid = current->tgid; + } + if (new_mode == DIAG_PCIE_MODE) { +- driver->transport_set = DIAG_ROUTE_TO_PCIE; ++ driver->transport_set = ++ DIAG_ROUTE_TO_PCIE; + diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_pcie_apps, + poolsize_pcie_apps + 1 + (NUM_PERIPHERALS * 6)); + } else if (new_mode == DIAG_USB_MODE) { +- driver->transport_set = DIAG_ROUTE_TO_USB; ++ driver->transport_set = ++ DIAG_ROUTE_TO_USB; + diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_usb_apps, + poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6)); + } +@@ -2955,6 +2961,7 @@ static int diag_process_apps_data_hdlc(unsigned char *buf, int len, + { + int err = 0; + int ret = PKT_DROP; ++ int retval = 0; + struct diag_apps_data_t *data = &hdlc_data; + struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 }; + struct diag_hdlc_dest_type enc = { NULL, NULL, 0 }; +@@ -2983,37 +2990,44 @@ static int diag_process_apps_data_hdlc(unsigned char *buf, int len, + send.last = (void *)(buf + len - 1); + send.terminate = 1; + +- if (!data->buf) ++wait_for_buffer: ++ retval = wait_event_interruptible(driver->hdlc_wait_q, ++ (data->flushed == 0)); ++ if (retval) ++ return retval; ++ spin_lock_irqsave(&driver->diagmem_lock, flags); ++ if (data->flushed) { ++ spin_unlock_irqrestore(&driver->diagmem_lock, flags); ++ goto wait_for_buffer; ++ } ++ if (!data->buf) { + data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE + + APF_DIAG_PADDING, + POOL_TYPE_HDLC); +- if (!data->buf) { +- ret = PKT_DROP; +- goto fail_ret; ++ if (!data->buf) { ++ ret = PKT_DROP; ++ spin_unlock_irqrestore(&driver->diagmem_lock, flags); ++ goto fail_ret; ++ } ++ data->allocated = 1; ++ data->flushed = 0; + } + + if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_encoded_size) { ++ data->flushed = 1; ++ spin_unlock_irqrestore(&driver->diagmem_lock, flags); + err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len, + data->ctxt); + if (err) { + ret = -EIO; + goto fail_free_buf; + } +- data->buf = NULL; +- data->len = 0; +- data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE + +- APF_DIAG_PADDING, +- POOL_TYPE_HDLC); +- if (!data->buf) { +- ret = PKT_DROP; +- goto fail_ret; +- } ++ goto wait_for_buffer; + } + + enc.dest = data->buf + data->len; + enc.dest_last = (void *)(data->buf + data->len + max_encoded_size); + diag_hdlc_encode(&send, &enc); +- + /* + * This is to check if after HDLC encoding, we are still within + * the limits of aggregation buffer. 
If not, we write out the +@@ -3022,21 +3036,34 @@ static int diag_process_apps_data_hdlc(unsigned char *buf, int len, + */ + if ((uintptr_t)enc.dest >= (uintptr_t)(data->buf + + DIAG_MAX_HDLC_BUF_SIZE)) { ++ data->flushed = 1; ++ spin_unlock_irqrestore(&driver->diagmem_lock, flags); + err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len, + data->ctxt); + if (err) { + ret = -EIO; + goto fail_free_buf; + } +- data->buf = NULL; +- data->len = 0; ++wait_for_agg_buff: ++ retval = wait_event_interruptible(driver->hdlc_wait_q, ++ (data->flushed == 0)); ++ if (retval) ++ return retval; ++ spin_lock_irqsave(&driver->diagmem_lock, flags); ++ if (data->flushed) { ++ spin_unlock_irqrestore(&driver->diagmem_lock, flags); ++ goto wait_for_agg_buff; ++ } + data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE + + APF_DIAG_PADDING, + POOL_TYPE_HDLC); + if (!data->buf) { + ret = PKT_DROP; ++ spin_unlock_irqrestore(&driver->diagmem_lock, flags); + goto fail_ret; + } ++ data->allocated = 1; ++ data->flushed = 0; + + enc.dest = data->buf + data->len; + enc.dest_last = (void *)(data->buf + data->len + +@@ -3050,23 +3077,27 @@ static int diag_process_apps_data_hdlc(unsigned char *buf, int len, + DIAG_MAX_HDLC_BUF_SIZE; + + if (pkt_type == DATA_TYPE_RESPONSE) { ++ data->flushed = 1; ++ spin_unlock_irqrestore(&driver->diagmem_lock, flags); + err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len, + data->ctxt); + if (err) { + ret = -EIO; + goto fail_free_buf; + } +- data->buf = NULL; +- data->len = 0; ++ return PKT_ALLOC; + } +- ++ spin_unlock_irqrestore(&driver->diagmem_lock, flags); + return PKT_ALLOC; + + fail_free_buf: + spin_lock_irqsave(&driver->diagmem_lock, flags); +- diagmem_free(driver, data->buf, POOL_TYPE_HDLC); ++ if (data->allocated) ++ diagmem_free(driver, data->buf, POOL_TYPE_HDLC); + data->buf = NULL; + data->len = 0; ++ data->allocated = 0; ++ data->flushed = 0; + spin_unlock_irqrestore(&driver->diagmem_lock, flags); + fail_ret: + return ret; +@@ -3077,6 +3108,7 @@ static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len, + { + int err = 0; + int ret = PKT_DROP; ++ int retval = 0; + struct diag_pkt_frame_t header; + struct diag_apps_data_t *data = &non_hdlc_data; + unsigned long flags; +@@ -3092,33 +3124,38 @@ static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len, + __func__, buf, len); + return -EIO; + } +- ++wait_for_buffer: ++ retval = wait_event_interruptible(driver->hdlc_wait_q, ++ (data->flushed == 0)); ++ if (retval) ++ return retval; ++ spin_lock_irqsave(&driver->diagmem_lock, flags); ++ if (data->flushed) { ++ spin_unlock_irqrestore(&driver->diagmem_lock, flags); ++ goto wait_for_buffer; ++ } + if (!data->buf) { + data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE + + APF_DIAG_PADDING, + POOL_TYPE_HDLC); + if (!data->buf) { + ret = PKT_DROP; ++ spin_unlock_irqrestore(&driver->diagmem_lock, flags); + goto fail_ret; + } ++ data->allocated = 1; ++ data->flushed = 0; + } +- + if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_pkt_size) { ++ data->flushed = 1; ++ spin_unlock_irqrestore(&driver->diagmem_lock, flags); + err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len, + data->ctxt); + if (err) { + ret = -EIO; + goto fail_free_buf; + } +- data->buf = NULL; +- data->len = 0; +- data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE + +- APF_DIAG_PADDING, +- POOL_TYPE_HDLC); +- if (!data->buf) { +- ret = PKT_DROP; +- goto fail_ret; +- } ++ goto wait_for_buffer; + } + + header.start = CONTROL_CHAR; +@@ -3131,23 +3168,27 @@ static int 
diag_process_apps_data_non_hdlc(unsigned char *buf, int len, + *(uint8_t *)(data->buf + data->len) = CONTROL_CHAR; + data->len += sizeof(uint8_t); + if (pkt_type == DATA_TYPE_RESPONSE) { ++ data->flushed = 1; ++ spin_unlock_irqrestore(&driver->diagmem_lock, flags); + err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len, + data->ctxt); + if (err) { + ret = -EIO; + goto fail_free_buf; + } +- data->buf = NULL; +- data->len = 0; ++ return PKT_ALLOC; + } +- ++ spin_unlock_irqrestore(&driver->diagmem_lock, flags); + return PKT_ALLOC; + + fail_free_buf: + spin_lock_irqsave(&driver->diagmem_lock, flags); +- diagmem_free(driver, data->buf, POOL_TYPE_HDLC); ++ if (data->allocated) ++ diagmem_free(driver, data->buf, POOL_TYPE_HDLC); + data->buf = NULL; + data->len = 0; ++ data->allocated = 0; ++ data->flushed = 0; + spin_unlock_irqrestore(&driver->diagmem_lock, flags); + fail_ret: + return ret; +@@ -4178,7 +4219,7 @@ static void diag_init_transport(void) + * The number of buffers encompasses Diag data generated on + * the Apss processor + 1 for the responses generated + * exclusively on the Apps processor + data from data channels +- *(4 channels periperipheral) + data from command channels (2) ++ *(4 channels per peripheral) + data from command channels (2) + */ + diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_pcie_apps, + poolsize_pcie_apps + 1 + (NUM_PERIPHERALS * 6)); +@@ -4194,7 +4235,7 @@ static void diag_init_transport(void) + * The number of buffers encompasses Diag data generated on + * the Apss processor + 1 for the responses generated + * exclusively on the Apps processor + data from data channels +- *(4 channels periperipheral) + data from command channels (2) ++ *(4 channels per peripheral) + data from command channels (2) + */ + diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_usb_apps, + poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6)); +@@ -4240,9 +4281,15 @@ static int __init diagchar_init(void) + driver->pcie_switch_pid = 0; + driver->rsp_buf_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1); + hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1); ++ hdlc_data.ctxt |= SET_HDLC_CTXT(HDLC_CTXT); + hdlc_data.len = 0; ++ hdlc_data.allocated = 0; ++ hdlc_data.flushed = 0; + non_hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1); ++ non_hdlc_data.ctxt |= SET_HDLC_CTXT(NON_HDLC_CTXT); + non_hdlc_data.len = 0; ++ non_hdlc_data.allocated = 0; ++ non_hdlc_data.flushed = 0; + mutex_init(&driver->hdlc_disable_mutex); + mutex_init(&driver->diagchar_mutex); + mutex_init(&driver->diag_notifier_mutex); +diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c +index fcd2f355b4e7..686bc49f7efa 100644 +--- a/drivers/char/diag/diagfwd.c ++++ b/drivers/char/diag/diagfwd.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2008-2019, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2008-2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -1839,9 +1839,9 @@ static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt, + int ctxt) + { + unsigned long flags; +- int peripheral = -1; +- int type = -1; +- int num = -1; ++ int peripheral = -1, type = -1; ++ int num = -1, hdlc_ctxt = -1; ++ struct diag_apps_data_t *temp = NULL; + + if (!buf || len < 0) + return -EINVAL; +@@ -1860,9 +1860,27 @@ static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt, + diag_ws_on_copy(DIAG_WS_MUX); + } else if (peripheral == APPS_DATA) { + spin_lock_irqsave(&driver->diagmem_lock, flags); +- diagmem_free(driver, (unsigned char *)buf, +- POOL_TYPE_HDLC); +- buf = NULL; ++ hdlc_ctxt = GET_HDLC_CTXT(buf_ctxt); ++ if ((hdlc_ctxt == HDLC_CTXT) && hdlc_data.allocated) ++ temp = &hdlc_data; ++ else if ((hdlc_ctxt == NON_HDLC_CTXT) && ++ non_hdlc_data.allocated) ++ temp = &non_hdlc_data; ++ else ++ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, ++ "No apps data buffer is allocated to be freed\n"); ++ if (temp) { ++ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, ++ "Freeing Apps data buffer after write done hdlc_ctxt: %d, hdlc.allocated: %d, non_hdlc.allocated: %d\n", ++ hdlc_ctxt, ++ hdlc_data.allocated, non_hdlc_data.allocated); ++ diagmem_free(driver, temp->buf, POOL_TYPE_HDLC); ++ temp->buf = NULL; ++ temp->len = 0; ++ temp->allocated = 0; ++ temp->flushed = 0; ++ wake_up_interruptible(&driver->hdlc_wait_q); ++ } + spin_unlock_irqrestore(&driver->diagmem_lock, flags); + } else { + pr_err_ratelimited("diag: Invalid peripheral %d in %s, type: %d\n", +diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h +index 066d1f9f2d44..861673d183ec 100644 +--- a/drivers/char/diag/diagfwd.h ++++ b/drivers/char/diag/diagfwd.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2008-2017, 2019 The Linux Foundation. All rights reserved. ++/* Copyright (c) 2008-2017, 2019-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -25,6 +25,9 @@ + #define GET_BUF_NUM(n) ((n & 0x0000FF)) + #define GET_PD_CTXT(u) ((u & 0xFF000000) >> 24) + ++#define SET_HDLC_CTXT(u) ((u & 0xFF) << 24) ++#define GET_HDLC_CTXT(u) ((u & 0xFF000000) >> 24) ++ + #define CHK_OVERFLOW(bufStart, start, end, length) \ + ((((bufStart) <= (start)) && ((end) - (start) >= (length))) ? 1 : 0) + +diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c +index 76d091a00204..f08cc88af35b 100644 +--- a/drivers/char/diag/diagfwd_cntl.c ++++ b/drivers/char/diag/diagfwd_cntl.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2011-2018,2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -1023,6 +1023,9 @@ void diag_cntl_process_read_data(struct diagfwd_info *p_info, void *buf, + + while (read_len + header_len < len) { + ctrl_pkt = (struct diag_ctrl_pkt_header_t *)ptr; ++ if (((size_t)read_len + (size_t)ctrl_pkt->len + ++ header_len) > len) ++ return; + DIAG_LOG(DIAG_DEBUG_CONTROL, + "diag:peripheral: %d: pkt_id: %d\n", + p_info->peripheral, ctrl_pkt->pkt_id); +diff --git a/drivers/char/diag/diagmem.c b/drivers/char/diag/diagmem.c +index 2a3602568a1d..92e01724c313 100644 +--- a/drivers/char/diag/diagmem.c ++++ b/drivers/char/diag/diagmem.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2008-2014, 2016, 2019 The Linux Foundation. All rights reserved. ++/* Copyright (c) 2008-2014, 2016, 2019, 2021 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -152,6 +152,9 @@ void diagmem_setsize(int pool_idx, int itemsize, int poolsize) + } + + diag_mempools[pool_idx].itemsize = itemsize; ++ if (diag_mempools[pool_idx].pool) ++ diag_mempools[pool_idx].pool->pool_data = ++ (void *)(uintptr_t)itemsize; + diag_mempools[pool_idx].poolsize = poolsize; + pr_debug("diag: Mempool %s sizes: itemsize %d poolsize %d\n", + diag_mempools[pool_idx].name, diag_mempools[pool_idx].itemsize, +@@ -177,7 +180,8 @@ void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type) + mempool->name); + break; + } +- if (size == 0 || size > mempool->itemsize) { ++ if (size == 0 || size > mempool->itemsize || ++ size > (int)mempool->pool->pool_data) { + pr_err_ratelimited("diag: cannot alloc from mempool %s, invalid size: %d\n", + mempool->name, size); + break; +diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c +index bedfd2412ec1..7975ddd40b35 100644 +--- a/drivers/char/hpet.c ++++ b/drivers/char/hpet.c +@@ -976,6 +976,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data) + if (ACPI_SUCCESS(status)) { + hdp->hd_phys_address = addr.address.minimum; + hdp->hd_address = ioremap(addr.address.minimum, addr.address.address_length); ++ if (!hdp->hd_address) ++ return AE_ERROR; + + if (hpet_is_known(hdp)) { + iounmap(hdp->hd_address); +@@ -989,6 +991,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data) + hdp->hd_phys_address = fixmem32->address; + hdp->hd_address = ioremap(fixmem32->address, + HPET_RANGE_SIZE); ++ if (!hdp->hd_address) ++ return AE_ERROR; + + if (hpet_is_known(hdp)) { + iounmap(hdp->hd_address); +diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c +index 055d2ce378a7..a5393e678fe9 100644 +--- a/drivers/char/ipmi/ipmi_watchdog.c ++++ b/drivers/char/ipmi/ipmi_watchdog.c +@@ -393,16 +393,18 @@ static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, + data[0] = 0; + WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS); + +- if ((ipmi_version_major > 1) +- || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) { +- /* This is an IPMI 1.5-only feature. */ +- data[0] |= WDOG_DONT_STOP_ON_SET; +- } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { +- /* +- * In ipmi 1.0, setting the timer stops the watchdog, we +- * need to start it back up again. 
+- */ +- hbnow = 1; ++ if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { ++ if ((ipmi_version_major > 1) || ++ ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) { ++ /* This is an IPMI 1.5-only feature. */ ++ data[0] |= WDOG_DONT_STOP_ON_SET; ++ } else { ++ /* ++ * In ipmi 1.0, setting the timer stops the watchdog, we ++ * need to start it back up again. ++ */ ++ hbnow = 1; ++ } + } + + data[1] = 0; +diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c +index c115217c79ae..f8d98f7e6fb7 100644 +--- a/drivers/char/pcmcia/cm4000_cs.c ++++ b/drivers/char/pcmcia/cm4000_cs.c +@@ -544,6 +544,10 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq) + io_read_num_rec_bytes(iobase, &num_bytes_read); + if (num_bytes_read >= 4) { + DEBUGP(2, dev, "NumRecBytes = %i\n", num_bytes_read); ++ if (num_bytes_read > 4) { ++ rc = -EIO; ++ goto exit_setprotocol; ++ } + break; + } + mdelay(10); +diff --git a/drivers/char/random.c b/drivers/char/random.c +index 296350d3814a..a15108729baa 100755 +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -1914,7 +1914,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) + return -EPERM; + if (crng_init < 2) + return -ENODATA; +- crng_reseed(&primary_crng, NULL); ++ crng_reseed(&primary_crng, &input_pool); + crng_global_init_time = jiffies - 1; + return 0; + default: +diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c +index 100cd1de9939..59e1e94d12c0 100644 +--- a/drivers/char/tlclk.c ++++ b/drivers/char/tlclk.c +@@ -777,17 +777,21 @@ static int __init tlclk_init(void) + { + int ret; + ++ telclk_interrupt = (inb(TLCLK_REG7) & 0x0f); ++ ++ alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); ++ if (!alarm_events) { ++ ret = -ENOMEM; ++ goto out1; ++ } ++ + ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops); + if (ret < 0) { + printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major); ++ kfree(alarm_events); + return ret; + } + tlclk_major = ret; +- alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); +- if (!alarm_events) { +- ret = -ENOMEM; +- goto out1; +- } + + /* Read telecom clock IRQ number (Set by BIOS) */ + if (!request_region(TLCLK_BASE, 8, "telco_clock")) { +@@ -796,7 +800,6 @@ static int __init tlclk_init(void) + ret = -EBUSY; + goto out2; + } +- telclk_interrupt = (inb(TLCLK_REG7) & 0x0f); + + if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? 
*/ + printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n", +@@ -837,8 +840,8 @@ static int __init tlclk_init(void) + release_region(TLCLK_BASE, 8); + out2: + kfree(alarm_events); +-out1: + unregister_chrdev(tlclk_major, "telco_clock"); ++out1: + return ret; + } + +diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c +index 84eca4f93b82..0fad6cf37bab 100644 +--- a/drivers/char/tpm/tpm_ibmvtpm.c ++++ b/drivers/char/tpm/tpm_ibmvtpm.c +@@ -550,6 +550,7 @@ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance) + */ + while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) { + ibmvtpm_crq_process(crq, ibmvtpm); ++ wake_up_interruptible(&ibmvtpm->crq_queue.wq); + crq->valid = 0; + smp_wmb(); + } +@@ -596,6 +597,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, + } + + crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr); ++ init_waitqueue_head(&crq_q->wq); + ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr, + CRQ_RES_BUF_SIZE, + DMA_BIDIRECTIONAL); +@@ -648,6 +650,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, + if (rc) + goto init_irq_cleanup; + ++ if (!wait_event_timeout(ibmvtpm->crq_queue.wq, ++ ibmvtpm->rtce_buf != NULL, ++ HZ)) { ++ dev_err(dev, "CRQ response timed out\n"); ++ goto init_irq_cleanup; ++ } ++ + return tpm_chip_register(chip); + init_irq_cleanup: + do { +diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h +index 91dfe766d080..4f6a124601db 100644 +--- a/drivers/char/tpm/tpm_ibmvtpm.h ++++ b/drivers/char/tpm/tpm_ibmvtpm.h +@@ -31,6 +31,7 @@ struct ibmvtpm_crq_queue { + struct ibmvtpm_crq *crq_addr; + u32 index; + u32 num_entry; ++ wait_queue_head_t wq; + }; + + struct ibmvtpm_dev { +diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c +index 774748497ace..e56ac5adb5fc 100644 +--- a/drivers/char/ttyprintk.c ++++ b/drivers/char/ttyprintk.c +@@ -159,12 +159,23 @@ static int tpk_ioctl(struct tty_struct *tty, + return 0; + } + ++/* ++ * TTY operations hangup function. 
++ */ ++static void tpk_hangup(struct tty_struct *tty) ++{ ++ struct ttyprintk_port *tpkp = tty->driver_data; ++ ++ tty_port_hangup(&tpkp->port); ++} ++ + static const struct tty_operations ttyprintk_ops = { + .open = tpk_open, + .close = tpk_close, + .write = tpk_write, + .write_room = tpk_write_room, + .ioctl = tpk_ioctl, ++ .hangup = tpk_hangup, + }; + + static const struct tty_port_operations null_ops = { }; +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c +index 34548d3b4d13..2632b0fdb1b5 100644 +--- a/drivers/char/virtio_console.c ++++ b/drivers/char/virtio_console.c +@@ -492,7 +492,7 @@ static struct port_buffer *get_inbuf(struct port *port) + + buf = virtqueue_get_buf(port->in_vq, &len); + if (buf) { +- buf->len = len; ++ buf->len = min_t(size_t, len, buf->size); + buf->offset = 0; + port->stats.bytes_received += len; + } +@@ -1758,7 +1758,7 @@ static void control_work_handler(struct work_struct *work) + while ((buf = virtqueue_get_buf(vq, &len))) { + spin_unlock(&portdev->c_ivq_lock); + +- buf->len = len; ++ buf->len = min_t(size_t, len, buf->size); + buf->offset = 0; + + handle_control_message(vq->vdev, portdev, buf); +@@ -2161,6 +2161,7 @@ static struct virtio_device_id id_table[] = { + { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, + { 0 }, + }; ++MODULE_DEVICE_TABLE(virtio, id_table); + + static unsigned int features[] = { + VIRTIO_CONSOLE_F_SIZE, +@@ -2173,6 +2174,7 @@ static struct virtio_device_id rproc_serial_id_table[] = { + #endif + { 0 }, + }; ++MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table); + + static unsigned int rproc_serial_features[] = { + }; +@@ -2325,6 +2327,5 @@ static void __exit fini(void) + module_init(init); + module_exit(fini); + +-MODULE_DEVICE_TABLE(virtio, id_table); + MODULE_DESCRIPTION("Virtio console driver"); + MODULE_LICENSE("GPL"); +diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c +index 90988e7a5b47..2e7da9b379d4 100644 +--- a/drivers/clk/at91/clk-main.c ++++ b/drivers/clk/at91/clk-main.c +@@ -517,12 +517,17 @@ static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index) + return -EINVAL; + + regmap_read(regmap, AT91_CKGR_MOR, &tmp); +- tmp &= ~MOR_KEY_MASK; + + if (index && !(tmp & AT91_PMC_MOSCSEL)) +- regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL); ++ tmp = AT91_PMC_MOSCSEL; + else if (!index && (tmp & AT91_PMC_MOSCSEL)) +- regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL); ++ tmp = 0; ++ else ++ return 0; ++ ++ regmap_update_bits(regmap, AT91_CKGR_MOR, ++ AT91_PMC_MOSCSEL | MOR_KEY_MASK, ++ tmp | AT91_PMC_KEY); + + while (!clk_sam9x5_main_ready(regmap)) + cpu_relax(); +diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c +index 73aab6e984cd..3f16b553982d 100644 +--- a/drivers/clk/bcm/clk-bcm2835.c ++++ b/drivers/clk/bcm/clk-bcm2835.c +@@ -1177,8 +1177,10 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman, + pll->hw.init = &init; + + ret = devm_clk_hw_register(cprman->dev, &pll->hw); +- if (ret) ++ if (ret) { ++ kfree(pll); + return NULL; ++ } + return &pll->hw; + } + +@@ -1295,13 +1297,13 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman, + return &clock->hw; + } + +-static struct clk *bcm2835_register_gate(struct bcm2835_cprman *cprman, ++static struct clk_hw *bcm2835_register_gate(struct bcm2835_cprman *cprman, + const struct bcm2835_gate_data *data) + { +- return clk_register_gate(cprman->dev, data->name, data->parent, +- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, +- cprman->regs + 
data->ctl_reg, +- CM_GATE_BIT, 0, &cprman->regs_lock); ++ return clk_hw_register_gate(cprman->dev, data->name, data->parent, ++ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE, ++ cprman->regs + data->ctl_reg, ++ CM_GATE_BIT, 0, &cprman->regs_lock); + } + + typedef struct clk_hw *(*bcm2835_clk_register)(struct bcm2835_cprman *cprman, +diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c +index f5d74e8db432..1803af6230b2 100644 +--- a/drivers/clk/clk-s2mps11.c ++++ b/drivers/clk/clk-s2mps11.c +@@ -211,6 +211,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev) + return ret; + + err_reg: ++ of_node_put(s2mps11_clks[0].clk_np); + while (--i >= 0) + clkdev_drop(s2mps11_clks[i].lookup); + +diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c +index 872c9d0746c6..50b6c44f7328 100644 +--- a/drivers/clk/clk.c ++++ b/drivers/clk/clk.c +@@ -4019,32 +4019,28 @@ EXPORT_SYMBOL_GPL(clk_notifier_register); + */ + int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) + { +- struct clk_notifier *cn = NULL; +- int ret = -EINVAL; ++ struct clk_notifier *cn; ++ int ret = -ENOENT; + + if (!clk || !nb) + return -EINVAL; + + clk_prepare_lock(); + +- list_for_each_entry(cn, &clk_notifier_list, node) +- if (cn->clk == clk) +- break; +- +- if (cn->clk == clk) { +- ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); ++ list_for_each_entry(cn, &clk_notifier_list, node) { ++ if (cn->clk == clk) { ++ ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); + +- clk->core->notifier_count--; ++ clk->core->notifier_count--; + +- /* XXX the notifier code should handle this better */ +- if (!cn->notifier_head.head) { +- srcu_cleanup_notifier_head(&cn->notifier_head); +- list_del(&cn->node); +- kfree(cn); ++ /* XXX the notifier code should handle this better */ ++ if (!cn->notifier_head.head) { ++ srcu_cleanup_notifier_head(&cn->notifier_head); ++ list_del(&cn->node); ++ kfree(cn); ++ } ++ break; + } +- +- } else { +- ret = -ENOENT; + } + + clk_prepare_unlock(); +diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c +index 4adc1e89212c..f9157e7f45f2 100644 +--- a/drivers/clk/meson/clk-pll.c ++++ b/drivers/clk/meson/clk-pll.c +@@ -145,7 +145,7 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, + if (parent_rate == 0 || rate == 0) + return -EINVAL; + +- old_rate = rate; ++ old_rate = clk_hw_get_rate(hw); + + rate_set = meson_clk_get_pll_settings(pll, rate); + if (!rate_set) +diff --git a/drivers/clk/mvebu/armada-37xx-xtal.c b/drivers/clk/mvebu/armada-37xx-xtal.c +index 612d65ede10a..5370514959e1 100644 +--- a/drivers/clk/mvebu/armada-37xx-xtal.c ++++ b/drivers/clk/mvebu/armada-37xx-xtal.c +@@ -15,8 +15,8 @@ + #include + #include + +-#define NB_GPIO1_LATCH 0xC +-#define XTAL_MODE BIT(31) ++#define NB_GPIO1_LATCH 0x8 ++#define XTAL_MODE BIT(9) + + static int armada_3700_xtal_clock_probe(struct platform_device *pdev) + { +diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c +index 8dd71345b5d0..55430c8f1bc2 100644 +--- a/drivers/clk/qcom/gcc-msm8916.c ++++ b/drivers/clk/qcom/gcc-msm8916.c +@@ -270,7 +270,7 @@ static struct clk_pll gpll0 = { + .l_reg = 0x21004, + .m_reg = 0x21008, + .n_reg = 0x2100c, +- .config_reg = 0x21014, ++ .config_reg = 0x21010, + .mode_reg = 0x21000, + .status_reg = 0x2101c, + .status_bit = 17, +@@ -297,7 +297,7 @@ static struct clk_pll gpll1 = { + .l_reg = 0x20004, + .m_reg = 0x20008, + .n_reg = 0x2000c, +- .config_reg = 0x20014, ++ .config_reg = 0x20010, + .mode_reg = 0x20000, + .status_reg = 
0x2001c, + .status_bit = 17, +@@ -324,7 +324,7 @@ static struct clk_pll gpll2 = { + .l_reg = 0x4a004, + .m_reg = 0x4a008, + .n_reg = 0x4a00c, +- .config_reg = 0x4a014, ++ .config_reg = 0x4a010, + .mode_reg = 0x4a000, + .status_reg = 0x4a01c, + .status_bit = 17, +@@ -351,7 +351,7 @@ static struct clk_pll bimc_pll = { + .l_reg = 0x23004, + .m_reg = 0x23008, + .n_reg = 0x2300c, +- .config_reg = 0x23014, ++ .config_reg = 0x23010, + .mode_reg = 0x23000, + .status_reg = 0x2301c, + .status_bit = 17, +diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c +index 53f16efbb8f4..31785b3abac5 100644 +--- a/drivers/clk/rockchip/clk-rk3228.c ++++ b/drivers/clk/rockchip/clk-rk3228.c +@@ -126,7 +126,7 @@ PNAME(mux_usb480m_p) = { "usb480m_phy", "xin24m" }; + PNAME(mux_hdmiphy_p) = { "hdmiphy_phy", "xin24m" }; + PNAME(mux_aclk_cpu_src_p) = { "cpll_aclk_cpu", "gpll_aclk_cpu", "hdmiphy_aclk_cpu" }; + +-PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "hdmiphy" "usb480m" }; ++PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "hdmiphy", "usb480m" }; + PNAME(mux_pll_src_3plls_p) = { "cpll", "gpll", "hdmiphy" }; + PNAME(mux_pll_src_2plls_p) = { "cpll", "gpll" }; + PNAME(mux_sclk_hdmi_cec_p) = { "cpll", "gpll", "xin24m" }; +diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c +index 91f9b79e3941..9a9402f568ef 100644 +--- a/drivers/clk/samsung/clk-exynos4.c ++++ b/drivers/clk/samsung/clk-exynos4.c +@@ -1060,7 +1060,7 @@ static const struct samsung_gate_clock exynos4210_gate_clks[] __initconst = { + GATE(CLK_PCIE, "pcie", "aclk133", GATE_IP_FSYS, 14, 0, 0), + GATE(CLK_SMMU_PCIE, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0), + GATE(CLK_MODEMIF, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0), +- GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0), ++ GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0), + GATE(CLK_SYSREG, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0, +@@ -1101,7 +1101,7 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = { + 0), + GATE(CLK_TSADC, "tsadc", "aclk133", E4X12_GATE_BUS_FSYS1, 16, 0, 0), + GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0), +- GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0), ++ GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0), + GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0, +diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c +index 09cdd35dc434..a082b026791a 100644 +--- a/drivers/clk/samsung/clk-exynos5433.c ++++ b/drivers/clk/samsung/clk-exynos5433.c +@@ -1672,7 +1672,8 @@ static const struct samsung_gate_clock peric_gate_clks[] __initconst = { + GATE(CLK_SCLK_PCM1, "sclk_pcm1", "sclk_pcm1_peric", + ENABLE_SCLK_PERIC, 7, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_I2S1, "sclk_i2s1", "sclk_i2s1_peric", +- ENABLE_SCLK_PERIC, 6, CLK_SET_RATE_PARENT, 0), ++ ENABLE_SCLK_PERIC, 6, ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0), + GATE(CLK_SCLK_SPI2, "sclk_spi2", "sclk_spi2_peric", ENABLE_SCLK_PERIC, + 5, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_SPI1, "sclk_spi1", "sclk_spi1_peric", ENABLE_SCLK_PERIC, +diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c +index bbfa57b4e017..17dfd4f130ca 100644 +--- 
a/drivers/clk/samsung/clk-exynos7.c ++++ b/drivers/clk/samsung/clk-exynos7.c +@@ -541,8 +541,13 @@ static const struct samsung_gate_clock top1_gate_clks[] __initconst = { + GATE(CLK_ACLK_FSYS0_200, "aclk_fsys0_200", "dout_aclk_fsys0_200", + ENABLE_ACLK_TOP13, 28, CLK_SET_RATE_PARENT | + CLK_IS_CRITICAL, 0), ++ /* ++ * This clock is required for the CMU_FSYS1 registers access, keep it ++ * enabled permanently until proper runtime PM support is added. ++ */ + GATE(CLK_ACLK_FSYS1_200, "aclk_fsys1_200", "dout_aclk_fsys1_200", +- ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT, 0), ++ ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT | ++ CLK_IS_CRITICAL, 0), + + GATE(CLK_SCLK_PHY_FSYS1_26M, "sclk_phy_fsys1_26m", + "dout_sclk_phy_fsys1_26m", ENABLE_SCLK_TOP1_FSYS11, +diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c +index 665fa681b2e1..1e6bdf22c3b6 100644 +--- a/drivers/clk/sirf/clk-atlas6.c ++++ b/drivers/clk/sirf/clk-atlas6.c +@@ -136,7 +136,7 @@ static void __init atlas6_clk_init(struct device_node *np) + + for (i = pll1; i < maxclk; i++) { + atlas6_clks[i] = clk_register(NULL, atlas6_clk_hw_array[i]); +- BUG_ON(!atlas6_clks[i]); ++ BUG_ON(IS_ERR(atlas6_clks[i])); + } + clk_register_clkdev(atlas6_clks[cpu], NULL, "cpu"); + clk_register_clkdev(atlas6_clks[io], NULL, "io"); +diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c +index c2d572748167..7913dbedba89 100644 +--- a/drivers/clk/socfpga/clk-gate-a10.c ++++ b/drivers/clk/socfpga/clk-gate-a10.c +@@ -157,6 +157,7 @@ static void __init __socfpga_gate_init(struct device_node *node, + if (IS_ERR(socfpga_clk->sys_mgr_base_addr)) { + pr_err("%s: failed to find altr,sys-mgr regmap!\n", + __func__); ++ kfree(socfpga_clk); + return; + } + } +diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c +index aa7a6e6a15b6..14918896811d 100644 +--- a/drivers/clk/socfpga/clk-gate.c ++++ b/drivers/clk/socfpga/clk-gate.c +@@ -107,7 +107,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk, + val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift; + val &= GENMASK(socfpgaclk->width - 1, 0); + /* Check for GPIO_DB_CLK by its offset */ +- if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET) ++ if ((uintptr_t) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET) + div = val + 1; + else + div = (1 << val); +diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c +index a485f3b284b9..033e6062599d 100644 +--- a/drivers/clk/st/clk-flexgen.c ++++ b/drivers/clk/st/clk-flexgen.c +@@ -371,6 +371,7 @@ static void __init st_of_flexgen_setup(struct device_node *np) + break; + } + ++ flex_flags &= ~CLK_IS_CRITICAL; + of_clk_detect_critical(np, i, &flex_flags); + + /* +diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c +index f2c9274b8bd5..369164f0bd0e 100644 +--- a/drivers/clk/sunxi/clk-sunxi.c ++++ b/drivers/clk/sunxi/clk-sunxi.c +@@ -98,7 +98,7 @@ static void sun6i_a31_get_pll1_factors(struct factors_request *req) + * Round down the frequency to the closest multiple of either + * 6 or 16 + */ +- u32 round_freq_6 = round_down(freq_mhz, 6); ++ u32 round_freq_6 = rounddown(freq_mhz, 6); + u32 round_freq_16 = round_down(freq_mhz, 16); + + if (round_freq_6 > round_freq_16) +diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h +index 5738635c5274..9f8397c696e6 100644 +--- a/drivers/clk/tegra/clk-id.h ++++ b/drivers/clk/tegra/clk-id.h +@@ -233,6 +233,7 @@ enum clk_id { + tegra_clk_sdmmc4_8, + tegra_clk_sdmmc4_9, + tegra_clk_se, ++ 
tegra_clk_se_10, + tegra_clk_soc_therm, + tegra_clk_soc_therm_8, + tegra_clk_sor0, +diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c +index 1ab36a355daf..789efad791a3 100644 +--- a/drivers/clk/tegra/clk-pll.c ++++ b/drivers/clk/tegra/clk-pll.c +@@ -1085,7 +1085,8 @@ static int clk_pllu_enable(struct clk_hw *hw) + if (pll->lock) + spin_lock_irqsave(pll->lock, flags); + +- _clk_pll_enable(hw); ++ if (!clk_pll_is_enabled(hw)) ++ _clk_pll_enable(hw); + + ret = clk_pll_wait_for_lock(pll); + if (ret < 0) +@@ -1702,7 +1703,8 @@ static int clk_pllu_tegra114_enable(struct clk_hw *hw) + if (pll->lock) + spin_lock_irqsave(pll->lock, flags); + +- _clk_pll_enable(hw); ++ if (!clk_pll_is_enabled(hw)) ++ _clk_pll_enable(hw); + + ret = clk_pll_wait_for_lock(pll); + if (ret < 0) +diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c +index d9c1f229c644..bf88f90e6c43 100644 +--- a/drivers/clk/tegra/clk-tegra-periph.c ++++ b/drivers/clk/tegra/clk-tegra-periph.c +@@ -648,7 +648,7 @@ static struct tegra_periph_init_data periph_clks[] = { + INT8("host1x", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_8), + INT8("host1x", mux_pllc4_out1_pllc_pllc4_out2_pllp_clkm_plla_pllc4_out0, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_9), + INT8("se", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se), +- INT8("se", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se), ++ INT8("se", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se_10), + INT8("2d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_2D, 21, 0, tegra_clk_gr2d_8), + INT8("3d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_3D, 24, 0, tegra_clk_gr3d_8), + INT8("vic03", mux_pllm_pllc_pllp_plla_pllc2_c3_clkm, CLK_SOURCE_VIC03, 178, 0, tegra_clk_vic03), +diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c +index 255cafb18336..9345eaf00938 100644 +--- a/drivers/clk/ti/adpll.c ++++ b/drivers/clk/ti/adpll.c +@@ -193,15 +193,8 @@ static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d, + if (err) + return NULL; + } else { +- const char *base_name = "adpll"; +- char *buf; +- +- buf = devm_kzalloc(d->dev, 8 + 1 + strlen(base_name) + 1 + +- strlen(postfix), GFP_KERNEL); +- if (!buf) +- return NULL; +- sprintf(buf, "%08lx.%s.%s", d->pa, base_name, postfix); +- name = buf; ++ name = devm_kasprintf(d->dev, GFP_KERNEL, "%08lx.adpll.%s", ++ d->pa, postfix); + } + + return name; +diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c +index 6cf9dd189a92..4e5e95238086 100644 +--- a/drivers/clk/ti/clockdomain.c ++++ b/drivers/clk/ti/clockdomain.c +@@ -124,10 +124,12 @@ static void __init of_ti_clockdomain_setup(struct device_node *node) + if (clk_hw_get_flags(clk_hw) & CLK_IS_BASIC) { + pr_warn("can't setup clkdm for basic clk %s\n", + __clk_get_name(clk)); ++ clk_put(clk); + continue; + } + to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name; + omap2_init_clk_clkdm(clk_hw); ++ clk_put(clk); + } + } + +diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c +index 1cf70f452e1e..3725b2e0c788 100644 +--- a/drivers/clk/ti/composite.c ++++ b/drivers/clk/ti/composite.c +@@ -226,6 +226,7 @@ static void __init _register_composite(struct clk_hw *hw, + if (!cclk->comp_clks[i]) + continue; + list_del(&cclk->comp_clks[i]->link); ++ kfree(cclk->comp_clks[i]->parent_names); + kfree(cclk->comp_clks[i]); + } + +diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c +index 
66a0d0ed8b55..02ff499e3653 100644 +--- a/drivers/clk/ti/fapll.c ++++ b/drivers/clk/ti/fapll.c +@@ -497,6 +497,7 @@ static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd, + { + struct clk_init_data *init; + struct fapll_synth *synth; ++ struct clk *clk = ERR_PTR(-ENOMEM); + + init = kzalloc(sizeof(*init), GFP_KERNEL); + if (!init) +@@ -519,13 +520,19 @@ static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd, + synth->hw.init = init; + synth->clk_pll = pll_clk; + +- return clk_register(NULL, &synth->hw); ++ clk = clk_register(NULL, &synth->hw); ++ if (IS_ERR(clk)) { ++ pr_err("failed to register clock\n"); ++ goto free; ++ } ++ ++ return clk; + + free: + kfree(synth); + kfree(init); + +- return ERR_PTR(-ENOMEM); ++ return clk; + } + + static void __init ti_fapll_setup(struct device_node *node) +diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c +index 2c243a894f3b..3a52ab968ac2 100644 +--- a/drivers/clk/uniphier/clk-uniphier-mux.c ++++ b/drivers/clk/uniphier/clk-uniphier-mux.c +@@ -40,10 +40,10 @@ static int uniphier_clk_mux_set_parent(struct clk_hw *hw, u8 index) + static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw) + { + struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw); +- int num_parents = clk_hw_get_num_parents(hw); ++ unsigned int num_parents = clk_hw_get_num_parents(hw); + int ret; + unsigned int val; +- u8 i; ++ unsigned int i; + + ret = regmap_read(mux->regmap, mux->reg, &val); + if (ret) +diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c +index 5aa9914d494e..f02ca36f45ad 100644 +--- a/drivers/clocksource/arm_arch_timer.c ++++ b/drivers/clocksource/arm_arch_timer.c +@@ -428,15 +428,24 @@ static void arch_timer_evtstrm_enable(int divider) + + static void arch_timer_configure_evtstream(void) + { +- int evt_stream_div, pos; ++ int evt_stream_div, lsb; ++ ++ /* ++ * As the event stream can at most be generated at half the frequency ++ * of the counter, use half the frequency when computing the divider. ++ */ ++ evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2; ++ ++ /* ++ * Find the closest power of two to the divisor. If the adjacent bit ++ * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1). 
++ */ ++ lsb = fls(evt_stream_div) - 1; ++ if (lsb > 0 && (evt_stream_div & BIT(lsb - 1))) ++ lsb++; + +- /* Find the closest power of two to the divisor */ +- evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ; +- pos = fls(evt_stream_div); +- if (pos > 1 && !(evt_stream_div & (1 << (pos - 2)))) +- pos--; + /* enable event stream */ +- arch_timer_evtstrm_enable(min(pos, 15)); ++ arch_timer_evtstrm_enable(max(0, min(lsb, 15))); + } + + static void arch_counter_set_user_access(void) +diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c +index fbfbdec13b08..7cddc54be96c 100644 +--- a/drivers/clocksource/cadence_ttc_timer.c ++++ b/drivers/clocksource/cadence_ttc_timer.c +@@ -418,10 +418,8 @@ static int __init ttc_setup_clockevent(struct clk *clk, + ttcce->ttc.clk = clk; + + err = clk_prepare_enable(ttcce->ttc.clk); +- if (err) { +- kfree(ttcce); +- return err; +- } ++ if (err) ++ goto out_kfree; + + ttcce->ttc.clk_rate_change_nb.notifier_call = + ttc_rate_change_clockevent_cb; +@@ -431,7 +429,7 @@ static int __init ttc_setup_clockevent(struct clk *clk, + &ttcce->ttc.clk_rate_change_nb); + if (err) { + pr_warn("Unable to register clock notifier.\n"); +- return err; ++ goto out_kfree; + } + + ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk); +@@ -460,15 +458,17 @@ static int __init ttc_setup_clockevent(struct clk *clk, + + err = request_irq(irq, ttc_clock_event_interrupt, + IRQF_TIMER, ttcce->ce.name, ttcce); +- if (err) { +- kfree(ttcce); +- return err; +- } ++ if (err) ++ goto out_kfree; + + clockevents_config_and_register(&ttcce->ce, + ttcce->ttc.freq / PRESCALE, 1, 0xfffe); + + return 0; ++ ++out_kfree: ++ kfree(ttcce); ++ return err; + } + + /** +diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c +index aee6c0d39a7c..024e6cc5025b 100644 +--- a/drivers/clocksource/dw_apb_timer_of.c ++++ b/drivers/clocksource/dw_apb_timer_of.c +@@ -146,10 +146,6 @@ static int num_called; + static int __init dw_apb_timer_init(struct device_node *timer) + { + switch (num_called) { +- case 0: +- pr_debug("%s: found clockevent timer\n", __func__); +- add_clockevent(timer); +- break; + case 1: + pr_debug("%s: found clocksource timer\n", __func__); + add_clocksource(timer); +@@ -160,6 +156,8 @@ static int __init dw_apb_timer_init(struct device_node *timer) + #endif + break; + default: ++ pr_debug("%s: found clockevent timer\n", __func__); ++ add_clockevent(timer); + break; + } + +diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c +index 546bb180f5a4..8202e49ac64c 100644 +--- a/drivers/clocksource/h8300_timer8.c ++++ b/drivers/clocksource/h8300_timer8.c +@@ -176,7 +176,7 @@ static int __init h8300_8timer_init(struct device_node *node) + return PTR_ERR(clk); + } + +- ret = ENXIO; ++ ret = -ENXIO; + base = of_iomap(node, 0); + if (!base) { + pr_err("failed to map registers for clockevent\n"); +diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c +index 0ba0a913b41d..b26c3b84c5b6 100644 +--- a/drivers/clocksource/mxs_timer.c ++++ b/drivers/clocksource/mxs_timer.c +@@ -152,10 +152,7 @@ static void mxs_irq_clear(char *state) + + /* Clear pending interrupt */ + timrot_irq_acknowledge(); +- +-#ifdef DEBUG +- pr_info("%s: changing mode to %s\n", __func__, state) +-#endif /* DEBUG */ ++ pr_debug("%s: changing mode to %s\n", __func__, state); + } + + static int mxs_shutdown(struct clock_event_device *evt) +diff --git a/drivers/cpufreq/acpi-cpufreq.c 
b/drivers/cpufreq/acpi-cpufreq.c +index 1ee3674a99bb..3bd816cf4a7d 100644 +--- a/drivers/cpufreq/acpi-cpufreq.c ++++ b/drivers/cpufreq/acpi-cpufreq.c +@@ -720,7 +720,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) + cpumask_copy(policy->cpus, topology_core_cpumask(cpu)); + } + +- if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) { ++ if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 && ++ !acpi_pstate_strict) { + cpumask_clear(policy->cpus); + cpumask_set_cpu(cpu, policy->cpus); + cpumask_copy(data->freqdomain_cpus, +diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c +index 1608f7105c9f..ad743f2f31e7 100644 +--- a/drivers/cpufreq/highbank-cpufreq.c ++++ b/drivers/cpufreq/highbank-cpufreq.c +@@ -104,6 +104,13 @@ static int hb_cpufreq_driver_init(void) + } + module_init(hb_cpufreq_driver_init); + ++static const struct of_device_id __maybe_unused hb_cpufreq_of_match[] = { ++ { .compatible = "calxeda,highbank" }, ++ { .compatible = "calxeda,ecx-2000" }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, hb_cpufreq_of_match); ++ + MODULE_AUTHOR("Mark Langsdorf "); + MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver"); + MODULE_LICENSE("GPL"); +diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c +index be89416e2358..9d902f67f871 100644 +--- a/drivers/cpufreq/loongson1-cpufreq.c ++++ b/drivers/cpufreq/loongson1-cpufreq.c +@@ -217,6 +217,7 @@ static struct platform_driver ls1x_cpufreq_platdrv = { + + module_platform_driver(ls1x_cpufreq_platdrv); + ++MODULE_ALIAS("platform:ls1x-cpufreq"); + MODULE_AUTHOR("Kelvin Cheung "); + MODULE_DESCRIPTION("Loongson1 CPUFreq driver"); + MODULE_LICENSE("GPL"); +diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c +index 0b5bf135b090..59f16807921a 100644 +--- a/drivers/cpufreq/powernow-k8.c ++++ b/drivers/cpufreq/powernow-k8.c +@@ -887,9 +887,9 @@ static int get_transition_latency(struct powernow_k8_data *data) + + /* Take a frequency, and issue the fid/vid transition command */ + static int transition_frequency_fidvid(struct powernow_k8_data *data, +- unsigned int index) ++ unsigned int index, ++ struct cpufreq_policy *policy) + { +- struct cpufreq_policy *policy; + u32 fid = 0; + u32 vid = 0; + int res; +@@ -921,9 +921,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, + freqs.old = find_khz_freq_from_fid(data->currfid); + freqs.new = find_khz_freq_from_fid(fid); + +- policy = cpufreq_cpu_get(smp_processor_id()); +- cpufreq_cpu_put(policy); +- + cpufreq_freq_transition_begin(policy, &freqs); + res = transition_fid_vid(data, fid, vid); + cpufreq_freq_transition_end(policy, &freqs, res); +@@ -978,7 +975,7 @@ static long powernowk8_target_fn(void *arg) + + powernow_k8_acpi_pst_values(data, newstate); + +- ret = transition_frequency_fidvid(data, newstate); ++ ret = transition_frequency_fidvid(data, newstate, pol); + + if (ret) { + pr_err("transition frequency failed\n"); +diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c +index b4fc65512aad..8d1826479425 100644 +--- a/drivers/cpufreq/powernv-cpufreq.c ++++ b/drivers/cpufreq/powernv-cpufreq.c +@@ -784,12 +784,15 @@ static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb, + unsigned long action, void *unused) + { + int cpu; +- struct cpufreq_policy cpu_policy; ++ struct cpufreq_policy *cpu_policy; + + rebooting = true; + for_each_online_cpu(cpu) { +- cpufreq_get_policy(&cpu_policy, cpu); +- powernv_cpufreq_target_index(&cpu_policy, 
get_nominal_index()); ++ cpu_policy = cpufreq_cpu_get(cpu); ++ if (!cpu_policy) ++ continue; ++ powernv_cpufreq_target_index(cpu_policy, get_nominal_index()); ++ cpufreq_cpu_put(cpu_policy); + } + + return NOTIFY_DONE; +@@ -802,6 +805,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = { + void powernv_cpufreq_work_fn(struct work_struct *work) + { + struct chip *chip = container_of(work, struct chip, throttle); ++ struct cpufreq_policy *policy; + unsigned int cpu; + cpumask_t mask; + +@@ -816,12 +820,14 @@ void powernv_cpufreq_work_fn(struct work_struct *work) + chip->restore = false; + for_each_cpu(cpu, &mask) { + int index; +- struct cpufreq_policy policy; + +- cpufreq_get_policy(&policy, cpu); +- index = cpufreq_table_find_index_c(&policy, policy.cur); +- powernv_cpufreq_target_index(&policy, index); +- cpumask_andnot(&mask, &mask, policy.cpus); ++ policy = cpufreq_cpu_get(cpu); ++ if (!policy) ++ continue; ++ index = cpufreq_table_find_index_c(policy, policy->cur); ++ powernv_cpufreq_target_index(policy, index); ++ cpumask_andnot(&mask, &mask, policy->cpus); ++ cpufreq_cpu_put(policy); + } + out: + put_online_cpus(); +diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c +index ea7a4e1b68c2..1a45fbb00d87 100644 +--- a/drivers/cpufreq/scpi-cpufreq.c ++++ b/drivers/cpufreq/scpi-cpufreq.c +@@ -111,6 +111,7 @@ static struct platform_driver scpi_cpufreq_platdrv = { + }; + module_platform_driver(scpi_cpufreq_platdrv); + ++MODULE_ALIAS("platform:scpi-cpufreq"); + MODULE_AUTHOR("Sudeep Holla "); + MODULE_DESCRIPTION("ARM SCPI CPUFreq interface driver"); + MODULE_LICENSE("GPL v2"); +diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c +index b366e6d830ea..c7f0b2df15cd 100644 +--- a/drivers/cpufreq/sti-cpufreq.c ++++ b/drivers/cpufreq/sti-cpufreq.c +@@ -144,7 +144,8 @@ static const struct reg_field sti_stih407_dvfs_regfields[DVFS_MAX_REGFIELDS] = { + static const struct reg_field *sti_cpufreq_match(void) + { + if (of_machine_is_compatible("st,stih407") || +- of_machine_is_compatible("st,stih410")) ++ of_machine_is_compatible("st,stih410") || ++ of_machine_is_compatible("st,stih418")) + return sti_stih407_dvfs_regfields; + + return NULL; +@@ -260,7 +261,8 @@ static int sti_cpufreq_init(void) + int ret; + + if ((!of_machine_is_compatible("st,stih407")) && +- (!of_machine_is_compatible("st,stih410"))) ++ (!of_machine_is_compatible("st,stih410")) && ++ (!of_machine_is_compatible("st,stih418"))) + return -ENODEV; + + ddata.cpu = get_cpu_device(0); +@@ -292,6 +294,13 @@ static int sti_cpufreq_init(void) + } + module_init(sti_cpufreq_init); + ++static const struct of_device_id __maybe_unused sti_cpufreq_of_match[] = { ++ { .compatible = "st,stih407" }, ++ { .compatible = "st,stih410" }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(of, sti_cpufreq_of_match); ++ + MODULE_DESCRIPTION("STMicroelectronics CPUFreq/OPP driver"); + MODULE_AUTHOR("Ajitpal Singh "); + MODULE_AUTHOR("Lee Jones "); +diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c +index 9e98a5fbbc1d..e7e92ed34f0c 100644 +--- a/drivers/cpuidle/sysfs.c ++++ b/drivers/cpuidle/sysfs.c +@@ -412,7 +412,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device) + ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, + &kdev->kobj, "state%d", i); + if (ret) { +- kfree(kobj); ++ kobject_put(&kobj->kobj); + goto error_state; + } + kobject_uevent(&kobj->kobj, KOBJ_ADD); +@@ -542,7 +542,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) + ret = 
kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle, + &kdev->kobj, "driver"); + if (ret) { +- kfree(kdrv); ++ kobject_put(&kdrv->kobj); + return ret; + } + +@@ -636,7 +636,7 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev) + error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj, + "cpuidle"); + if (error) { +- kfree(kdev); ++ kobject_put(&kdev->kobj); + return error; + } + +diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h +index cfe21d033745..56c8c9ba12bc 100644 +--- a/drivers/crypto/ccp/ccp-dev.h ++++ b/drivers/crypto/ccp/ccp-dev.h +@@ -444,6 +444,7 @@ struct ccp_sg_workarea { + int nents; + + struct scatterlist *dma_sg; ++ struct scatterlist *dma_sg_head; + struct device *dma_dev; + unsigned int dma_count; + enum dma_data_direction dma_dir; +diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c +index 7d4cd518e602..0aa18c1164bf 100644 +--- a/drivers/crypto/ccp/ccp-ops.c ++++ b/drivers/crypto/ccp/ccp-ops.c +@@ -52,7 +52,7 @@ static u32 ccp_gen_jobid(struct ccp_device *ccp) + static void ccp_sg_free(struct ccp_sg_workarea *wa) + { + if (wa->dma_count) +- dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir); ++ dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir); + + wa->dma_count = 0; + } +@@ -81,6 +81,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, + return 0; + + wa->dma_sg = sg; ++ wa->dma_sg_head = sg; + wa->dma_dev = dev; + wa->dma_dir = dma_dir; + wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir); +@@ -93,14 +94,28 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, + static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len) + { + unsigned int nbytes = min_t(u64, len, wa->bytes_left); ++ unsigned int sg_combined_len = 0; + + if (!wa->sg) + return; + + wa->sg_used += nbytes; + wa->bytes_left -= nbytes; +- if (wa->sg_used == wa->sg->length) { +- wa->sg = sg_next(wa->sg); ++ if (wa->sg_used == sg_dma_len(wa->dma_sg)) { ++ /* Advance to the next DMA scatterlist entry */ ++ wa->dma_sg = sg_next(wa->dma_sg); ++ ++ /* In the case that the DMA mapped scatterlist has entries ++ * that have been merged, the non-DMA mapped scatterlist ++ * must be advanced multiple times for each merged entry. ++ * This ensures that the current non-DMA mapped entry ++ * corresponds to the current DMA mapped entry. ++ */ ++ do { ++ sg_combined_len += wa->sg->length; ++ wa->sg = sg_next(wa->sg); ++ } while (wa->sg_used > sg_combined_len); ++ + wa->sg_used = 0; + } + } +@@ -298,7 +313,7 @@ static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from) + /* Update the structures and generate the count */ + buf_count = 0; + while (sg_wa->bytes_left && (buf_count < dm_wa->length)) { +- nbytes = min(sg_wa->sg->length - sg_wa->sg_used, ++ nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used, + dm_wa->length - buf_count); + nbytes = min_t(u64, sg_wa->bytes_left, nbytes); + +@@ -330,11 +345,11 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, + * and destination. The resulting len values will always be <= UINT_MAX + * because the dma length is an unsigned int. 
+ */ +- sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used; ++ sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used; + sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len); + + if (dst) { +- sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used; ++ sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used; + sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len); + op_len = min(sg_src_len, sg_dst_len); + } else { +@@ -364,7 +379,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, + /* Enough data in the sg element, but we need to + * adjust for any previously copied data + */ +- op->src.u.dma.address = sg_dma_address(src->sg_wa.sg); ++ op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg); + op->src.u.dma.offset = src->sg_wa.sg_used; + op->src.u.dma.length = op_len & ~(block_size - 1); + +@@ -385,7 +400,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, + /* Enough room in the sg element, but we need to + * adjust for any previously used area + */ +- op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg); ++ op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg); + op->dst.u.dma.offset = dst->sg_wa.sg_used; + op->dst.u.dma.length = op->src.u.dma.length; + } +@@ -1180,7 +1195,7 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) + break; + default: + ret = -EINVAL; +- goto e_ctx; ++ goto e_data; + } + } else { + /* Stash the context */ +@@ -1216,8 +1231,9 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) + digest_size); + break; + default: ++ kfree(hmac_buf); + ret = -EINVAL; +- goto e_ctx; ++ goto e_data; + } + + memset(&hmac_cmd, 0, sizeof(hmac_cmd)); +@@ -1446,7 +1462,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, + dst.sg_wa.sg_used = 0; + for (i = 1; i <= src.sg_wa.dma_count; i++) { + if (!dst.sg_wa.sg || +- (dst.sg_wa.sg->length < src.sg_wa.sg->length)) { ++ (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) { + ret = -EINVAL; + goto e_dst; + } +@@ -1472,8 +1488,8 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, + goto e_dst; + } + +- dst.sg_wa.sg_used += src.sg_wa.sg->length; +- if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) { ++ dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg); ++ if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) { + dst.sg_wa.sg = sg_next(dst.sg_wa.sg); + dst.sg_wa.sg_used = 0; + } +diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c +index b54af97a20bb..81b61e256f7c 100644 +--- a/drivers/crypto/ixp4xx_crypto.c ++++ b/drivers/crypto/ixp4xx_crypto.c +@@ -334,7 +334,7 @@ static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys) + + buf1 = buf->next; + phys1 = buf->phys_next; +- dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir); ++ dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir); + dma_pool_free(buffer_pool, buf, phys); + buf = buf1; + phys = phys1; +@@ -532,7 +532,7 @@ static void release_ixp_crypto(struct device *dev) + + if (crypt_virt) { + dma_free_coherent(dev, +- NPE_QLEN_TOTAL * sizeof( struct crypt_ctl), ++ NPE_QLEN * sizeof(struct crypt_ctl), + crypt_virt, crypt_phys); + } + return; +diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c +index cddc6d8b55d9..1b8c87770645 100644 +--- a/drivers/crypto/nx/nx-842-pseries.c ++++ b/drivers/crypto/nx/nx-842-pseries.c +@@ -553,13 +553,15 @@ static int nx842_OF_set_defaults(struct nx842_devdata *devdata) + * The status field 
indicates if the device is enabled when the status + * is 'okay'. Otherwise the device driver will be disabled. + * +- * @prop - struct property point containing the maxsyncop for the update ++ * @devdata: struct nx842_devdata to use for dev_info ++ * @prop: struct property point containing the maxsyncop for the update + * + * Returns: + * 0 - Device is available + * -ENODEV - Device is not available + */ +-static int nx842_OF_upd_status(struct property *prop) ++static int nx842_OF_upd_status(struct nx842_devdata *devdata, ++ struct property *prop) + { + const char *status = (const char *)prop->value; + +@@ -773,7 +775,7 @@ static int nx842_OF_upd(struct property *new_prop) + goto out; + + /* Perform property updates */ +- ret = nx842_OF_upd_status(status); ++ ret = nx842_OF_upd_status(new_devdata, status); + if (ret) + goto error_out; + +@@ -1086,6 +1088,7 @@ static struct vio_device_id nx842_vio_driver_ids[] = { + {"ibm,compression-v1", "ibm,compression"}, + {"", ""}, + }; ++MODULE_DEVICE_TABLE(vio, nx842_vio_driver_ids); + + static struct vio_driver nx842_vio_driver = { + .name = KBUILD_MODNAME, +diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c +index fe32dd95ae4f..a2d5ba0a0d5a 100644 +--- a/drivers/crypto/omap-aes.c ++++ b/drivers/crypto/omap-aes.c +@@ -1172,7 +1172,7 @@ static int omap_aes_probe(struct platform_device *pdev) + if (err < 0) { + dev_err(dev, "%s: failed to get_sync(%d)\n", + __func__, err); +- goto err_res; ++ goto err_pm_disable; + } + + omap_aes_dma_stop(dd); +@@ -1257,6 +1257,7 @@ static int omap_aes_probe(struct platform_device *pdev) + omap_aes_dma_cleanup(dd); + err_irq: + tasklet_kill(&dd->done_task); ++err_pm_disable: + pm_runtime_disable(dev); + err_res: + dd = NULL; +diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c +index ff6ac4e824b5..4adcf89add25 100644 +--- a/drivers/crypto/omap-sham.c ++++ b/drivers/crypto/omap-sham.c +@@ -167,8 +167,6 @@ struct omap_sham_hmac_ctx { + }; + + struct omap_sham_ctx { +- struct omap_sham_dev *dd; +- + unsigned long flags; + + /* fallback stuff */ +@@ -456,6 +454,9 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length, + struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); + u32 val, mask; + ++ if (likely(ctx->digcnt)) ++ omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt); ++ + /* + * Setting ALGO_CONST only for the first iteration and + * CLOSE_HASH only for the last one. 
Note that flags mode bits +@@ -915,27 +916,35 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) + return 0; + } + ++struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx) ++{ ++ struct omap_sham_dev *dd; ++ ++ if (ctx->dd) ++ return ctx->dd; ++ ++ spin_lock_bh(&sham.lock); ++ dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list); ++ list_move_tail(&dd->list, &sham.dev_list); ++ ctx->dd = dd; ++ spin_unlock_bh(&sham.lock); ++ ++ return dd; ++} ++ + static int omap_sham_init(struct ahash_request *req) + { + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); +- struct omap_sham_dev *dd = NULL, *tmp; ++ struct omap_sham_dev *dd; + int bs = 0; + +- spin_lock_bh(&sham.lock); +- if (!tctx->dd) { +- list_for_each_entry(tmp, &sham.dev_list, list) { +- dd = tmp; +- break; +- } +- tctx->dd = dd; +- } else { +- dd = tctx->dd; +- } +- spin_unlock_bh(&sham.lock); ++ ctx->dd = NULL; + +- ctx->dd = dd; ++ dd = omap_sham_find_dev(ctx); ++ if (!dd) ++ return -ENODEV; + + ctx->flags = 0; + +@@ -1185,8 +1194,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, + static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) + { + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); +- struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); +- struct omap_sham_dev *dd = tctx->dd; ++ struct omap_sham_dev *dd = ctx->dd; + + ctx->op = op; + +@@ -1196,7 +1204,7 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) + static int omap_sham_update(struct ahash_request *req) + { + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); +- struct omap_sham_dev *dd = ctx->dd; ++ struct omap_sham_dev *dd = omap_sham_find_dev(ctx); + + if (!req->nbytes) + return 0; +@@ -1301,21 +1309,8 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, + struct omap_sham_hmac_ctx *bctx = tctx->base; + int bs = crypto_shash_blocksize(bctx->shash); + int ds = crypto_shash_digestsize(bctx->shash); +- struct omap_sham_dev *dd = NULL, *tmp; + int err, i; + +- spin_lock_bh(&sham.lock); +- if (!tctx->dd) { +- list_for_each_entry(tmp, &sham.dev_list, list) { +- dd = tmp; +- break; +- } +- tctx->dd = dd; +- } else { +- dd = tctx->dd; +- } +- spin_unlock_bh(&sham.lock); +- + err = crypto_shash_setkey(tctx->fallback, key, keylen); + if (err) + return err; +@@ -1333,7 +1328,7 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, + + memset(bctx->ipad + keylen, 0, bs - keylen); + +- if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { ++ if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) { + memcpy(bctx->opad, bctx->ipad, bs); + + for (i = 0; i < bs; i++) { +@@ -2072,6 +2067,7 @@ static int omap_sham_probe(struct platform_device *pdev) + } + + dd->flags |= dd->pdata->flags; ++ sham.flags |= dd->pdata->flags; + + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY); +@@ -2097,6 +2093,9 @@ static int omap_sham_probe(struct platform_device *pdev) + spin_unlock(&sham.lock); + + for (i = 0; i < dd->pdata->algs_info_size; i++) { ++ if (dd->pdata->algs_info[i].registered) ++ break; ++ + for (j = 0; j < dd->pdata->algs_info[i].size; j++) { + struct ahash_alg *alg; + +@@ -2142,9 +2141,11 @@ static int omap_sham_remove(struct platform_device *pdev) + list_del(&dd->list); + spin_unlock(&sham.lock); + for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) +- for (j = 
dd->pdata->algs_info[i].registered - 1; j >= 0; j--) ++ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) { + crypto_unregister_ahash( + &dd->pdata->algs_info[i].algs_list[j]); ++ dd->pdata->algs_info[i].registered--; ++ } + tasklet_kill(&dd->done_task); + pm_runtime_disable(&pdev->dev); + +diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c +index 0dd8d2dc2ec1..26931325dfa2 100644 +--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c ++++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c +@@ -238,12 +238,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + if (ret) + goto out_err_free_reg; + +- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); +- + ret = adf_dev_init(accel_dev); + if (ret) + goto out_err_dev_shutdown; + ++ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); ++ + ret = adf_dev_start(accel_dev); + if (ret) + goto out_err_dev_stop; +diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c +index cd9e63468b18..722a06aac9fa 100644 +--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c ++++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c +@@ -238,12 +238,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + if (ret) + goto out_err_free_reg; + +- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); +- + ret = adf_dev_init(accel_dev); + if (ret) + goto out_err_dev_shutdown; + ++ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); ++ + ret = adf_dev_start(accel_dev); + if (ret) + goto out_err_dev_stop; +diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c +index 06d49017a52b..2c0be14309cf 100644 +--- a/drivers/crypto/qat/qat_common/adf_isr.c ++++ b/drivers/crypto/qat/qat_common/adf_isr.c +@@ -330,19 +330,32 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev) + + ret = adf_isr_alloc_msix_entry_table(accel_dev); + if (ret) +- return ret; +- if (adf_enable_msix(accel_dev)) + goto err_out; + +- if (adf_setup_bh(accel_dev)) +- goto err_out; ++ ret = adf_enable_msix(accel_dev); ++ if (ret) ++ goto err_free_msix_table; + +- if (adf_request_irqs(accel_dev)) +- goto err_out; ++ ret = adf_setup_bh(accel_dev); ++ if (ret) ++ goto err_disable_msix; ++ ++ ret = adf_request_irqs(accel_dev); ++ if (ret) ++ goto err_cleanup_bh; + + return 0; ++ ++err_cleanup_bh: ++ adf_cleanup_bh(accel_dev); ++ ++err_disable_msix: ++ adf_disable_msix(&accel_dev->accel_pci_dev); ++ ++err_free_msix_table: ++ adf_isr_free_msix_entry_table(accel_dev); ++ + err_out: +- adf_isr_resource_free(accel_dev); +- return -EFAULT; ++ return ret; + } + EXPORT_SYMBOL_GPL(adf_isr_resource_alloc); +diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c +index 57d2622728a5..4c0067f8c079 100644 +--- a/drivers/crypto/qat/qat_common/adf_transport.c ++++ b/drivers/crypto/qat/qat_common/adf_transport.c +@@ -197,6 +197,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring) + dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n"); + dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes, + ring->base_addr, ring->dma_addr); ++ ring->base_addr = NULL; + return -EFAULT; + } + +diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c +index bf99e11a3403..4c1217ba83ae 100644 +--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c ++++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c +@@ -304,17 +304,26 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev) + 
goto err_out; + + if (adf_setup_pf2vf_bh(accel_dev)) +- goto err_out; ++ goto err_disable_msi; + + if (adf_setup_bh(accel_dev)) +- goto err_out; ++ goto err_cleanup_pf2vf_bh; + + if (adf_request_msi_irq(accel_dev)) +- goto err_out; ++ goto err_cleanup_bh; + + return 0; ++ ++err_cleanup_bh: ++ adf_cleanup_bh(accel_dev); ++ ++err_cleanup_pf2vf_bh: ++ adf_cleanup_pf2vf_bh(accel_dev); ++ ++err_disable_msi: ++ adf_disable_msi(accel_dev); ++ + err_out: +- adf_vf_isr_resource_free(accel_dev); + return -EFAULT; + } + EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc); +diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c +index 20f35df8a01f..4f4884521a87 100644 +--- a/drivers/crypto/qat/qat_common/qat_algs.c ++++ b/drivers/crypto/qat/qat_common/qat_algs.c +@@ -822,6 +822,11 @@ static int qat_alg_aead_dec(struct aead_request *areq) + struct icp_qat_fw_la_bulk_req *msg; + int digst_size = crypto_aead_authsize(aead_tfm); + int ret, ctr = 0; ++ u32 cipher_len; ++ ++ cipher_len = areq->cryptlen - digst_size; ++ if (cipher_len % AES_BLOCK_SIZE != 0) ++ return -EINVAL; + + ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); + if (unlikely(ret)) +@@ -836,7 +841,7 @@ static int qat_alg_aead_dec(struct aead_request *areq) + qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; + qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; + cipher_param = (void *)&qat_req->req.serv_specif_rqpars; +- cipher_param->cipher_length = areq->cryptlen - digst_size; ++ cipher_param->cipher_length = cipher_len; + cipher_param->cipher_offset = areq->assoclen; + memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE); + auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); +@@ -865,6 +870,9 @@ static int qat_alg_aead_enc(struct aead_request *areq) + uint8_t *iv = areq->iv; + int ret, ctr = 0; + ++ if (areq->cryptlen % AES_BLOCK_SIZE != 0) ++ return -EINVAL; ++ + ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); + if (unlikely(ret)) + return ret; +diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c +index 8c4fd255a601..cdf80c16a033 100644 +--- a/drivers/crypto/qat/qat_common/qat_hal.c ++++ b/drivers/crypto/qat/qat_common/qat_hal.c +@@ -1255,7 +1255,11 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle, + pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr); + return -EINVAL; + } +- qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval); ++ status = qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval); ++ if (status) { ++ pr_err("QAT: failed to read register"); ++ return status; ++ } + gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum); + data16low = 0xffff & data; + data16hi = 0xffff & (data >> 0x10); +diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c +index e2454d90d949..a8e3191e5185 100644 +--- a/drivers/crypto/qat/qat_common/qat_uclo.c ++++ b/drivers/crypto/qat/qat_common/qat_uclo.c +@@ -332,13 +332,18 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle + } + return 0; + out_err: ++ /* Do not free the list head unless we allocated it. 
*/ ++ tail_old = tail_old->next; ++ if (flag) { ++ kfree(*init_tab_base); ++ *init_tab_base = NULL; ++ } ++ + while (tail_old) { + mem_init = tail_old->next; + kfree(tail_old); + tail_old = mem_init; + } +- if (flag) +- kfree(*init_tab_base); + return -ENOMEM; + } + +@@ -380,7 +385,6 @@ static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle, + return 0; + } + +-#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000 + static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle, + struct icp_qat_uof_initmem *init_mem) + { +diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c +index 15de9cbed3bf..cc3f5171a523 100644 +--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c ++++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c +@@ -238,12 +238,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + if (ret) + goto out_err_free_reg; + +- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); +- + ret = adf_dev_init(accel_dev); + if (ret) + goto out_err_dev_shutdown; + ++ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); ++ + ret = adf_dev_start(accel_dev); + if (ret) + goto out_err_dev_stop; +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c +index 8b383d3d21c2..f4a6be76468d 100644 +--- a/drivers/crypto/talitos.c ++++ b/drivers/crypto/talitos.c +@@ -447,7 +447,7 @@ DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE) + /* + * locate current (offending) descriptor + */ +-static u32 current_desc_hdr(struct device *dev, int ch) ++static __be32 current_desc_hdr(struct device *dev, int ch) + { + struct talitos_private *priv = dev_get_drvdata(dev); + int tail, iter; +@@ -478,13 +478,13 @@ static u32 current_desc_hdr(struct device *dev, int ch) + /* + * user diagnostics; report root cause of error based on execution unit status + */ +-static void report_eu_error(struct device *dev, int ch, u32 desc_hdr) ++static void report_eu_error(struct device *dev, int ch, __be32 desc_hdr) + { + struct talitos_private *priv = dev_get_drvdata(dev); + int i; + + if (!desc_hdr) +- desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF); ++ desc_hdr = cpu_to_be32(in_be32(priv->chan[ch].reg + TALITOS_DESCBUF)); + + switch (desc_hdr & DESC_HDR_SEL0_MASK) { + case DESC_HDR_SEL0_AFEU: +@@ -2636,7 +2636,6 @@ static struct talitos_alg_template driver_algs[] = { + .cra_ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, +- .ivsize = AES_BLOCK_SIZE, + } + }, + .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | +@@ -2670,6 +2669,7 @@ static struct talitos_alg_template driver_algs[] = { + .cra_ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, ++ .ivsize = AES_BLOCK_SIZE, + .setkey = ablkcipher_aes_setkey, + } + }, +diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c +index 17c8e2b28c42..7500ec9efa6a 100644 +--- a/drivers/crypto/ux500/hash/hash_core.c ++++ b/drivers/crypto/ux500/hash/hash_core.c +@@ -1006,6 +1006,7 @@ static int hash_hw_final(struct ahash_request *req) + goto out; + } + } else if (req->nbytes == 0 && ctx->keylen > 0) { ++ ret = -EPERM; + dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n", + __func__); + goto out; +diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c +index fe9dce0245bf..a20267d93f8a 100644 +--- a/drivers/devfreq/tegra-devfreq.c ++++ b/drivers/devfreq/tegra-devfreq.c +@@ -79,6 +79,8 @@ + + #define KHZ 1000 + ++#define 
KHZ_MAX (ULONG_MAX / KHZ) ++ + /* Assume that the bus is saturated if the utilization is 25% */ + #define BUS_SATURATION_RATIO 25 + +@@ -179,7 +181,7 @@ struct tegra_actmon_emc_ratio { + }; + + static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = { +- { 1400000, ULONG_MAX }, ++ { 1400000, KHZ_MAX }, + { 1200000, 750000 }, + { 1100000, 600000 }, + { 1000000, 500000 }, +diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c +index a32cd71f94bb..cb72b8c915c7 100644 +--- a/drivers/dma/at_hdmac.c ++++ b/drivers/dma/at_hdmac.c +@@ -1810,6 +1810,8 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, + return NULL; + + dmac_pdev = of_find_device_by_node(dma_spec->np); ++ if (!dmac_pdev) ++ return NULL; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); +diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c +index aca2d6fd92d5..e1b6787f0e68 100644 +--- a/drivers/dma/dma-jz4780.c ++++ b/drivers/dma/dma-jz4780.c +@@ -567,11 +567,11 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan, + enum dma_status status; + unsigned long flags; + ++ spin_lock_irqsave(&jzchan->vchan.lock, flags); ++ + status = dma_cookie_status(chan, cookie, txstate); + if ((status == DMA_COMPLETE) || (txstate == NULL)) +- return status; +- +- spin_lock_irqsave(&jzchan->vchan.lock, flags); ++ goto out_unlock_irqrestore; + + vdesc = vchan_find_desc(&jzchan->vchan, cookie); + if (vdesc) { +@@ -588,6 +588,7 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan, + && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) + status = DMA_ERROR; + ++out_unlock_irqrestore: + spin_unlock_irqrestore(&jzchan->vchan.lock, flags); + return status; + } +diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig +index e00c9b022964..6ea3e95c287b 100644 +--- a/drivers/dma/dw/Kconfig ++++ b/drivers/dma/dw/Kconfig +@@ -11,6 +11,7 @@ config DW_DMAC_BIG_ENDIAN_IO + + config DW_DMAC + tristate "Synopsys DesignWare AHB DMA platform driver" ++ depends on HAS_IOMEM + select DW_DMAC_CORE + select DW_DMAC_BIG_ENDIAN_IO if AVR32 + default y if CPU_AT32AP7000 +@@ -21,6 +22,7 @@ config DW_DMAC + config DW_DMAC_PCI + tristate "Synopsys DesignWare AHB DMA PCI driver" + depends on PCI ++ depends on HAS_IOMEM + select DW_DMAC_CORE + help + Support the Synopsys DesignWare AHB DMA controller on the +diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c +index c7568869284e..0d2c6e13a01f 100644 +--- a/drivers/dma/fsl-edma.c ++++ b/drivers/dma/fsl-edma.c +@@ -682,6 +682,13 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id) + fsl_chan = &fsl_edma->chans[ch]; + + spin_lock(&fsl_chan->vchan.lock); ++ ++ if (!fsl_chan->edesc) { ++ /* terminate_all called before */ ++ spin_unlock(&fsl_chan->vchan.lock); ++ continue; ++ } ++ + if (!fsl_chan->edesc->iscyclic) { + list_del(&fsl_chan->edesc->vdesc.node); + vchan_cookie_complete(&fsl_chan->edesc->vdesc); +diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c +index 51c75bf2b9b6..c9a1d59dcb49 100644 +--- a/drivers/dma/fsldma.c ++++ b/drivers/dma/fsldma.c +@@ -1331,6 +1331,7 @@ static int fsldma_of_probe(struct platform_device *op) + { + struct fsldma_device *fdev; + struct device_node *child; ++ unsigned int i; + int err; + + fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); +@@ -1411,6 +1412,10 @@ static int fsldma_of_probe(struct platform_device *op) + return 0; + + out_free_fdev: ++ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { ++ if (fdev->chan[i]) ++ fsl_dma_chan_remove(fdev->chan[i]); ++ } + 
irq_dispose_mapping(fdev->irq); + iounmap(fdev->regs); + out_free: +@@ -1433,6 +1438,7 @@ static int fsldma_of_remove(struct platform_device *op) + if (fdev->chan[i]) + fsl_dma_chan_remove(fdev->chan[i]); + } ++ irq_dispose_mapping(fdev->irq); + + iounmap(fdev->regs); + kfree(fdev); +diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c +index 1389f0582e29..c5a45c57b8b8 100644 +--- a/drivers/dma/ioat/dma.c ++++ b/drivers/dma/ioat/dma.c +@@ -38,6 +38,18 @@ + + #include "../dmaengine.h" + ++int completion_timeout = 200; ++module_param(completion_timeout, int, 0644); ++MODULE_PARM_DESC(completion_timeout, ++ "set ioat completion timeout [msec] (default 200 [msec])"); ++int idle_timeout = 2000; ++module_param(idle_timeout, int, 0644); ++MODULE_PARM_DESC(idle_timeout, ++ "set ioat idel timeout [msec] (default 2000 [msec])"); ++ ++#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout) ++#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout) ++ + static char *chanerr_str[] = { + "DMA Transfer Destination Address Error", + "Next Descriptor Address Error", +diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h +index a9bc1a15b0d1..b0152288983b 100644 +--- a/drivers/dma/ioat/dma.h ++++ b/drivers/dma/ioat/dma.h +@@ -111,8 +111,6 @@ struct ioatdma_chan { + #define IOAT_RUN 5 + #define IOAT_CHAN_ACTIVE 6 + struct timer_list timer; +- #define COMPLETION_TIMEOUT msecs_to_jiffies(100) +- #define IDLE_TIMEOUT msecs_to_jiffies(2000) + #define RESET_DELAY msecs_to_jiffies(100) + struct ioatdma_device *ioat_dma; + dma_addr_t completion_dma; +diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c +index faae0bfe1109..441f37b41abd 100644 +--- a/drivers/dma/of-dma.c ++++ b/drivers/dma/of-dma.c +@@ -68,18 +68,23 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec, + return NULL; + + ofdma_target = of_dma_find_controller(&dma_spec_target); +- if (!ofdma_target) +- return NULL; ++ if (!ofdma_target) { ++ ofdma->dma_router->route_free(ofdma->dma_router->dev, ++ route_data); ++ chan = ERR_PTR(-EPROBE_DEFER); ++ goto err; ++ } + + chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target); +- if (chan) { +- chan->router = ofdma->dma_router; +- chan->route_data = route_data; +- } else { ++ if (IS_ERR_OR_NULL(chan)) { + ofdma->dma_router->route_free(ofdma->dma_router->dev, + route_data); ++ } else { ++ chan->router = ofdma->dma_router; ++ chan->route_data = route_data; + } + ++err: + /* + * Need to put the node back since the ofdma->of_dma_route_allocate + * has taken it for generating the new, translated dma_spec +diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c +index 57b375d0de29..d8997dafb876 100644 +--- a/drivers/dma/pl330.c ++++ b/drivers/dma/pl330.c +@@ -2579,13 +2579,15 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( + for (i = 0; i < len / period_len; i++) { + desc = pl330_get_desc(pch); + if (!desc) { ++ unsigned long iflags; ++ + dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", + __func__, __LINE__); + + if (!first) + return NULL; + +- spin_lock_irqsave(&pl330->pool_lock, flags); ++ spin_lock_irqsave(&pl330->pool_lock, iflags); + + while (!list_empty(&first->node)) { + desc = list_entry(first->node.next, +@@ -2595,7 +2597,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( + + list_move_tail(&first->node, &pl330->desc_pool); + +- spin_unlock_irqrestore(&pl330->pool_lock, flags); ++ spin_unlock_irqrestore(&pl330->pool_lock, iflags); + + return NULL; + } +@@ -2677,14 +2679,14 @@ 
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, + while (burst != (1 << desc->rqcfg.brst_size)) + desc->rqcfg.brst_size++; + ++ desc->rqcfg.brst_len = get_burst_len(desc, len); + /* + * If burst size is smaller than bus width then make sure we only + * transfer one at a time to avoid a burst stradling an MFIFO entry. + */ +- if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width) ++ if (burst * 8 < pl330->pcfg.data_bus_width) + desc->rqcfg.brst_len = 1; + +- desc->rqcfg.brst_len = get_burst_len(desc, len); + desc->bytes_requested = len; + + desc->txd.flags = flags; +diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig +index 2afceb1627a1..b62ebb5c4fe0 100644 +--- a/drivers/dma/qcom/Kconfig ++++ b/drivers/dma/qcom/Kconfig +@@ -9,6 +9,7 @@ config QCOM_BAM_DMA + + config QCOM_HIDMA_MGMT + tristate "Qualcomm Technologies HIDMA Management support" ++ depends on HAS_IOMEM + select DMA_ENGINE + help + Enable support for the Qualcomm Technologies HIDMA Management. +diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c +index 82f36e466083..143ea7cad756 100644 +--- a/drivers/dma/qcom/hidma_mgmt.c ++++ b/drivers/dma/qcom/hidma_mgmt.c +@@ -398,6 +398,20 @@ static int __init hidma_mgmt_init(void) + of_node_put(child); + } + #endif ++ /* ++ * We do not check for return value here, as it is assumed that ++ * platform_driver_register must not fail. The reason for this is that ++ * the (potential) hidma_mgmt_of_populate_channels calls above are not ++ * cleaned up if it does fail, and to do this work is quite ++ * complicated. In particular, various calls of of_address_to_resource, ++ * of_irq_to_resource, platform_device_register_full, of_dma_configure, ++ * and of_msi_configure which then call other functions and so on, must ++ * be cleaned up - this is not a trivial exercise. ++ * ++ * Currently, this module is not intended to be unloaded, and there is ++ * no module_exit function defined which does the needed cleanup. For ++ * this reason, we have to assume success here. 
++ */ + platform_driver_register(&hidma_mgmt_driver); + + return 0; +diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c +index 6682b3eec2b6..ec15ded640f6 100644 +--- a/drivers/dma/sh/usb-dmac.c ++++ b/drivers/dma/sh/usb-dmac.c +@@ -861,8 +861,8 @@ static int usb_dmac_probe(struct platform_device *pdev) + + error: + of_dma_controller_free(pdev->dev.of_node); +- pm_runtime_put(&pdev->dev); + error_pm: ++ pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return ret; + } +diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c +index 68b41daab3a8..bf7105814ee7 100644 +--- a/drivers/dma/ste_dma40.c ++++ b/drivers/dma/ste_dma40.c +@@ -3674,6 +3674,9 @@ static int __init d40_probe(struct platform_device *pdev) + + kfree(base->lcla_pool.base_unaligned); + ++ if (base->lcpa_base) ++ iounmap(base->lcpa_base); ++ + if (base->phy_lcpa) + release_mem_region(base->phy_lcpa, + base->lcpa_size); +diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c +index 4eaf92b2b886..909739426f78 100644 +--- a/drivers/dma/tegra20-apb-dma.c ++++ b/drivers/dma/tegra20-apb-dma.c +@@ -1208,8 +1208,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) + + dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id); + +- if (tdc->busy) +- tegra_dma_terminate_all(dc); ++ tegra_dma_terminate_all(dc); + + spin_lock_irqsave(&tdc->lock, flags); + list_splice_init(&tdc->pending_sg_req, &sg_req_list); +diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c +index c16c06b3dd2f..54296e262dea 100644 +--- a/drivers/dma/tegra210-adma.c ++++ b/drivers/dma/tegra210-adma.c +@@ -583,6 +583,7 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc) + + ret = pm_runtime_get_sync(tdc2dev(tdc)); + if (ret < 0) { ++ pm_runtime_put_noidle(tdc2dev(tdc)); + free_irq(tdc->irq, tdc); + return ret; + } +@@ -764,8 +765,10 @@ static int tegra_adma_probe(struct platform_device *pdev) + pm_runtime_enable(&pdev->dev); + + ret = pm_runtime_get_sync(&pdev->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(&pdev->dev); + goto rpm_disable; ++ } + + ret = tegra_adma_init(tdma); + if (ret) +diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c +index cd271f782605..f00652585ee3 100644 +--- a/drivers/dma/xilinx/xilinx_dma.c ++++ b/drivers/dma/xilinx/xilinx_dma.c +@@ -420,8 +420,8 @@ struct xilinx_dma_device { + #define to_dma_tx_descriptor(tx) \ + container_of(tx, struct xilinx_dma_tx_descriptor, async_tx) + #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \ +- readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \ +- cond, delay_us, timeout_us) ++ readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \ ++ val, cond, delay_us, timeout_us) + + /* IO accessors */ + static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg) +@@ -2357,7 +2357,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, + has_dre = false; + + if (!has_dre) +- xdev->common.copy_align = fls(width - 1); ++ xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1); + + if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || + of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || +@@ -2630,7 +2630,11 @@ static int xilinx_dma_probe(struct platform_device *pdev) + } + + /* Register the DMA engine with the core */ +- dma_async_device_register(&xdev->common); ++ err = dma_async_device_register(&xdev->common); ++ if (err) { ++ dev_err(xdev->dev, "failed to register 
the dma device\n"); ++ goto error; ++ } + + err = of_dma_controller_register(node, of_dma_xilinx_xlate, + xdev); +diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c +index 9069fb854319..514763dcc375 100644 +--- a/drivers/dma/xilinx/zynqmp_dma.c ++++ b/drivers/dma/xilinx/zynqmp_dma.c +@@ -125,10 +125,12 @@ + /* Max transfer size per descriptor */ + #define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000 + ++/* Max burst lengths */ ++#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U ++#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U ++ + /* Reset values for data attributes */ + #define ZYNQMP_DMA_AXCACHE_VAL 0xF +-#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF +-#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF + + #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F + +@@ -527,17 +529,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status) + + static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) + { +- u32 val; ++ u32 val, burst_val; + + val = readl(chan->regs + ZYNQMP_DMA_CTRL0); + val |= ZYNQMP_DMA_POINT_TYPE_SG; + writel(val, chan->regs + ZYNQMP_DMA_CTRL0); + + val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); ++ burst_val = __ilog2_u32(chan->src_burst_len); + val = (val & ~ZYNQMP_DMA_ARLEN) | +- (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST); ++ ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN); ++ burst_val = __ilog2_u32(chan->dst_burst_len); + val = (val & ~ZYNQMP_DMA_AWLEN) | +- (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST); ++ ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN); + writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); + } + +@@ -551,8 +555,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan, + { + struct zynqmp_dma_chan *chan = to_chan(dchan); + +- chan->src_burst_len = config->src_maxburst; +- chan->dst_burst_len = config->dst_maxburst; ++ chan->src_burst_len = clamp(config->src_maxburst, 1U, ++ ZYNQMP_DMA_MAX_SRC_BURST_LEN); ++ chan->dst_burst_len = clamp(config->dst_maxburst, 1U, ++ ZYNQMP_DMA_MAX_DST_BURST_LEN); + + return 0; + } +@@ -968,8 +974,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, + return PTR_ERR(chan->regs); + + chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64; +- chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL; +- chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL; ++ chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN; ++ chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN; + err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width); + if (err < 0) { + dev_err(&pdev->dev, "missing xlnx,bus-width property\n"); +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c +index 1c5f23224b3c..020dd07d1c23 100644 +--- a/drivers/edac/amd64_edac.c ++++ b/drivers/edac/amd64_edac.c +@@ -243,6 +243,8 @@ static int get_scrub_rate(struct mem_ctl_info *mci) + + if (pvt->model == 0x60) + amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); ++ else ++ amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); + } else + amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); + +diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c +index b69eaf62e657..09a5b168b289 100644 +--- a/drivers/edac/edac_device_sysfs.c ++++ b/drivers/edac/edac_device_sysfs.c +@@ -301,6 +301,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) + + /* Error exit stack */ + err_kobj_reg: ++ kobject_put(&edac_dev->kobj); + module_put(edac_dev->owner); + + err_out: +diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c +index 6e3428ba400f..622d117e2533 100644 +--- 
a/drivers/edac/edac_pci_sysfs.c ++++ b/drivers/edac/edac_pci_sysfs.c +@@ -386,7 +386,7 @@ static int edac_pci_main_kobj_setup(void) + + /* Error unwind statck */ + kobject_init_and_add_fail: +- kfree(edac_pci_top_main_kobj); ++ kobject_put(edac_pci_top_main_kobj); + + kzalloc_fail: + module_put(THIS_MODULE); +diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c +index c655162caf08..599038edd90d 100644 +--- a/drivers/edac/i5100_edac.c ++++ b/drivers/edac/i5100_edac.c +@@ -1073,16 +1073,15 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id) + PCI_DEVICE_ID_INTEL_5100_19, 0); + if (!einj) { + ret = -ENODEV; +- goto bail_einj; ++ goto bail_mc_free; + } + + rc = pci_enable_device(einj); + if (rc < 0) { + ret = rc; +- goto bail_disable_einj; ++ goto bail_einj; + } + +- + mci->pdev = &pdev->dev; + + priv = mci->pvt_info; +@@ -1149,14 +1148,14 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id) + bail_scrub: + priv->scrub_enable = 0; + cancel_delayed_work_sync(&(priv->i5100_scrubbing)); +- edac_mc_free(mci); +- +-bail_disable_einj: + pci_disable_device(einj); + + bail_einj: + pci_dev_put(einj); + ++bail_mc_free: ++ edac_mc_free(mci); ++ + bail_disable_ch1: + pci_disable_device(ch1mm); + +diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c +index 1c88d9707495..3438b98e6094 100644 +--- a/drivers/edac/ie31200_edac.c ++++ b/drivers/edac/ie31200_edac.c +@@ -145,6 +145,8 @@ + (n << (28 + (2 * skl) - PAGE_SHIFT)) + + static int nr_channels; ++static struct pci_dev *mci_pdev; ++static int ie31200_registered = 1; + + struct ie31200_priv { + void __iomem *window; +@@ -512,12 +514,16 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx) + static int ie31200_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) + { +- edac_dbg(0, "MC:\n"); ++ int rc; + ++ edac_dbg(0, "MC:\n"); + if (pci_enable_device(pdev) < 0) + return -EIO; ++ rc = ie31200_probe1(pdev, ent->driver_data); ++ if (rc == 0 && !mci_pdev) ++ mci_pdev = pci_dev_get(pdev); + +- return ie31200_probe1(pdev, ent->driver_data); ++ return rc; + } + + static void ie31200_remove_one(struct pci_dev *pdev) +@@ -526,6 +532,8 @@ static void ie31200_remove_one(struct pci_dev *pdev) + struct ie31200_priv *priv; + + edac_dbg(0, "\n"); ++ pci_dev_put(mci_pdev); ++ mci_pdev = NULL; + mci = edac_mc_del_mc(&pdev->dev); + if (!mci) + return; +@@ -574,17 +582,53 @@ static struct pci_driver ie31200_driver = { + + static int __init ie31200_init(void) + { ++ int pci_rc, i; ++ + edac_dbg(3, "MC:\n"); + /* Ensure that the OPSTATE is set correctly for POLL or NMI */ + opstate_init(); + +- return pci_register_driver(&ie31200_driver); ++ pci_rc = pci_register_driver(&ie31200_driver); ++ if (pci_rc < 0) ++ goto fail0; ++ ++ if (!mci_pdev) { ++ ie31200_registered = 0; ++ for (i = 0; ie31200_pci_tbl[i].vendor != 0; i++) { ++ mci_pdev = pci_get_device(ie31200_pci_tbl[i].vendor, ++ ie31200_pci_tbl[i].device, ++ NULL); ++ if (mci_pdev) ++ break; ++ } ++ if (!mci_pdev) { ++ edac_dbg(0, "ie31200 pci_get_device fail\n"); ++ pci_rc = -ENODEV; ++ goto fail1; ++ } ++ pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]); ++ if (pci_rc < 0) { ++ edac_dbg(0, "ie31200 init fail\n"); ++ pci_rc = -ENODEV; ++ goto fail1; ++ } ++ } ++ return 0; ++ ++fail1: ++ pci_unregister_driver(&ie31200_driver); ++fail0: ++ pci_dev_put(mci_pdev); ++ ++ return pci_rc; + } + + static void __exit ie31200_exit(void) + { + edac_dbg(3, "MC:\n"); + pci_unregister_driver(&ie31200_driver); ++ if 
(!ie31200_registered) ++ ie31200_remove_one(mci_pdev); + } + + module_init(ie31200_init); +diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c +index bc538708c753..cdee6d6d5453 100644 +--- a/drivers/extcon/extcon-adc-jack.c ++++ b/drivers/extcon/extcon-adc-jack.c +@@ -128,7 +128,7 @@ static int adc_jack_probe(struct platform_device *pdev) + for (i = 0; data->adc_conditions[i].id != EXTCON_NONE; i++); + data->num_conditions = i; + +- data->chan = iio_channel_get(&pdev->dev, pdata->consumer_channel); ++ data->chan = devm_iio_channel_get(&pdev->dev, pdata->consumer_channel); + if (IS_ERR(data->chan)) + return PTR_ERR(data->chan); + +@@ -170,7 +170,6 @@ static int adc_jack_remove(struct platform_device *pdev) + + free_irq(data->irq, data); + cancel_work_sync(&data->handler.work); +- iio_channel_release(data->chan); + + return 0; + } +diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c +index 4c0b6df8b5dd..1d06d99b8bd9 100644 +--- a/drivers/extcon/extcon-arizona.c ++++ b/drivers/extcon/extcon-arizona.c +@@ -601,7 +601,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data) + struct arizona *arizona = info->arizona; + int id_gpio = arizona->pdata.hpdet_id_gpio; + unsigned int report = EXTCON_JACK_HEADPHONE; +- int ret, reading; ++ int ret, reading, state; + bool mic = false; + + mutex_lock(&info->lock); +@@ -614,12 +614,11 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data) + } + + /* If the cable was removed while measuring ignore the result */ +- ret = extcon_get_state(info->edev, EXTCON_MECHANICAL); +- if (ret < 0) { +- dev_err(arizona->dev, "Failed to check cable state: %d\n", +- ret); ++ state = extcon_get_state(info->edev, EXTCON_MECHANICAL); ++ if (state < 0) { ++ dev_err(arizona->dev, "Failed to check cable state: %d\n", state); + goto out; +- } else if (!ret) { ++ } else if (!state) { + dev_dbg(arizona->dev, "Ignoring HPDET for removed cable\n"); + goto done; + } +@@ -672,7 +671,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data) + ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC); + + /* If we have a mic then reenable MICDET */ +- if (mic || info->mic) ++ if (state && (mic || info->mic)) + arizona_start_mic(info); + + if (info->hpdet_active) { +@@ -680,7 +679,9 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data) + info->hpdet_active = false; + } + +- info->hpdet_done = true; ++ /* Do not set hp_det done when the cable has been unplugged */ ++ if (state) ++ info->hpdet_done = true; + + out: + mutex_unlock(&info->lock); +diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c +index 68dbcb814b2f..4cf72487381e 100644 +--- a/drivers/extcon/extcon-max77693.c ++++ b/drivers/extcon/extcon-max77693.c +@@ -1272,4 +1272,4 @@ module_platform_driver(max77693_muic_driver); + MODULE_DESCRIPTION("Maxim MAX77693 Extcon driver"); + MODULE_AUTHOR("Chanwoo Choi "); + MODULE_LICENSE("GPL"); +-MODULE_ALIAS("platform:extcon-max77693"); ++MODULE_ALIAS("platform:max77693-muic"); +diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c +index b9b48d45a6dc..17d426829f5d 100644 +--- a/drivers/extcon/extcon-max8997.c ++++ b/drivers/extcon/extcon-max8997.c +@@ -783,3 +783,4 @@ module_platform_driver(max8997_muic_driver); + MODULE_DESCRIPTION("Maxim MAX8997 Extcon driver"); + MODULE_AUTHOR("Donggeun Kim "); + MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:max8997-muic"); +diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c +index 
9d2d8a6673c8..dbe5fc278f09 100644 +--- a/drivers/extcon/extcon-sm5502.c ++++ b/drivers/extcon/extcon-sm5502.c +@@ -92,7 +92,6 @@ static struct reg_data sm5502_reg_data[] = { + | SM5502_REG_INTM2_MHL_MASK, + .invert = true, + }, +- { } + }; + + /* List of detectable cables */ +diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c +index e223a8c77946..f0c6ca5a1ac1 100644 +--- a/drivers/extcon/extcon.c ++++ b/drivers/extcon/extcon.c +@@ -1248,6 +1248,7 @@ int extcon_dev_register(struct extcon_dev *edev) + sizeof(*edev->nh) * edev->max_supported, GFP_KERNEL); + if (!edev->nh) { + ret = -ENOMEM; ++ device_unregister(&edev->dev); + goto err_dev; + } + +diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c +index 180f0a96528c..646dca0a8d73 100644 +--- a/drivers/firewire/nosy.c ++++ b/drivers/firewire/nosy.c +@@ -359,6 +359,7 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + struct client *client = file->private_data; + spinlock_t *client_list_lock = &client->lynx->client_list_lock; + struct nosy_stats stats; ++ int ret; + + switch (cmd) { + case NOSY_IOC_GET_STATS: +@@ -373,11 +374,15 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + return 0; + + case NOSY_IOC_START: ++ ret = -EBUSY; + spin_lock_irq(client_list_lock); +- list_add_tail(&client->link, &client->lynx->client_list); ++ if (list_empty(&client->link)) { ++ list_add_tail(&client->link, &client->lynx->client_list); ++ ret = 0; ++ } + spin_unlock_irq(client_list_lock); + +- return 0; ++ return ret; + + case NOSY_IOC_STOP: + spin_lock_irq(client_list_lock); +diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig +index 923f34be269a..900a1b99eabd 100644 +--- a/drivers/firmware/Kconfig ++++ b/drivers/firmware/Kconfig +@@ -194,6 +194,7 @@ config FW_CFG_SYSFS_CMDLINE + config QCOM_SCM + bool + depends on ARM || ARM64 ++ depends on HAVE_ARM_SMCCC + select RESET_CONTROLLER + + config QCOM_SCM_32 +diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig +index c981be17d3c0..8ebc6f8bf9b9 100644 +--- a/drivers/firmware/efi/Kconfig ++++ b/drivers/firmware/efi/Kconfig +@@ -129,6 +129,17 @@ config EFI_TEST + Say Y here to enable the runtime services support via /dev/efi_test. + If unsure, say N. + ++config EFI_CUSTOM_SSDT_OVERLAYS ++ bool "Load custom ACPI SSDT overlay from an EFI variable" ++ depends on EFI_VARS && ACPI ++ default ACPI_TABLE_UPGRADE ++ help ++ Allow loading of an ACPI SSDT overlay from an EFI variable specified ++ by a kernel command line option. ++ ++ See Documentation/admin-guide/acpi/ssdt-overlays.rst for more ++ information. ++ + endmenu + + config UEFI_CPER +diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c +index c0e54396f250..dc8d2603612e 100644 +--- a/drivers/firmware/efi/cper.c ++++ b/drivers/firmware/efi/cper.c +@@ -257,8 +257,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg) + if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE)) + return 0; + +- n = 0; +- len = CPER_REC_LEN - 1; ++ len = CPER_REC_LEN; + dmi_memdev_name(mem->mem_dev_handle, &bank, &device); + if (bank && device) + n = snprintf(msg, len, "DIMM location: %s %s ", bank, device); +@@ -267,7 +266,6 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg) + "DIMM location: not present. 
DMI handle: 0x%.4x ", + mem->mem_dev_handle); + +- msg[n] = '\0'; + return n; + } + +diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c +index d89457d62a24..6e6a590b8388 100644 +--- a/drivers/firmware/efi/efi.c ++++ b/drivers/firmware/efi/efi.c +@@ -198,7 +198,7 @@ static void generic_ops_unregister(void) + efivars_unregister(&generic_efivars); + } + +-#if IS_ENABLED(CONFIG_ACPI) ++#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS + #define EFIVAR_SSDT_NAME_MAX 16 + static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata; + static int __init efivar_ssdt_setup(char *str) +diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c +index 1c65f5ac4368..6529addd1e82 100644 +--- a/drivers/firmware/efi/efivars.c ++++ b/drivers/firmware/efi/efivars.c +@@ -586,8 +586,10 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) + ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype, + NULL, "%s", short_name); + kfree(short_name); +- if (ret) ++ if (ret) { ++ kobject_put(&new_var->kobj); + return ret; ++ } + + kobject_uevent(&new_var->kobj, KOBJ_ADD); + if (efivar_entry_add(new_var, &efivar_sysfs_list)) { +diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c +index 241dd7c63d2c..481b2f0a190b 100644 +--- a/drivers/firmware/efi/esrt.c ++++ b/drivers/firmware/efi/esrt.c +@@ -180,7 +180,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num) + rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL, + "entry%d", entry_num); + if (rc) { +- kfree(entry); ++ kobject_put(&entry->kobj); + return rc; + } + } +diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c +index 9faa09e7c31f..c2b991b9fa9e 100644 +--- a/drivers/firmware/efi/memattr.c ++++ b/drivers/firmware/efi/memattr.c +@@ -68,11 +68,6 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out) + return false; + } + +- if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) { +- pr_warn("Entry attributes invalid: RO and XP bits both cleared\n"); +- return false; +- } +- + if (PAGE_SIZE > EFI_PAGE_SIZE && + (!PAGE_ALIGNED(in->phys_addr) || + !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) { +diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c +index 0e2011636fbb..595bf12e7653 100644 +--- a/drivers/firmware/qemu_fw_cfg.c ++++ b/drivers/firmware/qemu_fw_cfg.c +@@ -192,15 +192,13 @@ static int fw_cfg_do_platform_probe(struct platform_device *pdev) + /* fw_cfg revision attribute, in /sys/firmware/qemu_fw_cfg top-level dir. */ + static u32 fw_cfg_rev; + +-static ssize_t fw_cfg_showrev(struct kobject *k, struct attribute *a, char *buf) ++static ssize_t fw_cfg_showrev(struct kobject *k, struct kobj_attribute *a, ++ char *buf) + { + return sprintf(buf, "%u\n", fw_cfg_rev); + } + +-static const struct { +- struct attribute attr; +- ssize_t (*show)(struct kobject *k, struct attribute *a, char *buf); +-} fw_cfg_rev_attr = { ++static const struct kobj_attribute fw_cfg_rev_attr = { + .attr = { .name = "rev", .mode = S_IRUSR }, + .show = fw_cfg_showrev, + }; +diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c +index d168410e2338..2e6081e5ad0b 100644 +--- a/drivers/gpio/gpio-pcf857x.c ++++ b/drivers/gpio/gpio-pcf857x.c +@@ -370,7 +370,7 @@ static int pcf857x_probe(struct i2c_client *client, + * reset state. Otherwise it flags pins to be driven low. 
+ */ + gpio->out = ~n_latch; +- gpio->status = gpio->out; ++ gpio->status = gpio->read(gpio->client); + + status = devm_gpiochip_add_data(&client->dev, &gpio->chip, gpio); + if (status < 0) +diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c +index d6e21f1a70a9..6cc7c5d59e46 100644 +--- a/drivers/gpio/gpio-tc3589x.c ++++ b/drivers/gpio/gpio-tc3589x.c +@@ -210,7 +210,7 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d) + continue; + + tc3589x_gpio->oldregs[i][j] = new; +- tc3589x_reg_write(tc3589x, regmap[i] + j * 8, new); ++ tc3589x_reg_write(tc3589x, regmap[i] + j, new); + } + } + +diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c +index 6b4d10d6e10f..5038d771ac6e 100644 +--- a/drivers/gpio/gpio-zynq.c ++++ b/drivers/gpio/gpio-zynq.c +@@ -778,8 +778,11 @@ static int zynq_gpio_probe(struct platform_device *pdev) + static int zynq_gpio_remove(struct platform_device *pdev) + { + struct zynq_gpio *gpio = platform_get_drvdata(pdev); ++ int ret; + +- pm_runtime_get_sync(&pdev->dev); ++ ret = pm_runtime_get_sync(&pdev->dev); ++ if (ret < 0) ++ dev_warn(&pdev->dev, "pm_runtime_get_sync() Failed\n"); + gpiochip_remove(&gpio->chip); + clk_disable_unprepare(gpio->clk); + device_set_wakeup_capable(&pdev->dev, 0); +diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c +index b863386be911..9e40914c0984 100644 +--- a/drivers/gpio/gpiolib-of.c ++++ b/drivers/gpio/gpiolib-of.c +@@ -80,7 +80,7 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np, + &gpiospec); + if (ret) { + pr_debug("%s: can't parse '%s' property of node '%s[%d]'\n", +- __func__, propname, np->full_name, index); ++ __func__, propname, np ? np->full_name : NULL, index); + return ERR_PTR(ret); + } + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +index e9311eb7b8d9..694f631d9c90 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +@@ -734,8 +734,10 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) + + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); +- if (r < 0) ++ if (r < 0) { ++ pm_runtime_put_autosuspend(connector->dev->dev); + return connector_status_disconnected; ++ } + } + + if (encoder) { +@@ -872,8 +874,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) + + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); +- if (r < 0) ++ if (r < 0) { ++ pm_runtime_put_autosuspend(connector->dev->dev); + return connector_status_disconnected; ++ } + } + + encoder = amdgpu_connector_best_single_encoder(connector); +@@ -996,8 +1000,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) + + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); +- if (r < 0) ++ if (r < 0) { ++ pm_runtime_put_autosuspend(connector->dev->dev); + return connector_status_disconnected; ++ } + } + + if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { +@@ -1371,8 +1377,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) + + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); +- if (r < 0) ++ if (r < 0) { ++ pm_runtime_put_autosuspend(connector->dev->dev); + return connector_status_disconnected; ++ } + } + + if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { +diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +index 15a2d8f3725d..fd97532bf7eb 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +@@ -268,7 +268,7 @@ int amdgpu_crtc_set_config(struct drm_mode_set *set) + + ret = pm_runtime_get_sync(dev->dev); + if (ret < 0) +- return ret; ++ goto out; + + ret = drm_crtc_helper_set_config(set); + +@@ -292,6 +292,7 @@ int amdgpu_crtc_set_config(struct drm_mode_set *set) + adev->have_disp_power_ref = false; + } + ++out: + /* drop the power reference we got coming in here */ + pm_runtime_put_autosuspend(dev->dev); + return ret; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +index e0890deccb2f..7cae10fec78d 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +@@ -633,11 +633,12 @@ long amdgpu_drm_ioctl(struct file *filp, + dev = file_priv->minor->dev; + ret = pm_runtime_get_sync(dev->dev); + if (ret < 0) +- return ret; ++ goto out; + + ret = drm_ioctl(filp, cmd, arg); + + pm_runtime_mark_last_busy(dev->dev); ++out: + pm_runtime_put_autosuspend(dev->dev); + return ret; + } +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +index 24941a7b659f..96fc1566f28e 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +@@ -452,8 +452,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file + return n ? -EFAULT : 0; + } + case AMDGPU_INFO_DEV_INFO: { +- struct drm_amdgpu_info_device dev_info = {}; ++ struct drm_amdgpu_info_device dev_info; + ++ memset(&dev_info, 0, sizeof(dev_info)); + dev_info.device_id = dev->pdev->device; + dev_info.chip_rev = adev->rev_id; + dev_info.external_rev = adev->external_rev_id; +@@ -542,7 +543,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) + + r = pm_runtime_get_sync(dev->dev); + if (r < 0) +- return r; ++ goto pm_put; + + fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); + if (unlikely(!fpriv)) { +@@ -565,6 +566,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) + + out_suspend: + pm_runtime_mark_last_busy(dev->dev); ++pm_put: + pm_runtime_put_autosuspend(dev->dev); + + return r; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +index 05ff98b43c50..ab041ae58b20 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +@@ -637,6 +637,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) + + release_sg: + kfree(ttm->sg); ++ ttm->sg = NULL; + return r; + } + +@@ -651,7 +652,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) + DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + + /* double check that we don't free the table twice */ +- if (!ttm->sg->sgl) ++ if (!ttm->sg || !ttm->sg->sgl) + return; + + /* free the sg table and pages again */ +@@ -885,6 +886,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) + + if (gtt && gtt->userptr) { + kfree(ttm->sg); ++ ttm->sg = NULL; + ttm->page_flags &= ~TTM_PAGE_FLAG_SG; + return; + } +diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c +index 1b50e6c13fb3..5fbf99d60058 100644 +--- a/drivers/gpu/drm/amd/amdgpu/atom.c ++++ b/drivers/gpu/drm/amd/amdgpu/atom.c +@@ -748,8 +748,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) + cjiffies = jiffies; + if 
(time_after(cjiffies, ctx->last_jump_jiffies)) { + cjiffies -= ctx->last_jump_jiffies; +- if ((jiffies_to_msecs(cjiffies) > 5000)) { +- DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n"); ++ if ((jiffies_to_msecs(cjiffies) > 10000)) { ++ DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n"); + ctx->abort = true; + } + } else { +diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +index cb952acc7133..2934443fbd4d 100644 +--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c ++++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +@@ -1053,22 +1053,19 @@ static int cik_sdma_soft_reset(void *handle) + { + u32 srbm_soft_reset = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; +- u32 tmp = RREG32(mmSRBM_STATUS2); ++ u32 tmp; + +- if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { +- /* sdma0 */ +- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); +- tmp |= SDMA0_F32_CNTL__HALT_MASK; +- WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); +- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; +- } +- if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) { +- /* sdma1 */ +- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); +- tmp |= SDMA0_F32_CNTL__HALT_MASK; +- WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); +- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; +- } ++ /* sdma0 */ ++ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); ++ tmp |= SDMA0_F32_CNTL__HALT_MASK; ++ WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); ++ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; ++ ++ /* sdma1 */ ++ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); ++ tmp |= SDMA0_F32_CNTL__HALT_MASK; ++ WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); ++ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; + + if (srbm_soft_reset) { + tmp = RREG32(mmSRBM_SOFT_RESET); +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +index 8c6e47c5507f..74221e096855 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +@@ -841,8 +841,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, + + ret = kobject_init_and_add(dev->kobj_node, &node_type, + sys_props.kobj_nodes, "%d", id); +- if (ret < 0) ++ if (ret < 0) { ++ kobject_put(dev->kobj_node); + return ret; ++ } + + dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node); + if (!dev->kobj_mem) +@@ -885,8 +887,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, + return -ENOMEM; + ret = kobject_init_and_add(mem->kobj, &mem_type, + dev->kobj_mem, "%d", i); +- if (ret < 0) ++ if (ret < 0) { ++ kobject_put(mem->kobj); + return ret; ++ } + + mem->attr.name = "properties"; + mem->attr.mode = KFD_SYSFS_FILE_MODE; +@@ -904,8 +908,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, + return -ENOMEM; + ret = kobject_init_and_add(cache->kobj, &cache_type, + dev->kobj_cache, "%d", i); +- if (ret < 0) ++ if (ret < 0) { ++ kobject_put(cache->kobj); + return ret; ++ } + + cache->attr.name = "properties"; + cache->attr.mode = KFD_SYSFS_FILE_MODE; +@@ -923,8 +929,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, + return -ENOMEM; + ret = kobject_init_and_add(iolink->kobj, &iolink_type, + dev->kobj_iolink, "%d", i); +- if (ret < 0) ++ if (ret < 0) { ++ kobject_put(iolink->kobj); + return ret; ++ } + + iolink->attr.name = "properties"; + iolink->attr.mode = KFD_SYSFS_FILE_MODE; 
+@@ -976,8 +984,10 @@ static int kfd_topology_update_sysfs(void) + ret = kobject_init_and_add(sys_props.kobj_topology, + &sysprops_type, &kfd_device->kobj, + "topology"); +- if (ret < 0) ++ if (ret < 0) { ++ kobject_put(sys_props.kobj_topology); + return ret; ++ } + + sys_props.kobj_nodes = kobject_create_and_add("nodes", + sys_props.kobj_topology); +diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c +index 753b3410c83b..73ca66c8f198 100644 +--- a/drivers/gpu/drm/drm_auth.c ++++ b/drivers/gpu/drm/drm_auth.c +@@ -244,9 +244,10 @@ int drm_master_open(struct drm_file *file_priv) + void drm_master_release(struct drm_file *file_priv) + { + struct drm_device *dev = file_priv->minor->dev; +- struct drm_master *master = file_priv->master; ++ struct drm_master *master; + + mutex_lock(&dev->master_mutex); ++ master = file_priv->master; + if (file_priv->magic) + idr_remove(&file_priv->master->magic_map, file_priv->magic); + +diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c +index 264ed1d7531b..4bd727006393 100644 +--- a/drivers/gpu/drm/drm_debugfs.c ++++ b/drivers/gpu/drm/drm_debugfs.c +@@ -296,13 +296,13 @@ static ssize_t connector_write(struct file *file, const char __user *ubuf, + + buf[len] = '\0'; + +- if (!strcmp(buf, "on")) ++ if (sysfs_streq(buf, "on")) + connector->force = DRM_FORCE_ON; +- else if (!strcmp(buf, "digital")) ++ else if (sysfs_streq(buf, "digital")) + connector->force = DRM_FORCE_ON_DIGITAL; +- else if (!strcmp(buf, "off")) ++ else if (sysfs_streq(buf, "off")) + connector->force = DRM_FORCE_OFF; +- else if (!strcmp(buf, "unspecified")) ++ else if (sysfs_streq(buf, "unspecified")) + connector->force = DRM_FORCE_UNSPECIFIED; + else + return -EINVAL; +diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c +index ec1ed94b2390..19c85c50e2d1 100644 +--- a/drivers/gpu/drm/drm_dp_aux_dev.c ++++ b/drivers/gpu/drm/drm_dp_aux_dev.c +@@ -59,7 +59,7 @@ static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_minor(unsigned index) + + mutex_lock(&aux_idr_mutex); + aux_dev = idr_find(&aux_idr, index); +- if (!kref_get_unless_zero(&aux_dev->refcount)) ++ if (aux_dev && !kref_get_unless_zero(&aux_dev->refcount)) + aux_dev = NULL; + mutex_unlock(&aux_idr_mutex); + +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c +index 41e67e983a7f..bb70c5272fe8 100644 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c +@@ -29,6 +29,7 @@ + #include + #include + #include ++#include + + #include + +@@ -2673,6 +2674,17 @@ static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, + return ret; + } + ++static int do_get_act_status(struct drm_dp_aux *aux) ++{ ++ int ret; ++ u8 status; ++ ++ ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); ++ if (ret < 0) ++ return ret; ++ ++ return status; ++} + + /** + * drm_dp_check_act_status() - Check ACT handled status. 
+@@ -2682,33 +2694,29 @@ static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, + */ + int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) + { +- u8 status; +- int ret; +- int count = 0; +- +- do { +- ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); +- +- if (ret < 0) { +- DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); +- goto fail; +- } +- +- if (status & DP_PAYLOAD_ACT_HANDLED) +- break; +- count++; +- udelay(100); +- +- } while (count < 30); +- +- if (!(status & DP_PAYLOAD_ACT_HANDLED)) { +- DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count); +- ret = -EINVAL; +- goto fail; ++ /* ++ * There doesn't seem to be any recommended retry count or timeout in ++ * the MST specification. Since some hubs have been observed to take ++ * over 1 second to update their payload allocations under certain ++ * conditions, we use a rather large timeout value. ++ */ ++ const int timeout_ms = 3000; ++ int ret, status; ++ ++ ret = readx_poll_timeout(do_get_act_status, mgr->aux, status, ++ status & DP_PAYLOAD_ACT_HANDLED || status < 0, ++ 200, timeout_ms * USEC_PER_MSEC); ++ if (ret < 0 && status >= 0) { ++ DRM_DEBUG_KMS("Failed to get ACT after %dms, last status: %02x\n", ++ timeout_ms, status); ++ return -EINVAL; ++ } else if (status < 0) { ++ DRM_DEBUG_KMS("Failed to read payload table status: %d\n", ++ status); ++ return status; + } ++ + return 0; +-fail: +- return ret; + } + EXPORT_SYMBOL(drm_dp_check_act_status); + +diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c +index 4484785cd9ac..95d5a5949753 100644 +--- a/drivers/gpu/drm/drm_encoder_slave.c ++++ b/drivers/gpu/drm/drm_encoder_slave.c +@@ -84,7 +84,7 @@ int drm_i2c_encoder_init(struct drm_device *dev, + + err = encoder_drv->encoder_init(client, dev, encoder); + if (err) +- goto fail_unregister; ++ goto fail_module_put; + + if (info->platform_data) + encoder->slave_funcs->set_config(&encoder->base, +@@ -92,9 +92,10 @@ int drm_i2c_encoder_init(struct drm_device *dev, + + return 0; + ++fail_module_put: ++ module_put(module); + fail_unregister: + i2c_unregister_device(client); +- module_put(module); + fail: + return err; + } +diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c +index ae5c0952a7a3..3133aa6e89c1 100644 +--- a/drivers/gpu/drm/drm_gem.c ++++ b/drivers/gpu/drm/drm_gem.c +@@ -694,9 +694,6 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data, + * @file_priv: drm file-private structure + * + * Open an object using the global name, returning a handle and the size. +- * +- * This handle (of course) holds a reference to the object, so the object +- * will not go away until the handle is deleted. + */ + int + drm_gem_open_ioctl(struct drm_device *dev, void *data, +@@ -721,14 +718,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data, + + /* drm_gem_handle_create_tail unlocks dev->object_name_lock. 
*/ + ret = drm_gem_handle_create_tail(file_priv, obj, &handle); +- drm_gem_object_unreference_unlocked(obj); + if (ret) +- return ret; ++ goto err; + + args->handle = handle; + args->size = obj->size; + +- return 0; ++err: ++ drm_gem_object_unreference_unlocked(obj); ++ return ret; + } + + /** +diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c +index 161f36132f98..b53ffb65569f 100644 +--- a/drivers/gpu/drm/drm_mipi_dsi.c ++++ b/drivers/gpu/drm/drm_mipi_dsi.c +@@ -1032,11 +1032,11 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format); + */ + int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline) + { +- u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8, +- scanline & 0xff }; ++ u8 payload[2] = { scanline >> 8, scanline & 0xff }; + ssize_t err; + +- err = mipi_dsi_generic_write(dsi, payload, sizeof(payload)); ++ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_SCANLINE, payload, ++ sizeof(payload)); + if (err < 0) + return err; + +diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c +index 17db4b4749d5..2e8479744ca4 100644 +--- a/drivers/gpu/drm/gma500/cdv_intel_display.c ++++ b/drivers/gpu/drm/gma500/cdv_intel_display.c +@@ -415,6 +415,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit, + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + struct gma_clock_t clock; + ++ memset(&clock, 0, sizeof(clock)); ++ + switch (refclk) { + case 27000: + if (target < 200000) { +diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c +index c52f9adf5e04..ec6ea202cf5d 100644 +--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c ++++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c +@@ -2121,12 +2121,12 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev + intel_dp->dpcd, + sizeof(intel_dp->dpcd)); + cdv_intel_edp_panel_vdd_off(gma_encoder); +- if (ret == 0) { ++ if (ret <= 0) { + /* if this fails, presume the device is a ghost */ + DRM_INFO("failed to retrieve link info, disabling eDP\n"); + cdv_intel_dp_encoder_destroy(encoder); + cdv_intel_dp_destroy(connector); +- goto err_priv; ++ goto err_connector; + } else { + DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n", + intel_dp->dpcd[0], intel_dp->dpcd[1], +diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c +index e28107061148..fc9a34ed58bd 100644 +--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c ++++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c +@@ -279,11 +279,8 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev) + hdmi_dev = pci_get_drvdata(dev); + + i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL); +- if (i2c_dev == NULL) { +- DRM_ERROR("Can't allocate interface\n"); +- ret = -ENOMEM; +- goto exit; +- } ++ if (!i2c_dev) ++ return -ENOMEM; + + i2c_dev->adap = &oaktrail_hdmi_i2c_adapter; + i2c_dev->status = I2C_STAT_INIT; +@@ -300,16 +297,23 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev) + oaktrail_hdmi_i2c_adapter.name, hdmi_dev); + if (ret) { + DRM_ERROR("Failed to request IRQ for I2C controller\n"); +- goto err; ++ goto free_dev; + } + + /* Adapter registration */ + ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter); +- return ret; ++ if (ret) { ++ DRM_ERROR("Failed to add I2C adapter\n"); ++ goto free_irq; ++ } + +-err: ++ return 0; ++ ++free_irq: ++ free_irq(dev->irq, hdmi_dev); ++free_dev: + kfree(i2c_dev); +-exit: ++ + return ret; + } + +diff --git a/drivers/gpu/drm/gma500/psb_drv.c 
b/drivers/gpu/drm/gma500/psb_drv.c +index 8f3ca526bd1b..29cb552829fe 100644 +--- a/drivers/gpu/drm/gma500/psb_drv.c ++++ b/drivers/gpu/drm/gma500/psb_drv.c +@@ -323,6 +323,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) + if (ret) + goto out_err; + ++ ret = -ENOMEM; ++ + dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0); + if (!dev_priv->mmu) + goto out_err; +diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c +index 78eb10902809..076b6da44f46 100644 +--- a/drivers/gpu/drm/gma500/psb_irq.c ++++ b/drivers/gpu/drm/gma500/psb_irq.c +@@ -350,6 +350,7 @@ int psb_irq_postinstall(struct drm_device *dev) + { + struct drm_psb_private *dev_priv = dev->dev_private; + unsigned long irqflags; ++ unsigned int i; + + spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); + +@@ -362,20 +363,12 @@ int psb_irq_postinstall(struct drm_device *dev) + PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); + PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); + +- if (dev->vblank[0].enabled) +- psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); +- else +- psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); +- +- if (dev->vblank[1].enabled) +- psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); +- else +- psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); +- +- if (dev->vblank[2].enabled) +- psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); +- else +- psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); ++ for (i = 0; i < dev->num_crtcs; ++i) { ++ if (dev->vblank[i].enabled) ++ psb_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); ++ else ++ psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); ++ } + + if (dev_priv->ops->hotplug_enable) + dev_priv->ops->hotplug_enable(dev, true); +@@ -388,6 +381,7 @@ void psb_irq_uninstall(struct drm_device *dev) + { + struct drm_psb_private *dev_priv = dev->dev_private; + unsigned long irqflags; ++ unsigned int i; + + spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); + +@@ -396,14 +390,10 @@ void psb_irq_uninstall(struct drm_device *dev) + + PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); + +- if (dev->vblank[0].enabled) +- psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); +- +- if (dev->vblank[1].enabled) +- psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); +- +- if (dev->vblank[2].enabled) +- psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); ++ for (i = 0; i < dev->num_crtcs; ++i) { ++ if (dev->vblank[i].enabled) ++ psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); ++ } + + dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG | + _PSB_IRQ_MSVDX_FLAG | +diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c +index 1e104518192d..8a98442d494b 100644 +--- a/drivers/gpu/drm/i915/i915_cmd_parser.c ++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c +@@ -570,6 +570,9 @@ struct drm_i915_reg_descriptor { + #define REG32(_reg, ...) \ + { .addr = (_reg), __VA_ARGS__ } + ++#define REG32_IDX(_reg, idx) \ ++ { .addr = _reg(idx) } ++ + /* + * Convenience macro for adding 64-bit registers. 
+ * +@@ -667,6 +670,7 @@ static const struct drm_i915_reg_descriptor gen9_blt_regs[] = { + REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE), + REG32(BCS_SWCTRL), + REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), ++ REG32_IDX(RING_CTX_TIMESTAMP, BLT_RING_BASE), + REG64_IDX(BCS_GPR, 0), + REG64_IDX(BCS_GPR, 1), + REG64_IDX(BCS_GPR, 2), +diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c +index 4548d89abcdc..ff8168c60b35 100644 +--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c ++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c +@@ -882,7 +882,7 @@ eb_vma_misplaced(struct i915_vma *vma) + return !only_mappable_for_reloc(entry->flags); + + if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 && +- (vma->node.start + vma->node.size - 1) >> 32) ++ (vma->node.start + vma->node.size + 4095) >> 32) + return true; + + return false; +diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c +index 67881e5517fb..3a9d06de81b4 100644 +--- a/drivers/gpu/drm/imx/imx-ldb.c ++++ b/drivers/gpu/drm/imx/imx-ldb.c +@@ -212,6 +212,11 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder) + int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; + int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); + ++ if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) { ++ dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux); ++ return; ++ } ++ + drm_panel_prepare(imx_ldb_ch->panel); + + if (dual) { +@@ -270,6 +275,11 @@ imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder, + int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); + u32 bus_format = imx_ldb_ch->bus_format; + ++ if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) { ++ dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux); ++ return; ++ } ++ + if (mode->clock > 170000) { + dev_warn(ldb->dev, + "%s: mode exceeds 170 MHz pixel clock\n", __func__); +@@ -317,6 +327,7 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder) + { + struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); + struct imx_ldb *ldb = imx_ldb_ch->ldb; ++ int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; + int mux, ret; + + /* +@@ -333,14 +344,14 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder) + + drm_panel_disable(imx_ldb_ch->panel); + +- if (imx_ldb_ch == &ldb->channel[0]) ++ if (imx_ldb_ch == &ldb->channel[0] || dual) + ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK; +- else if (imx_ldb_ch == &ldb->channel[1]) ++ if (imx_ldb_ch == &ldb->channel[1] || dual) + ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK; + + regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl); + +- if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) { ++ if (dual) { + clk_disable_unprepare(ldb->clk[0]); + clk_disable_unprepare(ldb->clk[1]); + } +diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c +index 89cf0090feac..9ae515f3171e 100644 +--- a/drivers/gpu/drm/imx/imx-tve.c ++++ b/drivers/gpu/drm/imx/imx-tve.c +@@ -511,6 +511,13 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve) + return 0; + } + ++static void imx_tve_disable_regulator(void *data) ++{ ++ struct imx_tve *tve = data; ++ ++ regulator_disable(tve->dac_reg); ++} ++ + static bool imx_tve_readable_reg(struct device *dev, unsigned int reg) + { + return (reg % 4 == 0) && (reg <= 0xdc); +@@ -635,6 +642,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) + ret = regulator_enable(tve->dac_reg); + if (ret) + return ret; ++ ret = devm_add_action_or_reset(dev, imx_tve_disable_regulator, 
tve); ++ if (ret) ++ return ret; + } + + tve->clk = devm_clk_get(dev, "tve"); +@@ -681,18 +691,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) + return 0; + } + +-static void imx_tve_unbind(struct device *dev, struct device *master, +- void *data) +-{ +- struct imx_tve *tve = dev_get_drvdata(dev); +- +- if (!IS_ERR(tve->dac_reg)) +- regulator_disable(tve->dac_reg); +-} +- + static const struct component_ops imx_tve_ops = { + .bind = imx_tve_bind, +- .unbind = imx_tve_unbind, + }; + + static int imx_tve_probe(struct platform_device *pdev) +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c +index 286587607931..0f8f9a784b1b 100644 +--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c ++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c +@@ -457,8 +457,13 @@ static int mtk_drm_probe(struct platform_device *pdev) + pm_runtime_disable(dev); + err_node: + of_node_put(private->mutex_node); +- for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) ++ for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) { + of_node_put(private->comp_node[i]); ++ if (private->ddp_comp[i]) { ++ put_device(private->ddp_comp[i]->larb_dev); ++ private->ddp_comp[i] = NULL; ++ } ++ } + return ret; + } + +diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c +index aebbdf21bc41..536862e7befa 100644 +--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c ++++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c +@@ -1867,7 +1867,7 @@ static int dsi_panel_create_cmd_packets(const char *data, + cmd[i].msg.type = data[0]; + cmd[i].last_command = (data[1] == 1 ? true : false); + cmd[i].msg.channel = data[2]; +- cmd[i].msg.flags |= (data[3] == 1 ? MIPI_DSI_MSG_REQ_ACK : 0); ++ cmd[i].msg.flags |= data[3]; + cmd[i].msg.ctrl = 0; + cmd[i].post_wait_ms = cmd[i].msg.wait_ms = data[4]; + cmd[i].msg.tx_len = ((data[5] << 8) | (data[6])); +diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c +index c757e2070cac..636e9df3d118 100644 +--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c ++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c +@@ -146,7 +146,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = { + .enable = dsi_20nm_phy_enable, + .disable = dsi_20nm_phy_disable, + }, +- .io_start = { 0xfd998300, 0xfd9a0300 }, ++ .io_start = { 0xfd998500, 0xfd9a0500 }, + .num_dsi_phy = 2, + }; + +diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c +index c627ab6d0061..8ac54b9dcd39 100644 +--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c ++++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c +@@ -128,9 +128,17 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder, + | MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN; + cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line); + ++ /* ++ * Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks on ++ * the vsync_clk equating to roughly half the desired panel refresh rate. ++ * This is only necessary as stability fallback if interrupts from the ++ * panel arrive too late or not at all, but is currently used by default ++ * because these panel interrupts are not wired up yet. 
++ */ + mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg); + mdp5_write(mdp5_kms, +- REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0); ++ REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal)); ++ + mdp5_write(mdp5_kms, + REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay); + mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1); +diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +index 46b60b132372..7ebf833e6823 100644 +--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c ++++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +@@ -773,7 +773,8 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) + + return 0; + fail: +- mdp5_destroy(pdev); ++ if (mdp5_kms) ++ mdp5_destroy(pdev); + return ret; + } + +diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c +index a9b9b1c95a2e..9dbd17be51f7 100644 +--- a/drivers/gpu/drm/msm/msm_fence.c ++++ b/drivers/gpu/drm/msm/msm_fence.c +@@ -56,7 +56,7 @@ int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence, + int ret; + + if (fence > fctx->last_fence) { +- DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n", ++ DRM_ERROR_RATELIMITED("%s: waiting on invalid fence: %u (of %u)\n", + fctx->name, fence, fctx->last_fence); + return -EINVAL; + } +diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c +index b54975b35279..cd5e356f66f7 100644 +--- a/drivers/gpu/drm/msm/sde/sde_connector.c ++++ b/drivers/gpu/drm/msm/sde/sde_connector.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -552,8 +552,12 @@ static int _sde_connector_update_bl_scale(struct sde_connector *c_conn) + SDE_DEBUG("bl_scale = %u, bl_scale_ad = %u, bl_level = %u\n", + bl_config->bl_scale, bl_config->bl_scale_ad, + bl_config->bl_level); +- rc = c_conn->ops.set_backlight(dsi_display, bl_config->bl_level); +- c_conn->unset_bl_level = 0; ++ ++ if (c_conn->ops.set_backlight) { ++ rc = c_conn->ops.set_backlight(dsi_display, ++ bl_config->bl_level); ++ c_conn->unset_bl_level = 0; ++ } + + return rc; + } +diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c +index a2e6a81669e7..94b7798bdea4 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_bo.c ++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c +@@ -447,7 +447,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) + struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; + int i; + +- if (!ttm_dma) ++ if (!ttm_dma || !ttm_dma->dma_address) + return; + + /* Don't waste time looping if the object is coherent */ +@@ -467,7 +467,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) + struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; + int i; + +- if (!ttm_dma) ++ if (!ttm_dma || !ttm_dma->dma_address) + return; + + /* Don't waste time looping if the object is coherent */ +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c +index 5bfae1f972c7..0061deca290a 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.c ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c +@@ -281,8 +281,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) + pm_runtime_get_noresume(dev->dev); + } else { + ret = 
pm_runtime_get_sync(dev->dev); +- if (ret < 0 && ret != -EACCES) ++ if (ret < 0 && ret != -EACCES) { ++ pm_runtime_put_autosuspend(dev->dev); + return conn_status; ++ } + } + + nv_encoder = nouveau_connector_ddc_detect(connector); +diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c +index 42829a942e33..4e12d3d59651 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_drm.c ++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c +@@ -823,8 +823,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) + + /* need to bring up power immediately if opening device */ + ret = pm_runtime_get_sync(dev->dev); +- if (ret < 0 && ret != -EACCES) ++ if (ret < 0 && ret != -EACCES) { ++ pm_runtime_put_autosuspend(dev->dev); + return ret; ++ } + + get_task_comm(tmpname, current); + snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid)); +@@ -912,8 +914,10 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + long ret; + + ret = pm_runtime_get_sync(dev->dev); +- if (ret < 0 && ret != -EACCES) ++ if (ret < 0 && ret != -EACCES) { ++ pm_runtime_put_autosuspend(dev->dev); + return ret; ++ } + + switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) { + case DRM_NOUVEAU_NVIF: +diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c +index 2b79e27dd89c..40da9143f722 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c ++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c +@@ -183,8 +183,10 @@ nouveau_fbcon_open(struct fb_info *info, int user) + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->dev); + int ret = pm_runtime_get_sync(drm->dev->dev); +- if (ret < 0 && ret != -EACCES) ++ if (ret < 0 && ret != -EACCES) { ++ pm_runtime_put(drm->dev->dev); + return ret; ++ } + return 0; + } + +@@ -584,6 +586,7 @@ nouveau_fbcon_init(struct drm_device *dev) + drm_fb_helper_fini(&fbcon->helper); + free: + kfree(fbcon); ++ drm->fbcon = NULL; + return ret; + } + +diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c +index 505dca48b9f8..be6672da33a6 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_gem.c ++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c +@@ -42,8 +42,10 @@ nouveau_gem_object_del(struct drm_gem_object *gem) + int ret; + + ret = pm_runtime_get_sync(dev); +- if (WARN_ON(ret < 0 && ret != -EACCES)) ++ if (WARN_ON(ret < 0 && ret != -EACCES)) { ++ pm_runtime_put_autosuspend(dev); + return; ++ } + + if (gem->import_attach) + drm_prime_gem_destroy(gem, nvbo->bo.sg); +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c +index 7deb81b6dbac..4b571cc6bc70 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c +@@ -75,7 +75,7 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd) + nvkm_debug(subdev, "%08x: type %02x, %d bytes\n", + image.base, image.type, image.size); + +- if (!shadow_fetch(bios, mthd, image.size)) { ++ if (!shadow_fetch(bios, mthd, image.base + image.size)) { + nvkm_debug(subdev, "%08x: fetch failed\n", image.base); + return 0; + } +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c +index 954f5b76bfcf..d44965f805fe 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c +@@ -118,10 +118,10 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, + if (retries) + 
udelay(400); + +- /* transaction request, wait up to 1ms for it to complete */ ++ /* transaction request, wait up to 2ms for it to complete */ + nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl); + +- timeout = 1000; ++ timeout = 2000; + do { + ctrl = nvkm_rd32(device, 0x00e4e4 + base); + udelay(1); +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c +index 61d729b82c69..c49795e779be 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c +@@ -33,7 +33,7 @@ static void + gm200_i2c_aux_fini(struct gm200_i2c_aux *aux) + { + struct nvkm_device *device = aux->base.pad->i2c->subdev.device; +- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000); ++ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00710000, 0x00000000); + } + + static int +@@ -54,10 +54,10 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux) + AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl); + return -EBUSY; + } +- } while (ctrl & 0x03010000); ++ } while (ctrl & 0x07010000); + + /* set some magic, and wait up to 1ms for it to appear */ +- nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq); ++ nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00700000, ureq); + timeout = 1000; + do { + ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50)); +@@ -67,7 +67,7 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux) + gm200_i2c_aux_fini(aux); + return -EBUSY; + } +- } while ((ctrl & 0x03000000) != urep); ++ } while ((ctrl & 0x07000000) != urep); + + return 0; + } +@@ -118,10 +118,10 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, + if (retries) + udelay(400); + +- /* transaction request, wait up to 1ms for it to complete */ ++ /* transaction request, wait up to 2ms for it to complete */ + nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl); + +- timeout = 1000; ++ timeout = 2000; + do { + ctrl = nvkm_rd32(device, 0x00d954 + base); + udelay(1); +diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c +index 136d30484d02..46111e9ee9a2 100644 +--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c ++++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c +@@ -194,7 +194,7 @@ static int __init omapdss_boot_init(void) + dss = of_find_matching_node(NULL, omapdss_of_match); + + if (dss == NULL || !of_device_is_available(dss)) +- return 0; ++ goto put_node; + + omapdss_walk_device(dss, true); + +@@ -219,6 +219,8 @@ static int __init omapdss_boot_init(void) + kfree(n); + } + ++put_node: ++ of_node_put(dss); + return 0; + } + +diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +index 6a0b25e0823f..6b64a1e07c01 100644 +--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c ++++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +@@ -747,6 +747,7 @@ static int omap_dmm_probe(struct platform_device *dev) + &omap_dmm->refill_pa, GFP_KERNEL); + if (!omap_dmm->refill_va) { + dev_err(&dev->dev, "could not allocate refill memory\n"); ++ ret = -ENOMEM; + goto fail; + } + +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c +index 68a2b25deb50..57f32d1bb312 100644 +--- a/drivers/gpu/drm/panel/panel-simple.c ++++ b/drivers/gpu/drm/panel/panel-simple.c +@@ -1041,7 +1041,7 @@ static const struct drm_display_mode lg_lb070wv8_mode = { + static const struct panel_desc lg_lb070wv8 = { + .modes = &lg_lb070wv8_mode, + .num_modes = 1, +- .bpc = 16, ++ .bpc = 8, + .size = { + 
.width = 151, + .height = 91, +diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c +index 5e65d5d2d937..e7b7b8a31e52 100644 +--- a/drivers/gpu/drm/qxl/qxl_dumb.c ++++ b/drivers/gpu/drm/qxl/qxl_dumb.c +@@ -57,6 +57,8 @@ int qxl_mode_dumb_create(struct drm_file *file_priv, + surf.height = args->height; + surf.stride = pitch; + surf.format = format; ++ surf.data = 0; ++ + r = qxl_gem_object_create_with_handle(qdev, file_priv, + QXL_GEM_DOMAIN_VRAM, + args->size, &surf, &qobj, +diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c +index e642242728c0..a886652ed895 100644 +--- a/drivers/gpu/drm/qxl/qxl_kms.c ++++ b/drivers/gpu/drm/qxl/qxl_kms.c +@@ -199,7 +199,7 @@ static int qxl_device_init(struct qxl_device *qdev, + &(qdev->ram_header->cursor_ring_hdr), + sizeof(struct qxl_command), + QXL_CURSOR_RING_SIZE, +- qdev->io_base + QXL_IO_NOTIFY_CMD, ++ qdev->io_base + QXL_IO_NOTIFY_CURSOR, + false, + &qdev->cursor_event); + +diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c +index 24810492d2c1..50bad42527b1 100644 +--- a/drivers/gpu/drm/radeon/ci_dpm.c ++++ b/drivers/gpu/drm/radeon/ci_dpm.c +@@ -4348,7 +4348,7 @@ static int ci_set_mc_special_registers(struct radeon_device *rdev, + table->mc_reg_table_entry[k].mc_data[j] |= 0x100; + } + j++; +- if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) ++ if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + + if (!pi->mem_gddr5) { +@@ -5557,6 +5557,7 @@ static int ci_parse_power_table(struct radeon_device *rdev) + if (!rdev->pm.dpm.ps) + return -ENOMEM; + power_state_offset = (u8 *)state_array->states; ++ rdev->pm.dpm.num_ps = 0; + for (i = 0; i < state_array->ucNumEntries; i++) { + u8 *idx; + power_state = (union pplib_power_state *)power_state_offset; +@@ -5566,10 +5567,8 @@ static int ci_parse_power_table(struct radeon_device *rdev) + if (!rdev->pm.power_state[i].clock_info) + return -EINVAL; + ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL); +- if (ps == NULL) { +- kfree(rdev->pm.dpm.ps); ++ if (ps == NULL) + return -ENOMEM; +- } + rdev->pm.dpm.ps[i].ps_priv = ps; + ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], + non_clock_info, +@@ -5591,8 +5590,8 @@ static int ci_parse_power_table(struct radeon_device *rdev) + k++; + } + power_state_offset += 2 + power_state->v2.ucNumDPMLevels; ++ rdev->pm.dpm.num_ps = i + 1; + } +- rdev->pm.dpm.num_ps = state_array->ucNumEntries; + + /* fill in the vce power states */ + for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) { +diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c +index 4a601f990562..a32cf6dbd3ee 100644 +--- a/drivers/gpu/drm/radeon/ni_dpm.c ++++ b/drivers/gpu/drm/radeon/ni_dpm.c +@@ -2126,7 +2126,7 @@ static int ni_init_smc_spll_table(struct radeon_device *rdev) + if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT)) + ret = -EINVAL; + +- if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT)) ++ if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT)) + ret = -EINVAL; + + if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT)) +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c +index 5df3ec73021b..0bfbced3862e 100644 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c +@@ -2259,10 +2259,10 @@ static int 
radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) + rdev->pm.default_power_state_index = state_index - 1; + rdev->pm.power_state[state_index - 1].default_clock_mode = + &rdev->pm.power_state[state_index - 1].clock_info[0]; +- rdev->pm.power_state[state_index].flags &= ++ rdev->pm.power_state[state_index - 1].flags &= + ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; +- rdev->pm.power_state[state_index].misc = 0; +- rdev->pm.power_state[state_index].misc2 = 0; ++ rdev->pm.power_state[state_index - 1].misc = 0; ++ rdev->pm.power_state[state_index - 1].misc2 = 0; + } + return state_index; + } +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c +index efa875120071..9e6c2be0cc7d 100644 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c +@@ -892,8 +892,10 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) + + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); +- if (r < 0) ++ if (r < 0) { ++ pm_runtime_put_autosuspend(connector->dev->dev); + return connector_status_disconnected; ++ } + } + + if (encoder) { +@@ -1038,8 +1040,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force) + + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); +- if (r < 0) ++ if (r < 0) { ++ pm_runtime_put_autosuspend(connector->dev->dev); + return connector_status_disconnected; ++ } + } + + encoder = radeon_best_single_encoder(connector); +@@ -1176,8 +1180,10 @@ radeon_tv_detect(struct drm_connector *connector, bool force) + + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); +- if (r < 0) ++ if (r < 0) { ++ pm_runtime_put_autosuspend(connector->dev->dev); + return connector_status_disconnected; ++ } + } + + encoder = radeon_best_single_encoder(connector); +@@ -1260,8 +1266,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) + + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); +- if (r < 0) ++ if (r < 0) { ++ pm_runtime_put_autosuspend(connector->dev->dev); + return connector_status_disconnected; ++ } + } + + if (radeon_connector->detected_hpd_without_ddc) { +@@ -1701,8 +1709,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force) + + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); +- if (r < 0) ++ if (r < 0) { ++ pm_runtime_put_autosuspend(connector->dev->dev); + return connector_status_disconnected; ++ } + } + + if (!force && radeon_check_hpd_status_unchanged(connector)) { +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c +index 432ad7d73cb9..99e23800cadc 100644 +--- a/drivers/gpu/drm/radeon/radeon_display.c ++++ b/drivers/gpu/drm/radeon/radeon_display.c +@@ -639,8 +639,10 @@ radeon_crtc_set_config(struct drm_mode_set *set) + dev = set->crtc->dev; + + ret = pm_runtime_get_sync(dev->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_autosuspend(dev->dev); + return ret; ++ } + + ret = drm_crtc_helper_set_config(set); + +diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c +index 30bd4a6a9d46..7648fd0d1075 100644 +--- a/drivers/gpu/drm/radeon/radeon_drv.c ++++ b/drivers/gpu/drm/radeon/radeon_drv.c +@@ -496,8 +496,10 @@ long radeon_drm_ioctl(struct file *filp, + long ret; + dev = file_priv->minor->dev; + ret = pm_runtime_get_sync(dev->dev); +- if (ret < 0) ++ if (ret < 0) { ++ 
pm_runtime_put_autosuspend(dev->dev); + return ret; ++ } + + ret = drm_ioctl(filp, cmd, arg); + +diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c +index 4388ddeec8d2..61000e3b2e79 100644 +--- a/drivers/gpu/drm/radeon/radeon_kms.c ++++ b/drivers/gpu/drm/radeon/radeon_kms.c +@@ -506,6 +506,7 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file + *value = rdev->config.si.backend_enable_mask; + } else { + DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n"); ++ return -EINVAL; + } + break; + case RADEON_INFO_MAX_SCLK: +@@ -634,8 +635,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) + file_priv->driver_priv = NULL; + + r = pm_runtime_get_sync(dev->dev); +- if (r < 0) ++ if (r < 0) { ++ pm_runtime_put_autosuspend(dev->dev); + return r; ++ } + + /* new gpu have virtual address space support */ + if (rdev->family >= CHIP_CAYMAN) { +diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c +index 16239b07ce45..2610919eb709 100644 +--- a/drivers/gpu/drm/radeon/radeon_uvd.c ++++ b/drivers/gpu/drm/radeon/radeon_uvd.c +@@ -286,7 +286,7 @@ int radeon_uvd_resume(struct radeon_device *rdev) + if (rdev->uvd.vcpu_bo == NULL) + return -EINVAL; + +- memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); ++ memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); + + size = radeon_bo_size(rdev->uvd.vcpu_bo); + size -= rdev->uvd_fw->size; +@@ -294,7 +294,7 @@ int radeon_uvd_resume(struct radeon_device *rdev) + ptr = rdev->uvd.cpu_addr; + ptr += rdev->uvd_fw->size; + +- memset(ptr, 0, size); ++ memset_io((void __iomem *)ptr, 0, size); + + return 0; + } +diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c +index 74d0540b8d4c..76717d2f51e7 100644 +--- a/drivers/gpu/drm/tegra/sor.c ++++ b/drivers/gpu/drm/tegra/sor.c +@@ -2375,17 +2375,23 @@ static int tegra_sor_init(struct host1x_client *client) + if (err < 0) { + dev_err(sor->dev, "failed to deassert SOR reset: %d\n", + err); ++ clk_disable_unprepare(sor->clk); + return err; + } + } + + err = clk_prepare_enable(sor->clk_safe); +- if (err < 0) ++ if (err < 0) { ++ clk_disable_unprepare(sor->clk); + return err; ++ } + + err = clk_prepare_enable(sor->clk_dp); +- if (err < 0) ++ if (err < 0) { ++ clk_disable_unprepare(sor->clk_safe); ++ clk_disable_unprepare(sor->clk); + return err; ++ } + + return 0; + } +diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c +index 2134bb20fbe9..2836154dbb12 100644 +--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c ++++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c +@@ -159,12 +159,16 @@ static int panel_connector_get_modes(struct drm_connector *connector) + int i; + + for (i = 0; i < timings->num_timings; i++) { +- struct drm_display_mode *mode = drm_mode_create(dev); ++ struct drm_display_mode *mode; + struct videomode vm; + + if (videomode_from_timings(timings, &vm, i)) + break; + ++ mode = drm_mode_create(dev); ++ if (!mode) ++ break; ++ + drm_display_mode_from_videomode(&vm, mode); + + mode->type = DRM_MODE_TYPE_DRIVER; +diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c +index 036b0fbae0fb..6058bdab5fb8 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_kms.c ++++ b/drivers/gpu/drm/virtio/virtgpu_kms.c +@@ -113,8 +113,10 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev, + vgdev->capsets[i].id > 0, 5 * HZ); + if (ret == 0) { + DRM_ERROR("timed out waiting 
for cap set %d\n", i); ++ spin_lock(&vgdev->display_info_lock); + kfree(vgdev->capsets); + vgdev->capsets = NULL; ++ spin_unlock(&vgdev->display_info_lock); + return; + } + DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n", +@@ -232,6 +234,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags) + err_vbufs: + vgdev->vdev->config->del_vqs(vgdev->vdev); + err_vqs: ++ dev->dev_private = NULL; + kfree(vgdev); + return ret; + } +diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c +index 772a5a3b0ce1..18e8fcad6690 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_vq.c ++++ b/drivers/gpu/drm/virtio/virtgpu_vq.c +@@ -596,9 +596,13 @@ static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev, + int i = le32_to_cpu(cmd->capset_index); + + spin_lock(&vgdev->display_info_lock); +- vgdev->capsets[i].id = le32_to_cpu(resp->capset_id); +- vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version); +- vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size); ++ if (vgdev->capsets) { ++ vgdev->capsets[i].id = le32_to_cpu(resp->capset_id); ++ vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version); ++ vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size); ++ } else { ++ DRM_ERROR("invalid capset memory."); ++ } + spin_unlock(&vgdev->display_info_lock); + wake_up(&vgdev->resp_wq); + } +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +index 33ca24ab983e..39ac7566b705 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +@@ -2109,7 +2109,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, + ++i; + } + +- if (i != unit) { ++ if (&con->head == &dev_priv->dev->mode_config.connector_list) { + DRM_ERROR("Could not find initial display unit.\n"); + return -EINVAL; + } +@@ -2131,13 +2131,13 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, + break; + } + +- if (mode->type & DRM_MODE_TYPE_PREFERRED) +- *p_mode = mode; +- else { ++ if (&mode->head == &con->modes) { + WARN_ONCE(true, "Could not find initial preferred mode.\n"); + *p_mode = list_first_entry(&con->modes, + struct drm_display_mode, + head); ++ } else { ++ *p_mode = mode; + } + + return 0; +diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c +index c27858ae0552..6ef89e8a515a 100644 +--- a/drivers/gpu/host1x/bus.c ++++ b/drivers/gpu/host1x/bus.c +@@ -542,8 +542,17 @@ EXPORT_SYMBOL(host1x_driver_register_full); + + void host1x_driver_unregister(struct host1x_driver *driver) + { ++ struct host1x *host1x; ++ + driver_unregister(&driver->driver); + ++ mutex_lock(&devices_lock); ++ ++ list_for_each_entry(host1x, &devices, list) ++ host1x_detach_driver(host1x, driver); ++ ++ mutex_unlock(&devices_lock); ++ + mutex_lock(&drivers_lock); + list_del_init(&driver->list); + mutex_unlock(&drivers_lock); +diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c +index 50b73f3876fb..098b86eb26d7 100644 +--- a/drivers/gpu/ipu-v3/ipu-image-convert.c ++++ b/drivers/gpu/ipu-v3/ipu-image-convert.c +@@ -987,9 +987,10 @@ static irqreturn_t do_irq(struct ipu_image_convert_run *run) + return IRQ_WAKE_THREAD; + } + +-static irqreturn_t norotate_irq(int irq, void *data) ++static irqreturn_t eof_irq(int irq, void *data) + { + struct ipu_image_convert_chan *chan = data; ++ struct ipu_image_convert_priv *priv = chan->priv; + struct ipu_image_convert_ctx *ctx; + struct ipu_image_convert_run *run; + unsigned long flags; 
+@@ -1006,45 +1007,26 @@ static irqreturn_t norotate_irq(int irq, void *data) + + ctx = run->ctx; + +- if (ipu_rot_mode_is_irt(ctx->rot_mode)) { +- /* this is a rotation operation, just ignore */ +- spin_unlock_irqrestore(&chan->irqlock, flags); +- return IRQ_HANDLED; +- } +- +- ret = do_irq(run); +-out: +- spin_unlock_irqrestore(&chan->irqlock, flags); +- return ret; +-} +- +-static irqreturn_t rotate_irq(int irq, void *data) +-{ +- struct ipu_image_convert_chan *chan = data; +- struct ipu_image_convert_priv *priv = chan->priv; +- struct ipu_image_convert_ctx *ctx; +- struct ipu_image_convert_run *run; +- unsigned long flags; +- irqreturn_t ret; +- +- spin_lock_irqsave(&chan->irqlock, flags); +- +- /* get current run and its context */ +- run = chan->current_run; +- if (!run) { ++ if (irq == chan->out_eof_irq) { ++ if (ipu_rot_mode_is_irt(ctx->rot_mode)) { ++ /* this is a rotation op, just ignore */ ++ ret = IRQ_HANDLED; ++ goto out; ++ } ++ } else if (irq == chan->rot_out_eof_irq) { ++ if (!ipu_rot_mode_is_irt(ctx->rot_mode)) { ++ /* this was NOT a rotation op, shouldn't happen */ ++ dev_err(priv->ipu->dev, ++ "Unexpected rotation interrupt\n"); ++ ret = IRQ_HANDLED; ++ goto out; ++ } ++ } else { ++ dev_err(priv->ipu->dev, "Received unknown irq %d\n", irq); + ret = IRQ_NONE; + goto out; + } + +- ctx = run->ctx; +- +- if (!ipu_rot_mode_is_irt(ctx->rot_mode)) { +- /* this was NOT a rotation operation, shouldn't happen */ +- dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n"); +- spin_unlock_irqrestore(&chan->irqlock, flags); +- return IRQ_HANDLED; +- } +- + ret = do_irq(run); + out: + spin_unlock_irqrestore(&chan->irqlock, flags); +@@ -1137,7 +1119,7 @@ static int get_ipu_resources(struct ipu_image_convert_chan *chan) + chan->out_chan, + IPU_IRQ_EOF); + +- ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh, ++ ret = request_threaded_irq(chan->out_eof_irq, eof_irq, do_bh, + 0, "ipu-ic", chan); + if (ret < 0) { + dev_err(priv->ipu->dev, "could not acquire irq %d\n", +@@ -1150,7 +1132,7 @@ static int get_ipu_resources(struct ipu_image_convert_chan *chan) + chan->rotation_out_chan, + IPU_IRQ_EOF); + +- ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh, ++ ret = request_threaded_irq(chan->rot_out_eof_irq, eof_irq, do_bh, + 0, "ipu-ic", chan); + if (ret < 0) { + dev_err(priv->ipu->dev, "could not acquire irq %d\n", +diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h +index e3a664e62577..c0c0c9b39afa 100644 +--- a/drivers/gpu/msm/a6xx_reg.h ++++ b/drivers/gpu/msm/a6xx_reg.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2018,2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -1021,6 +1021,7 @@ + /* GPUCC registers */ + #define A6XX_GPU_CC_GX_GDSCR 0x24403 + #define A6XX_GPU_CC_GX_DOMAIN_MISC 0x24542 ++#define A6XX_GPU_CC_CX_GDSCR 0x2441B + + /* GPU RSC sequencer registers */ + #define A6XX_RSCC_PDC_SEQ_START_ADDR 0x23408 +diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c +index be168688ae8d..ddb02c86d826 100644 +--- a/drivers/gpu/msm/adreno.c ++++ b/drivers/gpu/msm/adreno.c +@@ -2268,6 +2268,14 @@ int adreno_reset(struct kgsl_device *device, int fault) + } + } + if (ret) { ++ unsigned long flags = device->pwrctrl.ctrl_flags; ++ ++ /* ++ * Clear ctrl_flags to ensure clocks and regulators are ++ * turned off ++ */ ++ device->pwrctrl.ctrl_flags = 0; ++ + /* If soft reset failed/skipped, then pull the power */ + kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT); + /* since device is officially off now clear start bit */ +@@ -2281,6 +2289,8 @@ int adreno_reset(struct kgsl_device *device, int fault) + + msleep(20); + } ++ ++ device->pwrctrl.ctrl_flags = flags; + } + if (ret) + return ret; +diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h +index 43125221826a..54386862ff00 100644 +--- a/drivers/gpu/msm/adreno.h ++++ b/drivers/gpu/msm/adreno.h +@@ -1002,6 +1002,7 @@ struct adreno_gpudev { + int (*reset)(struct kgsl_device *, int fault); + int (*soft_reset)(struct adreno_device *); + bool (*gx_is_on)(struct adreno_device *); ++ bool (*cx_is_on)(struct kgsl_device *); + bool (*sptprac_is_on)(struct adreno_device *); + unsigned int (*ccu_invalidate)(struct adreno_device *adreno_dev, + unsigned int *cmds); +diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c +index 5a3a26d0083f..300361744cb4 100644 +--- a/drivers/gpu/msm/adreno_a6xx.c ++++ b/drivers/gpu/msm/adreno_a6xx.c +@@ -1414,6 +1414,9 @@ static int a6xx_gmu_start(struct kgsl_device *device) + + /* Bring GMU out of reset */ + kgsl_gmu_regwrite(device, A6XX_GMU_CM3_SYSRESET, 0); ++ /* Make sure the request completes before continuing */ ++ wmb(); ++ + if (timed_poll_check(device, + A6XX_GMU_CM3_FW_INIT_RESULT, + 0xBABEFACE, +@@ -1609,6 +1612,18 @@ static bool a6xx_gx_is_on(struct adreno_device *adreno_dev) + return is_on(val); + } + ++/* ++ * a6xx_cx_is_on() - Check if CX is on using GPUCC register ++ * @device - Pointer to KGSL device struct ++ */ ++static bool a6xx_cx_is_on(struct kgsl_device *device) ++{ ++ unsigned int val; ++ ++ kgsl_gmu_regread(device, A6XX_GPU_CC_CX_GDSCR, &val); ++ return (val & BIT(31)); ++} ++ + /* + * a6xx_sptprac_is_on() - Check if SPTP is on using pwr status register + * @adreno_dev - Pointer to adreno_device +@@ -1883,6 +1898,13 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device, + kgsl_gmu_regwrite(device, A6XX_GMU_AHB_FENCE_RANGE_0, + FENCE_RANGE_MASK); + ++ /* ++ * Make sure that CM3 state is at reset value. 
Snapshot is changing ++ * NMI bit and if we boot up GMU with NMI bit set.GMU will boot straight ++ * in to NMI handler without executing __main code ++ */ ++ kgsl_gmu_regwrite(device, A6XX_GMU_CM3_CFG, 0x4052); ++ + /* Pass chipid to GMU FW, must happen before starting GMU */ + + /* Keep Core and Major bitfields unchanged */ +@@ -2374,11 +2396,15 @@ static int a6xx_reset(struct kgsl_device *device, int fault) + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + int ret = -EINVAL; + int i = 0; ++ unsigned long flags = device->pwrctrl.ctrl_flags; + + /* Use the regular reset sequence for No GMU */ + if (!kgsl_gmu_isenabled(device)) + return adreno_reset(device, fault); + ++ /* Clear ctrl_flags to ensure clocks and regulators are turned off */ ++ device->pwrctrl.ctrl_flags = 0; ++ + /* Transition from ACTIVE to RESET state */ + kgsl_pwrctrl_change_state(device, KGSL_STATE_RESET); + +@@ -2430,6 +2456,8 @@ static int a6xx_reset(struct kgsl_device *device, int fault) + + clear_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv); + ++ device->pwrctrl.ctrl_flags = flags; ++ + if (ret) + return ret; + +@@ -3912,6 +3940,7 @@ struct adreno_gpudev adreno_a6xx_gpudev = { + .preemption_context_init = a6xx_preemption_context_init, + .preemption_context_destroy = a6xx_preemption_context_destroy, + .gx_is_on = a6xx_gx_is_on, ++ .cx_is_on = a6xx_cx_is_on, + .sptprac_is_on = a6xx_sptprac_is_on, + .ccu_invalidate = a6xx_ccu_invalidate, + .perfcounter_update = a6xx_perfcounter_update, +diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c +index d3f13d4a9541..639133ae094b 100644 +--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c ++++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2018,2021, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -557,6 +557,7 @@ static struct a6xx_shader_block a6xx_shader_blocks[] = { + static struct kgsl_memdesc a6xx_capturescript; + static struct kgsl_memdesc a6xx_crashdump_registers; + static bool crash_dump_valid; ++static u32 *a6xx_cd_reg_end; + + static struct reg_list { + const unsigned int *regs; +@@ -1505,9 +1506,9 @@ static size_t a6xx_snapshot_sqe(struct kgsl_device *device, u8 *buf, + + static void _a6xx_do_crashdump(struct kgsl_device *device) + { +- unsigned long wait_time; + unsigned int reg = 0; + unsigned int val; ++ ktime_t timeout; + + crash_dump_valid = false; + +@@ -1531,14 +1532,25 @@ static void _a6xx_do_crashdump(struct kgsl_device *device) + upper_32_bits(a6xx_capturescript.gpuaddr)); + kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1); + +- wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT); +- while (!time_after(jiffies, wait_time)) { +- kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, ®); +- if (reg & 0x2) ++ timeout = ktime_add_ms(ktime_get(), CP_CRASH_DUMPER_TIMEOUT); ++ ++ might_sleep(); ++ ++ for (;;) { ++ /* make sure we're reading the latest value */ ++ rmb(); ++ if ((*a6xx_cd_reg_end) != 0xaaaaaaaa) ++ break; ++ ++ if (ktime_compare(ktime_get(), timeout) > 0) + break; +- cpu_relax(); ++ ++ /* Wait 1msec to avoid unnecessary looping */ ++ usleep_range(100, 1000); + } + ++ kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, ®); ++ + kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0); + + if (!(reg & 0x2)) { +@@ -1564,7 +1576,7 @@ void a6xx_snapshot(struct adreno_device *adreno_dev, + struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); + struct adreno_snapshot_data *snap_data = gpudev->snapshot_data; + bool sptprac_on; +- unsigned int i; ++ unsigned int i, val; + + /* GMU TCM data dumped through AHB */ + a6xx_snapshot_gmu(adreno_dev, snapshot); +@@ -1643,6 +1655,12 @@ void a6xx_snapshot(struct adreno_device *adreno_dev, + + /* registers dumped through DBG AHB */ + a6xx_snapshot_dbgahb_regs(device, snapshot); ++ ++ /* if SMMU is stalled we don't run crash dump */ ++ kgsl_regread(device, A6XX_RBBM_STATUS3, &val); ++ if (!(val & BIT(24))) ++ memset(a6xx_crashdump_registers.hostptr, 0xaa, ++ a6xx_crashdump_registers.size); + } + + } +@@ -1905,6 +1923,11 @@ void a6xx_crashdump_init(struct adreno_device *adreno_dev) + sizeof(unsigned int); + } + ++ /* 16 bytes (2 qwords) for last entry in CD script */ ++ script_size += 16; ++ /* Increment data size to store last entry in CD */ ++ data_size += sizeof(unsigned int); ++ + /* Now allocate the script and data buffers */ + + /* The script buffers needs 2 extra qwords on the end */ +@@ -1957,6 +1980,16 @@ void a6xx_crashdump_init(struct adreno_device *adreno_dev) + + ptr += _a6xx_crashdump_init_non_ctx_dbgahb(ptr, &offset); + ++ /* Save CD register end pointer to check CD status completion */ ++ a6xx_cd_reg_end = a6xx_crashdump_registers.hostptr + offset; ++ ++ memset(a6xx_crashdump_registers.hostptr, 0xaa, ++ a6xx_crashdump_registers.size); ++ ++ /* Program the capturescript to read the last register entry */ ++ *ptr++ = a6xx_crashdump_registers.gpuaddr + offset; ++ *ptr++ = (((uint64_t) A6XX_CP_CRASH_DUMP_STATUS) << 44) | (uint64_t) 1; ++ + *ptr++ = 0; + *ptr++ = 0; + } +diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c +index 6c83908cfdf6..e0afc7357815 100644 +--- a/drivers/gpu/msm/adreno_debugfs.c ++++ 
b/drivers/gpu/msm/adreno_debugfs.c +@@ -158,9 +158,9 @@ struct flag_entry { + static const struct flag_entry drawobj_flags[] = {KGSL_DRAWOBJ_FLAGS}; + + static const struct flag_entry cmdobj_priv[] = { +- { CMDOBJ_SKIP, "skip"}, +- { CMDOBJ_FORCE_PREAMBLE, "force_preamble"}, +- { CMDOBJ_WFI, "wait_for_idle" }, ++ { BIT(CMDOBJ_SKIP), "skip"}, ++ { BIT(CMDOBJ_FORCE_PREAMBLE), "force_preamble"}, ++ { BIT(CMDOBJ_WFI), "wait_for_idle" }, + }; + + static const struct flag_entry context_flags[] = {KGSL_CONTEXT_FLAGS}; +@@ -170,15 +170,15 @@ static const struct flag_entry context_flags[] = {KGSL_CONTEXT_FLAGS}; + * KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC so it is ok to cross the streams here. + */ + static const struct flag_entry context_priv[] = { +- { KGSL_CONTEXT_PRIV_SUBMITTED, "submitted"}, +- { KGSL_CONTEXT_PRIV_DETACHED, "detached"}, +- { KGSL_CONTEXT_PRIV_INVALID, "invalid"}, +- { KGSL_CONTEXT_PRIV_PAGEFAULT, "pagefault"}, +- { ADRENO_CONTEXT_FAULT, "fault"}, +- { ADRENO_CONTEXT_GPU_HANG, "gpu_hang"}, +- { ADRENO_CONTEXT_GPU_HANG_FT, "gpu_hang_ft"}, +- { ADRENO_CONTEXT_SKIP_EOF, "skip_end_of_frame" }, +- { ADRENO_CONTEXT_FORCE_PREAMBLE, "force_preamble"}, ++ { BIT(KGSL_CONTEXT_PRIV_SUBMITTED), "submitted"}, ++ { BIT(KGSL_CONTEXT_PRIV_DETACHED), "detached"}, ++ { BIT(KGSL_CONTEXT_PRIV_INVALID), "invalid"}, ++ { BIT(KGSL_CONTEXT_PRIV_PAGEFAULT), "pagefault"}, ++ { BIT(ADRENO_CONTEXT_FAULT), "fault"}, ++ { BIT(ADRENO_CONTEXT_GPU_HANG), "gpu_hang"}, ++ { BIT(ADRENO_CONTEXT_GPU_HANG_FT), "gpu_hang_ft"}, ++ { BIT(ADRENO_CONTEXT_SKIP_EOF), "skip_end_of_frame" }, ++ { BIT(ADRENO_CONTEXT_FORCE_PREAMBLE), "force_preamble"}, + }; + + static void print_flags(struct seq_file *s, const struct flag_entry *table, +diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c +index b4bc750a9c1a..cf2c1c06c38f 100644 +--- a/drivers/gpu/msm/adreno_dispatch.c ++++ b/drivers/gpu/msm/adreno_dispatch.c +@@ -1221,7 +1221,7 @@ static inline int _wait_for_room_in_context_queue( + * while we were sleeping + */ + +- if (ret > 0) { ++ if (ret >= 1) { + ret = _check_context_state(&drawctxt->base); + if (ret) + return ret; +diff --git a/drivers/gpu/msm/adreno_ioctl.c b/drivers/gpu/msm/adreno_ioctl.c +index ff9a69a29130..a5d1c0cc6f57 100644 +--- a/drivers/gpu/msm/adreno_ioctl.c ++++ b/drivers/gpu/msm/adreno_ioctl.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2002,2007-2018,2020, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2002,2007-2018,2020-2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -195,11 +195,8 @@ long adreno_ioctl_helper(struct kgsl_device_private *dev_priv, + break; + } + +- if (i == len) { +- KGSL_DRV_INFO(dev_priv->device, +- "invalid ioctl code 0x%08X\n", cmd); ++ if (i == len) + return -ENOIOCTLCMD; +- } + + if (WARN_ON(_IOC_SIZE(cmds[i].cmd) > sizeof(data))) { + if (__ratelimit(&_rs)) +diff --git a/drivers/gpu/msm/adreno_pm4types.h b/drivers/gpu/msm/adreno_pm4types.h +index cf30a834546b..543496399044 100644 +--- a/drivers/gpu/msm/adreno_pm4types.h ++++ b/drivers/gpu/msm/adreno_pm4types.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2002,2007-2017,2020, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2002,2007-2017,2020 The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c +index 178369171cb0..dabc443d4ece 100644 +--- a/drivers/gpu/msm/adreno_snapshot.c ++++ b/drivers/gpu/msm/adreno_snapshot.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2012-2020 The Linux Foundation. All rights reserved. ++/* Copyright (c) 2012-2021 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -746,7 +746,7 @@ static void setup_fault_process(struct kgsl_device *device, + if (kgsl_mmu_is_perprocess(&device->mmu)) { + struct kgsl_process_private *tmp; + +- mutex_lock(&kgsl_driver.process_mutex); ++ spin_lock(&kgsl_driver.proclist_lock); + list_for_each_entry(tmp, &kgsl_driver.process_list, list) { + u64 pt_ttbr0; + +@@ -757,7 +757,7 @@ static void setup_fault_process(struct kgsl_device *device, + break; + } + } +- mutex_unlock(&kgsl_driver.process_mutex); ++ spin_unlock(&kgsl_driver.proclist_lock); + } + done: + snapshot->process = process; +diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c +index 5d76314cd809..32f14563dec3 100644 +--- a/drivers/gpu/msm/kgsl.c ++++ b/drivers/gpu/msm/kgsl.c +@@ -895,7 +895,7 @@ struct kgsl_process_private *kgsl_process_private_find(pid_t pid) + { + struct kgsl_process_private *p, *private = NULL; + +- mutex_lock(&kgsl_driver.process_mutex); ++ spin_lock(&kgsl_driver.proclist_lock); + list_for_each_entry(p, &kgsl_driver.process_list, list) { + if (pid_nr(p->pid) == pid) { + if (kgsl_process_private_get(p)) +@@ -903,7 +903,7 @@ struct kgsl_process_private *kgsl_process_private_find(pid_t pid) + break; + } + } +- mutex_unlock(&kgsl_driver.process_mutex); ++ spin_unlock(&kgsl_driver.proclist_lock); + return private; + } + +@@ -1020,18 +1020,19 @@ static void kgsl_process_private_close(struct kgsl_device_private *dev_priv, + kgsl_mmu_detach_pagetable(private->pagetable); + + /* Remove the process struct from the master list */ ++ spin_lock(&kgsl_driver.proclist_lock); + list_del(&private->list); ++ spin_unlock(&kgsl_driver.proclist_lock); ++ ++ debugfs_remove_recursive(private->debug_root); + + /* +- * Unlock the mutex before releasing the memory and the debugfs +- * nodes - this prevents deadlocks with the IOMMU and debugfs +- * locks. ++ * Unlock the mutex before releasing the memory - this prevents a ++ * deadlock with the IOMMU mutex if a page fault occurs. 
+ */ + mutex_unlock(&kgsl_driver.process_mutex); + + process_release_memory(private); +- debugfs_remove_recursive(private->debug_root); +- + kgsl_process_private_put(private); + } + +@@ -1056,7 +1057,9 @@ static struct kgsl_process_private *kgsl_process_private_open( + kgsl_process_init_sysfs(device, private); + kgsl_process_init_debugfs(private); + ++ spin_lock(&kgsl_driver.proclist_lock); + list_add(&private->list, &kgsl_driver.process_list); ++ spin_unlock(&kgsl_driver.proclist_lock); + } + + done: +@@ -4648,6 +4651,7 @@ static const struct file_operations kgsl_fops = { + + struct kgsl_driver kgsl_driver = { + .process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex), ++ .proclist_lock = __SPIN_LOCK_UNLOCKED(kgsl_driver.proclist_lock), + .ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock), + .devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock), + /* +diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h +index 82509a7aa8d7..3f59dfc4d09e 100644 +--- a/drivers/gpu/msm/kgsl.h ++++ b/drivers/gpu/msm/kgsl.h +@@ -119,6 +119,7 @@ struct kgsl_context; + * @pagetable_list: LIst of open pagetables + * @ptlock: Lock for accessing the pagetable list + * @process_mutex: Mutex for accessing the process list ++ * @proclist_lock: Lock for accessing the process list + * @devlock: Mutex protecting the device list + * @stats: Struct containing atomic memory statistics + * @full_cache_threshold: the threshold that triggers a full cache flush +@@ -137,6 +138,7 @@ struct kgsl_driver { + struct list_head pagetable_list; + spinlock_t ptlock; + struct mutex process_mutex; ++ spinlock_t proclist_lock; + struct mutex devlock; + struct { + atomic_long_t vmalloc; +diff --git a/drivers/gpu/msm/kgsl_compat.c b/drivers/gpu/msm/kgsl_compat.c +index 1c89ed5c2e04..9546024423e4 100644 +--- a/drivers/gpu/msm/kgsl_compat.c ++++ b/drivers/gpu/msm/kgsl_compat.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2013-2017,2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -384,8 +384,6 @@ long kgsl_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) + if (ret == -ENOIOCTLCMD) { + if (device->ftbl->compat_ioctl != NULL) + return device->ftbl->compat_ioctl(dev_priv, cmd, arg); +- +- KGSL_DRV_INFO(device, "invalid ioctl code 0x%08X\n", cmd); + } + + return ret; +diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c +index 2a6e60883637..02e811f6eb0d 100644 +--- a/drivers/gpu/msm/kgsl_debugfs.c ++++ b/drivers/gpu/msm/kgsl_debugfs.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2002,2008-2017,2020-2021, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2002,2008-2017,2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c +index 3e080eedddf1..31692bae847e 100644 +--- a/drivers/gpu/msm/kgsl_gmu.c ++++ b/drivers/gpu/msm/kgsl_gmu.c +@@ -790,6 +790,8 @@ static int gmu_rpmh_init(struct gmu_device *gmu, struct kgsl_pwrctrl *pwr) + + static void send_nmi_to_gmu(struct adreno_device *adreno_dev) + { ++ u32 val; ++ + /* Mask so there's no interrupt caused by NMI */ + adreno_write_gmureg(adreno_dev, + ADRENO_REG_GMU_GMU2HOST_INTR_MASK, 0xFFFFFFFF); +@@ -798,9 +800,10 @@ static void send_nmi_to_gmu(struct adreno_device *adreno_dev) + wmb(); + adreno_write_gmureg(adreno_dev, + ADRENO_REG_GMU_NMI_CONTROL_STATUS, 0); +- adreno_write_gmureg(adreno_dev, +- ADRENO_REG_GMU_CM3_CFG, +- (1 << GMU_CM3_CFG_NONMASKINTR_SHIFT)); ++ ++ adreno_read_gmureg(adreno_dev, ADRENO_REG_GMU_CM3_CFG, &val); ++ val |= 1 << GMU_CM3_CFG_NONMASKINTR_SHIFT; ++ adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_CM3_CFG, val); + + /* Make sure the NMI is invoked before we proceed*/ + wmb(); +@@ -1321,6 +1324,8 @@ static int gmu_enable_gdsc(struct gmu_device *gmu) + #define CX_GDSC_TIMEOUT 5000 /* ms */ + static int gmu_disable_gdsc(struct gmu_device *gmu) + { ++ struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu); ++ struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(ADRENO_DEVICE(device)); + int ret; + unsigned long t; + +@@ -1342,13 +1347,13 @@ static int gmu_disable_gdsc(struct gmu_device *gmu) + */ + t = jiffies + msecs_to_jiffies(CX_GDSC_TIMEOUT); + do { +- if (!regulator_is_enabled(gmu->cx_gdsc)) ++ if (gpudev->cx_is_on && !(gpudev->cx_is_on(device))) + return 0; + usleep_range(10, 100); + + } while (!(time_after(jiffies, t))); + +- if (!regulator_is_enabled(gmu->cx_gdsc)) ++ if (gpudev->cx_is_on && !(gpudev->cx_is_on(device))) + return 0; + + dev_err(&gmu->pdev->dev, "GMU CX gdsc off timeout"); +diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h +index 1478d6b747a8..d369563dc65d 100644 +--- a/drivers/gpu/msm/kgsl_gmu.h ++++ b/drivers/gpu/msm/kgsl_gmu.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2018, 2020, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2018,2020 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +diff --git a/drivers/gpu/msm/kgsl_ioctl.c b/drivers/gpu/msm/kgsl_ioctl.c +index 9b02e1993a09..735ca3cc23bd 100644 +--- a/drivers/gpu/msm/kgsl_ioctl.c ++++ b/drivers/gpu/msm/kgsl_ioctl.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2008-2017,2021, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -183,8 +183,6 @@ long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) + return device->ftbl->compat_ioctl(dev_priv, cmd, arg); + else if (device->ftbl->ioctl != NULL) + return device->ftbl->ioctl(dev_priv, cmd, arg); +- +- KGSL_DRV_INFO(device, "invalid ioctl code 0x%08X\n", cmd); + } + + return ret; +diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c +index 399eb6d8d1d7..6c439e70f67c 100644 +--- a/drivers/gpu/msm/kgsl_iommu.c ++++ b/drivers/gpu/msm/kgsl_iommu.c +@@ -666,10 +666,8 @@ static void _get_entries(struct kgsl_process_private *private, + + static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr, + struct _mem_entry *preventry, struct _mem_entry *nextentry, +- struct kgsl_context *context) ++ struct kgsl_process_private *private) + { +- struct kgsl_process_private *private; +- + memset(preventry, 0, sizeof(*preventry)); + memset(nextentry, 0, sizeof(*nextentry)); + +@@ -678,8 +676,7 @@ static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr, + + if (ADDR_IN_GLOBAL(mmu, faultaddr)) { + _get_global_entries(faultaddr, preventry, nextentry); +- } else if (context) { +- private = context->proc_priv; ++ } else if (private) { + spin_lock(&private->mem_lock); + _get_entries(private, faultaddr, preventry, nextentry); + spin_unlock(&private->mem_lock); +@@ -746,7 +743,7 @@ kgsl_iommu_uche_overfetch(struct kgsl_process_private *private, + */ + + static bool kgsl_iommu_suppress_pagefault(uint64_t faultaddr, int write, +- struct kgsl_context *context) ++ struct kgsl_process_private *private) + { + /* + * If there is no context associated with the pagefault then this +@@ -754,10 +751,30 @@ static bool kgsl_iommu_suppress_pagefault(uint64_t faultaddr, int write, + * on global buffers as they are mainly accessed by the CP bypassing + * the UCHE. Also, write pagefaults are never suppressed. 
+ */ +- if (!context || write) ++ if (!private || write) + return false; + +- return kgsl_iommu_uche_overfetch(context->proc_priv, faultaddr); ++ return kgsl_iommu_uche_overfetch(private, faultaddr); ++} ++ ++static struct kgsl_process_private *kgsl_iommu_get_process(u64 ptbase) ++{ ++ struct kgsl_process_private *p = NULL; ++ struct kgsl_iommu_pt *iommu_pt; ++ ++ spin_lock(&kgsl_driver.proclist_lock); ++ list_for_each_entry(p, &kgsl_driver.process_list, list) { ++ iommu_pt = p->pagetable->priv; ++ if (iommu_pt->ttbr0 == ptbase) { ++ if (!kgsl_process_private_get(p)) ++ p = NULL; ++ spin_unlock(&kgsl_driver.proclist_lock); ++ return p; ++ } ++ } ++ ++ spin_unlock(&kgsl_driver.proclist_lock); ++ return p; + } + + static int kgsl_iommu_fault_handler(struct iommu_domain *domain, +@@ -778,9 +795,8 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain, + struct adreno_device *adreno_dev; + struct adreno_gpudev *gpudev; + unsigned int no_page_fault_log = 0; +- unsigned int curr_context_id = 0; +- struct kgsl_context *context; + char *fault_type = "unknown"; ++ struct kgsl_process_private *private; + + static DEFINE_RATELIMIT_STATE(_rs, + DEFAULT_RATELIMIT_INTERVAL, +@@ -795,21 +811,6 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain, + adreno_dev = ADRENO_DEVICE(device); + gpudev = ADRENO_GPU_DEVICE(adreno_dev); + +- if (pt->name == KGSL_MMU_SECURE_PT) +- ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE]; +- +- /* +- * set the fault bits and stuff before any printks so that if fault +- * handler runs then it will know it's dealing with a pagefault. +- * Read the global current timestamp because we could be in middle of +- * RB switch and hence the cur RB may not be reliable but global +- * one will always be reliable +- */ +- kgsl_sharedmem_readl(&device->memstore, &curr_context_id, +- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context)); +- +- context = kgsl_context_get(device, curr_context_id); +- + write = (flags & IOMMU_FAULT_WRITE) ? 1 : 0; + if (flags & IOMMU_FAULT_TRANSLATION) + fault_type = "translation"; +@@ -820,17 +821,20 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain, + else if (flags & IOMMU_FAULT_TRANSACTION_STALLED) + fault_type = "transaction stalled"; + +- if (kgsl_iommu_suppress_pagefault(addr, write, context)) { ++ ptbase = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0); ++ private = kgsl_iommu_get_process(ptbase); ++ ++ if (private) ++ pid = pid_nr(private->pid); ++ ++ if (kgsl_iommu_suppress_pagefault(addr, write, private)) { + iommu->pagefault_suppression_count++; +- kgsl_context_put(context); ++ kgsl_process_private_put(private); + return ret; + } + +- if (context != NULL) { +- /* save pagefault timestamp for GFT */ +- set_bit(KGSL_CONTEXT_PRIV_PAGEFAULT, &context->priv); +- pid = pid_nr(context->proc_priv->pid); +- } ++ if (pt->name == KGSL_MMU_SECURE_PT) ++ ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE]; + + ctx->fault = 1; + +@@ -846,9 +850,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain, + mutex_unlock(&device->mutex); + } + +- ptbase = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0); + contextidr = KGSL_IOMMU_GET_CTX_REG(ctx, CONTEXTIDR); +- + ptname = MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) ? + KGSL_MMU_GLOBAL_PT : pid; + /* +@@ -857,28 +859,22 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain, + * search and delays the trace unnecessarily. + */ + trace_kgsl_mmu_pagefault(ctx->kgsldev, addr, +- ptname, write ? "write" : "read"); ++ ptname, ++ private != NULL ? 
private->comm : "unknown", ++ write ? "write" : "read"); + + if (test_bit(KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE, + &adreno_dev->ft_pf_policy)) + no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr); + + if (!no_page_fault_log && __ratelimit(&_rs)) { +- const char *api_str; +- +- if (context != NULL) { +- struct adreno_context *drawctxt = +- ADRENO_CONTEXT(context); +- +- api_str = get_api_type_str(drawctxt->type); +- } else +- api_str = "UNKNOWN"; +- + KGSL_MEM_CRIT(ctx->kgsldev, +- "GPU PAGE FAULT: addr = %lX pid= %d\n", addr, ptname); ++ "GPU PAGE FAULT: addr = %lX pid= %d name=%s\n", addr, ++ ptname, ++ private != NULL ? private->comm : "unknown"); + KGSL_MEM_CRIT(ctx->kgsldev, +- "context=%s ctx_type=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n", +- ctx->name, api_str, ptbase, contextidr, ++ "context=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n", ++ ctx->name, ptbase, contextidr, + write ? "write" : "read", fault_type); + + if (gpudev->iommu_fault_block) { +@@ -898,7 +894,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain, + KGSL_LOG_DUMP(ctx->kgsldev, + "---- nearby memory ----\n"); + +- _find_mem_entries(mmu, addr, &prev, &next, context); ++ _find_mem_entries(mmu, addr, &prev, &next, private); + if (prev.gpuaddr) + _print_entry(ctx->kgsldev, &prev); + else +@@ -940,7 +936,8 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain, + adreno_dispatcher_schedule(device); + } + +- kgsl_context_put(context); ++ kgsl_process_private_put(private); ++ + return ret; + } + +diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c +index 456c0e1dacfe..8a0d70fa15db 100644 +--- a/drivers/gpu/msm/kgsl_mmu.c ++++ b/drivers/gpu/msm/kgsl_mmu.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2002,2007-2017,2021, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2002,2007-2017,2020-2021, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h +index b40a16122e86..9b925721aa1a 100644 +--- a/drivers/gpu/msm/kgsl_trace.h ++++ b/drivers/gpu/msm/kgsl_trace.h +@@ -821,14 +821,15 @@ TRACE_EVENT(kgsl_constraint, + TRACE_EVENT(kgsl_mmu_pagefault, + + TP_PROTO(struct kgsl_device *device, unsigned long page, +- unsigned int pt, const char *op), ++ unsigned int pt, const char *name, const char *op), + +- TP_ARGS(device, page, pt, op), ++ TP_ARGS(device, page, pt, name, op), + + TP_STRUCT__entry( + __string(device_name, device->name) + __field(unsigned long, page) + __field(unsigned int, pt) ++ __string(name, name) + __string(op, op) + ), + +@@ -836,13 +837,14 @@ TRACE_EVENT(kgsl_mmu_pagefault, + __assign_str(device_name, device->name); + __entry->page = page; + __entry->pt = pt; ++ __assign_str(name, name); + __assign_str(op, op); + ), + + TP_printk( +- "d_name=%s page=0x%lx pt=%u op=%s", ++ "d_name=%s page=0x%lx pt=%u op=%s name=%s", + __get_str(device_name), __entry->page, __entry->pt, +- __get_str(op) ++ __get_str(op), __get_str(name) + ) + ); + +diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c +index ed9c0ea5b026..1bc6ad0339d2 100644 +--- a/drivers/hid/hid-alps.c ++++ b/drivers/hid/hid-alps.c +@@ -429,6 +429,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi) + ret = input_register_device(data->input2); + if (ret) { + input_free_device(input2); ++ ret = -ENOENT; + goto exit; + } + } +diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c +index 0a3efa74be35..f7cd50e15e5c 100644 +--- a/drivers/hid/hid-apple.c ++++ b/drivers/hid/hid-apple.c +@@ -55,6 +55,7 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\") + struct apple_sc { + unsigned long quirks; + unsigned int fn_on; ++ unsigned int fn_found; + DECLARE_BITMAP(pressed_numlock, KEY_CNT); + }; + +@@ -340,12 +341,15 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi, + struct hid_field *field, struct hid_usage *usage, + unsigned long **bit, int *max) + { ++ struct apple_sc *asc = hid_get_drvdata(hdev); ++ + if (usage->hid == (HID_UP_CUSTOM | 0x0003) || + usage->hid == (HID_UP_MSVENDOR | 0x0003) || + usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) { + /* The fn key on Apple USB keyboards */ + set_bit(EV_REP, hi->input->evbit); + hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN); ++ asc->fn_found = true; + apple_setup_input(hi->input); + return 1; + } +@@ -372,6 +376,19 @@ static int apple_input_mapped(struct hid_device *hdev, struct hid_input *hi, + return 0; + } + ++static int apple_input_configured(struct hid_device *hdev, ++ struct hid_input *hidinput) ++{ ++ struct apple_sc *asc = hid_get_drvdata(hdev); ++ ++ if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) { ++ hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n"); ++ asc->quirks = 0; ++ } ++ ++ return 0; ++} ++ + static int apple_probe(struct hid_device *hdev, + const struct hid_device_id *id) + { +@@ -596,6 +613,7 @@ static struct hid_driver apple_driver = { + .event = apple_event, + .input_mapping = apple_input_mapping, + .input_mapped = apple_input_mapped, ++ .input_configured = apple_input_configured, + }; + module_hid_driver(apple_driver); + +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index 5717f6f94ed7..9a037ea46a90 100644 +--- 
a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -1109,6 +1109,9 @@ EXPORT_SYMBOL_GPL(hid_open_report); + + static s32 snto32(__u32 value, unsigned n) + { ++ if (!value || !n) ++ return 0; ++ + switch (n) { + case 8: return ((__s8)value); + case 16: return ((__s16)value); +@@ -1406,6 +1409,17 @@ static void hid_output_field(const struct hid_device *hid, + } + } + ++/* ++ * Compute the size of a report. ++ */ ++static size_t hid_compute_report_size(struct hid_report *report) ++{ ++ if (report->size) ++ return ((report->size - 1) >> 3) + 1; ++ ++ return 0; ++} ++ + /* + * Create a report. 'data' has to be allocated using + * hid_alloc_report_buf() so that it has proper size. +@@ -1418,7 +1432,7 @@ void hid_output_report(struct hid_report *report, __u8 *data) + if (report->id > 0) + *data++ = report->id; + +- memset(data, 0, ((report->size - 1) >> 3) + 1); ++ memset(data, 0, hid_compute_report_size(report)); + for (n = 0; n < report->maxfield; n++) + hid_output_field(report->device, report->field[n], data); + } +@@ -1545,7 +1559,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, + csize--; + } + +- rsize = ((report->size - 1) >> 3) + 1; ++ rsize = hid_compute_report_size(report); + + if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE) + rsize = HID_MAX_BUFFER_SIZE - 1; +@@ -1790,6 +1804,9 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask) + case BUS_I2C: + bus = "I2C"; + break; ++ case BUS_VIRTUAL: ++ bus = "VIRTUAL"; ++ break; + default: + bus = ""; + } +diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c +index 1689568b597d..12c5d7c96527 100644 +--- a/drivers/hid/hid-cypress.c ++++ b/drivers/hid/hid-cypress.c +@@ -26,19 +26,17 @@ + #define CP_2WHEEL_MOUSE_HACK 0x02 + #define CP_2WHEEL_MOUSE_HACK_ON 0x04 + ++#define VA_INVAL_LOGICAL_BOUNDARY 0x08 ++ + /* + * Some USB barcode readers from cypress have usage min and usage max in + * the wrong order + */ +-static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc, ++static __u8 *cp_rdesc_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) + { +- unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); + unsigned int i; + +- if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX)) +- return rdesc; +- + if (*rsize < 4) + return rdesc; + +@@ -51,6 +49,40 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc, + return rdesc; + } + ++static __u8 *va_logical_boundary_fixup(struct hid_device *hdev, __u8 *rdesc, ++ unsigned int *rsize) ++{ ++ /* ++ * Varmilo VA104M (with VID Cypress and device ID 07B1) incorrectly ++ * reports Logical Minimum of its Consumer Control device as 572 ++ * (0x02 0x3c). Fix this by setting its Logical Minimum to zero. 
++ */ ++ if (*rsize == 25 && ++ rdesc[0] == 0x05 && rdesc[1] == 0x0c && ++ rdesc[2] == 0x09 && rdesc[3] == 0x01 && ++ rdesc[6] == 0x19 && rdesc[7] == 0x00 && ++ rdesc[11] == 0x16 && rdesc[12] == 0x3c && rdesc[13] == 0x02) { ++ hid_info(hdev, ++ "fixing up varmilo VA104M consumer control report descriptor\n"); ++ rdesc[12] = 0x00; ++ rdesc[13] = 0x00; ++ } ++ return rdesc; ++} ++ ++static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc, ++ unsigned int *rsize) ++{ ++ unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); ++ ++ if (quirks & CP_RDESC_SWAPPED_MIN_MAX) ++ rdesc = cp_rdesc_fixup(hdev, rdesc, rsize); ++ if (quirks & VA_INVAL_LOGICAL_BOUNDARY) ++ rdesc = va_logical_boundary_fixup(hdev, rdesc, rsize); ++ ++ return rdesc; ++} ++ + static int cp_input_mapped(struct hid_device *hdev, struct hid_input *hi, + struct hid_field *field, struct hid_usage *usage, + unsigned long **bit, int *max) +@@ -131,6 +163,8 @@ static const struct hid_device_id cp_devices[] = { + .driver_data = CP_RDESC_SWAPPED_MIN_MAX }, + { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE), + .driver_data = CP_2WHEEL_MOUSE_HACK }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_VARMILO_VA104M_07B1), ++ .driver_data = VA_INVAL_LOGICAL_BOUNDARY }, + { } + }; + MODULE_DEVICE_TABLE(hid, cp_devices); +diff --git a/drivers/hid/hid-gt683r.c b/drivers/hid/hid-gt683r.c +index a298fbd8db6b..8ca4c1baeda8 100644 +--- a/drivers/hid/hid-gt683r.c ++++ b/drivers/hid/hid-gt683r.c +@@ -64,6 +64,7 @@ static const struct hid_device_id gt683r_led_id[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, + { } + }; ++MODULE_DEVICE_TABLE(hid, gt683r_led_id); + + static void gt683r_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 0215d34bc51e..9f706e79c764 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -307,6 +307,8 @@ + #define USB_DEVICE_ID_CYPRESS_BARCODE_4 0xed81 + #define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001 + ++#define USB_DEVICE_ID_CYPRESS_VARMILO_VA104M_07B1 0X07b1 ++ + #define USB_VENDOR_ID_DATA_MODUL 0x7374 + #define USB_VENDOR_ID_DATA_MODUL_EASYMAXTOUCH 0x1201 + +@@ -817,6 +819,7 @@ + #define USB_DEVICE_ID_ORTEK_WKB2000 0x2000 + + #define USB_VENDOR_ID_PLANTRONICS 0x047f ++#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES 0xc056 + + #define USB_VENDOR_ID_PANASONIC 0x04da + #define USB_DEVICE_ID_PANABOARD_UBT780 0x1044 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c +index 5dbb05961cb5..d45820271510 100644 +--- a/drivers/hid/hid-input.c ++++ b/drivers/hid/hid-input.c +@@ -1084,6 +1084,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel + } + + mapped: ++ /* Mapping failed, bail out */ ++ if (!bit) ++ return; ++ + if (device->driver->input_mapped && + device->driver->input_mapped(device, hidinput, field, usage, + &bit, &max) < 0) { +diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c +index 42ed887ba0be..78e37bb25aee 100644 +--- a/drivers/hid/hid-magicmouse.c ++++ b/drivers/hid/hid-magicmouse.c +@@ -452,6 +452,12 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd + __set_bit(MSC_RAW, input->mscbit); + } + ++ /* ++ * hid-input may mark device as using autorepeat, but neither ++ * the trackpad, nor the mouse actually want it. 
++ */ ++ __clear_bit(EV_REP, input->evbit); ++ + return 0; + } + +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c +index 1207102823de..258a50ec1572 100644 +--- a/drivers/hid/hid-multitouch.c ++++ b/drivers/hid/hid-multitouch.c +@@ -567,6 +567,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi, + case HID_UP_BUTTON: + code = BTN_MOUSE + ((usage->hid - 1) & HID_USAGE); + hid_map_usage(hi, usage, bit, max, EV_KEY, code); ++ if (!*bit) ++ return -1; + input_set_capability(hi->input, EV_KEY, code); + return 1; + +diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c +index 584b10d3fc3d..460711c1124a 100644 +--- a/drivers/hid/hid-plantronics.c ++++ b/drivers/hid/hid-plantronics.c +@@ -16,6 +16,7 @@ + + #include + #include ++#include + + #define PLT_HID_1_0_PAGE 0xffa00000 + #define PLT_HID_2_0_PAGE 0xffa20000 +@@ -39,6 +40,16 @@ + #define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \ + (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER) + ++#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0) ++ ++#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */ ++ ++struct plt_drv_data { ++ unsigned long device_type; ++ unsigned long last_volume_key_ts; ++ u32 quirks; ++}; ++ + static int plantronics_input_mapping(struct hid_device *hdev, + struct hid_input *hi, + struct hid_field *field, +@@ -46,7 +57,8 @@ static int plantronics_input_mapping(struct hid_device *hdev, + unsigned long **bit, int *max) + { + unsigned short mapped_key; +- unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev); ++ struct plt_drv_data *drv_data = hid_get_drvdata(hdev); ++ unsigned long plt_type = drv_data->device_type; + + /* special case for PTT products */ + if (field->application == HID_GD_JOYSTICK) +@@ -108,6 +120,30 @@ static int plantronics_input_mapping(struct hid_device *hdev, + return 1; + } + ++static int plantronics_event(struct hid_device *hdev, struct hid_field *field, ++ struct hid_usage *usage, __s32 value) ++{ ++ struct plt_drv_data *drv_data = hid_get_drvdata(hdev); ++ ++ if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) { ++ unsigned long prev_ts, cur_ts; ++ ++ /* Usages are filtered in plantronics_usages. */ ++ ++ if (!value) /* Handle key presses only. */ ++ return 0; ++ ++ prev_ts = drv_data->last_volume_key_ts; ++ cur_ts = jiffies; ++ if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT) ++ return 1; /* Ignore the repeated key. 
*/ ++ ++ drv_data->last_volume_key_ts = cur_ts; ++ } ++ ++ return 0; ++} ++ + static unsigned long plantronics_device_type(struct hid_device *hdev) + { + unsigned i, col_page; +@@ -136,15 +172,24 @@ static unsigned long plantronics_device_type(struct hid_device *hdev) + static int plantronics_probe(struct hid_device *hdev, + const struct hid_device_id *id) + { ++ struct plt_drv_data *drv_data; + int ret; + ++ drv_data = devm_kzalloc(&hdev->dev, sizeof(*drv_data), GFP_KERNEL); ++ if (!drv_data) ++ return -ENOMEM; ++ + ret = hid_parse(hdev); + if (ret) { + hid_err(hdev, "parse failed\n"); + goto err; + } + +- hid_set_drvdata(hdev, (void *)plantronics_device_type(hdev)); ++ drv_data->device_type = plantronics_device_type(hdev); ++ drv_data->quirks = id->driver_data; ++ drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT); ++ ++ hid_set_drvdata(hdev, drv_data); + + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT | + HID_CONNECT_HIDINPUT_FORCE | HID_CONNECT_HIDDEV_FORCE); +@@ -156,15 +201,26 @@ static int plantronics_probe(struct hid_device *hdev, + } + + static const struct hid_device_id plantronics_devices[] = { ++ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, ++ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES), ++ .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, + { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, + { } + }; + MODULE_DEVICE_TABLE(hid, plantronics_devices); + ++static const struct hid_usage_id plantronics_usages[] = { ++ { HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID }, ++ { HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID }, ++ { HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR } ++}; ++ + static struct hid_driver plantronics_driver = { + .name = "plantronics", + .id_table = plantronics_devices, ++ .usage_table = plantronics_usages, + .input_mapping = plantronics_input_mapping, ++ .event = plantronics_event, + .probe = plantronics_probe, + }; + module_hid_driver(plantronics_driver); +diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c +index bf4675a27396..9be8c31f613f 100644 +--- a/drivers/hid/hid-roccat-kone.c ++++ b/drivers/hid/hid-roccat-kone.c +@@ -297,31 +297,40 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj, + struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev)); + struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); + int retval = 0, difference, old_profile; ++ struct kone_settings *settings = (struct kone_settings *)buf; + + /* I need to get my data in one piece */ + if (off != 0 || count != sizeof(struct kone_settings)) + return -EINVAL; + + mutex_lock(&kone->kone_lock); +- difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings)); ++ difference = memcmp(settings, &kone->settings, ++ sizeof(struct kone_settings)); + if (difference) { +- retval = kone_set_settings(usb_dev, +- (struct kone_settings const *)buf); +- if (retval) { +- mutex_unlock(&kone->kone_lock); +- return retval; ++ if (settings->startup_profile < 1 || ++ settings->startup_profile > 5) { ++ retval = -EINVAL; ++ goto unlock; + } + ++ retval = kone_set_settings(usb_dev, settings); ++ if (retval) ++ goto unlock; ++ + old_profile = kone->settings.startup_profile; +- memcpy(&kone->settings, buf, sizeof(struct kone_settings)); ++ memcpy(&kone->settings, settings, sizeof(struct kone_settings)); + + kone_profile_activated(kone, kone->settings.startup_profile); + + if (kone->settings.startup_profile != old_profile) + kone_profile_report(kone, kone->settings.startup_profile); + } ++unlock: + 
mutex_unlock(&kone->kone_lock); + ++ if (retval) ++ return retval; ++ + return sizeof(struct kone_settings); + } + static BIN_ATTR(settings, 0660, kone_sysfs_read_settings, +diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c +index 4ef73374a8f9..4ea18f07c65b 100644 +--- a/drivers/hid/hid-sensor-hub.c ++++ b/drivers/hid/hid-sensor-hub.c +@@ -223,16 +223,21 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, + buffer_size = buffer_size / sizeof(__s32); + if (buffer_size) { + for (i = 0; i < buffer_size; ++i) { +- hid_set_field(report->field[field_index], i, +- (__force __s32)cpu_to_le32(*buf32)); ++ ret = hid_set_field(report->field[field_index], i, ++ (__force __s32)cpu_to_le32(*buf32)); ++ if (ret) ++ goto done_proc; ++ + ++buf32; + } + } + if (remaining_bytes) { + value = 0; + memcpy(&value, (u8 *)buf32, remaining_bytes); +- hid_set_field(report->field[field_index], i, +- (__force __s32)cpu_to_le32(value)); ++ ret = hid_set_field(report->field[field_index], i, ++ (__force __s32)cpu_to_le32(value)); ++ if (ret) ++ goto done_proc; + } + hid_hw_request(hsdev->hdev, report, HID_REQ_SET_REPORT); + hid_hw_wait(hsdev->hdev); +@@ -489,7 +494,8 @@ static int sensor_hub_raw_event(struct hid_device *hdev, + return 1; + + ptr = raw_data; +- ptr++; /* Skip report id */ ++ if (report->id) ++ ptr++; /* Skip report id */ + + spin_lock_irqsave(&pdata->lock, flags); + +diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c +index 850527d5fab1..800c477dd076 100644 +--- a/drivers/hid/i2c-hid/i2c-hid-core.c ++++ b/drivers/hid/i2c-hid/i2c-hid-core.c +@@ -407,6 +407,19 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state) + dev_err(&client->dev, "failed to change power setting.\n"); + + set_pwr_exit: ++ ++ /* ++ * The HID over I2C specification states that if a DEVICE needs time ++ * after the PWR_ON request, it should utilise CLOCK stretching. ++ * However, it has been observered that the Windows driver provides a ++ * 1ms sleep between the PWR_ON and RESET requests. ++ * According to Goodix Windows even waits 60 ms after (other?) ++ * PWR_ON requests. Testing has confirmed that several devices ++ * will not work properly without a delay after a PWR_ON request. ++ */ ++ if (!ret && power_state == I2C_HID_PWR_ON) ++ msleep(60); ++ + return ret; + } + +@@ -428,15 +441,6 @@ static int i2c_hid_hwreset(struct i2c_client *client) + if (ret) + goto out_unlock; + +- /* +- * The HID over I2C specification states that if a DEVICE needs time +- * after the PWR_ON request, it should utilise CLOCK stretching. +- * However, it has been observered that the Windows driver provides a +- * 1ms sleep between the PWR_ON and RESET requests and that some devices +- * rely on this. 
+- */ +- usleep_range(1000, 5000); +- + i2c_hid_dbg(ihid, "resetting...\n"); + + ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0); +@@ -1153,8 +1157,8 @@ static int i2c_hid_probe(struct i2c_client *client, + hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID); + hid->product = le16_to_cpu(ihid->hdesc.wProductID); + +- snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX", +- client->name, hid->vendor, hid->product); ++ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", ++ client->name, (u16)hid->vendor, (u16)hid->product); + strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys)); + + ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product); +diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +index 681ac9bc68b3..58a753ef2717 100644 +--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c ++++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +@@ -373,6 +373,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { + }, + .driver_data = (void *)&sipodev_desc + }, ++ { ++ .ident = "Mediacom FlexBook edge 13", ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook_edge13-M-FBE13"), ++ }, ++ .driver_data = (void *)&sipodev_desc ++ }, + { + .ident = "Odys Winbook 13", + .matches = { +@@ -389,6 +397,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { + }, + .driver_data = (void *)&sipodev_desc + }, ++ { ++ .ident = "Vero K147", ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VERO"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "K147"), ++ }, ++ .driver_data = (void *)&sipodev_desc ++ }, + { } /* Terminate list */ + }; + +diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c +index 7838343eb37c..b6600329a272 100644 +--- a/drivers/hid/usbhid/hid-core.c ++++ b/drivers/hid/usbhid/hid-core.c +@@ -372,7 +372,7 @@ static int hid_submit_ctrl(struct hid_device *hid) + raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report; + dir = usbhid->ctrl[usbhid->ctrltail].dir; + +- len = ((report->size - 1) >> 3) + 1 + (report->id > 0); ++ len = hid_report_len(report); + if (dir == USB_DIR_OUT) { + usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0); + usbhid->urbctrl->transfer_buffer_length = len; +diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c +index 08174d341f4a..bc75f1efa0f4 100644 +--- a/drivers/hid/usbhid/hid-pidff.c ++++ b/drivers/hid/usbhid/hid-pidff.c +@@ -1304,6 +1304,7 @@ int hid_pidff_init(struct hid_device *hid) + + if (pidff->pool[PID_DEVICE_MANAGED_POOL].value && + pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) { ++ error = -EPERM; + hid_notice(hid, + "device does not support device managed pool\n"); + goto fail; +diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c +index dbdd265075da..7bce23a43907 100644 +--- a/drivers/hid/usbhid/hiddev.c ++++ b/drivers/hid/usbhid/hiddev.c +@@ -554,12 +554,16 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, + + switch (cmd) { + case HIDIOCGUSAGE: ++ if (uref->usage_index >= field->report_count) ++ goto inval; + uref->value = field->value[uref->usage_index]; + if (copy_to_user(user_arg, uref, sizeof(*uref))) + goto fault; + goto goodreturn; + + case HIDIOCSUSAGE: ++ if (uref->usage_index >= field->report_count) ++ goto inval; + field->value[uref->usage_index] = uref->value; + goto goodreturn; + +diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c +index 
9a29b34ed2c8..22cd7169011d 100644 +--- a/drivers/hsi/controllers/omap_ssi_core.c ++++ b/drivers/hsi/controllers/omap_ssi_core.c +@@ -391,7 +391,7 @@ static int ssi_add_controller(struct hsi_controller *ssi, + + err = ida_simple_get(&platform_omap_ssi_ida, 0, 0, GFP_KERNEL); + if (err < 0) +- goto out_err; ++ return err; + ssi->id = err; + + ssi->owner = THIS_MODULE; +diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c +index c2a2a9795b0b..e9d63b966caf 100644 +--- a/drivers/hsi/hsi_core.c ++++ b/drivers/hsi/hsi_core.c +@@ -223,8 +223,6 @@ static void hsi_add_client_from_dt(struct hsi_port *port, + if (err) + goto err; + +- dev_set_name(&cl->device, "%s", name); +- + err = hsi_of_property_parse_mode(client, "hsi-mode", &mode); + if (err) { + err = hsi_of_property_parse_mode(client, "hsi-rx-mode", +@@ -307,6 +305,7 @@ static void hsi_add_client_from_dt(struct hsi_port *port, + cl->device.release = hsi_client_release; + cl->device.of_node = client; + ++ dev_set_name(&cl->device, "%s", name); + if (device_register(&cl->device) < 0) { + pr_err("hsi: failed to register client: %s\n", name); + put_device(&cl->device); +diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c +index 9360cdce740e..60c122b355ea 100644 +--- a/drivers/hv/channel_mgmt.c ++++ b/drivers/hv/channel_mgmt.c +@@ -675,13 +675,19 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) + channel->target_vp = hv_context.vp_index[cur_cpu]; + } + ++#define UNLOAD_DELAY_UNIT_MS 10 /* 10 milliseconds */ ++#define UNLOAD_WAIT_MS (100*1000) /* 100 seconds */ ++#define UNLOAD_WAIT_LOOPS (UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS) ++#define UNLOAD_MSG_MS (5*1000) /* Every 5 seconds */ ++#define UNLOAD_MSG_LOOPS (UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS) ++ + static void vmbus_wait_for_unload(void) + { + int cpu; + void *page_addr; + struct hv_message *msg; + struct vmbus_channel_message_header *hdr; +- u32 message_type; ++ u32 message_type, i; + + /* + * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was +@@ -691,10 +697,18 @@ static void vmbus_wait_for_unload(void) + * functional and vmbus_unload_response() will complete + * vmbus_connection.unload_event. If not, the last thing we can do is + * read message pages for all CPUs directly. ++ * ++ * Wait up to 100 seconds since an Azure host must writeback any dirty ++ * data in its disk cache before the VMbus UNLOAD request will ++ * complete. This flushing has been empirically observed to take up ++ * to 50 seconds in cases with a lot of dirty data, so allow additional ++ * leeway and for inaccuracies in mdelay(). But eventually time out so ++ * that the panic path can't get hung forever in case the response ++ * message isn't seen. + */ +- while (1) { ++ for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) { + if (completion_done(&vmbus_connection.unload_event)) +- break; ++ goto completed; + + for_each_online_cpu(cpu) { + page_addr = hv_context.synic_message_page[cpu]; +@@ -714,9 +728,18 @@ static void vmbus_wait_for_unload(void) + vmbus_signal_eom(msg, message_type); + } + +- mdelay(10); ++ /* ++ * Give a notice periodically so someone watching the ++ * serial output won't think it is completely hung. 
++ */ ++ if (!(i % UNLOAD_MSG_LOOPS)) ++ pr_notice("Waiting for VMBus UNLOAD to complete\n"); ++ ++ mdelay(UNLOAD_DELAY_UNIT_MS); + } ++ pr_err("Continuing even though VMBus UNLOAD did not complete\n"); + ++completed: + /* + * We're crashing and already got the UNLOAD_RESPONSE, cleanup all + * maybe-pending messages on all CPUs to be able to receive new +diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c +index e27f7e12c05b..9b4ad6c74041 100644 +--- a/drivers/hwmon/acpi_power_meter.c ++++ b/drivers/hwmon/acpi_power_meter.c +@@ -895,7 +895,7 @@ static int acpi_power_meter_add(struct acpi_device *device) + + res = setup_attrs(resource); + if (res) +- goto exit_free; ++ goto exit_free_capability; + + resource->hwmon_dev = hwmon_device_register(&device->dev); + if (IS_ERR(resource->hwmon_dev)) { +@@ -908,6 +908,8 @@ static int acpi_power_meter_add(struct acpi_device *device) + + exit_remove: + remove_attrs(resource); ++exit_free_capability: ++ free_capabilities(resource); + exit_free: + kfree(resource); + exit: +diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c +index 0af7fd311979..587fc5c686b3 100644 +--- a/drivers/hwmon/applesmc.c ++++ b/drivers/hwmon/applesmc.c +@@ -758,15 +758,18 @@ static ssize_t applesmc_light_show(struct device *dev, + } + + ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length); ++ if (ret) ++ goto out; + /* newer macbooks report a single 10-bit bigendian value */ + if (data_length == 10) { + left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2; + goto out; + } + left = buffer[2]; ++ ++ ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); + if (ret) + goto out; +- ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); + right = buffer[2]; + + out: +@@ -814,12 +817,11 @@ static ssize_t applesmc_show_fan_speed(struct device *dev, + sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr)); + + ret = applesmc_read_key(newkey, buffer, 2); +- speed = ((buffer[0] << 8 | buffer[1]) >> 2); +- + if (ret) + return ret; +- else +- return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed); ++ ++ speed = ((buffer[0] << 8 | buffer[1]) >> 2); ++ return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed); + } + + static ssize_t applesmc_store_fan_speed(struct device *dev, +@@ -854,12 +856,11 @@ static ssize_t applesmc_show_fan_manual(struct device *dev, + u8 buffer[2]; + + ret = applesmc_read_key(FANS_MANUAL, buffer, 2); +- manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; +- + if (ret) + return ret; +- else +- return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual); ++ ++ manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; ++ return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual); + } + + static ssize_t applesmc_store_fan_manual(struct device *dev, +@@ -875,10 +876,11 @@ static ssize_t applesmc_store_fan_manual(struct device *dev, + return -EINVAL; + + ret = applesmc_read_key(FANS_MANUAL, buffer, 2); +- val = (buffer[0] << 8 | buffer[1]); + if (ret) + goto out; + ++ val = (buffer[0] << 8 | buffer[1]); ++ + if (input) + val = val | (0x01 << to_index(attr)); + else +@@ -954,13 +956,12 @@ static ssize_t applesmc_key_count_show(struct device *dev, + u32 count; + + ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4); +- count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) + +- ((u32)buffer[2]<<8) + buffer[3]; +- + if (ret) + return ret; +- else +- return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count); ++ ++ count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) + ++ ((u32)buffer[2]<<8) + 
buffer[3]; ++ return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count); + } + + static ssize_t applesmc_key_at_index_read_show(struct device *dev, +diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c +index 24e395c5907d..7204ebf32351 100644 +--- a/drivers/hwmon/emc2103.c ++++ b/drivers/hwmon/emc2103.c +@@ -452,7 +452,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da, + } + + result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg); +- if (result) { ++ if (result < 0) { + count = result; + goto err; + } +diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c +index be60bd5bab78..ee6d499edc1b 100644 +--- a/drivers/hwmon/lm80.c ++++ b/drivers/hwmon/lm80.c +@@ -630,7 +630,6 @@ static int lm80_probe(struct i2c_client *client, + struct device *dev = &client->dev; + struct device *hwmon_dev; + struct lm80_data *data; +- int rv; + + data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL); + if (!data) +@@ -643,14 +642,8 @@ static int lm80_probe(struct i2c_client *client, + lm80_init_client(client); + + /* A few vars need to be filled upon startup */ +- rv = lm80_read_value(client, LM80_REG_FAN_MIN(1)); +- if (rv < 0) +- return rv; +- data->fan[f_min][0] = rv; +- rv = lm80_read_value(client, LM80_REG_FAN_MIN(2)); +- if (rv < 0) +- return rv; +- data->fan[f_min][1] = rv; ++ data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1)); ++ data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2)); + + hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, + data, lm80_groups); +diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c +index 841f2428e84a..293d1184976b 100644 +--- a/drivers/hwmon/lm90.c ++++ b/drivers/hwmon/lm90.c +@@ -186,6 +186,7 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680, + #define LM90_HAVE_EMERGENCY_ALARM (1 << 5)/* emergency alarm */ + #define LM90_HAVE_TEMP3 (1 << 6) /* 3rd temperature sensor */ + #define LM90_HAVE_BROKEN_ALERT (1 << 7) /* Broken alert */ ++#define LM90_PAUSE_FOR_CONFIG (1 << 8) /* Pause conversion for config */ + + /* LM90 status */ + #define LM90_STATUS_LTHRM (1 << 0) /* local THERM limit tripped */ +@@ -286,6 +287,7 @@ static const struct lm90_params lm90_params[] = { + .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, + }, + [max6657] = { ++ .flags = LM90_PAUSE_FOR_CONFIG, + .alert_alarms = 0x7c, + .max_convrate = 8, + .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, +@@ -486,6 +488,38 @@ static inline int lm90_select_remote_channel(struct i2c_client *client, + return 0; + } + ++static int lm90_write_convrate(struct i2c_client *client, ++ struct lm90_data *data, int val) ++{ ++ int err; ++ int config_orig, config_stop; ++ ++ /* Save config and pause conversion */ ++ if (data->flags & LM90_PAUSE_FOR_CONFIG) { ++ config_orig = lm90_read_reg(client, LM90_REG_R_CONFIG1); ++ if (config_orig < 0) ++ return config_orig; ++ config_stop = config_orig | 0x40; ++ if (config_orig != config_stop) { ++ err = i2c_smbus_write_byte_data(client, ++ LM90_REG_W_CONFIG1, ++ config_stop); ++ if (err < 0) ++ return err; ++ } ++ } ++ ++ /* Set conv rate */ ++ err = i2c_smbus_write_byte_data(client, LM90_REG_W_CONVRATE, val); ++ ++ /* Revert change to config */ ++ if (data->flags & LM90_PAUSE_FOR_CONFIG && config_orig != config_stop) ++ i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, ++ config_orig); ++ ++ return err; ++} ++ + /* + * Set conversion rate. 
+ * client->update_lock must be held when calling this function (unless we are +@@ -506,7 +540,7 @@ static int lm90_set_convrate(struct i2c_client *client, struct lm90_data *data, + if (interval >= update_interval * 3 / 4) + break; + +- err = i2c_smbus_write_byte_data(client, LM90_REG_W_CONVRATE, i); ++ err = lm90_write_convrate(client, data, i); + data->update_interval = DIV_ROUND_CLOSEST(update_interval, 64); + return err; + } +@@ -1512,8 +1546,7 @@ static void lm90_restore_conf(void *_data) + struct i2c_client *client = data->client; + + /* Restore initial configuration */ +- i2c_smbus_write_byte_data(client, LM90_REG_W_CONVRATE, +- data->convrate_orig); ++ lm90_write_convrate(client, data, data->convrate_orig); + i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, + data->config_orig); + } +@@ -1530,12 +1563,13 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data) + /* + * Start the conversions. + */ +- lm90_set_convrate(client, data, 500); /* 500ms; 2Hz conversion rate */ + config = lm90_read_reg(client, LM90_REG_R_CONFIG1); + if (config < 0) + return config; + data->config_orig = config; + ++ lm90_set_convrate(client, data, 500); /* 500ms; 2Hz conversion rate */ ++ + /* Check Temperature Range Select */ + if (data->kind == adt7461 || data->kind == tmp451) { + if (config & 0x04) +diff --git a/drivers/hwmon/max31722.c b/drivers/hwmon/max31722.c +index 30a100e70a0d..877c3d7dca01 100644 +--- a/drivers/hwmon/max31722.c ++++ b/drivers/hwmon/max31722.c +@@ -9,7 +9,6 @@ + * directory of this archive for more details. + */ + +-#include + #include + #include + #include +@@ -138,20 +137,12 @@ static const struct spi_device_id max31722_spi_id[] = { + {"max31723", 0}, + {} + }; +- +-static const struct acpi_device_id __maybe_unused max31722_acpi_id[] = { +- {"MAX31722", 0}, +- {"MAX31723", 0}, +- {} +-}; +- + MODULE_DEVICE_TABLE(spi, max31722_spi_id); + + static struct spi_driver max31722_driver = { + .driver = { + .name = "max31722", + .pm = &max31722_pm_ops, +- .acpi_match_table = ACPI_PTR(max31722_acpi_id), + }, + .probe = max31722_probe, + .remove = max31722_remove, +diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c +index 281491cca510..66cf772de7d2 100644 +--- a/drivers/hwmon/max31790.c ++++ b/drivers/hwmon/max31790.c +@@ -179,7 +179,7 @@ static int max31790_read_fan(struct device *dev, u32 attr, int channel, + + switch (attr) { + case hwmon_fan_input: +- sr = get_tach_period(data->fan_dynamics[channel]); ++ sr = get_tach_period(data->fan_dynamics[channel % NR_CHANNEL]); + rpm = RPM_FROM_REG(data->tach[channel], sr); + *val = rpm; + return 0; +diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c +index f03a71722849..d4bb3d6aaf18 100644 +--- a/drivers/hwmon/max6697.c ++++ b/drivers/hwmon/max6697.c +@@ -46,8 +46,9 @@ static const u8 MAX6697_REG_CRIT[] = { + * Map device tree / platform data register bit map to chip bit map. + * Applies to alert register and over-temperature register. 
+ */ +-#define MAX6697_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \ ++#define MAX6697_ALERT_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \ + (((reg) & 0x01) << 6) | ((reg) & 0x80)) ++#define MAX6697_OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7)) + + #define MAX6697_REG_STAT(n) (0x44 + (n)) + +@@ -586,12 +587,12 @@ static int max6697_init_chip(struct max6697_data *data, + return ret; + + ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK, +- MAX6697_MAP_BITS(pdata->alert_mask)); ++ MAX6697_ALERT_MAP_BITS(pdata->alert_mask)); + if (ret < 0) + return ret; + + ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK, +- MAX6697_MAP_BITS(pdata->over_temperature_mask)); ++ MAX6697_OVERT_MAP_BITS(pdata->over_temperature_mask)); + if (ret < 0) + return ret; + +diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c +index 98a4cb5d4993..9c236c88bc7b 100644 +--- a/drivers/hwtracing/intel_th/gth.c ++++ b/drivers/hwtracing/intel_th/gth.c +@@ -485,7 +485,7 @@ static void intel_th_gth_disable(struct intel_th_device *thdev, + output->active = false; + + for_each_set_bit(master, gth->output[output->port].master, +- TH_CONFIGURABLE_MASTERS) { ++ TH_CONFIGURABLE_MASTERS + 1) { + gth_master_set(gth, master, -1); + } + spin_unlock(>h->gth_lock); +@@ -605,7 +605,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev, + othdev->output.port = -1; + othdev->output.active = false; + gth->output[port].output = NULL; +- for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++) ++ for (master = 0; master < TH_CONFIGURABLE_MASTERS + 1; master++) + if (gth->master[master] == port) + gth->master[master] = -1; + spin_unlock(>h->gth_lock); +diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c +index 3da7b673aab2..3957ce678265 100644 +--- a/drivers/hwtracing/stm/heartbeat.c ++++ b/drivers/hwtracing/stm/heartbeat.c +@@ -72,7 +72,7 @@ static void stm_heartbeat_unlink(struct stm_source_data *data) + + static int stm_heartbeat_init(void) + { +- int i, ret = -ENOMEM; ++ int i, ret; + + if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX) + return -EINVAL; +@@ -80,8 +80,10 @@ static int stm_heartbeat_init(void) + for (i = 0; i < nr_devs; i++) { + stm_heartbeat[i].data.name = + kasprintf(GFP_KERNEL, "heartbeat.%d", i); +- if (!stm_heartbeat[i].data.name) ++ if (!stm_heartbeat[i].data.name) { ++ ret = -ENOMEM; + goto fail_unregister; ++ } + + stm_heartbeat[i].data.nr_chans = 1; + stm_heartbeat[i].data.link = stm_heartbeat_link; +diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c +index e370804ec8bc..1886588b9ea3 100644 +--- a/drivers/i2c/algos/i2c-algo-pca.c ++++ b/drivers/i2c/algos/i2c-algo-pca.c +@@ -50,8 +50,22 @@ static void pca_reset(struct i2c_algo_pca_data *adap) + pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IPRESET); + pca_outw(adap, I2C_PCA_IND, 0xA5); + pca_outw(adap, I2C_PCA_IND, 0x5A); ++ ++ /* ++ * After a reset we need to re-apply any configuration ++ * (calculated in pca_init) to get the bus in a working state. 
++ */ ++ pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IMODE); ++ pca_outw(adap, I2C_PCA_IND, adap->bus_settings.mode); ++ pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLL); ++ pca_outw(adap, I2C_PCA_IND, adap->bus_settings.tlow); ++ pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLH); ++ pca_outw(adap, I2C_PCA_IND, adap->bus_settings.thi); ++ ++ pca_set_con(adap, I2C_PCA_CON_ENSIO); + } else { + adap->reset_chip(adap->data); ++ pca_set_con(adap, I2C_PCA_CON_ENSIO | adap->bus_settings.clock_freq); + } + } + +@@ -326,7 +340,8 @@ static int pca_xfer(struct i2c_adapter *i2c_adap, + DEB2("BUS ERROR - SDA Stuck low\n"); + pca_reset(adap); + goto out; +- case 0x90: /* Bus error - SCL stuck low */ ++ case 0x78: /* Bus error - SCL stuck low (PCA9665) */ ++ case 0x90: /* Bus error - SCL stuck low (PCA9564) */ + DEB2("BUS ERROR - SCL Stuck low\n"); + pca_reset(adap); + goto out; +@@ -434,13 +449,14 @@ static int pca_init(struct i2c_adapter *adap) + " Use the nominal frequency.\n", adap->name); + } + +- pca_reset(pca_data); +- + clock = pca_clock(pca_data); + printk(KERN_INFO "%s: Clock frequency is %dkHz\n", + adap->name, freqs[clock]); + +- pca_set_con(pca_data, I2C_PCA_CON_ENSIO | clock); ++ /* Store settings as these will be needed when the PCA chip is reset */ ++ pca_data->bus_settings.clock_freq = clock; ++ ++ pca_reset(pca_data); + } else { + int clock; + int mode; +@@ -507,19 +523,15 @@ static int pca_init(struct i2c_adapter *adap) + thi = tlow * min_thi / min_tlow; + } + ++ /* Store settings as these will be needed when the PCA chip is reset */ ++ pca_data->bus_settings.mode = mode; ++ pca_data->bus_settings.tlow = tlow; ++ pca_data->bus_settings.thi = thi; ++ + pca_reset(pca_data); + + printk(KERN_INFO + "%s: Clock frequency is %dHz\n", adap->name, clock * 100); +- +- pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_IMODE); +- pca_outw(pca_data, I2C_PCA_IND, mode); +- pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLL); +- pca_outw(pca_data, I2C_PCA_IND, tlow); +- pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLH); +- pca_outw(pca_data, I2C_PCA_IND, thi); +- +- pca_set_con(pca_data, I2C_PCA_CON_ENSIO); + } + udelay(500); /* 500 us for oscillator to stabilise */ + +diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c +index 78792b4d6437..a658f975605a 100644 +--- a/drivers/i2c/busses/i2c-brcmstb.c ++++ b/drivers/i2c/busses/i2c-brcmstb.c +@@ -318,7 +318,7 @@ static int brcmstb_send_i2c_cmd(struct brcmstb_i2c_dev *dev, + goto cmd_out; + } + +- if ((CMD_RD || CMD_WR) && ++ if ((cmd == CMD_RD || cmd == CMD_WR) && + bsc_readl(dev, iic_enable) & BSC_IIC_EN_NOACK_MASK) { + rc = -EREMOTEIO; + dev_dbg(dev->device, "controller received NOACK intr for %s\n", +diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c +index 59c08d5b75d6..23ee1a423654 100644 +--- a/drivers/i2c/busses/i2c-cadence.c ++++ b/drivers/i2c/busses/i2c-cadence.c +@@ -382,10 +382,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) + * Check for the message size against FIFO depth and set the + * 'hold bus' bit if it is greater than FIFO depth. + */ +- if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) ++ if (id->recv_count > CDNS_I2C_FIFO_DEPTH) + ctrl_reg |= CDNS_I2C_CR_HOLD; +- else +- ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; + + cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); + +@@ -442,11 +440,8 @@ static void cdns_i2c_msend(struct cdns_i2c *id) + * Check for the message size against FIFO depth and set the + * 'hold bus' bit if it is greater than FIFO depth. 
+ */ +- if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) ++ if (id->send_count > CDNS_I2C_FIFO_DEPTH) + ctrl_reg |= CDNS_I2C_CR_HOLD; +- else +- ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; +- + cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); + + /* Clear the interrupts in interrupt status register. */ +@@ -913,7 +908,10 @@ static int cdns_i2c_probe(struct platform_device *pdev) + if (IS_ERR(id->membase)) + return PTR_ERR(id->membase); + +- id->irq = platform_get_irq(pdev, 0); ++ ret = platform_get_irq(pdev, 0); ++ if (ret < 0) ++ return ret; ++ id->irq = ret; + + id->adap.owner = THIS_MODULE; + id->adap.dev.of_node = pdev->dev.of_node; +diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c +index d89bde2c5da2..cf285b97a642 100644 +--- a/drivers/i2c/busses/i2c-cpm.c ++++ b/drivers/i2c/busses/i2c-cpm.c +@@ -74,6 +74,9 @@ struct i2c_ram { + char res1[4]; /* Reserved */ + ushort rpbase; /* Relocation pointer */ + char res2[2]; /* Reserved */ ++ /* The following elements are only for CPM2 */ ++ char res3[4]; /* Reserved */ ++ uint sdmatmp; /* Internal */ + }; + + #define I2COM_START 0x80 +diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c +index 5ce71ce7b6c4..39f05e784566 100644 +--- a/drivers/i2c/busses/i2c-eg20t.c ++++ b/drivers/i2c/busses/i2c-eg20t.c +@@ -189,6 +189,7 @@ static const struct pci_device_id pch_pcidev_id[] = { + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, }, + {0,} + }; ++MODULE_DEVICE_TABLE(pci, pch_pcidev_id); + + static irqreturn_t pch_i2c_handler(int irq, void *pData); + +diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c +index 0218ba6eb26a..ad33c1e3a30f 100644 +--- a/drivers/i2c/busses/i2c-emev2.c ++++ b/drivers/i2c/busses/i2c-emev2.c +@@ -398,7 +398,10 @@ static int em_i2c_probe(struct platform_device *pdev) + + em_i2c_reset(&priv->adap); + +- priv->irq = platform_get_irq(pdev, 0); ++ ret = platform_get_irq(pdev, 0); ++ if (ret < 0) ++ goto err_clk; ++ priv->irq = ret; + ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0, + "em_i2c", priv); + if (ret) +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c +index 26f1691f67ab..0e04b27e3158 100644 +--- a/drivers/i2c/busses/i2c-i801.c ++++ b/drivers/i2c/busses/i2c-i801.c +@@ -375,11 +375,9 @@ static int i801_check_post(struct i801_priv *priv, int status) + dev_err(&priv->pci_dev->dev, "Transaction timeout\n"); + /* try to stop the current command */ + dev_dbg(&priv->pci_dev->dev, "Terminating the current operation\n"); +- outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL, +- SMBHSTCNT(priv)); ++ outb_p(SMBHSTCNT_KILL, SMBHSTCNT(priv)); + usleep_range(1000, 2000); +- outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL), +- SMBHSTCNT(priv)); ++ outb_p(0, SMBHSTCNT(priv)); + + /* Check if it worked */ + status = inb_p(SMBHSTSTS(priv)); +diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c +index 9c1be9378dfd..b91ad668202e 100644 +--- a/drivers/i2c/busses/i2c-imx.c ++++ b/drivers/i2c/busses/i2c-imx.c +@@ -194,6 +194,7 @@ struct imx_i2c_dma { + struct imx_i2c_struct { + struct i2c_adapter adapter; + struct clk *clk; ++ struct notifier_block clk_change_nb; + void __iomem *base; + wait_queue_head_t queue; + unsigned long i2csr; +@@ -412,6 +413,19 @@ static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx) + dma->chan_using = NULL; + } + ++static void i2c_imx_clear_irq(struct imx_i2c_struct *i2c_imx, unsigned int bits) ++{ ++ unsigned int temp; ++ ++ /* ++ * i2sr_clr_opcode is the value 
to clear all interrupts. Here we want to ++ * clear only , so we write ~i2sr_clr_opcode with just ++ * toggled. This is required because i.MX needs W0C and Vybrid uses W1C. ++ */ ++ temp = ~i2c_imx->hwdata->i2sr_clr_opcode ^ bits; ++ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR); ++} ++ + static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy) + { + unsigned long orig_jiffies = jiffies; +@@ -424,8 +438,7 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy) + + /* check for arbitration lost */ + if (temp & I2SR_IAL) { +- temp &= ~I2SR_IAL; +- imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR); ++ i2c_imx_clear_irq(i2c_imx, I2SR_IAL); + return -EAGAIN; + } + +@@ -452,6 +465,16 @@ static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx) + dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__); + return -ETIMEDOUT; + } ++ ++ /* check for arbitration lost */ ++ if (i2c_imx->i2csr & I2SR_IAL) { ++ dev_dbg(&i2c_imx->adapter.dev, "<%s> Arbitration lost\n", __func__); ++ i2c_imx_clear_irq(i2c_imx, I2SR_IAL); ++ ++ i2c_imx->i2csr = 0; ++ return -EAGAIN; ++ } ++ + dev_dbg(&i2c_imx->adapter.dev, "<%s> TRX complete\n", __func__); + i2c_imx->i2csr = 0; + return 0; +@@ -468,15 +491,14 @@ static int i2c_imx_acked(struct imx_i2c_struct *i2c_imx) + return 0; + } + +-static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx) ++static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx, ++ unsigned int i2c_clk_rate) + { + struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div; +- unsigned int i2c_clk_rate; + unsigned int div; + int i; + + /* Divider value calculation */ +- i2c_clk_rate = clk_get_rate(i2c_imx->clk); + if (i2c_imx->cur_clk == i2c_clk_rate) + return; + +@@ -511,6 +533,20 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx) + #endif + } + ++static int i2c_imx_clk_notifier_call(struct notifier_block *nb, ++ unsigned long action, void *data) ++{ ++ struct clk_notifier_data *ndata = data; ++ struct imx_i2c_struct *i2c_imx = container_of(&ndata->clk, ++ struct imx_i2c_struct, ++ clk); ++ ++ if (action & POST_RATE_CHANGE) ++ i2c_imx_set_clk(i2c_imx, ndata->new_rate); ++ ++ return NOTIFY_OK; ++} ++ + static int i2c_imx_start(struct imx_i2c_struct *i2c_imx) + { + unsigned int temp = 0; +@@ -518,8 +554,6 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx) + + dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); + +- i2c_imx_set_clk(i2c_imx); +- + imx_i2c_write_reg(i2c_imx->ifdr, i2c_imx, IMX_I2C_IFDR); + /* Enable I2C controller */ + imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR); +@@ -583,9 +617,7 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id) + if (temp & I2SR_IIF) { + /* save status register */ + i2c_imx->i2csr = temp; +- temp &= ~I2SR_IIF; +- temp |= (i2c_imx->hwdata->i2sr_clr_opcode & I2SR_IIF); +- imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR); ++ i2c_imx_clear_irq(i2c_imx, I2SR_IIF); + wake_up(&i2c_imx->queue); + return IRQ_HANDLED; + } +@@ -1099,14 +1131,6 @@ static int i2c_imx_probe(struct platform_device *pdev) + return ret; + } + +- /* Request IRQ */ +- ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0, +- pdev->name, i2c_imx); +- if (ret) { +- dev_err(&pdev->dev, "can't claim irq %d\n", irq); +- goto clk_disable; +- } +- + /* Init queue */ + init_waitqueue_head(&i2c_imx->queue); + +@@ -1125,12 +1149,23 @@ static int i2c_imx_probe(struct platform_device *pdev) + if (ret < 0) + goto rpm_disable; + ++ /* Request IRQ */ ++ ret = request_threaded_irq(irq, i2c_imx_isr, NULL, 
IRQF_SHARED, ++ pdev->name, i2c_imx); ++ if (ret) { ++ dev_err(&pdev->dev, "can't claim irq %d\n", irq); ++ goto rpm_disable; ++ } ++ + /* Set up clock divider */ + i2c_imx->bitrate = IMX_I2C_BIT_RATE; + ret = of_property_read_u32(pdev->dev.of_node, + "clock-frequency", &i2c_imx->bitrate); + if (ret < 0 && pdata && pdata->bitrate) + i2c_imx->bitrate = pdata->bitrate; ++ i2c_imx->clk_change_nb.notifier_call = i2c_imx_clk_notifier_call; ++ clk_notifier_register(i2c_imx->clk, &i2c_imx->clk_change_nb); ++ i2c_imx_set_clk(i2c_imx, clk_get_rate(i2c_imx->clk)); + + /* Set up chip registers to defaults */ + imx_i2c_write_reg(i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN, +@@ -1141,12 +1176,12 @@ static int i2c_imx_probe(struct platform_device *pdev) + ret = i2c_imx_init_recovery_info(i2c_imx, pdev); + /* Give it another chance if pinctrl used is not ready yet */ + if (ret == -EPROBE_DEFER) +- goto rpm_disable; ++ goto clk_notifier_unregister; + + /* Add I2C adapter */ + ret = i2c_add_numbered_adapter(&i2c_imx->adapter); + if (ret < 0) +- goto rpm_disable; ++ goto clk_notifier_unregister; + + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); +@@ -1162,13 +1197,14 @@ static int i2c_imx_probe(struct platform_device *pdev) + + return 0; /* Return OK */ + ++clk_notifier_unregister: ++ clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); ++ free_irq(irq, i2c_imx); + rpm_disable: + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); +- +-clk_disable: + clk_disable_unprepare(i2c_imx->clk); + return ret; + } +@@ -1176,7 +1212,7 @@ static int i2c_imx_probe(struct platform_device *pdev) + static int i2c_imx_remove(struct platform_device *pdev) + { + struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev); +- int ret; ++ int irq, ret; + + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) +@@ -1195,6 +1231,10 @@ static int i2c_imx_remove(struct platform_device *pdev) + imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR); + imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR); + ++ clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); ++ irq = platform_get_irq(pdev, 0); ++ if (irq >= 0) ++ free_irq(irq, i2c_imx); + clk_disable_unprepare(i2c_imx->clk); + + pm_runtime_put_noidle(&pdev->dev); +diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c +index 41ca9ff7b5da..4dd800c0db14 100644 +--- a/drivers/i2c/busses/i2c-jz4780.c ++++ b/drivers/i2c/busses/i2c-jz4780.c +@@ -760,7 +760,10 @@ static int jz4780_i2c_probe(struct platform_device *pdev) + + jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0); + +- i2c->irq = platform_get_irq(pdev, 0); ++ ret = platform_get_irq(pdev, 0); ++ if (ret < 0) ++ goto err; ++ i2c->irq = ret; + ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0, + dev_name(&pdev->dev), i2c); + if (ret) +diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c +index 565a49a0c564..90e4f839eb1c 100644 +--- a/drivers/i2c/busses/i2c-mpc.c ++++ b/drivers/i2c/busses/i2c-mpc.c +@@ -23,6 +23,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -49,6 +50,7 @@ + #define CCR_MTX 0x10 + #define CCR_TXAK 0x08 + #define CCR_RSTA 0x04 ++#define CCR_RSVD 0x02 + + #define CSR_MCF 0x80 + #define CSR_MAAS 0x40 +@@ -70,6 +72,7 @@ struct mpc_i2c { + u8 fdr, dfsrr; + #endif + struct clk *clk_per; ++ bool has_errata_A004447; + }; + + struct mpc_i2c_divider { +@@ -178,6 +181,75 @@ static int i2c_wait(struct 
mpc_i2c *i2c, unsigned timeout, int writing) + return 0; + } + ++static int i2c_mpc_wait_sr(struct mpc_i2c *i2c, int mask) ++{ ++ void __iomem *addr = i2c->base + MPC_I2C_SR; ++ u8 val; ++ ++ return readb_poll_timeout(addr, val, val & mask, 0, 100); ++} ++ ++/* ++ * Workaround for Erratum A004447. From the P2040CE Rev Q ++ * ++ * 1. Set up the frequency divider and sampling rate. ++ * 2. I2CCR - a0h ++ * 3. Poll for I2CSR[MBB] to get set. ++ * 4. If I2CSR[MAL] is set (an indication that SDA is stuck low), then go to ++ * step 5. If MAL is not set, then go to step 13. ++ * 5. I2CCR - 00h ++ * 6. I2CCR - 22h ++ * 7. I2CCR - a2h ++ * 8. Poll for I2CSR[MBB] to get set. ++ * 9. Issue read to I2CDR. ++ * 10. Poll for I2CSR[MIF] to be set. ++ * 11. I2CCR - 82h ++ * 12. Workaround complete. Skip the next steps. ++ * 13. Issue read to I2CDR. ++ * 14. Poll for I2CSR[MIF] to be set. ++ * 15. I2CCR - 80h ++ */ ++static void mpc_i2c_fixup_A004447(struct mpc_i2c *i2c) ++{ ++ int ret; ++ u32 val; ++ ++ writeccr(i2c, CCR_MEN | CCR_MSTA); ++ ret = i2c_mpc_wait_sr(i2c, CSR_MBB); ++ if (ret) { ++ dev_err(i2c->dev, "timeout waiting for CSR_MBB\n"); ++ return; ++ } ++ ++ val = readb(i2c->base + MPC_I2C_SR); ++ ++ if (val & CSR_MAL) { ++ writeccr(i2c, 0x00); ++ writeccr(i2c, CCR_MSTA | CCR_RSVD); ++ writeccr(i2c, CCR_MEN | CCR_MSTA | CCR_RSVD); ++ ret = i2c_mpc_wait_sr(i2c, CSR_MBB); ++ if (ret) { ++ dev_err(i2c->dev, "timeout waiting for CSR_MBB\n"); ++ return; ++ } ++ val = readb(i2c->base + MPC_I2C_DR); ++ ret = i2c_mpc_wait_sr(i2c, CSR_MIF); ++ if (ret) { ++ dev_err(i2c->dev, "timeout waiting for CSR_MIF\n"); ++ return; ++ } ++ writeccr(i2c, CCR_MEN | CCR_RSVD); ++ } else { ++ val = readb(i2c->base + MPC_I2C_DR); ++ ret = i2c_mpc_wait_sr(i2c, CSR_MIF); ++ if (ret) { ++ dev_err(i2c->dev, "timeout waiting for CSR_MIF\n"); ++ return; ++ } ++ writeccr(i2c, CCR_MEN); ++ } ++} ++ + #if defined(CONFIG_PPC_MPC52xx) || defined(CONFIG_PPC_MPC512x) + static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = { + {20, 0x20}, {22, 0x21}, {24, 0x22}, {26, 0x23}, +@@ -581,7 +653,7 @@ static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) + if ((status & (CSR_MCF | CSR_MBB | CSR_RXAK)) != 0) { + writeb(status & ~CSR_MAL, + i2c->base + MPC_I2C_SR); +- mpc_i2c_fixup(i2c); ++ i2c_recover_bus(&i2c->adap); + } + return -EIO; + } +@@ -617,7 +689,7 @@ static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) + if ((status & (CSR_MCF | CSR_MBB | CSR_RXAK)) != 0) { + writeb(status & ~CSR_MAL, + i2c->base + MPC_I2C_SR); +- mpc_i2c_fixup(i2c); ++ i2c_recover_bus(&i2c->adap); + } + return -EIO; + } +@@ -632,6 +704,18 @@ static u32 mpc_functionality(struct i2c_adapter *adap) + | I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL; + } + ++static int fsl_i2c_bus_recovery(struct i2c_adapter *adap) ++{ ++ struct mpc_i2c *i2c = i2c_get_adapdata(adap); ++ ++ if (i2c->has_errata_A004447) ++ mpc_i2c_fixup_A004447(i2c); ++ else ++ mpc_i2c_fixup(i2c); ++ ++ return 0; ++} ++ + static const struct i2c_algorithm mpc_algo = { + .master_xfer = mpc_xfer, + .functionality = mpc_functionality, +@@ -643,6 +727,10 @@ static struct i2c_adapter mpc_ops = { + .timeout = HZ, + }; + ++static struct i2c_bus_recovery_info fsl_i2c_recovery_info = { ++ .recover_bus = fsl_i2c_bus_recovery, ++}; ++ + static const struct of_device_id mpc_i2c_of_match[]; + static int fsl_i2c_probe(struct platform_device *op) + { +@@ -727,6 +815,8 @@ static int fsl_i2c_probe(struct platform_device *op) + dev_info(i2c->dev, 
"timeout %u us\n", mpc_ops.timeout * 1000000 / HZ); + + platform_set_drvdata(op, i2c); ++ if (of_property_read_bool(op->dev.of_node, "fsl,i2c-erratum-a004447")) ++ i2c->has_errata_A004447 = true; + + i2c->adap = mpc_ops; + of_address_to_resource(op->dev.of_node, 0, &res); +@@ -735,6 +825,7 @@ static int fsl_i2c_probe(struct platform_device *op) + i2c_set_adapdata(&i2c->adap, i2c); + i2c->adap.dev.parent = &op->dev; + i2c->adap.dev.of_node = of_node_get(op->dev.of_node); ++ i2c->adap.bus_recovery_info = &fsl_i2c_recovery_info; + + result = i2c_add_adapter(&i2c->adap); + if (result < 0) +diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c +index 5e63b17f935d..e5ad3f9cd372 100644 +--- a/drivers/i2c/busses/i2c-octeon-core.c ++++ b/drivers/i2c/busses/i2c-octeon-core.c +@@ -383,7 +383,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target, + + data[i] = octeon_i2c_data_read(i2c); + if (recv_len && i == 0) { +- if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) ++ if (data[i] > I2C_SMBUS_BLOCK_MAX) + return -EPROTO; + length += data[i]; + } +diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c +index 62785aa76b3f..8324d2729088 100644 +--- a/drivers/i2c/busses/i2c-piix4.c ++++ b/drivers/i2c/busses/i2c-piix4.c +@@ -840,7 +840,8 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) + } + + if (dev->vendor == PCI_VENDOR_ID_AMD && +- dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) { ++ (dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS || ++ dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS)) { + retval = piix4_setup_sb800(dev, id, 1); + } + +diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c +index e28b825b0433..fb191ad8fc3a 100644 +--- a/drivers/i2c/busses/i2c-pxa.c ++++ b/drivers/i2c/busses/i2c-pxa.c +@@ -297,11 +297,10 @@ static void i2c_pxa_scream_blue_murder(struct pxa_i2c *i2c, const char *why) + dev_err(dev, "IBMR: %08x IDBR: %08x ICR: %08x ISR: %08x\n", + readl(_IBMR(i2c)), readl(_IDBR(i2c)), readl(_ICR(i2c)), + readl(_ISR(i2c))); +- dev_dbg(dev, "log: "); ++ dev_err(dev, "log:"); + for (i = 0; i < i2c->irqlogidx; i++) +- pr_debug("[%08x:%08x] ", i2c->isrlog[i], i2c->icrlog[i]); +- +- pr_debug("\n"); ++ pr_cont(" [%03x:%05x]", i2c->isrlog[i], i2c->icrlog[i]); ++ pr_cont("\n"); + } + + #else /* ifdef DEBUG */ +@@ -691,11 +690,9 @@ static inline void i2c_pxa_stop_message(struct pxa_i2c *i2c) + { + u32 icr; + +- /* +- * Clear the STOP and ACK flags +- */ ++ /* Clear the START, STOP, ACK, TB and MA flags */ + icr = readl(_ICR(i2c)); +- icr &= ~(ICR_STOP | ICR_ACKNAK); ++ icr &= ~(ICR_START | ICR_STOP | ICR_ACKNAK | ICR_TB | ICR_MA); + writel(icr, _ICR(i2c)); + } + +diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c +index 63dc2ab003aa..f5cf832c55d1 100644 +--- a/drivers/i2c/busses/i2c-qcom-geni.c ++++ b/drivers/i2c/busses/i2c-qcom-geni.c +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2017-2019,2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -328,12 +328,13 @@ static irqreturn_t geni_i2c_irq(int irq, void *dev) + SE_DMA_RX_IRQ_CLR); + /* Ensure all writes are done before returning from ISR. 
*/ + wmb(); +- if ((dm_tx_st & TX_DMA_DONE) || (dm_rx_st & RX_DMA_DONE)) +- complete(&gi2c->xfer); +- + } +- /* if this is err with done-bit not set, handle that thr' timeout. */ +- else if (m_stat & M_CMD_DONE_EN) ++ ++ /* For some reason if DMA could not complete transfer, GENI must have ++ * command executed. E.g. I2C NACK, hence consider CMD_DONE too. ++ */ ++ if ((m_stat & M_CMD_DONE_EN) || ++ (dm_tx_st & TX_DMA_DONE) || (dm_rx_st & RX_DMA_DONE)) + complete(&gi2c->xfer); + + return IRQ_HANDLED; +diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c +index 7524e17ac966..00366cf89617 100644 +--- a/drivers/i2c/busses/i2c-qup.c ++++ b/drivers/i2c/busses/i2c-qup.c +@@ -810,7 +810,8 @@ static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg, + if (ret || qup->bus_err || qup->qup_err) { + reinit_completion(&qup->xfer); + +- if (qup_i2c_change_state(qup, QUP_RUN_STATE)) { ++ ret = qup_i2c_change_state(qup, QUP_RUN_STATE); ++ if (ret) { + dev_err(qup->dev, "change to run state timed out"); + goto desc_err; + } +diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c +index 93b8069041bb..6be02da2ccc4 100644 +--- a/drivers/i2c/busses/i2c-rcar.c ++++ b/drivers/i2c/busses/i2c-rcar.c +@@ -527,13 +527,15 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) + rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR); + } + +- rcar_i2c_write(priv, ICSSR, ~SAR & 0xff); ++ /* Clear SSR, too, because of old STOPs to other clients than us */ ++ rcar_i2c_write(priv, ICSSR, ~(SAR | SSR) & 0xff); + } + + /* master sent stop */ + if (ssr_filtered & SSR) { + i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value); +- rcar_i2c_write(priv, ICSIER, SAR | SSR); ++ rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */ ++ rcar_i2c_write(priv, ICSIER, SAR); + rcar_i2c_write(priv, ICSSR, ~SSR & 0xff); + } + +@@ -762,7 +764,7 @@ static int rcar_reg_slave(struct i2c_client *slave) + priv->slave = slave; + rcar_i2c_write(priv, ICSAR, slave->addr); + rcar_i2c_write(priv, ICSSR, 0); +- rcar_i2c_write(priv, ICSIER, SAR | SSR); ++ rcar_i2c_write(priv, ICSIER, SAR); + rcar_i2c_write(priv, ICSCR, SIE | SDBS); + + return 0; +diff --git a/drivers/i2c/busses/i2c-robotfuzz-osif.c b/drivers/i2c/busses/i2c-robotfuzz-osif.c +index 89d8b41b6668..032e8535e860 100644 +--- a/drivers/i2c/busses/i2c-robotfuzz-osif.c ++++ b/drivers/i2c/busses/i2c-robotfuzz-osif.c +@@ -89,7 +89,7 @@ static int osif_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, + } + } + +- ret = osif_usb_read(adapter, OSIFI2C_STOP, 0, 0, NULL, 0); ++ ret = osif_usb_write(adapter, OSIFI2C_STOP, 0, 0, NULL, 0); + if (ret) { + dev_err(&adapter->dev, "failure sending STOP\n"); + return -EREMOTEIO; +@@ -159,7 +159,7 @@ static int osif_probe(struct usb_interface *interface, + * Set bus frequency. The frequency is: + * 120,000,000 / ( 16 + 2 * div * 4^prescale). 
+ * Using dev = 52, prescale = 0 give 100KHz */ +- ret = osif_usb_read(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0, ++ ret = osif_usb_write(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0, + NULL, 0); + if (ret) { + dev_err(&interface->dev, "failure sending bit rate"); +diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c +index 499af26e736e..acccdfb95420 100644 +--- a/drivers/i2c/busses/i2c-s3c2410.c ++++ b/drivers/i2c/busses/i2c-s3c2410.c +@@ -495,7 +495,10 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat) + * forces us to send a new START + * when we change direction + */ ++ dev_dbg(i2c->dev, ++ "missing START before write->read\n"); + s3c24xx_i2c_stop(i2c, -EINVAL); ++ break; + } + + goto retry_write; +diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c +index c2005c789d2b..319d1fa617c8 100644 +--- a/drivers/i2c/busses/i2c-sh7760.c ++++ b/drivers/i2c/busses/i2c-sh7760.c +@@ -471,7 +471,10 @@ static int sh7760_i2c_probe(struct platform_device *pdev) + goto out2; + } + +- id->irq = platform_get_irq(pdev, 0); ++ ret = platform_get_irq(pdev, 0); ++ if (ret < 0) ++ goto out3; ++ id->irq = ret; + + id->adap.nr = pdev->id; + id->adap.algo = &sh7760_i2c_algo; +diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c +index 80d82c6792d8..67e44e990777 100644 +--- a/drivers/i2c/i2c-core.c ++++ b/drivers/i2c/i2c-core.c +@@ -42,6 +42,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -1003,6 +1004,8 @@ static void i2c_device_shutdown(struct device *dev) + driver = to_i2c_driver(dev->driver); + if (driver->shutdown) + driver->shutdown(client); ++ else if (client->irq > 0) ++ disable_irq(client->irq); + } + + static void i2c_client_dev_release(struct device *dev) +@@ -1858,8 +1861,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap) + + /* create pre-declared device nodes */ + of_i2c_register_devices(adap); +- i2c_acpi_register_devices(adap); + i2c_acpi_install_space_handler(adap); ++ i2c_acpi_register_devices(adap); + + if (adap->nr < __i2c_first_dynamic_bus_num) + i2c_scan_static_board_info(adap); +diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c +index c4066276eb7b..b7f9fb00f695 100644 +--- a/drivers/i2c/i2c-dev.c ++++ b/drivers/i2c/i2c-dev.c +@@ -148,7 +148,7 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count, + if (count > 8192) + count = 8192; + +- tmp = kmalloc(count, GFP_KERNEL); ++ tmp = kzalloc(count, GFP_KERNEL); + if (tmp == NULL) + return -ENOMEM; + +@@ -157,7 +157,8 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count, + + ret = i2c_master_recv(client, tmp, count); + if (ret >= 0) +- ret = copy_to_user(buf, tmp, count) ? 
-EFAULT : ret; ++ if (copy_to_user(buf, tmp, ret)) ++ ret = -EFAULT; + kfree(tmp); + return ret; + } +diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c +index 9c4ac26c014e..6f673b0cc803 100644 +--- a/drivers/i2c/muxes/i2c-mux-pca954x.c ++++ b/drivers/i2c/muxes/i2c-mux-pca954x.c +@@ -96,6 +96,10 @@ static const struct chip_desc chips[] = { + .nchans = 4, + .muxtype = pca954x_isswi, + }, ++ [pca_9546] = { ++ .nchans = 4, ++ .muxtype = pca954x_isswi, ++ }, + [pca_9547] = { + .nchans = 8, + .enable = 0x8, +@@ -113,7 +117,7 @@ static const struct i2c_device_id pca954x_id[] = { + { "pca9543", pca_9543 }, + { "pca9544", pca_9544 }, + { "pca9545", pca_9545 }, +- { "pca9546", pca_9545 }, ++ { "pca9546", pca_9546 }, + { "pca9547", pca_9547 }, + { "pca9548", pca_9548 }, + { } +diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c +index 883fe2cdd42c..6e3b3a5a3c36 100644 +--- a/drivers/ide/ide-cd.c ++++ b/drivers/ide/ide-cd.c +@@ -704,7 +704,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq) + struct request_queue *q = drive->queue; + int write = rq_data_dir(rq) == WRITE; + unsigned short sectors_per_frame = +- queue_logical_block_size(q) >> SECTOR_BITS; ++ queue_logical_block_size(q) >> SECTOR_SHIFT; + + ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, " + "secs_per_frame: %u", +@@ -900,7 +900,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, + * end up being bogus. + */ + blocklen = be32_to_cpu(capbuf.blocklen); +- blocklen = (blocklen >> SECTOR_BITS) << SECTOR_BITS; ++ blocklen = (blocklen >> SECTOR_SHIFT) << SECTOR_SHIFT; + switch (blocklen) { + case 512: + case 1024: +@@ -916,7 +916,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, + } + + *capacity = 1 + be32_to_cpu(capbuf.lba); +- *sectors_per_frame = blocklen >> SECTOR_BITS; ++ *sectors_per_frame = blocklen >> SECTOR_SHIFT; + + ide_debug_log(IDE_DBG_PROBE, "cap: %lu, sectors_per_frame: %lu", + *capacity, *sectors_per_frame); +@@ -993,7 +993,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) + drive->probed_capacity = toc->capacity * sectors_per_frame; + + blk_queue_logical_block_size(drive->queue, +- sectors_per_frame << SECTOR_BITS); ++ sectors_per_frame << SECTOR_SHIFT); + + /* first read just the header, so we know how long the TOC is */ + stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr, +diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h +index 1efc936f5b66..7c6d017e84e9 100644 +--- a/drivers/ide/ide-cd.h ++++ b/drivers/ide/ide-cd.h +@@ -20,11 +20,7 @@ + + /************************************************************************/ + +-#define SECTOR_BITS 9 +-#ifndef SECTOR_SIZE +-#define SECTOR_SIZE (1 << SECTOR_BITS) +-#endif +-#define SECTORS_PER_FRAME (CD_FRAMESIZE >> SECTOR_BITS) ++#define SECTORS_PER_FRAME (CD_FRAMESIZE >> SECTOR_SHIFT) + #define SECTOR_BUFFER_SIZE (CD_FRAMESIZE * 32) + + /* Capabilities Page size including 8 bytes of Mode Page Header */ +diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c +index 0890934ef66f..02a361de3dcc 100644 +--- a/drivers/iio/accel/bma180.c ++++ b/drivers/iio/accel/bma180.c +@@ -49,7 +49,7 @@ struct bma180_part_info { + + u8 int_reset_reg, int_reset_mask; + u8 sleep_reg, sleep_mask; +- u8 bw_reg, bw_mask; ++ u8 bw_reg, bw_mask, bw_offset; + u8 scale_reg, scale_mask; + u8 power_reg, power_mask, lowpower_val; + u8 int_enable_reg, int_enable_mask; +@@ -105,6 +105,7 @@ struct bma180_part_info 
{ + + #define BMA250_RANGE_MASK GENMASK(3, 0) /* Range of accel values */ + #define BMA250_BW_MASK GENMASK(4, 0) /* Accel bandwidth */ ++#define BMA250_BW_OFFSET 8 + #define BMA250_SUSPEND_MASK BIT(7) /* chip will sleep */ + #define BMA250_LOWPOWER_MASK BIT(6) + #define BMA250_DATA_INTEN_MASK BIT(4) +@@ -120,7 +121,11 @@ struct bma180_data { + int scale; + int bw; + bool pmode; +- u8 buff[16]; /* 3x 16-bit + 8-bit + padding + timestamp */ ++ /* Ensure timestamp is naturally aligned */ ++ struct { ++ s16 chan[4]; ++ s64 timestamp __aligned(8); ++ } scan; + }; + + enum bma180_chan { +@@ -238,7 +243,8 @@ static int bma180_set_bw(struct bma180_data *data, int val) + for (i = 0; i < data->part_info->num_bw; ++i) { + if (data->part_info->bw_table[i] == val) { + ret = bma180_set_bits(data, data->part_info->bw_reg, +- data->part_info->bw_mask, i); ++ data->part_info->bw_mask, ++ i + data->part_info->bw_offset); + if (ret) { + dev_err(&data->client->dev, + "failed to set bandwidth\n"); +@@ -621,32 +627,53 @@ static const struct iio_chan_spec bma250_channels[] = { + + static const struct bma180_part_info bma180_part_info[] = { + [BMA180] = { +- bma180_channels, ARRAY_SIZE(bma180_channels), +- bma180_scale_table, ARRAY_SIZE(bma180_scale_table), +- bma180_bw_table, ARRAY_SIZE(bma180_bw_table), +- BMA180_CTRL_REG0, BMA180_RESET_INT, +- BMA180_CTRL_REG0, BMA180_SLEEP, +- BMA180_BW_TCS, BMA180_BW, +- BMA180_OFFSET_LSB1, BMA180_RANGE, +- BMA180_TCO_Z, BMA180_MODE_CONFIG, BMA180_LOW_POWER, +- BMA180_CTRL_REG3, BMA180_NEW_DATA_INT, +- BMA180_RESET, +- bma180_chip_config, +- bma180_chip_disable, ++ .channels = bma180_channels, ++ .num_channels = ARRAY_SIZE(bma180_channels), ++ .scale_table = bma180_scale_table, ++ .num_scales = ARRAY_SIZE(bma180_scale_table), ++ .bw_table = bma180_bw_table, ++ .num_bw = ARRAY_SIZE(bma180_bw_table), ++ .int_reset_reg = BMA180_CTRL_REG0, ++ .int_reset_mask = BMA180_RESET_INT, ++ .sleep_reg = BMA180_CTRL_REG0, ++ .sleep_mask = BMA180_SLEEP, ++ .bw_reg = BMA180_BW_TCS, ++ .bw_mask = BMA180_BW, ++ .scale_reg = BMA180_OFFSET_LSB1, ++ .scale_mask = BMA180_RANGE, ++ .power_reg = BMA180_TCO_Z, ++ .power_mask = BMA180_MODE_CONFIG, ++ .lowpower_val = BMA180_LOW_POWER, ++ .int_enable_reg = BMA180_CTRL_REG3, ++ .int_enable_mask = BMA180_NEW_DATA_INT, ++ .softreset_reg = BMA180_RESET, ++ .chip_config = bma180_chip_config, ++ .chip_disable = bma180_chip_disable, + }, + [BMA250] = { +- bma250_channels, ARRAY_SIZE(bma250_channels), +- bma250_scale_table, ARRAY_SIZE(bma250_scale_table), +- bma250_bw_table, ARRAY_SIZE(bma250_bw_table), +- BMA250_INT_RESET_REG, BMA250_INT_RESET_MASK, +- BMA250_POWER_REG, BMA250_SUSPEND_MASK, +- BMA250_BW_REG, BMA250_BW_MASK, +- BMA250_RANGE_REG, BMA250_RANGE_MASK, +- BMA250_POWER_REG, BMA250_LOWPOWER_MASK, 1, +- BMA250_INT_ENABLE_REG, BMA250_DATA_INTEN_MASK, +- BMA250_RESET_REG, +- bma250_chip_config, +- bma250_chip_disable, ++ .channels = bma250_channels, ++ .num_channels = ARRAY_SIZE(bma250_channels), ++ .scale_table = bma250_scale_table, ++ .num_scales = ARRAY_SIZE(bma250_scale_table), ++ .bw_table = bma250_bw_table, ++ .num_bw = ARRAY_SIZE(bma250_bw_table), ++ .int_reset_reg = BMA250_INT_RESET_REG, ++ .int_reset_mask = BMA250_INT_RESET_MASK, ++ .sleep_reg = BMA250_POWER_REG, ++ .sleep_mask = BMA250_SUSPEND_MASK, ++ .bw_reg = BMA250_BW_REG, ++ .bw_mask = BMA250_BW_MASK, ++ .bw_offset = BMA250_BW_OFFSET, ++ .scale_reg = BMA250_RANGE_REG, ++ .scale_mask = BMA250_RANGE_MASK, ++ .power_reg = BMA250_POWER_REG, ++ .power_mask = BMA250_LOWPOWER_MASK, ++ 
.lowpower_val = 1, ++ .int_enable_reg = BMA250_INT_ENABLE_REG, ++ .int_enable_mask = BMA250_DATA_INTEN_MASK, ++ .softreset_reg = BMA250_RESET_REG, ++ .chip_config = bma250_chip_config, ++ .chip_disable = bma250_chip_disable, + }, + }; + +@@ -667,12 +694,12 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p) + mutex_unlock(&data->mutex); + goto err; + } +- ((s16 *)data->buff)[i++] = ret; ++ data->scan.chan[i++] = ret; + } + + mutex_unlock(&data->mutex); + +- iio_push_to_buffers_with_timestamp(indio_dev, data->buff, time_ns); ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, time_ns); + err: + iio_trigger_notify_done(indio_dev->trig); + +diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c +index 5099f295dd37..a96f2d530ae3 100644 +--- a/drivers/iio/accel/bma220_spi.c ++++ b/drivers/iio/accel/bma220_spi.c +@@ -76,7 +76,11 @@ static const int bma220_scale_table[][4] = { + struct bma220_data { + struct spi_device *spi_device; + struct mutex lock; +- s8 buffer[16]; /* 3x8-bit channels + 5x8 padding + 8x8 timestamp */ ++ struct { ++ s8 chans[3]; ++ /* Ensure timestamp is naturally aligned. */ ++ s64 timestamp __aligned(8); ++ } scan; + u8 tx_buf[2] ____cacheline_aligned; + }; + +@@ -107,12 +111,12 @@ static irqreturn_t bma220_trigger_handler(int irq, void *p) + + mutex_lock(&data->lock); + data->tx_buf[0] = BMA220_REG_ACCEL_X | BMA220_READ_MASK; +- ret = spi_write_then_read(spi, data->tx_buf, 1, data->buffer, ++ ret = spi_write_then_read(spi, data->tx_buf, 1, &data->scan.chans, + ARRAY_SIZE(bma220_channels) - 1); + if (ret < 0) + goto err; + +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + pf->timestamp); + err: + mutex_unlock(&data->lock); +diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c +index b6254ce9ab3b..86fe94f9511f 100644 +--- a/drivers/iio/accel/bmc150-accel-core.c ++++ b/drivers/iio/accel/bmc150-accel-core.c +@@ -197,6 +197,14 @@ struct bmc150_accel_data { + struct mutex mutex; + u8 fifo_mode, watermark; + s16 buffer[8]; ++ /* ++ * Ensure there is sufficient space and correct alignment for ++ * the timestamp if enabled ++ */ ++ struct { ++ __le16 channels[3]; ++ s64 ts __aligned(8); ++ } scan; + u8 bw_bits; + u32 slope_dur; + u32 slope_thres; +@@ -933,15 +941,16 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev, + * now. 
+ */ + for (i = 0; i < count; i++) { +- u16 sample[8]; + int j, bit; + + j = 0; + for_each_set_bit(bit, indio_dev->active_scan_mask, + indio_dev->masklength) +- memcpy(&sample[j++], &buffer[i * 3 + bit], 2); ++ memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit], ++ sizeof(data->scan.channels[0])); + +- iio_push_to_buffers_with_timestamp(indio_dev, sample, tstamp); ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, ++ tstamp); + + tstamp += sample_period; + } +diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c +index 780f886ccbfe..626a605a0c0e 100644 +--- a/drivers/iio/accel/kxcjk-1013.c ++++ b/drivers/iio/accel/kxcjk-1013.c +@@ -91,12 +91,28 @@ enum kx_chipset { + KX_MAX_CHIPS /* this must be last */ + }; + ++enum kx_acpi_type { ++ ACPI_GENERIC, ++ ACPI_SMO8500, ++}; ++ ++enum kxcjk1013_axis { ++ AXIS_X, ++ AXIS_Y, ++ AXIS_Z, ++ AXIS_MAX ++}; ++ + struct kxcjk1013_data { + struct i2c_client *client; + struct iio_trigger *dready_trig; + struct iio_trigger *motion_trig; + struct mutex mutex; +- s16 buffer[8]; ++ /* Ensure timestamp naturally aligned */ ++ struct { ++ s16 chans[AXIS_MAX]; ++ s64 timestamp __aligned(8); ++ } scan; + u8 odr_bits; + u8 range; + int wake_thres; +@@ -107,14 +123,7 @@ struct kxcjk1013_data { + bool motion_trigger_on; + int64_t timestamp; + enum kx_chipset chipset; +- bool is_smo8500_device; +-}; +- +-enum kxcjk1013_axis { +- AXIS_X, +- AXIS_Y, +- AXIS_Z, +- AXIS_MAX, ++ enum kx_acpi_type acpi_type; + }; + + enum kxcjk1013_mode { +@@ -966,12 +975,12 @@ static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p) + ret = i2c_smbus_read_i2c_block_data_or_emulated(data->client, + KXCJK1013_REG_XOUT_L, + AXIS_MAX * 2, +- (u8 *)data->buffer); ++ (u8 *)data->scan.chans); + mutex_unlock(&data->mutex); + if (ret < 0) + goto err; + +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + data->timestamp); + err: + iio_trigger_notify_done(indio_dev->trig); +@@ -1144,7 +1153,7 @@ static irqreturn_t kxcjk1013_data_rdy_trig_poll(int irq, void *private) + + static const char *kxcjk1013_match_acpi_device(struct device *dev, + enum kx_chipset *chipset, +- bool *is_smo8500_device) ++ enum kx_acpi_type *acpi_type) + { + const struct acpi_device_id *id; + +@@ -1153,7 +1162,7 @@ static const char *kxcjk1013_match_acpi_device(struct device *dev, + return NULL; + + if (strcmp(id->id, "SMO8500") == 0) +- *is_smo8500_device = true; ++ *acpi_type = ACPI_SMO8500; + + *chipset = (enum kx_chipset)id->driver_data; + +@@ -1189,7 +1198,7 @@ static int kxcjk1013_probe(struct i2c_client *client, + } else if (ACPI_HANDLE(&client->dev)) { + name = kxcjk1013_match_acpi_device(&client->dev, + &data->chipset, +- &data->is_smo8500_device); ++ &data->acpi_type); + } else + return -ENODEV; + +@@ -1207,7 +1216,7 @@ static int kxcjk1013_probe(struct i2c_client *client, + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->info = &kxcjk1013_info; + +- if (client->irq > 0 && !data->is_smo8500_device) { ++ if (client->irq > 0 && data->acpi_type != ACPI_SMO8500) { + ret = devm_request_threaded_irq(&client->dev, client->irq, + kxcjk1013_data_rdy_trig_poll, + kxcjk1013_event_handler, +diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c +index 9af60ac70738..1bda730a71c0 100644 +--- a/drivers/iio/accel/kxsd9.c ++++ b/drivers/iio/accel/kxsd9.c +@@ -212,14 +212,20 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p) + const struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = 
pf->indio_dev; + struct kxsd9_state *st = iio_priv(indio_dev); ++ /* ++ * Ensure correct positioning and alignment of timestamp. ++ * No need to zero initialize as all elements written. ++ */ ++ struct { ++ __be16 chan[4]; ++ s64 ts __aligned(8); ++ } hw_values; + int ret; +- /* 4 * 16bit values AND timestamp */ +- __be16 hw_values[8]; + + ret = regmap_bulk_read(st->map, + KXSD9_REG_X, +- &hw_values, +- 8); ++ hw_values.chan, ++ sizeof(hw_values.chan)); + if (ret) { + dev_err(st->dev, + "error reading data\n"); +@@ -227,7 +233,7 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p) + } + + iio_push_to_buffers_with_timestamp(indio_dev, +- hw_values, ++ &hw_values, + iio_get_time_ns(indio_dev)); + iio_trigger_notify_done(indio_dev->trig); + +diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c +index 6551085bedd7..f7804e56cc27 100644 +--- a/drivers/iio/accel/mma7455_core.c ++++ b/drivers/iio/accel/mma7455_core.c +@@ -55,6 +55,14 @@ + + struct mma7455_data { + struct regmap *regmap; ++ /* ++ * Used to reorganize data. Will ensure correct alignment of ++ * the timestamp if present ++ */ ++ struct { ++ __le16 channels[3]; ++ s64 ts __aligned(8); ++ } scan; + }; + + static int mma7455_drdy(struct mma7455_data *mma7455) +@@ -85,19 +93,19 @@ static irqreturn_t mma7455_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct mma7455_data *mma7455 = iio_priv(indio_dev); +- u8 buf[16]; /* 3 x 16-bit channels + padding + ts */ + int ret; + + ret = mma7455_drdy(mma7455); + if (ret) + goto done; + +- ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, buf, +- sizeof(__le16) * 3); ++ ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, ++ mma7455->scan.channels, ++ sizeof(mma7455->scan.channels)); + if (ret) + goto done; + +- iio_push_to_buffers_with_timestamp(indio_dev, buf, ++ iio_push_to_buffers_with_timestamp(indio_dev, &mma7455->scan, + iio_get_time_ns(indio_dev)); + + done: +diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c +index d41e1b588e68..90d4179e8c3d 100644 +--- a/drivers/iio/accel/mma8452.c ++++ b/drivers/iio/accel/mma8452.c +@@ -105,6 +105,12 @@ struct mma8452_data { + u8 ctrl_reg1; + u8 data_cfg; + const struct mma_chip_info *chip_info; ++ ++ /* Ensure correct alignment of time stamp when present */ ++ struct { ++ __be16 channels[3]; ++ s64 ts __aligned(8); ++ } buffer; + }; + + /** +@@ -985,14 +991,13 @@ static irqreturn_t mma8452_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct mma8452_data *data = iio_priv(indio_dev); +- u8 buffer[16]; /* 3 16-bit channels + padding + ts */ + int ret; + +- ret = mma8452_read(data, (__be16 *)buffer); ++ ret = mma8452_read(data, data->buffer.channels); + if (ret < 0) + goto done; + +- iio_push_to_buffers_with_timestamp(indio_dev, buffer, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer, + iio_get_time_ns(indio_dev)); + + done: +@@ -1576,10 +1581,13 @@ static int mma8452_probe(struct i2c_client *client, + + ret = mma8452_set_freefall_mode(data, false); + if (ret < 0) +- goto buffer_cleanup; ++ goto unregister_device; + + return 0; + ++unregister_device: ++ iio_device_unregister(indio_dev); ++ + buffer_cleanup: + iio_triggered_buffer_cleanup(indio_dev); + +diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c +index e31023dc5f1b..24a7499049f1 100644 +--- a/drivers/iio/accel/stk8312.c ++++ b/drivers/iio/accel/stk8312.c +@@ 
-106,7 +106,11 @@ struct stk8312_data { + u8 mode; + struct iio_trigger *dready_trig; + bool dready_trigger_on; +- s8 buffer[16]; /* 3x8-bit channels + 5x8 padding + 64-bit timestamp */ ++ /* Ensure timestamp is naturally aligned */ ++ struct { ++ s8 chans[3]; ++ s64 timestamp __aligned(8); ++ } scan; + }; + + static IIO_CONST_ATTR(in_accel_scale_available, STK8312_SCALE_AVAIL); +@@ -443,7 +447,7 @@ static irqreturn_t stk8312_trigger_handler(int irq, void *p) + ret = i2c_smbus_read_i2c_block_data(data->client, + STK8312_REG_XOUT, + STK8312_ALL_CHANNEL_SIZE, +- data->buffer); ++ data->scan.chans); + if (ret < STK8312_ALL_CHANNEL_SIZE) { + dev_err(&data->client->dev, "register read failed\n"); + mutex_unlock(&data->lock); +@@ -457,12 +461,12 @@ static irqreturn_t stk8312_trigger_handler(int irq, void *p) + mutex_unlock(&data->lock); + goto err; + } +- data->buffer[i++] = ret; ++ data->scan.chans[i++] = ret; + } + } + mutex_unlock(&data->lock); + +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + pf->timestamp); + err: + iio_trigger_notify_done(indio_dev->trig); +diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c +index 300d955bad00..5ca179cea2fb 100644 +--- a/drivers/iio/accel/stk8ba50.c ++++ b/drivers/iio/accel/stk8ba50.c +@@ -94,12 +94,11 @@ struct stk8ba50_data { + u8 sample_rate_idx; + struct iio_trigger *dready_trig; + bool dready_trigger_on; +- /* +- * 3 x 16-bit channels (10-bit data, 6-bit padding) + +- * 1 x 16 padding + +- * 4 x 16 64-bit timestamp +- */ +- s16 buffer[8]; ++ /* Ensure timestamp is naturally aligned */ ++ struct { ++ s16 chans[3]; ++ s64 timetamp __aligned(8); ++ } scan; + }; + + #define STK8BA50_ACCEL_CHANNEL(index, reg, axis) { \ +@@ -329,7 +328,7 @@ static irqreturn_t stk8ba50_trigger_handler(int irq, void *p) + ret = i2c_smbus_read_i2c_block_data(data->client, + STK8BA50_REG_XOUT, + STK8BA50_ALL_CHANNEL_SIZE, +- (u8 *)data->buffer); ++ (u8 *)data->scan.chans); + if (ret < STK8BA50_ALL_CHANNEL_SIZE) { + dev_err(&data->client->dev, "register read failed\n"); + goto err; +@@ -342,10 +341,10 @@ static irqreturn_t stk8ba50_trigger_handler(int irq, void *p) + if (ret < 0) + goto err; + +- data->buffer[i++] = ret; ++ data->scan.chans[i++] = ret; + } + } +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + pf->timestamp); + err: + mutex_unlock(&data->lock); +diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c +index 437762a1e487..f940b1607ef4 100644 +--- a/drivers/iio/adc/ad7793.c ++++ b/drivers/iio/adc/ad7793.c +@@ -279,6 +279,7 @@ static int ad7793_setup(struct iio_dev *indio_dev, + id &= AD7793_ID_MASK; + + if (id != st->chip_info->id) { ++ ret = -ENODEV; + dev_err(&st->sd.spi->dev, "device ID query failed\n"); + goto out; + } +diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c +index 59b7d76e1ad2..ce7e9a2383af 100644 +--- a/drivers/iio/adc/ina2xx-adc.c ++++ b/drivers/iio/adc/ina2xx-adc.c +@@ -117,6 +117,11 @@ struct ina2xx_chip_info { + int int_time_vbus; /* Bus voltage integration time uS */ + int int_time_vshunt; /* Shunt voltage integration time uS */ + bool allow_async_readout; ++ /* data buffer needs space for channel data and timestamp */ ++ struct { ++ u16 chan[4]; ++ u64 ts __aligned(8); ++ } scan; + }; + + static const struct ina2xx_config ina2xx_config[] = { +@@ -459,7 +464,6 @@ static const struct iio_chan_spec ina2xx_channels[] = { + static int 
ina2xx_work_buffer(struct iio_dev *indio_dev) + { + struct ina2xx_chip_info *chip = iio_priv(indio_dev); +- unsigned short data[8]; + int bit, ret, i = 0; + s64 time_a, time_b; + unsigned int alert; +@@ -500,13 +504,12 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev) + if (ret < 0) + return ret; + +- data[i++] = val; ++ chip->scan.chan[i++] = val; + } + + time_b = iio_get_time_ns(indio_dev); + +- iio_push_to_buffers_with_timestamp(indio_dev, +- (unsigned int *)data, time_a); ++ iio_push_to_buffers_with_timestamp(indio_dev, &chip->scan, time_a); + + return (unsigned long)(time_b - time_a) / 1000; + }; +diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c +index 254135e07792..6cc6666180eb 100644 +--- a/drivers/iio/adc/mcp3422.c ++++ b/drivers/iio/adc/mcp3422.c +@@ -99,16 +99,12 @@ static int mcp3422_update_config(struct mcp3422 *adc, u8 newconfig) + { + int ret; + +- mutex_lock(&adc->lock); +- + ret = i2c_master_send(adc->i2c, &newconfig, 1); + if (ret > 0) { + adc->config = newconfig; + ret = 0; + } + +- mutex_unlock(&adc->lock); +- + return ret; + } + +@@ -141,6 +137,8 @@ static int mcp3422_read_channel(struct mcp3422 *adc, + u8 config; + u8 req_channel = channel->channel; + ++ mutex_lock(&adc->lock); ++ + if (req_channel != MCP3422_CHANNEL(adc->config)) { + config = adc->config; + config &= ~MCP3422_CHANNEL_MASK; +@@ -148,12 +146,18 @@ static int mcp3422_read_channel(struct mcp3422 *adc, + config &= ~MCP3422_PGA_MASK; + config |= MCP3422_PGA_VALUE(adc->pga[req_channel]); + ret = mcp3422_update_config(adc, config); +- if (ret < 0) ++ if (ret < 0) { ++ mutex_unlock(&adc->lock); + return ret; ++ } + msleep(mcp3422_read_times[MCP3422_SAMPLE_RATE(adc->config)]); + } + +- return mcp3422_read(adc, value, &config); ++ ret = mcp3422_read(adc, value, &config); ++ ++ mutex_unlock(&adc->lock); ++ ++ return ret; + } + + static int mcp3422_read_raw(struct iio_dev *iio, +diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c +index 7d61b566e148..f5218461ae25 100644 +--- a/drivers/iio/adc/palmas_gpadc.c ++++ b/drivers/iio/adc/palmas_gpadc.c +@@ -660,8 +660,8 @@ static int palmas_adc_wakeup_configure(struct palmas_gpadc *adc) + + adc_period = adc->auto_conversion_period; + for (i = 0; i < 16; ++i) { +- if (((1000 * (1 << i)) / 32) < adc_period) +- continue; ++ if (((1000 * (1 << i)) / 32) >= adc_period) ++ break; + } + if (i > 0) + i--; +diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c +index 85d701291654..7e72dab45932 100644 +--- a/drivers/iio/adc/rockchip_saradc.c ++++ b/drivers/iio/adc/rockchip_saradc.c +@@ -378,7 +378,7 @@ static int rockchip_saradc_resume(struct device *dev) + + ret = clk_prepare_enable(info->clk); + if (ret) +- return ret; ++ clk_disable_unprepare(info->pclk); + + return ret; + } +diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c +index 319172cf7da8..d9bfe79c1747 100644 +--- a/drivers/iio/adc/ti-adc081c.c ++++ b/drivers/iio/adc/ti-adc081c.c +@@ -36,6 +36,12 @@ struct adc081c { + + /* 8, 10 or 12 */ + int bits; ++ ++ /* Ensure natural alignment of buffer elements */ ++ struct { ++ u16 channel; ++ s64 ts __aligned(8); ++ } scan; + }; + + #define REG_CONV_RES 0x00 +@@ -132,14 +138,13 @@ static irqreturn_t adc081c_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct adc081c *data = iio_priv(indio_dev); +- u16 buf[8]; /* 2 bytes data + 6 bytes padding + 8 bytes timestamp */ + int ret; + + ret = 
i2c_smbus_read_word_swapped(data->i2c, REG_CONV_RES); + if (ret < 0) + goto out; +- buf[0] = ret; +- iio_push_to_buffers_with_timestamp(indio_dev, buf, ++ data->scan.channel = ret; ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + iio_get_time_ns(indio_dev)); + out: + iio_trigger_notify_done(indio_dev->trig); +diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c +index 072f03bfe6a0..1410a41782cc 100644 +--- a/drivers/iio/adc/ti-adc12138.c ++++ b/drivers/iio/adc/ti-adc12138.c +@@ -50,6 +50,12 @@ struct adc12138 { + struct completion complete; + /* The number of cclk periods for the S/H's acquisition time */ + unsigned int acquisition_time; ++ /* ++ * Maximum size needed: 16x 2 bytes ADC data + 8 bytes timestamp. ++ * Less may be need if not all channels are enabled, as long as ++ * the 8 byte alignment of the timestamp is maintained. ++ */ ++ __be16 data[20] __aligned(8); + + u8 tx_buf[2] ____cacheline_aligned; + u8 rx_buf[2]; +@@ -333,7 +339,6 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct adc12138 *adc = iio_priv(indio_dev); +- __be16 data[20] = { }; /* 16x 2 bytes ADC data + 8 bytes timestamp */ + __be16 trash; + int ret; + int scan_index; +@@ -349,7 +354,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p) + reinit_completion(&adc->complete); + + ret = adc12138_start_and_read_conv(adc, scan_chan, +- i ? &data[i - 1] : &trash); ++ i ? &adc->data[i - 1] : &trash); + if (ret) { + dev_warn(&adc->spi->dev, + "failed to start conversion\n"); +@@ -366,7 +371,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p) + } + + if (i) { +- ret = adc12138_read_conv_data(adc, &data[i - 1]); ++ ret = adc12138_read_conv_data(adc, &adc->data[i - 1]); + if (ret) { + dev_warn(&adc->spi->dev, + "failed to get conversion data\n"); +@@ -374,7 +379,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p) + } + } + +- iio_push_to_buffers_with_timestamp(indio_dev, data, ++ iio_push_to_buffers_with_timestamp(indio_dev, adc->data, + iio_get_time_ns(indio_dev)); + out: + mutex_unlock(&adc->lock); +diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c +index af05e20c986b..d892c0fe5c31 100644 +--- a/drivers/iio/adc/ti-ads1015.c ++++ b/drivers/iio/adc/ti-ads1015.c +@@ -220,6 +220,7 @@ static const struct iio_chan_spec ads1115_channels[] = { + IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP), + }; + ++#ifdef CONFIG_PM + static int ads1015_set_power_state(struct ads1015_data *data, bool on) + { + int ret; +@@ -237,6 +238,15 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on) + return ret < 0 ? 
ret : 0; + } + ++#else /* !CONFIG_PM */ ++ ++static int ads1015_set_power_state(struct ads1015_data *data, bool on) ++{ ++ return 0; ++} ++ ++#endif /* !CONFIG_PM */ ++ + static + int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val) + { +@@ -282,10 +292,14 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct ads1015_data *data = iio_priv(indio_dev); +- s16 buf[8]; /* 1x s16 ADC val + 3x s16 padding + 4x s16 timestamp */ ++ /* Ensure natural alignment of timestamp */ ++ struct { ++ s16 chan; ++ s64 timestamp __aligned(8); ++ } scan; + int chan, ret, res; + +- memset(buf, 0, sizeof(buf)); ++ memset(&scan, 0, sizeof(scan)); + + mutex_lock(&data->lock); + chan = find_first_bit(indio_dev->active_scan_mask, +@@ -296,10 +310,10 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p) + goto err; + } + +- buf[0] = res; ++ scan.chan = res; + mutex_unlock(&data->lock); + +- iio_push_to_buffers_with_timestamp(indio_dev, buf, ++ iio_push_to_buffers_with_timestamp(indio_dev, &scan, + iio_get_time_ns(indio_dev)); + + err: +diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c +index d1bde6d2721e..2a14800e6514 100644 +--- a/drivers/iio/adc/vf610_adc.c ++++ b/drivers/iio/adc/vf610_adc.c +@@ -180,7 +180,11 @@ struct vf610_adc { + u32 sample_freq_avail[5]; + + struct completion completion; +- u16 buffer[8]; ++ /* Ensure the timestamp is naturally aligned */ ++ struct { ++ u16 chan; ++ s64 timestamp __aligned(8); ++ } scan; + }; + + static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 }; +@@ -592,9 +596,9 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id) + if (coco & VF610_ADC_HS_COCO0) { + info->value = vf610_adc_read_data(info); + if (iio_buffer_enabled(indio_dev)) { +- info->buffer[0] = info->value; ++ info->scan.chan = info->value; + iio_push_to_buffers_with_timestamp(indio_dev, +- info->buffer, ++ &info->scan, + iio_get_time_ns(indio_dev)); + iio_trigger_notify_done(indio_dev->trig); + } else +diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c +index 788b3d6fd1cc..b15a02f502df 100644 +--- a/drivers/iio/dac/ad5504.c ++++ b/drivers/iio/dac/ad5504.c +@@ -189,9 +189,9 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev, + return ret; + + if (pwr_down) +- st->pwr_down_mask |= (1 << chan->channel); +- else + st->pwr_down_mask &= ~(1 << chan->channel); ++ else ++ st->pwr_down_mask |= (1 << chan->channel); + + ret = ad5504_spi_write(st, AD5504_ADDR_CTRL, + AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) | +diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c +index 69bde5909854..5c998ac8c840 100644 +--- a/drivers/iio/dac/ad5592r-base.c ++++ b/drivers/iio/dac/ad5592r-base.c +@@ -417,7 +417,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev, + s64 tmp = *val * (3767897513LL / 25LL); + *val = div_s64_rem(tmp, 1000000000LL, val2); + +- ret = IIO_VAL_INT_PLUS_MICRO; ++ return IIO_VAL_INT_PLUS_MICRO; + } else { + int mult; + +@@ -448,7 +448,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev, + ret = IIO_VAL_INT; + break; + default: +- ret = -EINVAL; ++ return -EINVAL; + } + + unlock: +diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c +index b5a5517e3ce1..ec2830c16433 100644 +--- a/drivers/iio/gyro/bmg160_core.c ++++ b/drivers/iio/gyro/bmg160_core.c +@@ -104,7 +104,11 @@ struct bmg160_data { + struct iio_trigger *dready_trig; + struct iio_trigger *motion_trig; + struct mutex mutex; 
+- s16 buffer[8]; ++ /* Ensure naturally aligned timestamp */ ++ struct { ++ s16 chans[3]; ++ s64 timestamp __aligned(8); ++ } scan; + u32 dps_range; + int ev_enable_state; + int slope_thres; +@@ -874,12 +878,12 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p) + + mutex_lock(&data->mutex); + ret = regmap_bulk_read(data->regmap, BMG160_REG_XOUT_L, +- data->buffer, AXIS_MAX * 2); ++ data->scan.chans, AXIS_MAX * 2); + mutex_unlock(&data->mutex); + if (ret < 0) + goto err; + +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + pf->timestamp); + err: + iio_trigger_notify_done(indio_dev->trig); +diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c +index eef50e91f17c..e04483254b28 100644 +--- a/drivers/iio/gyro/itg3200_buffer.c ++++ b/drivers/iio/gyro/itg3200_buffer.c +@@ -49,13 +49,20 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct itg3200 *st = iio_priv(indio_dev); +- __be16 buf[ITG3200_SCAN_ELEMENTS + sizeof(s64)/sizeof(u16)]; +- +- int ret = itg3200_read_all_channels(st->i2c, buf); ++ /* ++ * Ensure correct alignment and padding including for the ++ * timestamp that may be inserted. ++ */ ++ struct { ++ __be16 buf[ITG3200_SCAN_ELEMENTS]; ++ s64 ts __aligned(8); ++ } scan; ++ ++ int ret = itg3200_read_all_channels(st->i2c, scan.buf); + if (ret < 0) + goto error_ret; + +- iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp); ++ iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp); + + iio_trigger_notify_done(indio_dev->trig); + +diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c +index 6bb23a49e81e..2f07c4d1398c 100644 +--- a/drivers/iio/health/afe4403.c ++++ b/drivers/iio/health/afe4403.c +@@ -71,6 +71,7 @@ static const struct reg_field afe4403_reg_fields[] = { + * @regulator: Pointer to the regulator for the IC + * @trig: IIO trigger for this device + * @irq: ADC_RDY line interrupt number ++ * @buffer: Used to construct data layout to push into IIO buffer. 
+ */ + struct afe4403_data { + struct device *dev; +@@ -80,6 +81,8 @@ struct afe4403_data { + struct regulator *regulator; + struct iio_trigger *trig; + int irq; ++ /* Ensure suitable alignment for timestamp */ ++ s32 buffer[8] __aligned(8); + }; + + enum afe4403_chan_id { +@@ -318,7 +321,6 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) + struct iio_dev *indio_dev = pf->indio_dev; + struct afe4403_data *afe = iio_priv(indio_dev); + int ret, bit, i = 0; +- s32 buffer[8]; + u8 tx[4] = {AFE440X_CONTROL0, 0x0, 0x0, AFE440X_CONTROL0_READ}; + u8 rx[3]; + +@@ -335,9 +337,9 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) + if (ret) + goto err; + +- buffer[i++] = (rx[0] << 16) | +- (rx[1] << 8) | +- (rx[2]); ++ afe->buffer[i++] = (rx[0] << 16) | ++ (rx[1] << 8) | ++ (rx[2]); + } + + /* Disable reading from the device */ +@@ -346,7 +348,8 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) + if (ret) + goto err; + +- iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); ++ iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer, ++ pf->timestamp); + err: + iio_trigger_notify_done(indio_dev->trig); + +diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c +index 964f5231a831..5e256b11ac87 100644 +--- a/drivers/iio/health/afe4404.c ++++ b/drivers/iio/health/afe4404.c +@@ -91,6 +91,7 @@ static const struct reg_field afe4404_reg_fields[] = { + * @regulator: Pointer to the regulator for the IC + * @trig: IIO trigger for this device + * @irq: ADC_RDY line interrupt number ++ * @buffer: Used to construct a scan to push to the iio buffer. + */ + struct afe4404_data { + struct device *dev; +@@ -99,6 +100,7 @@ struct afe4404_data { + struct regulator *regulator; + struct iio_trigger *trig; + int irq; ++ s32 buffer[10] __aligned(8); + }; + + enum afe4404_chan_id { +@@ -337,17 +339,17 @@ static irqreturn_t afe4404_trigger_handler(int irq, void *private) + struct iio_dev *indio_dev = pf->indio_dev; + struct afe4404_data *afe = iio_priv(indio_dev); + int ret, bit, i = 0; +- s32 buffer[10]; + + for_each_set_bit(bit, indio_dev->active_scan_mask, + indio_dev->masklength) { + ret = regmap_read(afe->regmap, afe4404_channel_values[bit], +- &buffer[i++]); ++ &afe->buffer[i++]); + if (ret) + goto err; + } + +- iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); ++ iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer, ++ pf->timestamp); + err: + iio_trigger_notify_done(indio_dev->trig); + +diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c +index ff96b6d0fdae..77513fd84b99 100644 +--- a/drivers/iio/humidity/am2315.c ++++ b/drivers/iio/humidity/am2315.c +@@ -36,7 +36,11 @@ + struct am2315_data { + struct i2c_client *client; + struct mutex lock; +- s16 buffer[8]; /* 2x16-bit channels + 2x16 padding + 4x16 timestamp */ ++ /* Ensure timestamp is naturally aligned */ ++ struct { ++ s16 chans[2]; ++ s64 timestamp __aligned(8); ++ } scan; + }; + + struct am2315_sensor_data { +@@ -170,20 +174,20 @@ static irqreturn_t am2315_trigger_handler(int irq, void *p) + + mutex_lock(&data->lock); + if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) { +- data->buffer[0] = sensor_data.hum_data; +- data->buffer[1] = sensor_data.temp_data; ++ data->scan.chans[0] = sensor_data.hum_data; ++ data->scan.chans[1] = sensor_data.temp_data; + } else { + i = 0; + for_each_set_bit(bit, indio_dev->active_scan_mask, + indio_dev->masklength) { +- data->buffer[i] = (bit ? 
sensor_data.temp_data : +- sensor_data.hum_data); ++ data->scan.chans[i] = (bit ? sensor_data.temp_data : ++ sensor_data.hum_data); + i++; + } + } + mutex_unlock(&data->lock); + +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + pf->timestamp); + err: + iio_trigger_notify_done(indio_dev->trig); +diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c +index 90c24a23c679..c0eb9dfd1c45 100644 +--- a/drivers/iio/imu/adis16400_buffer.c ++++ b/drivers/iio/imu/adis16400_buffer.c +@@ -37,8 +37,11 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev, + return -ENOMEM; + + adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL); +- if (!adis->buffer) ++ if (!adis->buffer) { ++ kfree(adis->xfer); ++ adis->xfer = NULL; + return -ENOMEM; ++ } + + tx = adis->buffer + burst_length; + tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD); +diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c +index fb7c0dbed51c..67837905f7b4 100644 +--- a/drivers/iio/imu/adis16400_core.c ++++ b/drivers/iio/imu/adis16400_core.c +@@ -288,8 +288,7 @@ static int adis16400_initial_setup(struct iio_dev *indio_dev) + if (ret) + goto err_ret; + +- ret = sscanf(indio_dev->name, "adis%u\n", &device_id); +- if (ret != 1) { ++ if (sscanf(indio_dev->name, "adis%u\n", &device_id) != 1) { + ret = -EINVAL; + goto err_ret; + } +diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c +index 36607d52fee0..625f54d9e382 100644 +--- a/drivers/iio/imu/adis_buffer.c ++++ b/drivers/iio/imu/adis_buffer.c +@@ -39,8 +39,11 @@ int adis_update_scan_mode(struct iio_dev *indio_dev, + return -ENOMEM; + + adis->buffer = kzalloc(indio_dev->scan_bytes * 2, GFP_KERNEL); +- if (!adis->buffer) ++ if (!adis->buffer) { ++ kfree(adis->xfer); ++ adis->xfer = NULL; + return -ENOMEM; ++ } + + rx = adis->buffer; + tx = rx + scan_count; +@@ -80,9 +83,6 @@ static irqreturn_t adis_trigger_handler(int irq, void *p) + struct adis *adis = iio_device_get_drvdata(indio_dev); + int ret; + +- if (!adis->buffer) +- return -ENOMEM; +- + if (adis->data->has_paging) { + mutex_lock(&adis->txrx_lock); + if (adis->current_page != 0) { +diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c +index 5fb571d03153..93c5040c6454 100644 +--- a/drivers/iio/imu/bmi160/bmi160_core.c ++++ b/drivers/iio/imu/bmi160/bmi160_core.c +@@ -110,6 +110,13 @@ enum bmi160_sensor_type { + + struct bmi160_data { + struct regmap *regmap; ++ /* ++ * Ensure natural alignment for timestamp if present. ++ * Max length needed: 2 * 3 channels + 4 bytes padding + 8 byte ts. ++ * If fewer channels are enabled, less space may be needed, as ++ * long as the timestamp is still aligned to 8 bytes. 
++ */ ++ __le16 buf[12] __aligned(8); + }; + + const struct regmap_config bmi160_regmap_config = { +@@ -385,7 +392,6 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct bmi160_data *data = iio_priv(indio_dev); +- s16 buf[16]; /* 3 sens x 3 axis x s16 + 3 x s16 pad + 4 x s16 tstamp */ + int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L; + __le16 sample; + +@@ -395,10 +401,10 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p) + &sample, sizeof(__le16)); + if (ret < 0) + goto done; +- buf[j++] = sample; ++ data->buf[j++] = sample; + } + +- iio_push_to_buffers_with_timestamp(indio_dev, buf, ++ iio_push_to_buffers_with_timestamp(indio_dev, data->buf, + iio_get_time_ns(indio_dev)); + done: + iio_trigger_notify_done(indio_dev->trig); +diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c +index 2f037cd59d53..b0fdce0b9563 100644 +--- a/drivers/iio/industrialio-buffer.c ++++ b/drivers/iio/industrialio-buffer.c +@@ -1335,12 +1335,12 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev, + indio_dev->masklength, + in_ind + 1); + while (in_ind != out_ind) { +- in_ind = find_next_bit(indio_dev->active_scan_mask, +- indio_dev->masklength, +- in_ind + 1); + length = iio_storage_bytes_for_si(indio_dev, in_ind); + /* Make sure we are aligned */ + in_loc = roundup(in_loc, length) + length; ++ in_ind = find_next_bit(indio_dev->active_scan_mask, ++ indio_dev->masklength, ++ in_ind + 1); + } + length = iio_storage_bytes_for_si(indio_dev, in_ind); + out_loc = roundup(out_loc, length); +diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c +index 45ca056f019e..63041dcec7af 100644 +--- a/drivers/iio/light/hid-sensor-prox.c ++++ b/drivers/iio/light/hid-sensor-prox.c +@@ -37,6 +37,9 @@ struct prox_state { + struct hid_sensor_common common_attributes; + struct hid_sensor_hub_attribute_info prox_attr; + u32 human_presence; ++ int scale_pre_decml; ++ int scale_post_decml; ++ int scale_precision; + }; + + /* Channel definitions */ +@@ -105,8 +108,9 @@ static int prox_read_raw(struct iio_dev *indio_dev, + ret_type = IIO_VAL_INT; + break; + case IIO_CHAN_INFO_SCALE: +- *val = prox_state->prox_attr.units; +- ret_type = IIO_VAL_INT; ++ *val = prox_state->scale_pre_decml; ++ *val2 = prox_state->scale_post_decml; ++ ret_type = prox_state->scale_precision; + break; + case IIO_CHAN_INFO_OFFSET: + *val = hid_sensor_convert_exponent( +@@ -240,6 +244,12 @@ static int prox_parse_report(struct platform_device *pdev, + st->common_attributes.sensitivity.index, + st->common_attributes.sensitivity.report_id); + } ++ ++ st->scale_precision = hid_sensor_format_scale( ++ hsdev->usage, ++ &st->prox_attr, ++ &st->scale_pre_decml, &st->scale_post_decml); ++ + return ret; + } + +diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c +index 1d2c0c8a1d4f..207b856cef8c 100644 +--- a/drivers/iio/light/isl29125.c ++++ b/drivers/iio/light/isl29125.c +@@ -54,7 +54,11 @@ + struct isl29125_data { + struct i2c_client *client; + u8 conf1; +- u16 buffer[8]; /* 3x 16-bit, padding, 8 bytes timestamp */ ++ /* Ensure timestamp is naturally aligned */ ++ struct { ++ u16 chans[3]; ++ s64 timestamp __aligned(8); ++ } scan; + }; + + #define ISL29125_CHANNEL(_color, _si) { \ +@@ -187,10 +191,10 @@ static irqreturn_t isl29125_trigger_handler(int irq, void *p) + if (ret < 0) + goto done; + +- data->buffer[j++] = ret; ++ data->scan.chans[j++] = ret; + } + +- 
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + iio_get_time_ns(indio_dev)); + + done: +diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c +index c298fd86ed86..414fe857fa9d 100644 +--- a/drivers/iio/light/ltr501.c ++++ b/drivers/iio/light/ltr501.c +@@ -35,9 +35,12 @@ + #define LTR501_PART_ID 0x86 + #define LTR501_MANUFAC_ID 0x87 + #define LTR501_ALS_DATA1 0x88 /* 16-bit, little endian */ ++#define LTR501_ALS_DATA1_UPPER 0x89 /* upper 8 bits of LTR501_ALS_DATA1 */ + #define LTR501_ALS_DATA0 0x8a /* 16-bit, little endian */ ++#define LTR501_ALS_DATA0_UPPER 0x8b /* upper 8 bits of LTR501_ALS_DATA0 */ + #define LTR501_ALS_PS_STATUS 0x8c + #define LTR501_PS_DATA 0x8d /* 16-bit, little endian */ ++#define LTR501_PS_DATA_UPPER 0x8e /* upper 8 bits of LTR501_PS_DATA */ + #define LTR501_INTR 0x8f /* output mode, polarity, mode */ + #define LTR501_PS_THRESH_UP 0x90 /* 11 bit, ps upper threshold */ + #define LTR501_PS_THRESH_LOW 0x92 /* 11 bit, ps lower threshold */ +@@ -408,18 +411,19 @@ static int ltr501_read_als(struct ltr501_data *data, __le16 buf[2]) + + static int ltr501_read_ps(struct ltr501_data *data) + { +- int ret, status; ++ __le16 status; ++ int ret; + + ret = ltr501_drdy(data, LTR501_STATUS_PS_RDY); + if (ret < 0) + return ret; + + ret = regmap_bulk_read(data->regmap, LTR501_PS_DATA, +- &status, 2); ++ &status, sizeof(status)); + if (ret < 0) + return ret; + +- return status; ++ return le16_to_cpu(status); + } + + static int ltr501_read_intr_prst(struct ltr501_data *data, +@@ -1180,7 +1184,7 @@ static struct ltr501_chip_info ltr501_chip_info_tbl[] = { + .als_gain_tbl_size = ARRAY_SIZE(ltr559_als_gain_tbl), + .ps_gain = ltr559_ps_gain_tbl, + .ps_gain_tbl_size = ARRAY_SIZE(ltr559_ps_gain_tbl), +- .als_mode_active = BIT(1), ++ .als_mode_active = BIT(0), + .als_gain_mask = BIT(2) | BIT(3) | BIT(4), + .als_gain_shift = 2, + .info = <r501_info, +@@ -1218,13 +1222,16 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct ltr501_data *data = iio_priv(indio_dev); +- u16 buf[8]; ++ struct { ++ u16 channels[3]; ++ s64 ts __aligned(8); ++ } scan; + __le16 als_buf[2]; + u8 mask = 0; + int j = 0; + int ret, psdata; + +- memset(buf, 0, sizeof(buf)); ++ memset(&scan, 0, sizeof(scan)); + + /* figure out which data needs to be ready */ + if (test_bit(0, indio_dev->active_scan_mask) || +@@ -1243,9 +1250,9 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) + if (ret < 0) + return ret; + if (test_bit(0, indio_dev->active_scan_mask)) +- buf[j++] = le16_to_cpu(als_buf[1]); ++ scan.channels[j++] = le16_to_cpu(als_buf[1]); + if (test_bit(1, indio_dev->active_scan_mask)) +- buf[j++] = le16_to_cpu(als_buf[0]); ++ scan.channels[j++] = le16_to_cpu(als_buf[0]); + } + + if (mask & LTR501_STATUS_PS_RDY) { +@@ -1253,10 +1260,10 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) + &psdata, 2); + if (ret < 0) + goto done; +- buf[j++] = psdata & LTR501_PS_DATA_MASK; ++ scan.channels[j++] = psdata & LTR501_PS_DATA_MASK; + } + +- iio_push_to_buffers_with_timestamp(indio_dev, buf, ++ iio_push_to_buffers_with_timestamp(indio_dev, &scan, + iio_get_time_ns(indio_dev)); + + done: +@@ -1326,9 +1333,12 @@ static bool ltr501_is_volatile_reg(struct device *dev, unsigned int reg) + { + switch (reg) { + case LTR501_ALS_DATA1: ++ case LTR501_ALS_DATA1_UPPER: + case LTR501_ALS_DATA0: ++ case LTR501_ALS_DATA0_UPPER: + case 
LTR501_ALS_PS_STATUS: + case LTR501_PS_DATA: ++ case LTR501_PS_DATA_UPPER: + return true; + default: + return false; +diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c +index a8ffa432bf0d..87662c88fbeb 100644 +--- a/drivers/iio/light/max44000.c ++++ b/drivers/iio/light/max44000.c +@@ -78,6 +78,11 @@ + struct max44000_data { + struct mutex lock; + struct regmap *regmap; ++ /* Ensure naturally aligned timestamp */ ++ struct { ++ u16 channels[2]; ++ s64 ts __aligned(8); ++ } scan; + }; + + /* Default scale is set to the minimum of 0.03125 or 1 / (1 << 5) lux */ +@@ -491,7 +496,6 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct max44000_data *data = iio_priv(indio_dev); +- u16 buf[8]; /* 2x u16 + padding + 8 bytes timestamp */ + int index = 0; + unsigned int regval; + int ret; +@@ -501,17 +505,17 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p) + ret = max44000_read_alsval(data); + if (ret < 0) + goto out_unlock; +- buf[index++] = ret; ++ data->scan.channels[index++] = ret; + } + if (test_bit(MAX44000_SCAN_INDEX_PRX, indio_dev->active_scan_mask)) { + ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, ®val); + if (ret < 0) + goto out_unlock; +- buf[index] = regval; ++ data->scan.channels[index] = regval; + } + mutex_unlock(&data->lock); + +- iio_push_to_buffers_with_timestamp(indio_dev, buf, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + iio_get_time_ns(indio_dev)); + iio_trigger_notify_done(indio_dev->trig); + return IRQ_HANDLED; +diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c +index 096034c126a4..c292d9816118 100644 +--- a/drivers/iio/light/si1145.c ++++ b/drivers/iio/light/si1145.c +@@ -172,6 +172,7 @@ struct si1145_part_info { + * @part_info: Part information + * @trig: Pointer to iio trigger + * @meas_rate: Value of MEAS_RATE register. Only set in HW in auto mode ++ * @buffer: Used to pack data read from sensor. + */ + struct si1145_data { + struct i2c_client *client; +@@ -183,6 +184,14 @@ struct si1145_data { + bool autonomous; + struct iio_trigger *trig; + int meas_rate; ++ /* ++ * Ensure timestamp will be naturally aligned if present. 
++ * Maximum buffer size (may be only partly used if not all ++ * channels are enabled): ++ * 6*2 bytes channels data + 4 bytes alignment + ++ * 8 bytes timestamp ++ */ ++ u8 buffer[24] __aligned(8); + }; + + /** +@@ -444,12 +453,6 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private) + struct iio_poll_func *pf = private; + struct iio_dev *indio_dev = pf->indio_dev; + struct si1145_data *data = iio_priv(indio_dev); +- /* +- * Maximum buffer size: +- * 6*2 bytes channels data + 4 bytes alignment + +- * 8 bytes timestamp +- */ +- u8 buffer[24]; + int i, j = 0; + int ret; + u8 irq_status = 0; +@@ -482,7 +485,7 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private) + + ret = i2c_smbus_read_i2c_block_data_or_emulated( + data->client, indio_dev->channels[i].address, +- sizeof(u16) * run, &buffer[j]); ++ sizeof(u16) * run, &data->buffer[j]); + if (ret < 0) + goto done; + j += run * sizeof(u16); +@@ -497,7 +500,7 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private) + goto done; + } + +- iio_push_to_buffers_with_timestamp(indio_dev, buffer, ++ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, + iio_get_time_ns(indio_dev)); + + done: +diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c +index a795afb7667b..b51cd43ef824 100644 +--- a/drivers/iio/light/tcs3414.c ++++ b/drivers/iio/light/tcs3414.c +@@ -56,7 +56,11 @@ struct tcs3414_data { + u8 control; + u8 gain; + u8 timing; +- u16 buffer[8]; /* 4x 16-bit + 8 bytes timestamp */ ++ /* Ensure timestamp is naturally aligned */ ++ struct { ++ u16 chans[4]; ++ s64 timestamp __aligned(8); ++ } scan; + }; + + #define TCS3414_CHANNEL(_color, _si, _addr) { \ +@@ -212,10 +216,10 @@ static irqreturn_t tcs3414_trigger_handler(int irq, void *p) + if (ret < 0) + goto done; + +- data->buffer[j++] = ret; ++ data->scan.chans[j++] = ret; + } + +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + iio_get_time_ns(indio_dev)); + + done: +diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c +index 752237f0889e..374b1fdd1e42 100644 +--- a/drivers/iio/magnetometer/ak8974.c ++++ b/drivers/iio/magnetometer/ak8974.c +@@ -153,6 +153,11 @@ struct ak8974 { + bool drdy_irq; + struct completion drdy_complete; + bool drdy_active_low; ++ /* Ensure timestamp is naturally aligned */ ++ struct { ++ __le16 channels[3]; ++ s64 ts __aligned(8); ++ } scan; + }; + + static const char ak8974_reg_avdd[] = "avdd"; +@@ -494,7 +499,6 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev) + { + struct ak8974 *ak8974 = iio_priv(indio_dev); + int ret; +- s16 hw_values[8]; /* Three axes + 64bit padding */ + + pm_runtime_get_sync(&ak8974->i2c->dev); + mutex_lock(&ak8974->lock); +@@ -504,13 +508,13 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev) + dev_err(&ak8974->i2c->dev, "error triggering measure\n"); + goto out_unlock; + } +- ret = ak8974_getresult(ak8974, hw_values); ++ ret = ak8974_getresult(ak8974, ak8974->scan.channels); + if (ret) { + dev_err(&ak8974->i2c->dev, "error getting measures\n"); + goto out_unlock; + } + +- iio_push_to_buffers_with_timestamp(indio_dev, hw_values, ++ iio_push_to_buffers_with_timestamp(indio_dev, &ak8974->scan, + iio_get_time_ns(indio_dev)); + + out_unlock: +@@ -671,19 +675,21 @@ static int ak8974_probe(struct i2c_client *i2c, + ak8974->map = devm_regmap_init_i2c(i2c, &ak8974_regmap_config); + if (IS_ERR(ak8974->map)) { + dev_err(&i2c->dev, "failed to allocate register 
map\n"); ++ pm_runtime_put_noidle(&i2c->dev); ++ pm_runtime_disable(&i2c->dev); + return PTR_ERR(ak8974->map); + } + + ret = ak8974_set_power(ak8974, AK8974_PWR_ON); + if (ret) { + dev_err(&i2c->dev, "could not power on\n"); +- goto power_off; ++ goto disable_pm; + } + + ret = ak8974_detect(ak8974); + if (ret) { + dev_err(&i2c->dev, "neither AK8974 nor AMI305 found\n"); +- goto power_off; ++ goto disable_pm; + } + + ret = ak8974_selftest(ak8974); +@@ -693,14 +699,9 @@ static int ak8974_probe(struct i2c_client *i2c, + ret = ak8974_reset(ak8974); + if (ret) { + dev_err(&i2c->dev, "AK8974 reset failed\n"); +- goto power_off; ++ goto disable_pm; + } + +- pm_runtime_set_autosuspend_delay(&i2c->dev, +- AK8974_AUTOSUSPEND_DELAY); +- pm_runtime_use_autosuspend(&i2c->dev); +- pm_runtime_put(&i2c->dev); +- + indio_dev->dev.parent = &i2c->dev; + indio_dev->channels = ak8974_channels; + indio_dev->num_channels = ARRAY_SIZE(ak8974_channels); +@@ -753,6 +754,11 @@ static int ak8974_probe(struct i2c_client *i2c, + goto cleanup_buffer; + } + ++ pm_runtime_set_autosuspend_delay(&i2c->dev, ++ AK8974_AUTOSUSPEND_DELAY); ++ pm_runtime_use_autosuspend(&i2c->dev); ++ pm_runtime_put(&i2c->dev); ++ + return 0; + + cleanup_buffer: +@@ -761,7 +767,6 @@ static int ak8974_probe(struct i2c_client *i2c, + pm_runtime_put_noidle(&i2c->dev); + pm_runtime_disable(&i2c->dev); + ak8974_set_power(ak8974, AK8974_PWR_OFF); +-power_off: + regulator_bulk_disable(ARRAY_SIZE(ak8974->regs), ak8974->regs); + + return ret; +diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c +index af8606cc7812..2a4e23e20ebc 100644 +--- a/drivers/iio/magnetometer/ak8975.c ++++ b/drivers/iio/magnetometer/ak8975.c +@@ -381,6 +381,12 @@ struct ak8975_data { + struct iio_mount_matrix orientation; + struct regulator *vdd; + struct regulator *vid; ++ ++ /* Ensure natural alignment of timestamp */ ++ struct { ++ s16 channels[3]; ++ s64 ts __aligned(8); ++ } scan; + }; + + /* Enable attached power regulator if any. */ +@@ -690,6 +696,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val) + struct ak8975_data *data = iio_priv(indio_dev); + const struct i2c_client *client = data->client; + const struct ak_def *def = data->def; ++ __le16 rval; + u16 buff; + int ret; + +@@ -703,7 +710,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val) + + ret = i2c_smbus_read_i2c_block_data_or_emulated( + client, def->data_regs[index], +- sizeof(buff), (u8*)&buff); ++ sizeof(rval), (u8*)&rval); + if (ret < 0) + goto exit; + +@@ -713,7 +720,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val) + pm_runtime_put_autosuspend(&data->client->dev); + + /* Swap bytes and convert to valid range. */ +- buff = le16_to_cpu(buff); ++ buff = le16_to_cpu(rval); + *val = clamp_t(s16, buff, -def->range, def->range); + return IIO_VAL_INT; + +@@ -812,7 +819,7 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev) + const struct i2c_client *client = data->client; + const struct ak_def *def = data->def; + int ret; +- s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */ ++ __le16 fval[3]; + + mutex_lock(&data->lock); + +@@ -826,20 +833,21 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev) + */ + ret = i2c_smbus_read_i2c_block_data_or_emulated(client, + def->data_regs[0], +- 3 * sizeof(buff[0]), +- (u8 *)buff); ++ 3 * sizeof(fval[0]), ++ (u8 *)fval); + if (ret < 0) + goto unlock; + + mutex_unlock(&data->lock); + + /* Clamp to valid range. 
*/ +- buff[0] = clamp_t(s16, le16_to_cpu(buff[0]), -def->range, def->range); +- buff[1] = clamp_t(s16, le16_to_cpu(buff[1]), -def->range, def->range); +- buff[2] = clamp_t(s16, le16_to_cpu(buff[2]), -def->range, def->range); ++ data->scan.channels[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range); ++ data->scan.channels[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range); ++ data->scan.channels[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range); + +- iio_push_to_buffers_with_timestamp(indio_dev, buff, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + iio_get_time_ns(indio_dev)); ++ + return; + + unlock: +diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c +index b4f643fb3b1e..d523bc51ff26 100644 +--- a/drivers/iio/magnetometer/mag3110.c ++++ b/drivers/iio/magnetometer/mag3110.c +@@ -52,6 +52,12 @@ struct mag3110_data { + struct i2c_client *client; + struct mutex lock; + u8 ctrl_reg1; ++ /* Ensure natural alignment of timestamp */ ++ struct { ++ __be16 channels[3]; ++ u8 temperature; ++ s64 ts __aligned(8); ++ } scan; + }; + + static int mag3110_request(struct mag3110_data *data) +@@ -262,10 +268,9 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct mag3110_data *data = iio_priv(indio_dev); +- u8 buffer[16]; /* 3 16-bit channels + 1 byte temp + padding + ts */ + int ret; + +- ret = mag3110_read(data, (__be16 *) buffer); ++ ret = mag3110_read(data, data->scan.channels); + if (ret < 0) + goto done; + +@@ -274,10 +279,10 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p) + MAG3110_DIE_TEMP); + if (ret < 0) + goto done; +- buffer[6] = ret; ++ data->scan.temperature = ret; + } + +- iio_push_to_buffers_with_timestamp(indio_dev, buffer, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + iio_get_time_ns(indio_dev)); + + done: +diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c +index c9263acc190b..85b90b5939db 100644 +--- a/drivers/iio/pressure/bmp280-core.c ++++ b/drivers/iio/pressure/bmp280-core.c +@@ -182,6 +182,8 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data, + + (s32)2097152) * H2 + 8192) >> 14); + var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)H1) >> 4; + ++ var = clamp_val(var, 0, 419430400); ++ + return var >> 12; + }; + +@@ -630,7 +632,7 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas) + unsigned int ctrl; + + if (data->use_eoc) +- init_completion(&data->done); ++ reinit_completion(&data->done); + + ret = regmap_write(data->regmap, BMP280_REG_CTRL_MEAS, ctrl_meas); + if (ret) +@@ -886,6 +888,9 @@ static int bmp085_fetch_eoc_irq(struct device *dev, + "trying to enforce it\n"); + irq_trig = IRQF_TRIGGER_RISING; + } ++ ++ init_completion(&data->done); ++ + ret = devm_request_threaded_irq(dev, + irq, + bmp085_eoc_irq, +diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c +index eb87948fc559..d6c919cfa416 100644 +--- a/drivers/iio/pressure/mpl3115.c ++++ b/drivers/iio/pressure/mpl3115.c +@@ -139,7 +139,14 @@ static irqreturn_t mpl3115_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct mpl3115_data *data = iio_priv(indio_dev); +- u8 buffer[16]; /* 32-bit channel + 16-bit channel + padding + ts */ ++ /* ++ * 32-bit channel + 16-bit channel + padding + ts ++ * Note that it is possible for only one of the first 2 ++ * 
channels to be enabled. If that happens, the first element ++ * of the buffer may be either 16 or 32-bits. As such we cannot ++ * use a simple structure definition to express this data layout. ++ */ ++ u8 buffer[16] __aligned(8); + int ret, pos = 0; + + mutex_lock(&data->lock); +diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c +index 8cc7156b5ace..17844e20de7d 100644 +--- a/drivers/iio/pressure/ms5611_core.c ++++ b/drivers/iio/pressure/ms5611_core.c +@@ -215,16 +215,21 @@ static irqreturn_t ms5611_trigger_handler(int irq, void *p) + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct ms5611_state *st = iio_priv(indio_dev); +- s32 buf[4]; /* s32 (pressure) + s32 (temp) + 2 * s32 (timestamp) */ ++ /* Ensure buffer elements are naturally aligned */ ++ struct { ++ s32 channels[2]; ++ s64 ts __aligned(8); ++ } scan; + int ret; + + mutex_lock(&st->lock); +- ret = ms5611_read_temp_and_pressure(indio_dev, &buf[1], &buf[0]); ++ ret = ms5611_read_temp_and_pressure(indio_dev, &scan.channels[1], ++ &scan.channels[0]); + mutex_unlock(&st->lock); + if (ret < 0) + goto err; + +- iio_push_to_buffers_with_timestamp(indio_dev, buf, ++ iio_push_to_buffers_with_timestamp(indio_dev, &scan, + iio_get_time_ns(indio_dev)); + + err: +diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c +index cc002b958f7e..401e230ab72e 100644 +--- a/drivers/iio/pressure/zpa2326.c ++++ b/drivers/iio/pressure/zpa2326.c +@@ -676,8 +676,10 @@ static int zpa2326_resume(const struct iio_dev *indio_dev) + int err; + + err = pm_runtime_get_sync(indio_dev->dev.parent); +- if (err < 0) ++ if (err < 0) { ++ pm_runtime_put(indio_dev->dev.parent); + return err; ++ } + + if (err > 0) { + /* +diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c +index 3141c3c161bb..ed7397f0b4c8 100644 +--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c ++++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c +@@ -51,7 +51,11 @@ struct lidar_data { + int (*xfer)(struct lidar_data *data, u8 reg, u8 *val, int len); + int i2c_enabled; + +- u16 buffer[8]; /* 2 byte distance + 8 byte timestamp */ ++ /* Ensure timestamp is naturally aligned */ ++ struct { ++ u16 chan; ++ s64 timestamp __aligned(8); ++ } scan; + }; + + static const struct iio_chan_spec lidar_channels[] = { +@@ -166,6 +170,7 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg) + ret = lidar_write_control(data, LIDAR_REG_CONTROL_ACQUIRE); + if (ret < 0) { + dev_err(&client->dev, "cannot send start measurement command"); ++ pm_runtime_put_noidle(&client->dev); + return ret; + } + +@@ -235,9 +240,9 @@ static irqreturn_t lidar_trigger_handler(int irq, void *private) + struct lidar_data *data = iio_priv(indio_dev); + int ret; + +- ret = lidar_get_measurement(data, data->buffer); ++ ret = lidar_get_measurement(data, &data->scan.chan); + if (!ret) { +- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + iio_get_time_ns(indio_dev)); + } else if (ret != -EINVAL) { + dev_err(&data->client->dev, "cannot read LIDAR measurement"); +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c +index 304429fd04dd..97168b856606 100644 +--- a/drivers/infiniband/core/cm.c ++++ b/drivers/infiniband/core/cm.c +@@ -1252,6 +1252,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, + id.local_id); + if (IS_ERR(cm_id_priv->timewait_info)) { + ret = 
PTR_ERR(cm_id_priv->timewait_info); ++ cm_id_priv->timewait_info = NULL; + goto out; + } + +@@ -1683,6 +1684,7 @@ static int cm_req_handler(struct cm_work *work) + id.local_id); + if (IS_ERR(cm_id_priv->timewait_info)) { + ret = PTR_ERR(cm_id_priv->timewait_info); ++ cm_id_priv->timewait_info = NULL; + goto destroy; + } + cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id; +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index 0a6cc78ebcf7..149d210c68ab 100644 +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -2370,7 +2370,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) + work->new_state = RDMA_CM_ROUTE_RESOLVED; + work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; + +- route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); ++ if (!route->path_rec) ++ route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); + if (!route->path_rec) { + ret = -ENOMEM; + goto err1; +diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c +index 41573df1d9fc..692fc42255c9 100644 +--- a/drivers/infiniband/core/cma_configfs.c ++++ b/drivers/infiniband/core/cma_configfs.c +@@ -277,8 +277,21 @@ static struct config_group *make_cma_dev(struct config_group *group, + return ERR_PTR(err); + } + ++static void drop_cma_dev(struct config_group *cgroup, struct config_item *item) ++{ ++ struct config_group *group = ++ container_of(item, struct config_group, cg_item); ++ struct cma_dev_group *cma_dev_group = ++ container_of(group, struct cma_dev_group, device_group); ++ ++ configfs_remove_default_groups(&cma_dev_group->ports_group); ++ configfs_remove_default_groups(&cma_dev_group->device_group); ++ config_item_put(item); ++} ++ + static struct configfs_group_operations cma_subsys_group_ops = { + .make_group = make_cma_dev, ++ .drop_item = drop_cma_dev, + }; + + static struct config_item_type cma_subsys_type = { +diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c +index a1f059a9c751..f03e10517acc 100644 +--- a/drivers/infiniband/core/mad.c ++++ b/drivers/infiniband/core/mad.c +@@ -2885,6 +2885,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, + DMA_FROM_DEVICE); + if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, + sg_list.addr))) { ++ kfree(mad_priv); + ret = -ENOMEM; + break; + } +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c +index a4f4cd493265..bb0d728f4b76 100644 +--- a/drivers/infiniband/core/ucma.c ++++ b/drivers/infiniband/core/ucma.c +@@ -1296,13 +1296,13 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) + return -EFAULT; + ++ if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE)) ++ return -EINVAL; ++ + ctx = ucma_get_ctx(file, cmd.id); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + +- if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE)) +- return -EINVAL; +- + optval = memdup_user((void __user *) (unsigned long) cmd.optval, + cmd.optlen); + if (IS_ERR(optval)) { +diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c +index cf93a96b6324..bc6458d76033 100644 +--- a/drivers/infiniband/core/user_mad.c ++++ b/drivers/infiniband/core/user_mad.c +@@ -343,6 +343,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf, + + mutex_lock(&file->mutex); + ++ if (file->agents_dead) { ++ mutex_unlock(&file->mutex); ++ return -EIO; ++ } ++ + while (list_empty(&file->recv_list)) 
{ + mutex_unlock(&file->mutex); + +@@ -485,7 +490,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, + + agent = __get_agent(file, packet->mad.hdr.id); + if (!agent) { +- ret = -EINVAL; ++ ret = -EIO; + goto err_up; + } + +diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c +index a04a53acb24f..ed4397c3af1a 100644 +--- a/drivers/infiniband/hw/cxgb4/cm.c ++++ b/drivers/infiniband/hw/cxgb4/cm.c +@@ -3245,7 +3245,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) + if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) { + err = pick_local_ipaddrs(dev, cm_id); + if (err) +- goto fail2; ++ goto fail3; + } + + /* find a route */ +@@ -3267,7 +3267,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) + if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) { + err = pick_local_ip6addrs(dev, cm_id); + if (err) +- goto fail2; ++ goto fail3; + } + + /* find a route */ +@@ -3472,13 +3472,14 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id) + ep->com.local_addr.ss_family == AF_INET) { + err = cxgb4_remove_server_filter( + ep->com.dev->rdev.lldi.ports[0], ep->stid, +- ep->com.dev->rdev.lldi.rxq_ids[0], 0); ++ ep->com.dev->rdev.lldi.rxq_ids[0], false); + } else { + struct sockaddr_in6 *sin6; + c4iw_init_wr_wait(&ep->com.wr_wait); + err = cxgb4_remove_server( + ep->com.dev->rdev.lldi.ports[0], ep->stid, +- ep->com.dev->rdev.lldi.rxq_ids[0], 0); ++ ep->com.dev->rdev.lldi.rxq_ids[0], ++ ep->com.local_addr.ss_family == AF_INET6); + if (err) + goto done; + err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, +diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c +index a856371bbe58..7b38b9a280ba 100644 +--- a/drivers/infiniband/hw/cxgb4/cq.c ++++ b/drivers/infiniband/hw/cxgb4/cq.c +@@ -893,6 +893,9 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, + + rhp = to_c4iw_dev(ibdev); + ++ if (entries < 1 || entries > ibdev->attrs.max_cqe) ++ return ERR_PTR(-EINVAL); ++ + if (vector >= rhp->rdev.lldi.nciq) + return ERR_PTR(-EINVAL); + +diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c +index bb45eb22ba1f..87bc7b0db892 100644 +--- a/drivers/infiniband/hw/cxgb4/qp.c ++++ b/drivers/infiniband/hw/cxgb4/qp.c +@@ -277,6 +277,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, + if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) { + pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n", + pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid); ++ ret = -EINVAL; + goto free_dma; + } + +@@ -1976,7 +1977,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; + init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; + init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; +- init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges; ++ init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges; + init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE; + init_attr->sq_sig_type = qhp->sq_sig_all ? 
IB_SIGNAL_ALL_WR : 0; + return 0; +diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c +index 76e63c88a87a..e9313e6f4b0e 100644 +--- a/drivers/infiniband/hw/hfi1/sdma.c ++++ b/drivers/infiniband/hw/hfi1/sdma.c +@@ -3028,6 +3028,7 @@ static void __sdma_process_event(struct sdma_engine *sde, + static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) + { + int i; ++ struct sdma_desc *descp; + + /* Handle last descriptor */ + if (unlikely((tx->num_desc == (MAX_DESC - 1)))) { +@@ -3048,12 +3049,10 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) + if (unlikely(tx->num_desc == MAX_DESC)) + goto enomem; + +- tx->descp = kmalloc_array( +- MAX_DESC, +- sizeof(struct sdma_desc), +- GFP_ATOMIC); +- if (!tx->descp) ++ descp = kmalloc_array(MAX_DESC, sizeof(struct sdma_desc), GFP_ATOMIC); ++ if (!descp) + goto enomem; ++ tx->descp = descp; + + /* reserve last descriptor for coalescing */ + tx->desc_limit = MAX_DESC - 1; +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +index 20ec34761b39..29cd059c01f1 100644 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +@@ -231,7 +231,6 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + ps_opcode = HNS_ROCE_WQE_OPCODE_SEND; + break; + case IB_WR_LOCAL_INV: +- break; + case IB_WR_ATOMIC_CMP_AND_SWP: + case IB_WR_ATOMIC_FETCH_AND_ADD: + case IB_WR_LSO: +diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c +index 282a726351c8..ce1a4817ab92 100644 +--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c ++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c +@@ -2036,9 +2036,9 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, + dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr); + if (!dst || dst->error) { + if (dst) { +- dst_release(dst); + i40iw_pr_err("ip6_route_output returned dst->error = %d\n", + dst->error); ++ dst_release(dst); + } + return rc; + } +diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c +index ac2f3cd9478c..aafe9989b33d 100644 +--- a/drivers/infiniband/hw/i40iw/i40iw_main.c ++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c +@@ -54,10 +54,6 @@ + #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ + __stringify(DRV_VERSION_MINOR) "." 
__stringify(DRV_VERSION_BUILD) + +-static int push_mode; +-module_param(push_mode, int, 0644); +-MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)"); +- + static int debug; + module_param(debug, int, 0644); + MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all"); +@@ -1524,7 +1520,6 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl, + if (status) + goto exit; + iwdev->obj_next = iwdev->obj_mem; +- iwdev->push_mode = push_mode; + + init_waitqueue_head(&iwdev->vchnl_waitq); + init_waitqueue_head(&dev->vf_reqs); +diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c +index 85993dc44f6e..3fbf7a3b00de 100644 +--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c ++++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c +@@ -399,12 +399,9 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev, + i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n", + pble_rsrc->next_fpm_addr, chunk->size, chunk->size); + pble_rsrc->unallocated_pble -= (chunk->size >> 3); +- list_add(&chunk->list, &pble_rsrc->pinfo.clist); + sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ? + sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa; +- if (sd_entry->valid) +- return 0; +- if (dev->is_pf) { ++ if (dev->is_pf && !sd_entry->valid) { + ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id, + sd_reg_val, idx->sd_idx, + sd_entry->entry_type, true); +@@ -415,6 +412,7 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev, + } + + sd_entry->valid = true; ++ list_add(&chunk->list, &pble_rsrc->pinfo.clist); + return 0; + error: + kfree(chunk); +diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c +index c3d2400e36b9..ece83aff2a2d 100644 +--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c ++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c +@@ -208,38 +208,16 @@ static int i40iw_dealloc_ucontext(struct ib_ucontext *context) + */ + static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) + { +- struct i40iw_ucontext *ucontext; +- u64 db_addr_offset; +- u64 push_offset; +- +- ucontext = to_ucontext(context); +- if (ucontext->iwdev->sc_dev.is_pf) { +- db_addr_offset = I40IW_DB_ADDR_OFFSET; +- push_offset = I40IW_PUSH_OFFSET; +- if (vma->vm_pgoff) +- vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1; +- } else { +- db_addr_offset = I40IW_VF_DB_ADDR_OFFSET; +- push_offset = I40IW_VF_PUSH_OFFSET; +- if (vma->vm_pgoff) +- vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1; +- } ++ struct i40iw_ucontext *ucontext = to_ucontext(context); ++ u64 dbaddr; + +- vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT; ++ if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE) ++ return -EINVAL; + +- if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) { +- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); +- vma->vm_private_data = ucontext; +- } else { +- if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2) +- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); +- else +- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); +- } ++ dbaddr = I40IW_DB_ADDR_OFFSET + pci_resource_start(ucontext->iwdev->ldev->pcidev, 0); + +- if (io_remap_pfn_range(vma, vma->vm_start, +- vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT), +- PAGE_SIZE, vma->vm_page_prot)) ++ if (io_remap_pfn_range(vma, vma->vm_start, dbaddr >> PAGE_SHIFT, PAGE_SIZE, ++ 
pgprot_noncached(vma->vm_page_prot))) + return -EAGAIN; + + return 0; +diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c +index 5dc920fe1326..c8c586c78d07 100644 +--- a/drivers/infiniband/hw/mlx4/cm.c ++++ b/drivers/infiniband/hw/mlx4/cm.c +@@ -309,6 +309,9 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id) + if (!sriov->is_going_down) { + id->scheduled_delete = 1; + schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT); ++ } else if (id->scheduled_delete) { ++ /* Adjust timeout if already scheduled */ ++ mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT); + } + spin_unlock_irqrestore(&sriov->going_down_lock, flags); + spin_unlock(&sriov->id_map_lock); +diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c +index f32ffd74ec47..bf4e0d7a3ec2 100644 +--- a/drivers/infiniband/hw/mlx4/mad.c ++++ b/drivers/infiniband/hw/mlx4/mad.c +@@ -1276,6 +1276,18 @@ static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg) + spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); + } + ++static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg) ++{ ++ unsigned long flags; ++ struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context; ++ struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); ++ ++ spin_lock_irqsave(&dev->sriov.going_down_lock, flags); ++ if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE) ++ queue_work(ctx->wi_wq, &ctx->work); ++ spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); ++} ++ + static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx, + struct mlx4_ib_demux_pv_qp *tun_qp, + int index) +@@ -1978,7 +1990,8 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port, + cq_size *= 2; + + cq_attr.cqe = cq_size; +- ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler, ++ ctx->cq = ib_create_cq(ctx->ib_dev, ++ create_tun ? 
mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler, + NULL, ctx, &cq_attr); + if (IS_ERR(ctx->cq)) { + ret = PTR_ERR(ctx->cq); +@@ -2015,6 +2028,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port, + INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker); + + ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq; ++ ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq; + + ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); + if (ret) { +@@ -2158,7 +2172,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, + goto err_mcg; + } + +- snprintf(name, sizeof name, "mlx4_ibt%d", port); ++ snprintf(name, sizeof(name), "mlx4_ibt%d", port); + ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); + if (!ctx->wq) { + pr_err("Failed to create tunnelling WQ for port %d\n", port); +@@ -2166,7 +2180,15 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, + goto err_wq; + } + +- snprintf(name, sizeof name, "mlx4_ibud%d", port); ++ snprintf(name, sizeof(name), "mlx4_ibwi%d", port); ++ ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); ++ if (!ctx->wi_wq) { ++ pr_err("Failed to create wire WQ for port %d\n", port); ++ ret = -ENOMEM; ++ goto err_wiwq; ++ } ++ ++ snprintf(name, sizeof(name), "mlx4_ibud%d", port); + ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); + if (!ctx->ud_wq) { + pr_err("Failed to create up/down WQ for port %d\n", port); +@@ -2177,6 +2199,10 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, + return 0; + + err_udwq: ++ destroy_workqueue(ctx->wi_wq); ++ ctx->wi_wq = NULL; ++ ++err_wiwq: + destroy_workqueue(ctx->wq); + ctx->wq = NULL; + +@@ -2224,12 +2250,14 @@ static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx) + ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING; + } + flush_workqueue(ctx->wq); ++ flush_workqueue(ctx->wi_wq); + for (i = 0; i < dev->dev->caps.sqp_demux; i++) { + destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0); + free_pv_object(dev, i, ctx->port); + } + kfree(ctx->tun); + destroy_workqueue(ctx->ud_wq); ++ destroy_workqueue(ctx->wi_wq); + destroy_workqueue(ctx->wq); + } + } +diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h +index 35141f451e5c..91c89ef6ce04 100644 +--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h ++++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h +@@ -439,6 +439,7 @@ struct mlx4_ib_demux_pv_ctx { + struct ib_pd *pd; + struct work_struct work; + struct workqueue_struct *wq; ++ struct workqueue_struct *wi_wq; + struct mlx4_ib_demux_pv_qp qp[2]; + }; + +@@ -446,6 +447,7 @@ struct mlx4_ib_demux_ctx { + struct ib_device *ib_dev; + int port; + struct workqueue_struct *wq; ++ struct workqueue_struct *wi_wq; + struct workqueue_struct *ud_wq; + spinlock_t ud_lock; + atomic64_t subnet_prefix; +diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c +index a6531ffe29a6..098653b8157e 100644 +--- a/drivers/infiniband/hw/mthca/mthca_cq.c ++++ b/drivers/infiniband/hw/mthca/mthca_cq.c +@@ -609,7 +609,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev, + entry->byte_len = MTHCA_ATOMIC_BYTE_LEN; + break; + default: +- entry->opcode = MTHCA_OPCODE_INVALID; ++ entry->opcode = 0xFF; + break; + } + } else { +@@ -808,8 +808,10 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, + } + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); +- if (IS_ERR(mailbox)) ++ if (IS_ERR(mailbox)) { ++ err = PTR_ERR(mailbox); + goto err_out_arm; ++ } + + cq_context = mailbox->buf; + +@@ -851,9 +853,9 @@ int mthca_init_cq(struct 
mthca_dev *dev, int nent, + } + + spin_lock_irq(&dev->cq_table.lock); +- if (mthca_array_set(&dev->cq_table.cq, +- cq->cqn & (dev->limits.num_cqs - 1), +- cq)) { ++ err = mthca_array_set(&dev->cq_table.cq, ++ cq->cqn & (dev->limits.num_cqs - 1), cq); ++ if (err) { + spin_unlock_irq(&dev->cq_table.lock); + goto err_out_free_mr; + } +diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h +index 4393a022867b..e1fc67e73bf8 100644 +--- a/drivers/infiniband/hw/mthca/mthca_dev.h ++++ b/drivers/infiniband/hw/mthca/mthca_dev.h +@@ -105,7 +105,6 @@ enum { + MTHCA_OPCODE_ATOMIC_CS = 0x11, + MTHCA_OPCODE_ATOMIC_FA = 0x12, + MTHCA_OPCODE_BIND_MW = 0x18, +- MTHCA_OPCODE_INVALID = 0xff + }; + + enum { +diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c +index f937873e93df..b95f1457c407 100644 +--- a/drivers/infiniband/hw/qedr/main.c ++++ b/drivers/infiniband/hw/qedr/main.c +@@ -527,7 +527,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev) + qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx); + + /* Part 2 - check capabilities */ +- page_size = ~dev->attr.page_size_caps + 1; ++ page_size = ~qed_attr->page_size_caps + 1; + if (page_size > PAGE_SIZE) { + DP_ERR(dev, + "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n", +diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +index cacb720f44a0..2896808545f4 100644 +--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c ++++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +@@ -180,6 +180,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev, + + } + usnic_uiom_free_dev_list(dev_list); ++ dev_list = NULL; + } + + if (!found) { +@@ -207,6 +208,8 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev, + spin_unlock(&vf->lock); + if (IS_ERR_OR_NULL(qp_grp)) { + usnic_err("Failed to allocate qp_grp\n"); ++ if (usnic_ib_share_vf) ++ usnic_uiom_free_dev_list(dev_list); + return ERR_PTR(qp_grp ? 
PTR_ERR(qp_grp) : -ENOMEM); + } + +diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c +index d430c2f7cec4..1a1d7329fbb2 100644 +--- a/drivers/infiniband/sw/rdmavt/vt.c ++++ b/drivers/infiniband/sw/rdmavt/vt.c +@@ -96,9 +96,7 @@ struct rvt_dev_info *rvt_alloc_device(size_t size, int nports) + if (!rdi) + return rdi; + +- rdi->ports = kcalloc(nports, +- sizeof(struct rvt_ibport **), +- GFP_KERNEL); ++ rdi->ports = kcalloc(nports, sizeof(*rdi->ports), GFP_KERNEL); + if (!rdi->ports) + ib_dealloc_device(&rdi->ibdev); + +diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c +index ab6c3c25d7ff..9f6234585532 100644 +--- a/drivers/infiniband/sw/rxe/rxe.c ++++ b/drivers/infiniband/sw/rxe/rxe.c +@@ -168,9 +168,6 @@ static int rxe_init_ports(struct rxe_dev *rxe) + + rxe_init_port_param(port); + +- if (!port->attr.pkey_tbl_len || !port->attr.gid_tbl_len) +- return -EINVAL; +- + port->pkey_tbl = kcalloc(port->attr.pkey_tbl_len, + sizeof(*port->pkey_tbl), GFP_KERNEL); + +@@ -178,7 +175,7 @@ static int rxe_init_ports(struct rxe_dev *rxe) + return -ENOMEM; + + port->pkey_tbl[0] = 0xffff; +- port->port_guid = rxe->ifc_ops->port_guid(rxe); ++ port->port_guid = rxe_port_guid(rxe); + + spin_lock_init(&port->port_lock); + +diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h +index 73849a5a91b3..cd7663062d01 100644 +--- a/drivers/infiniband/sw/rxe/rxe_loc.h ++++ b/drivers/infiniband/sw/rxe/rxe_loc.h +@@ -141,6 +141,22 @@ void rxe_mem_cleanup(void *arg); + + int advance_dma_data(struct rxe_dma_info *dma, unsigned int length); + ++/* rxe_net.c */ ++int rxe_loopback(struct sk_buff *skb); ++int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, ++ struct sk_buff *skb); ++__be64 rxe_port_guid(struct rxe_dev *rxe); ++struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, ++ int paylen, struct rxe_pkt_info *pkt); ++int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, ++ struct sk_buff *skb, u32 *crc); ++enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num); ++const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num); ++struct device *rxe_dma_device(struct rxe_dev *rxe); ++__be64 rxe_node_guid(struct rxe_dev *rxe); ++int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid); ++int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid); ++ + /* rxe_qp.c */ + int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init); + +@@ -256,9 +272,9 @@ static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp, + + if (pkt->mask & RXE_LOOPBACK_MASK) { + memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt)); +- err = rxe->ifc_ops->loopback(skb); ++ err = rxe_loopback(skb); + } else { +- err = rxe->ifc_ops->send(rxe, pkt, skb); ++ err = rxe_send(rxe, pkt, skb); + } + + if (err) { +diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c +index fa95544ca7e0..890eb6d5c471 100644 +--- a/drivers/infiniband/sw/rxe/rxe_mcast.c ++++ b/drivers/infiniband/sw/rxe/rxe_mcast.c +@@ -61,7 +61,7 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid, + + rxe_add_key(grp, mgid); + +- err = rxe->ifc_ops->mcast_add(rxe, mgid); ++ err = rxe_mcast_add(rxe, mgid); + if (err) + goto err2; + +@@ -186,5 +186,5 @@ void rxe_mc_cleanup(void *arg) + struct rxe_dev *rxe = grp->rxe; + + rxe_drop_key(grp); +- rxe->ifc_ops->mcast_delete(rxe, &grp->mgid); ++ rxe_mcast_delete(rxe, &grp->mgid); + } +diff --git 
a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c +index 9b732c5f89e1..e23b322224ab 100644 +--- a/drivers/infiniband/sw/rxe/rxe_mr.c ++++ b/drivers/infiniband/sw/rxe/rxe_mr.c +@@ -175,7 +175,7 @@ int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start, + if (IS_ERR(umem)) { + pr_warn("err %d from rxe_umem_get\n", + (int)PTR_ERR(umem)); +- err = -EINVAL; ++ err = PTR_ERR(umem); + goto err1; + } + +@@ -205,6 +205,7 @@ int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start, + vaddr = page_address(sg_page(sg)); + if (!vaddr) { + pr_warn("null vaddr\n"); ++ ib_umem_release(umem); + err = -ENOMEM; + goto err1; + } +diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c +index d19e003e8381..f431cecd8b56 100644 +--- a/drivers/infiniband/sw/rxe/rxe_net.c ++++ b/drivers/infiniband/sw/rxe/rxe_net.c +@@ -102,17 +102,17 @@ static __be64 rxe_mac_to_eui64(struct net_device *ndev) + return eui64; + } + +-static __be64 node_guid(struct rxe_dev *rxe) ++__be64 rxe_node_guid(struct rxe_dev *rxe) + { + return rxe_mac_to_eui64(rxe->ndev); + } + +-static __be64 port_guid(struct rxe_dev *rxe) ++__be64 rxe_port_guid(struct rxe_dev *rxe) + { + return rxe_mac_to_eui64(rxe->ndev); + } + +-static struct device *dma_device(struct rxe_dev *rxe) ++struct device *rxe_dma_device(struct rxe_dev *rxe) + { + struct net_device *ndev; + +@@ -124,7 +124,7 @@ static struct device *dma_device(struct rxe_dev *rxe) + return ndev->dev.parent; + } + +-static int mcast_add(struct rxe_dev *rxe, union ib_gid *mgid) ++int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid) + { + int err; + unsigned char ll_addr[ETH_ALEN]; +@@ -135,7 +135,7 @@ static int mcast_add(struct rxe_dev *rxe, union ib_gid *mgid) + return err; + } + +-static int mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid) ++int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid) + { + int err; + unsigned char ll_addr[ETH_ALEN]; +@@ -259,10 +259,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port, + + /* Create UDP socket */ + err = udp_sock_create(net, &udp_cfg, &sock); +- if (err < 0) { +- pr_err("failed to create udp socket. 
err = %d\n", err); ++ if (err < 0) + return ERR_PTR(err); +- } + + tnl_cfg.encap_type = 1; + tnl_cfg.encap_rcv = rxe_udp_encap_recv; +@@ -399,8 +397,8 @@ static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, + return 0; + } + +-static int prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, +- struct sk_buff *skb, u32 *crc) ++int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, ++ struct sk_buff *skb, u32 *crc) + { + int err = 0; + struct rxe_av *av = rxe_get_av(pkt); +@@ -426,8 +424,7 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb) + rxe_run_task(&qp->req.task, 1); + } + +-static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, +- struct sk_buff *skb) ++int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb) + { + struct sk_buff *nskb; + struct rxe_av *av; +@@ -462,7 +459,7 @@ static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, + return 0; + } + +-static int loopback(struct sk_buff *skb) ++int rxe_loopback(struct sk_buff *skb) + { + return rxe_rcv(skb); + } +@@ -472,8 +469,8 @@ static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av) + return rxe->port.port_guid == av->grh.dgid.global.interface_id; + } + +-static struct sk_buff *init_packet(struct rxe_dev *rxe, struct rxe_av *av, +- int paylen, struct rxe_pkt_info *pkt) ++struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, ++ int paylen, struct rxe_pkt_info *pkt) + { + unsigned int hdr_len; + struct sk_buff *skb; +@@ -512,31 +509,16 @@ static struct sk_buff *init_packet(struct rxe_dev *rxe, struct rxe_av *av, + * this is required by rxe_cfg to match rxe devices in + * /sys/class/infiniband up with their underlying ethernet devices + */ +-static char *parent_name(struct rxe_dev *rxe, unsigned int port_num) ++const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num) + { + return rxe->ndev->name; + } + +-static enum rdma_link_layer link_layer(struct rxe_dev *rxe, +- unsigned int port_num) ++enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num) + { + return IB_LINK_LAYER_ETHERNET; + } + +-static struct rxe_ifc_ops ifc_ops = { +- .node_guid = node_guid, +- .port_guid = port_guid, +- .dma_device = dma_device, +- .mcast_add = mcast_add, +- .mcast_delete = mcast_delete, +- .prepare = prepare, +- .send = send, +- .loopback = loopback, +- .init_packet = init_packet, +- .parent_name = parent_name, +- .link_layer = link_layer, +-}; +- + struct rxe_dev *rxe_net_add(struct net_device *ndev) + { + int err; +@@ -546,7 +528,6 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev) + if (!rxe) + return NULL; + +- rxe->ifc_ops = &ifc_ops; + rxe->ndev = ndev; + + err = rxe_add(rxe, ndev->mtu); +@@ -682,6 +663,12 @@ int rxe_net_ipv6_init(void) + + recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net, + htons(ROCE_V2_UDP_DPORT), true); ++ if (PTR_ERR(recv_sockets.sk6) == -EAFNOSUPPORT) { ++ recv_sockets.sk6 = NULL; ++ pr_warn("IPv6 is not supported, can not create a UDPv6 socket\n"); ++ return 0; ++ } ++ + if (IS_ERR(recv_sockets.sk6)) { + recv_sockets.sk6 = NULL; + pr_err("Failed to create IPv6 UDP tunnel\n"); +diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c +index d6672127808b..5fa1442fd4f1 100644 +--- a/drivers/infiniband/sw/rxe/rxe_qp.c ++++ b/drivers/infiniband/sw/rxe/rxe_qp.c +@@ -258,6 +258,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, + if (err) { + vfree(qp->sq.queue->buf); + kfree(qp->sq.queue); ++ qp->sq.queue = NULL; + return err; + } + +@@ -315,6 
+316,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, + if (err) { + vfree(qp->rq.queue->buf); + kfree(qp->rq.queue); ++ qp->rq.queue = NULL; + return err; + } + } +@@ -374,6 +376,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, + err2: + rxe_queue_cleanup(qp->sq.queue); + err1: ++ qp->pd = NULL; ++ qp->rcq = NULL; ++ qp->scq = NULL; ++ qp->srq = NULL; ++ + if (srq) + rxe_drop_ref(srq); + rxe_drop_ref(scq); +@@ -597,15 +604,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, + struct ib_gid_attr sgid_attr; + + if (mask & IB_QP_MAX_QP_RD_ATOMIC) { +- int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic); ++ int max_rd_atomic = attr->max_rd_atomic ? ++ roundup_pow_of_two(attr->max_rd_atomic) : 0; + + qp->attr.max_rd_atomic = max_rd_atomic; + atomic_set(&qp->req.rd_atomic, max_rd_atomic); + } + + if (mask & IB_QP_MAX_DEST_RD_ATOMIC) { +- int max_dest_rd_atomic = +- __roundup_pow_of_two(attr->max_dest_rd_atomic); ++ int max_dest_rd_atomic = attr->max_dest_rd_atomic ? ++ roundup_pow_of_two(attr->max_dest_rd_atomic) : 0; + + qp->attr.max_dest_rd_atomic = max_dest_rd_atomic; + +diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c +index db6bb026ae90..ece4fe838e75 100644 +--- a/drivers/infiniband/sw/rxe/rxe_recv.c ++++ b/drivers/infiniband/sw/rxe/rxe_recv.c +@@ -36,21 +36,26 @@ + #include "rxe.h" + #include "rxe_loc.h" + ++/* check that QP matches packet opcode type and is in a valid state */ + static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, + struct rxe_qp *qp) + { ++ unsigned int pkt_type; ++ + if (unlikely(!qp->valid)) + goto err1; + ++ pkt_type = pkt->opcode & 0xe0; ++ + switch (qp_type(qp)) { + case IB_QPT_RC: +- if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) { ++ if (unlikely(pkt_type != IB_OPCODE_RC)) { + pr_warn_ratelimited("bad qp type\n"); + goto err1; + } + break; + case IB_QPT_UC: +- if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) { ++ if (unlikely(pkt_type != IB_OPCODE_UC)) { + pr_warn_ratelimited("bad qp type\n"); + goto err1; + } +@@ -58,7 +63,7 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, + case IB_QPT_UD: + case IB_QPT_SMI: + case IB_QPT_GSI: +- if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) { ++ if (unlikely(pkt_type != IB_OPCODE_UD)) { + pr_warn_ratelimited("bad qp type\n"); + goto err1; + } +diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c +index 6fb771290c56..463c4b3e7366 100644 +--- a/drivers/infiniband/sw/rxe/rxe_req.c ++++ b/drivers/infiniband/sw/rxe/rxe_req.c +@@ -412,7 +412,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp, + + /* init skb */ + av = rxe_get_av(pkt); +- skb = rxe->ifc_ops->init_packet(rxe, av, paylen, pkt); ++ skb = rxe_init_packet(rxe, av, paylen, pkt); + if (unlikely(!skb)) + return NULL; + +@@ -483,7 +483,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe, + u32 *p; + int err; + +- err = rxe->ifc_ops->prepare(rxe, pkt, skb, &crc); ++ err = rxe_prepare(rxe, pkt, skb, &crc); + if (err) + return err; + +@@ -661,7 +661,8 @@ int rxe_requester(void *arg) + } + + if (unlikely(qp_type(qp) == IB_QPT_RC && +- qp->req.psn > (qp->comp.psn + RXE_MAX_UNACKED_PSNS))) { ++ psn_compare(qp->req.psn, (qp->comp.psn + ++ RXE_MAX_UNACKED_PSNS)) > 0)) { + qp->req.wait_psn = 1; + goto exit; + } +diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c +index 5bfea23f3b60..5733d9d2fcdc 
100644 +--- a/drivers/infiniband/sw/rxe/rxe_resp.c ++++ b/drivers/infiniband/sw/rxe/rxe_resp.c +@@ -600,7 +600,7 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp, + pad = (-payload) & 0x3; + paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE; + +- skb = rxe->ifc_ops->init_packet(rxe, &qp->pri_av, paylen, ack); ++ skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack); + if (!skb) + return NULL; + +@@ -629,7 +629,7 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp, + if (ack->mask & RXE_ATMACK_MASK) + atmack_set_orig(ack, qp->resp.atomic_orig); + +- err = rxe->ifc_ops->prepare(rxe, ack, skb, &crc); ++ err = rxe_prepare(rxe, ack, skb, &crc); + if (err) { + kfree_skb(skb); + return NULL; +diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c +index ef13082d6ca1..3b24ce8e3b3c 100644 +--- a/drivers/infiniband/sw/rxe/rxe_verbs.c ++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c +@@ -234,7 +234,7 @@ static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev, + { + struct rxe_dev *rxe = to_rdev(dev); + +- return rxe->ifc_ops->link_layer(rxe, port_num); ++ return rxe_link_layer(rxe, port_num); + } + + static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev, +@@ -1194,10 +1194,8 @@ static ssize_t rxe_show_parent(struct device *device, + { + struct rxe_dev *rxe = container_of(device, struct rxe_dev, + ib_dev.dev); +- char *name; + +- name = rxe->ifc_ops->parent_name(rxe, 1); +- return snprintf(buf, 16, "%s\n", name); ++ return scnprintf(buf, PAGE_SIZE, "%s\n", rxe_parent_name(rxe, 1)); + } + + static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL); +@@ -1219,9 +1217,9 @@ int rxe_register_device(struct rxe_dev *rxe) + dev->node_type = RDMA_NODE_IB_CA; + dev->phys_port_cnt = 1; + dev->num_comp_vectors = RXE_NUM_COMP_VECTORS; +- dev->dma_device = rxe->ifc_ops->dma_device(rxe); ++ dev->dma_device = rxe_dma_device(rxe); + dev->local_dma_lkey = 0; +- dev->node_guid = rxe->ifc_ops->node_guid(rxe); ++ dev->node_guid = rxe_node_guid(rxe); + dev->dma_ops = &rxe_dma_mapping_ops; + + dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION; +diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h +index dee3853163b6..d4a84f49dfd8 100644 +--- a/drivers/infiniband/sw/rxe/rxe_verbs.h ++++ b/drivers/infiniband/sw/rxe/rxe_verbs.h +@@ -373,26 +373,6 @@ struct rxe_port { + u32 qp_gsi_index; + }; + +-/* callbacks from rdma_rxe to network interface layer */ +-struct rxe_ifc_ops { +- void (*release)(struct rxe_dev *rxe); +- __be64 (*node_guid)(struct rxe_dev *rxe); +- __be64 (*port_guid)(struct rxe_dev *rxe); +- struct device *(*dma_device)(struct rxe_dev *rxe); +- int (*mcast_add)(struct rxe_dev *rxe, union ib_gid *mgid); +- int (*mcast_delete)(struct rxe_dev *rxe, union ib_gid *mgid); +- int (*prepare)(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, +- struct sk_buff *skb, u32 *crc); +- int (*send)(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, +- struct sk_buff *skb); +- int (*loopback)(struct sk_buff *skb); +- struct sk_buff *(*init_packet)(struct rxe_dev *rxe, struct rxe_av *av, +- int paylen, struct rxe_pkt_info *pkt); +- char *(*parent_name)(struct rxe_dev *rxe, unsigned int port_num); +- enum rdma_link_layer (*link_layer)(struct rxe_dev *rxe, +- unsigned int port_num); +-}; +- + struct rxe_dev { + struct ib_device ib_dev; + struct ib_device_attr attr; +@@ -401,8 +381,6 @@ struct rxe_dev { + struct kref ref_cnt; + struct mutex usdev_lock; + +- struct rxe_ifc_ops *ifc_ops; +- + struct net_device 
*ndev; + + int xmit_errors; +diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c +index f3135ae22df4..39a7abe4b48c 100644 +--- a/drivers/input/joydev.c ++++ b/drivers/input/joydev.c +@@ -448,7 +448,7 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev, + if (IS_ERR(abspam)) + return PTR_ERR(abspam); + +- for (i = 0; i < joydev->nabs; i++) { ++ for (i = 0; i < len && i < joydev->nabs; i++) { + if (abspam[i] > ABS_MAX) { + retval = -EINVAL; + goto out; +@@ -472,6 +472,9 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev, + int i; + int retval = 0; + ++ if (len % sizeof(*keypam)) ++ return -EINVAL; ++ + len = min(len, sizeof(joydev->keypam)); + + /* Validate the map. */ +@@ -479,7 +482,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev, + if (IS_ERR(keypam)) + return PTR_ERR(keypam); + +- for (i = 0; i < joydev->nkey; i++) { ++ for (i = 0; i < (len / 2) && i < joydev->nkey; i++) { + if (keypam[i] > KEY_MAX || keypam[i] < BTN_MISC) { + retval = -EINVAL; + goto out; +@@ -489,7 +492,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev, + memcpy(joydev->keypam, keypam, len); + + for (i = 0; i < joydev->nkey; i++) +- joydev->keymap[keypam[i] - BTN_MISC] = i; ++ joydev->keymap[joydev->keypam[i] - BTN_MISC] = i; + + out: + kfree(keypam); +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index 54a6691d7d87..4168ed0ef187 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -232,9 +232,17 @@ static const struct xpad_device { + { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, +- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, +@@ -258,6 +266,7 @@ static const struct xpad_device { + { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, + { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, + { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, ++ { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 }, + { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, + { 0x12ab, 0x0303, "Mortal Kombat Klassic FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, +@@ -312,6 +321,10 @@ static const struct xpad_device { + { 
0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 }, ++ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, ++ { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE }, ++ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 }, ++ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE }, + { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, +@@ -435,6 +448,7 @@ static const struct usb_device_id xpad_table[] = { + XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ + XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */ + XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ ++ XPAD_XBOX360_VENDOR(0x1209), /* Ardwiino Controllers */ + XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ + XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ + XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */ +@@ -444,8 +458,12 @@ static const struct usb_device_id xpad_table[] = { + XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ + XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ + XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */ ++ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */ ++ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */ + XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ + XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */ ++ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */ ++ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */ + { } + }; + +diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c +index 25943e9bc8bf..328792e26a9f 100644 +--- a/drivers/input/keyboard/cros_ec_keyb.c ++++ b/drivers/input/keyboard/cros_ec_keyb.c +@@ -140,6 +140,7 @@ static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev, + "changed: [r%d c%d]: byte %02x\n", + row, col, new_state); + ++ input_event(idev, EV_MSC, MSC_SCAN, pos); + input_report_key(idev, keycodes[pos], + new_state); + } +diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c +index f77b295e0123..01788a78041b 100644 +--- a/drivers/input/keyboard/ep93xx_keypad.c ++++ b/drivers/input/keyboard/ep93xx_keypad.c +@@ -257,8 +257,8 @@ static int ep93xx_keypad_probe(struct platform_device *pdev) + } + + keypad->irq = platform_get_irq(pdev, 0); +- if (!keypad->irq) { +- err = -ENXIO; ++ if (keypad->irq < 0) { ++ err = keypad->irq; + goto failed_free; + } + +diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c +index 5b152f25a8e1..da07742fd9a4 100644 +--- a/drivers/input/keyboard/hil_kbd.c ++++ b/drivers/input/keyboard/hil_kbd.c +@@ -512,6 +512,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv) + HIL_IDD_NUM_AXES_PER_SET(*idd)) { + printk(KERN_INFO PREFIX + "combo devices are not supported.\n"); ++ error = -EINVAL; + goto bail1; + } + +diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c +index 4a39c6cfe793..1ae84cdf688d 100644 +--- a/drivers/input/keyboard/matrix_keypad.c ++++ b/drivers/input/keyboard/matrix_keypad.c +@@ -131,21 +131,15 @@ static void matrix_keypad_scan(struct work_struct *work) + /* assert each 
column and read the row status out */ + for (col = 0; col < pdata->num_col_gpios; col++) { + +- for (row = 0; row < pdata->num_row_gpios; row++) { +- activate_col(pdata, col, true); ++ activate_col(pdata, col, true); ++ ++ for (row = 0; row < pdata->num_row_gpios; row++) + new_state[col] |= + row_asserted(pdata, row) ? (1 << row) : 0; +- gpio_direction_output(pdata->col_gpios[col], 0); +- new_state[col] &= +- row_asserted(pdata, row) ? ~(1 << row) : ~(0); +- } +- if (new_state[col]) +- count_state++; ++ ++ if (new_state[col]) ++ count_state++; + activate_col(pdata, col, false); +- for (row = 0; row < pdata->num_row_gpios; row++) { +- gpio_direction_output(pdata->row_gpios[row], 0); +- gpio_direction_input(pdata->row_gpios[row]); +- } + } + + if (count_state == 5) +diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c +index 7abfd34eb87e..bcec72367c1d 100644 +--- a/drivers/input/keyboard/nspire-keypad.c ++++ b/drivers/input/keyboard/nspire-keypad.c +@@ -96,9 +96,15 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id) + return IRQ_HANDLED; + } + +-static int nspire_keypad_chip_init(struct nspire_keypad *keypad) ++static int nspire_keypad_open(struct input_dev *input) + { ++ struct nspire_keypad *keypad = input_get_drvdata(input); + unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles; ++ int error; ++ ++ error = clk_prepare_enable(keypad->clk); ++ if (error) ++ return error; + + cycles_per_us = (clk_get_rate(keypad->clk) / 1000000); + if (cycles_per_us == 0) +@@ -124,30 +130,6 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad) + keypad->int_mask = 1 << 1; + writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK); + +- /* Disable GPIO interrupts to prevent hanging on touchpad */ +- /* Possibly used to detect touchpad events */ +- writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT); +- /* Acknowledge existing interrupts */ +- writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS); +- +- return 0; +-} +- +-static int nspire_keypad_open(struct input_dev *input) +-{ +- struct nspire_keypad *keypad = input_get_drvdata(input); +- int error; +- +- error = clk_prepare_enable(keypad->clk); +- if (error) +- return error; +- +- error = nspire_keypad_chip_init(keypad); +- if (error) { +- clk_disable_unprepare(keypad->clk); +- return error; +- } +- + return 0; + } + +@@ -155,6 +137,11 @@ static void nspire_keypad_close(struct input_dev *input) + { + struct nspire_keypad *keypad = input_get_drvdata(input); + ++ /* Disable interrupts */ ++ writel(0, keypad->reg_base + KEYPAD_INTMSK); ++ /* Acknowledge existing interrupts */ ++ writel(~0, keypad->reg_base + KEYPAD_INT); ++ + clk_disable_unprepare(keypad->clk); + } + +@@ -215,6 +202,25 @@ static int nspire_keypad_probe(struct platform_device *pdev) + return -ENOMEM; + } + ++ error = clk_prepare_enable(keypad->clk); ++ if (error) { ++ dev_err(&pdev->dev, "failed to enable clock\n"); ++ return error; ++ } ++ ++ /* Disable interrupts */ ++ writel(0, keypad->reg_base + KEYPAD_INTMSK); ++ /* Acknowledge existing interrupts */ ++ writel(~0, keypad->reg_base + KEYPAD_INT); ++ ++ /* Disable GPIO interrupts to prevent hanging on touchpad */ ++ /* Possibly used to detect touchpad events */ ++ writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT); ++ /* Acknowledge existing GPIO interrupts */ ++ writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS); ++ ++ clk_disable_unprepare(keypad->clk); ++ + input_set_drvdata(input, keypad); + + input->id.bustype = BUS_HOST; +diff --git 
a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c +index 3d2c60c8de83..c6a468dfdfb4 100644 +--- a/drivers/input/keyboard/omap4-keypad.c ++++ b/drivers/input/keyboard/omap4-keypad.c +@@ -253,10 +253,8 @@ static int omap4_keypad_probe(struct platform_device *pdev) + } + + irq = platform_get_irq(pdev, 0); +- if (!irq) { +- dev_err(&pdev->dev, "no keyboard irq assigned\n"); +- return -EINVAL; +- } ++ if (irq < 0) ++ return irq; + + keypad_data = kzalloc(sizeof(struct omap4_keypad), GFP_KERNEL); + if (!keypad_data) { +diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c +index dc6bb9d5b4f0..191c27dda386 100644 +--- a/drivers/input/keyboard/sunkbd.c ++++ b/drivers/input/keyboard/sunkbd.c +@@ -115,7 +115,8 @@ static irqreturn_t sunkbd_interrupt(struct serio *serio, + switch (data) { + + case SUNKBD_RET_RESET: +- schedule_work(&sunkbd->tq); ++ if (sunkbd->enabled) ++ schedule_work(&sunkbd->tq); + sunkbd->reset = -1; + break; + +@@ -216,16 +217,12 @@ static int sunkbd_initialize(struct sunkbd *sunkbd) + } + + /* +- * sunkbd_reinit() sets leds and beeps to a state the computer remembers they +- * were in. ++ * sunkbd_set_leds_beeps() sets leds and beeps to a state the computer remembers ++ * they were in. + */ + +-static void sunkbd_reinit(struct work_struct *work) ++static void sunkbd_set_leds_beeps(struct sunkbd *sunkbd) + { +- struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq); +- +- wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ); +- + serio_write(sunkbd->serio, SUNKBD_CMD_SETLED); + serio_write(sunkbd->serio, + (!!test_bit(LED_CAPSL, sunkbd->dev->led) << 3) | +@@ -238,11 +235,39 @@ static void sunkbd_reinit(struct work_struct *work) + SUNKBD_CMD_BELLOFF - !!test_bit(SND_BELL, sunkbd->dev->snd)); + } + ++ ++/* ++ * sunkbd_reinit() wait for the keyboard reset to complete and restores state ++ * of leds and beeps. ++ */ ++ ++static void sunkbd_reinit(struct work_struct *work) ++{ ++ struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq); ++ ++ /* ++ * It is OK that we check sunkbd->enabled without pausing serio, ++ * as we only want to catch true->false transition that will ++ * happen once and we will be woken up for it. 
++ */ ++ wait_event_interruptible_timeout(sunkbd->wait, ++ sunkbd->reset >= 0 || !sunkbd->enabled, ++ HZ); ++ ++ if (sunkbd->reset >= 0 && sunkbd->enabled) ++ sunkbd_set_leds_beeps(sunkbd); ++} ++ + static void sunkbd_enable(struct sunkbd *sunkbd, bool enable) + { + serio_pause_rx(sunkbd->serio); + sunkbd->enabled = enable; + serio_continue_rx(sunkbd->serio); ++ ++ if (!enable) { ++ wake_up_interruptible(&sunkbd->wait); ++ cancel_work_sync(&sunkbd->tq); ++ } + } + + /* +diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c +index 323a0fb575a4..d87e7cd11ecb 100644 +--- a/drivers/input/keyboard/twl4030_keypad.c ++++ b/drivers/input/keyboard/twl4030_keypad.c +@@ -63,7 +63,7 @@ struct twl4030_keypad { + bool autorepeat; + unsigned int n_rows; + unsigned int n_cols; +- unsigned int irq; ++ int irq; + + struct device *dbg_dev; + struct input_dev *input; +@@ -389,10 +389,8 @@ static int twl4030_kp_probe(struct platform_device *pdev) + } + + kp->irq = platform_get_irq(pdev, 0); +- if (!kp->irq) { +- dev_err(&pdev->dev, "no keyboard irq assigned\n"); +- return -EINVAL; +- } ++ if (kp->irq < 0) ++ return kp->irq; + + error = matrix_keypad_build_keymap(keymap_data, NULL, + TWL4030_MAX_ROWS, +diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c +index 2b2d02f408bb..2e189646d8fe 100644 +--- a/drivers/input/misc/adxl34x.c ++++ b/drivers/input/misc/adxl34x.c +@@ -696,7 +696,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq, + struct input_dev *input_dev; + const struct adxl34x_platform_data *pdata; + int err, range, i; +- unsigned char revid; ++ int revid; + + if (!irq) { + dev_err(dev, "no IRQ?\n"); +diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c +index 23c191a2a071..cf4d507efaf6 100644 +--- a/drivers/input/misc/cm109.c ++++ b/drivers/input/misc/cm109.c +@@ -571,12 +571,15 @@ static int cm109_input_open(struct input_dev *idev) + dev->ctl_data->byte[HID_OR2] = dev->keybit; + dev->ctl_data->byte[HID_OR3] = 0x00; + ++ dev->ctl_urb_pending = 1; + error = usb_submit_urb(dev->urb_ctl, GFP_KERNEL); +- if (error) ++ if (error) { ++ dev->ctl_urb_pending = 0; + dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_ctl) failed %d\n", + __func__, error); +- else ++ } else { + dev->open = 1; ++ } + + mutex_unlock(&dev->pm_mutex); + +diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c +index 016397850b1b..9c1f10491ab1 100644 +--- a/drivers/input/mouse/cyapa_gen6.c ++++ b/drivers/input/mouse/cyapa_gen6.c +@@ -573,7 +573,7 @@ static int cyapa_pip_retrieve_data_structure(struct cyapa *cyapa, + + memset(&cmd, 0, sizeof(cmd)); + put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &cmd.head.addr); +- put_unaligned_le16(sizeof(cmd), &cmd.head.length - 2); ++ put_unaligned_le16(sizeof(cmd) - 2, &cmd.head.length); + cmd.head.report_id = PIP_APP_CMD_REPORT_ID; + cmd.head.cmd_code = PIP_RETRIEVE_DATA_STRUCTURE; + put_unaligned_le16(read_offset, &cmd.read_offset); +diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c +index 5cbf17aa8443..597ecae02c40 100644 +--- a/drivers/input/mouse/psmouse-base.c ++++ b/drivers/input/mouse/psmouse-base.c +@@ -1909,7 +1909,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp) + { + int type = *((unsigned int *)kp->arg); + +- return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name); ++ return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name); + } + + static int __init psmouse_init(void) +diff --git 
a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c +index 11c32ac8234b..779d0b9341c0 100644 +--- a/drivers/input/mouse/sentelic.c ++++ b/drivers/input/mouse/sentelic.c +@@ -454,7 +454,7 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data, + + fsp_reg_write_enable(psmouse, false); + +- return count; ++ return retval; + } + + PSMOUSE_DEFINE_WO_ATTR(setreg, S_IWUSR, NULL, fsp_attr_set_setreg); +diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c +index 65605e4ef3cf..1491a9a5c6b0 100644 +--- a/drivers/input/serio/hil_mlc.c ++++ b/drivers/input/serio/hil_mlc.c +@@ -74,7 +74,7 @@ EXPORT_SYMBOL(hil_mlc_unregister); + static LIST_HEAD(hil_mlcs); + static DEFINE_RWLOCK(hil_mlcs_lock); + static struct timer_list hil_mlcs_kicker; +-static int hil_mlcs_probe; ++static int hil_mlcs_probe, hil_mlc_stop; + + static void hil_mlcs_process(unsigned long unused); + static DECLARE_TASKLET_DISABLED(hil_mlcs_tasklet, hil_mlcs_process, 0); +@@ -704,9 +704,13 @@ static int hilse_donode(hil_mlc *mlc) + if (!mlc->ostarted) { + mlc->ostarted = 1; + mlc->opacket = pack; +- mlc->out(mlc); ++ rc = mlc->out(mlc); + nextidx = HILSEN_DOZE; + write_unlock_irqrestore(&mlc->lock, flags); ++ if (rc) { ++ hil_mlc_stop = 1; ++ return 1; ++ } + break; + } + mlc->ostarted = 0; +@@ -717,8 +721,13 @@ static int hilse_donode(hil_mlc *mlc) + + case HILSE_CTS: + write_lock_irqsave(&mlc->lock, flags); +- nextidx = mlc->cts(mlc) ? node->bad : node->good; ++ rc = mlc->cts(mlc); ++ nextidx = rc ? node->bad : node->good; + write_unlock_irqrestore(&mlc->lock, flags); ++ if (rc) { ++ hil_mlc_stop = 1; ++ return 1; ++ } + break; + + default: +@@ -786,6 +795,12 @@ static void hil_mlcs_process(unsigned long unused) + + static void hil_mlcs_timer(unsigned long data) + { ++ if (hil_mlc_stop) { ++ /* could not send packet - stop immediately. */ ++ pr_warn(PREFIX "HIL seems stuck - Disabling HIL MLC.\n"); ++ return; ++ } ++ + hil_mlcs_probe = 1; + tasklet_schedule(&hil_mlcs_tasklet); + /* Re-insert the periodic task. 
*/ +diff --git a/drivers/input/serio/hp_sdc_mlc.c b/drivers/input/serio/hp_sdc_mlc.c +index d50f0678bf47..078cbe6522a2 100644 +--- a/drivers/input/serio/hp_sdc_mlc.c ++++ b/drivers/input/serio/hp_sdc_mlc.c +@@ -213,7 +213,7 @@ static int hp_sdc_mlc_cts(hil_mlc *mlc) + priv->tseq[2] = 1; + priv->tseq[3] = 0; + priv->tseq[4] = 0; +- __hp_sdc_enqueue_transaction(&priv->trans); ++ return __hp_sdc_enqueue_transaction(&priv->trans); + busy: + return 1; + done: +@@ -222,7 +222,7 @@ static int hp_sdc_mlc_cts(hil_mlc *mlc) + return 0; + } + +-static void hp_sdc_mlc_out(hil_mlc *mlc) ++static int hp_sdc_mlc_out(hil_mlc *mlc) + { + struct hp_sdc_mlc_priv_s *priv; + +@@ -237,7 +237,7 @@ static void hp_sdc_mlc_out(hil_mlc *mlc) + do_data: + if (priv->emtestmode) { + up(&mlc->osem); +- return; ++ return 0; + } + /* Shouldn't be sending commands when loop may be busy */ + BUG_ON(down_trylock(&mlc->csem)); +@@ -299,7 +299,7 @@ static void hp_sdc_mlc_out(hil_mlc *mlc) + BUG_ON(down_trylock(&mlc->csem)); + } + enqueue: +- hp_sdc_enqueue_transaction(&priv->trans); ++ return hp_sdc_enqueue_transaction(&priv->trans); + } + + static int __init hp_sdc_mlc_init(void) +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index fd1e79013cf8..3049bccf2422 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -224,6 +224,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), + }, + }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"), ++ }, ++ }, + { } + }; + +@@ -429,6 +435,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "076804U"), + }, + }, ++ { ++ /* Lenovo XiaoXin Air 12 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "80UN"), ++ }, ++ }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), +@@ -545,6 +558,14 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"), + }, + }, ++ { ++ /* Entroware Proteus */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Entroware"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"), ++ DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"), ++ }, ++ }, + { } + }; + +@@ -558,6 +579,11 @@ static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"), + }, ++ }, { ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), ++ DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */ ++ }, + }, + { } + }; +@@ -668,6 +694,48 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"), + }, + }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"), ++ }, ++ }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"), ++ }, ++ }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"), ++ }, ++ }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"), ++ }, ++ }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"), ++ }, ++ }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, 
"Acer"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"), ++ }, ++ }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"), ++ }, ++ }, + { + /* Advent 4211 */ + .matches = { +@@ -745,6 +813,14 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"), + }, + }, ++ { ++ /* Entroware Proteus */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Entroware"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"), ++ DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"), ++ }, ++ }, + { } + }; + +@@ -774,6 +850,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = { + DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), + }, + }, ++ { ++ /* Acer Aspire 5 A515 */ ++ .matches = { ++ DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"), ++ DMI_MATCH(DMI_BOARD_VENDOR, "PK"), ++ }, ++ }, + { } + }; + +diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c +index c84c685056b9..37f84ba11f05 100644 +--- a/drivers/input/serio/i8042.c ++++ b/drivers/input/serio/i8042.c +@@ -125,6 +125,7 @@ module_param_named(unmask_kbd_data, i8042_unmask_kbd_data, bool, 0600); + MODULE_PARM_DESC(unmask_kbd_data, "Unconditional enable (may reveal sensitive data) of normally sanitize-filtered kbd data traffic debug log [pre-condition: i8042.debug=1 enabled]"); + #endif + ++static bool i8042_present; + static bool i8042_bypass_aux_irq_test; + static char i8042_kbd_firmware_id[128]; + static char i8042_aux_firmware_id[128]; +@@ -343,6 +344,9 @@ int i8042_command(unsigned char *param, int command) + unsigned long flags; + int retval; + ++ if (!i8042_present) ++ return -1; ++ + spin_lock_irqsave(&i8042_lock, flags); + retval = __i8042_command(param, command); + spin_unlock_irqrestore(&i8042_lock, flags); +@@ -1452,7 +1456,8 @@ static int __init i8042_setup_aux(void) + if (error) + goto err_free_ports; + +- if (aux_enable()) ++ error = aux_enable(); ++ if (error) + goto err_free_irq; + + i8042_aux_irq_registered = true; +@@ -1597,12 +1602,15 @@ static int __init i8042_init(void) + + err = i8042_platform_init(); + if (err) +- return err; ++ return (err == -ENODEV) ? 
0 : err; + + err = i8042_controller_check(); + if (err) + goto err_platform_exit; + ++ /* Set this before creating the dev to allow i8042_command to work right away */ ++ i8042_present = true; ++ + pdev = platform_create_bundle(&i8042_driver, i8042_probe, NULL, 0, NULL, 0); + if (IS_ERR(pdev)) { + err = PTR_ERR(pdev); +@@ -1621,6 +1629,9 @@ static int __init i8042_init(void) + + static void __exit i8042_exit(void) + { ++ if (!i8042_present) ++ return; ++ + platform_device_unregister(i8042_platform_device); + platform_driver_unregister(&i8042_driver); + i8042_platform_exit(); +diff --git a/drivers/input/serio/sun4i-ps2.c b/drivers/input/serio/sun4i-ps2.c +index 04b96fe39339..46512b4d686a 100644 +--- a/drivers/input/serio/sun4i-ps2.c ++++ b/drivers/input/serio/sun4i-ps2.c +@@ -210,7 +210,6 @@ static int sun4i_ps2_probe(struct platform_device *pdev) + struct sun4i_ps2data *drvdata; + struct serio *serio; + struct device *dev = &pdev->dev; +- unsigned int irq; + int error; + + drvdata = kzalloc(sizeof(struct sun4i_ps2data), GFP_KERNEL); +@@ -263,14 +262,12 @@ static int sun4i_ps2_probe(struct platform_device *pdev) + writel(0, drvdata->reg_base + PS2_REG_GCTL); + + /* Get IRQ for the device */ +- irq = platform_get_irq(pdev, 0); +- if (!irq) { +- dev_err(dev, "no IRQ found\n"); +- error = -ENXIO; ++ drvdata->irq = platform_get_irq(pdev, 0); ++ if (drvdata->irq < 0) { ++ error = drvdata->irq; + goto err_disable_clk; + } + +- drvdata->irq = irq; + drvdata->serio = serio; + drvdata->dev = dev; + +diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c +index 1ce3ecbe37f8..b77a2178fdec 100644 +--- a/drivers/input/touchscreen/ads7846.c ++++ b/drivers/input/touchscreen/ads7846.c +@@ -35,6 +35,7 @@ + #include + #include + #include ++#include + + /* + * This code has been heavily tested on a Nokia 770, and lightly +@@ -410,7 +411,7 @@ static int ads7845_read12_ser(struct device *dev, unsigned command) + + if (status == 0) { + /* BE12 value, then padding */ +- status = be16_to_cpu(*((u16 *)&req->sample[1])); ++ status = get_unaligned_be16(&req->sample[1]); + status = status >> 3; + status &= 0x0fff; + } +@@ -785,10 +786,11 @@ static void ads7846_report_state(struct ads7846 *ts) + /* compute touch pressure resistance using equation #2 */ + Rt = z2; + Rt -= z1; +- Rt *= x; + Rt *= ts->x_plate_ohms; ++ Rt = DIV_ROUND_CLOSEST(Rt, 16); ++ Rt *= x; + Rt /= z1; +- Rt = (Rt + 2047) >> 12; ++ Rt = DIV_ROUND_CLOSEST(Rt, 256); + } else { + Rt = 0; + } +diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c +index 8051a4b704ea..e2e31cbd6b2c 100644 +--- a/drivers/input/touchscreen/elo.c ++++ b/drivers/input/touchscreen/elo.c +@@ -345,8 +345,10 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv) + switch (elo->id) { + + case 0: /* 10-byte protocol */ +- if (elo_setup_10(elo)) ++ if (elo_setup_10(elo)) { ++ err = -EIO; + goto fail3; ++ } + + break; + +diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c +index 6a02e7301297..ba0ab9963f3c 100644 +--- a/drivers/input/touchscreen/goodix.c ++++ b/drivers/input/touchscreen/goodix.c +@@ -98,6 +98,18 @@ static const struct dmi_system_id rotated_screen[] = { + DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"), + }, + }, ++ { ++ .ident = "Teclast X98 Pro", ++ .matches = { ++ /* ++ * Only match BIOS date, because the manufacturers ++ * BIOS does not report the board name at all ++ * (sometimes)... 
++ */ ++ DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"), ++ DMI_MATCH(DMI_BIOS_DATE, "10/28/2015"), ++ }, ++ }, + { + .ident = "WinBook TW100", + .matches = { +diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c +index 8275267eac25..4be7ddc04af0 100644 +--- a/drivers/input/touchscreen/imx6ul_tsc.c ++++ b/drivers/input/touchscreen/imx6ul_tsc.c +@@ -490,20 +490,25 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev) + + mutex_lock(&input_dev->mutex); + +- if (input_dev->users) { +- retval = clk_prepare_enable(tsc->adc_clk); +- if (retval) +- goto out; +- +- retval = clk_prepare_enable(tsc->tsc_clk); +- if (retval) { +- clk_disable_unprepare(tsc->adc_clk); +- goto out; +- } ++ if (!input_dev->users) ++ goto out; + +- retval = imx6ul_tsc_init(tsc); ++ retval = clk_prepare_enable(tsc->adc_clk); ++ if (retval) ++ goto out; ++ ++ retval = clk_prepare_enable(tsc->tsc_clk); ++ if (retval) { ++ clk_disable_unprepare(tsc->adc_clk); ++ goto out; + } + ++ retval = imx6ul_tsc_init(tsc); ++ if (retval) { ++ clk_disable_unprepare(tsc->tsc_clk); ++ clk_disable_unprepare(tsc->adc_clk); ++ goto out; ++ } + out: + mutex_unlock(&input_dev->mutex); + return retval; +diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c +index 1f5b6b5b1018..4b9b11ebf29d 100644 +--- a/drivers/input/touchscreen/raydium_i2c_ts.c ++++ b/drivers/input/touchscreen/raydium_i2c_ts.c +@@ -419,6 +419,7 @@ static int raydium_i2c_write_object(struct i2c_client *client, + enum raydium_bl_ack state) + { + int error; ++ static const u8 cmd[] = { 0xFF, 0x39 }; + + error = raydium_i2c_send(client, RM_CMD_BOOT_WRT, data, len); + if (error) { +@@ -427,7 +428,7 @@ static int raydium_i2c_write_object(struct i2c_client *client, + return error; + } + +- error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, NULL, 0); ++ error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, cmd, sizeof(cmd)); + if (error) { + dev_err(&client->dev, "Ack obj command failed: %d\n", error); + return error; +diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c +index 867772878c0c..3350c0190c4a 100644 +--- a/drivers/input/touchscreen/silead.c ++++ b/drivers/input/touchscreen/silead.c +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + #include + + #include +@@ -317,10 +318,8 @@ static int silead_ts_get_id(struct i2c_client *client) + + error = i2c_smbus_read_i2c_block_data(client, SILEAD_REG_ID, + sizeof(chip_id), (u8 *)&chip_id); +- if (error < 0) { +- dev_err(&client->dev, "Chip ID read error %d\n", error); ++ if (error < 0) + return error; +- } + + data->chip_id = le32_to_cpu(chip_id); + dev_info(&client->dev, "Silead chip ID: 0x%8X", data->chip_id); +@@ -333,12 +332,49 @@ static int silead_ts_setup(struct i2c_client *client) + int error; + u32 status; + ++ /* ++ * Some buggy BIOS-es bring up the chip in a stuck state where it ++ * blocks the I2C bus. The following steps are necessary to ++ * unstuck the chip / bus: ++ * 1. Turn off the Silead chip. ++ * 2. Try to do an I2C transfer with the chip, this will fail in ++ * response to which the I2C-bus-driver will call: ++ * i2c_recover_bus() which will unstuck the I2C-bus. Note the ++ * unstuck-ing of the I2C bus only works if we first drop the ++ * chip off the bus by turning it off. ++ * 3. Turn the chip back on. ++ * ++ * On the x86/ACPI systems were this problem is seen, step 1. and ++ * 3. require making ACPI calls and dealing with ACPI Power ++ * Resources. 
The workaround below runtime-suspends the chip to ++ * turn it off, leaving it up to the ACPI subsystem to deal with ++ * this. ++ */ ++ ++ if (device_property_read_bool(&client->dev, ++ "silead,stuck-controller-bug")) { ++ pm_runtime_set_active(&client->dev); ++ pm_runtime_enable(&client->dev); ++ pm_runtime_allow(&client->dev); ++ ++ pm_runtime_suspend(&client->dev); ++ ++ dev_warn(&client->dev, FW_BUG "Stuck I2C bus: please ignore the next 'controller timed out' error\n"); ++ silead_ts_get_id(client); ++ ++ /* The forbid will also resume the device */ ++ pm_runtime_forbid(&client->dev); ++ pm_runtime_disable(&client->dev); ++ } ++ + silead_ts_set_power(client, SILEAD_POWER_OFF); + silead_ts_set_power(client, SILEAD_POWER_ON); + + error = silead_ts_get_id(client); +- if (error) ++ if (error) { ++ dev_err(&client->dev, "Chip ID read error %d\n", error); + return error; ++ } + + error = silead_ts_init(client); + if (error) +diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c +index 499402a975b3..c5d34a782372 100644 +--- a/drivers/input/touchscreen/usbtouchscreen.c ++++ b/drivers/input/touchscreen/usbtouchscreen.c +@@ -266,7 +266,7 @@ static int e2i_init(struct usbtouch_usb *usbtouch) + int ret; + struct usb_device *udev = interface_to_usbdev(usbtouch->interface); + +- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), ++ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + 0x01, 0x02, 0x0000, 0x0081, + NULL, 0, USB_CTRL_SET_TIMEOUT); + +@@ -462,7 +462,7 @@ static int mtouch_init(struct usbtouch_usb *usbtouch) + int ret, i; + struct usb_device *udev = interface_to_usbdev(usbtouch->interface); + +- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), ++ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + MTOUCHUSB_RESET, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); +@@ -474,7 +474,7 @@ static int mtouch_init(struct usbtouch_usb *usbtouch) + msleep(150); + + for (i = 0; i < 3; i++) { +- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), ++ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + MTOUCHUSB_ASYNC_REPORT, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 1, 1, NULL, 0, USB_CTRL_SET_TIMEOUT); +@@ -645,7 +645,7 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch) + } + + /* start sending data */ +- ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0), ++ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), + TSC10_CMD_DATA1, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c +index bb0448c91f67..14e9b06829d5 100644 +--- a/drivers/iommu/amd_iommu.c ++++ b/drivers/iommu/amd_iommu.c +@@ -1331,24 +1331,26 @@ static void increase_address_space(struct protection_domain *domain, + unsigned long flags; + u64 *pte; + ++ pte = (void *)get_zeroed_page(gfp); ++ if (!pte) ++ return; ++ + spin_lock_irqsave(&domain->lock, flags); + + if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL)) + /* address space already 64 bit large */ + goto out; + +- pte = (void *)get_zeroed_page(gfp); +- if (!pte) +- goto out; +- + *pte = PM_LEVEL_PDE(domain->mode, + virt_to_phys(domain->pt_root)); + domain->pt_root = pte; + domain->mode += 1; + domain->updated = true; ++ pte = NULL; + + out: + spin_unlock_irqrestore(&domain->lock, flags); ++ free_page((unsigned long)pte); + + return; + } +@@ -3661,7 +3663,7 @@ static struct irq_chip amd_ir_chip; + + #define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 
45)-1) << 6) + #define DTE_IRQ_REMAP_INTCTL (2ULL << 60) +-#define DTE_IRQ_TABLE_LEN (8ULL << 1) ++#define DTE_IRQ_TABLE_LEN (9ULL << 1) + #define DTE_IRQ_REMAP_ENABLE 1ULL + + static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table) +diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h +index da3fbf82d1cf..e19c05d9e84b 100644 +--- a/drivers/iommu/amd_iommu_types.h ++++ b/drivers/iommu/amd_iommu_types.h +@@ -383,7 +383,11 @@ extern bool amd_iommu_np_cache; + /* Only true if all IOMMUs support device IOTLBs */ + extern bool amd_iommu_iotlb_sup; + +-#define MAX_IRQS_PER_TABLE 256 ++/* ++ * AMD IOMMU hardware only support 512 IRTEs despite ++ * the architectural limitation of 2048 entries. ++ */ ++#define MAX_IRQS_PER_TABLE 512 + #define IRQ_TABLE_ALIGNMENT 128 + + struct irq_remap_table { +diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c +index 977070ce4fe9..9ad5a7019abf 100644 +--- a/drivers/iommu/dmar.c ++++ b/drivers/iommu/dmar.c +@@ -1024,8 +1024,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) + { + struct intel_iommu *iommu; + u32 ver, sts; +- int agaw = 0; +- int msagaw = 0; ++ int agaw = -1; ++ int msagaw = -1; + int err; + + if (!drhd->reg_base_addr) { +@@ -1050,17 +1050,28 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) + } + + err = -EINVAL; +- agaw = iommu_calculate_agaw(iommu); +- if (agaw < 0) { +- pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", +- iommu->seq_id); +- goto err_unmap; ++ if (cap_sagaw(iommu->cap) == 0) { ++ pr_info("%s: No supported address widths. Not attempting DMA translation.\n", ++ iommu->name); ++ drhd->ignored = 1; + } +- msagaw = iommu_calculate_max_sagaw(iommu); +- if (msagaw < 0) { +- pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", +- iommu->seq_id); +- goto err_unmap; ++ ++ if (!drhd->ignored) { ++ agaw = iommu_calculate_agaw(iommu); ++ if (agaw < 0) { ++ pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", ++ iommu->seq_id); ++ drhd->ignored = 1; ++ } ++ } ++ if (!drhd->ignored) { ++ msagaw = iommu_calculate_max_sagaw(iommu); ++ if (msagaw < 0) { ++ pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", ++ iommu->seq_id); ++ drhd->ignored = 1; ++ agaw = -1; ++ } + } + iommu->agaw = agaw; + iommu->msagaw = msagaw; +@@ -1087,7 +1098,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) + + raw_spin_lock_init(&iommu->register_lock); + +- if (intel_iommu_enabled) { ++ if (intel_iommu_enabled && !drhd->ignored) { + iommu->iommu_dev = iommu_device_create(NULL, iommu, + intel_iommu_groups, + "%s", iommu->name); +@@ -1099,6 +1110,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) + } + + drhd->iommu = iommu; ++ iommu->drhd = drhd; + + return 0; + +@@ -1113,7 +1125,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) + + static void free_iommu(struct intel_iommu *iommu) + { +- iommu_device_destroy(iommu->iommu_dev); ++ if (intel_iommu_enabled && !iommu->drhd->ignored) ++ iommu_device_destroy(iommu->iommu_dev); + + if (iommu->irq) { + if (iommu->pr_irq) { +diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c +index beef59eb94fa..626b434e7967 100644 +--- a/drivers/iommu/exynos-iommu.c ++++ b/drivers/iommu/exynos-iommu.c +@@ -1265,13 +1265,17 @@ static int exynos_iommu_of_xlate(struct device *dev, + return -ENODEV; + + data = platform_get_drvdata(sysmmu); +- if (!data) ++ if (!data) { ++ put_device(&sysmmu->dev); + return -ENODEV; ++ } + + if (!owner) { + owner = kzalloc(sizeof(*owner), GFP_KERNEL); +- if (!owner) ++ if 
(!owner) { ++ put_device(&sysmmu->dev); + return -ENOMEM; ++ } + + INIT_LIST_HEAD(&owner->controllers); + dev->archdata.iommu = owner; +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index 593a4bfcba42..b2061cfa05ab 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -3323,6 +3323,12 @@ static int __init init_dmars(void) + + if (!ecap_pass_through(iommu->ecap)) + hw_pass_through = 0; ++ ++ if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) { ++ pr_info("Disable batched IOTLB flush due to virtualization"); ++ intel_iommu_strict = 1; ++ } ++ + #ifdef CONFIG_INTEL_IOMMU_SVM + if (pasid_enabled(iommu)) + intel_svm_alloc_pasid_tables(iommu); +diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c +index ac596928f6b4..88ba1a65c283 100644 +--- a/drivers/iommu/intel_irq_remapping.c ++++ b/drivers/iommu/intel_irq_remapping.c +@@ -486,12 +486,18 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu) + + /* Enable interrupt-remapping */ + iommu->gcmd |= DMA_GCMD_IRE; +- iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */ + writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); +- + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, (sts & DMA_GSTS_IRES), sts); + ++ /* Block compatibility-format MSIs */ ++ if (sts & DMA_GSTS_CFIS) { ++ iommu->gcmd &= ~DMA_GCMD_CFI; ++ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); ++ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, ++ readl, !(sts & DMA_GSTS_CFIS), sts); ++ } ++ + /* + * With CFI clear in the Global Command register, we should be + * protected from dangerous (i.e. compatibility) interrupts +@@ -1344,6 +1350,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain, + irq_data = irq_domain_get_irq_data(domain, virq + i); + irq_cfg = irqd_cfg(irq_data); + if (!irq_data || !irq_cfg) { ++ if (!i) ++ kfree(data); + ret = -EINVAL; + goto out_free_data; + } +diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c +index 5c88ba70e4e0..31859687770e 100644 +--- a/drivers/iommu/iova.c ++++ b/drivers/iommu/iova.c +@@ -654,7 +654,9 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad) + for (i = 0 ; i < mag->size; ++i) { + struct iova *iova = private_find_iova(iovad, mag->pfns[i]); + +- BUG_ON(!iova); ++ if (WARN_ON(!iova)) ++ continue; ++ + private_free_iova(iovad, iova); + } + +diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c +index 505548aafeff..cec33e90e399 100644 +--- a/drivers/iommu/omap-iommu-debug.c ++++ b/drivers/iommu/omap-iommu-debug.c +@@ -101,8 +101,11 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf, + mutex_lock(&iommu_debug_lock); + + bytes = omap_iommu_dump_ctx(obj, p, count); ++ if (bytes < 0) ++ goto err; + bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes); + ++err: + mutex_unlock(&iommu_debug_lock); + kfree(buf); + +diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c +index 9b23843dcad4..4294523bede5 100644 +--- a/drivers/ipack/carriers/tpci200.c ++++ b/drivers/ipack/carriers/tpci200.c +@@ -94,16 +94,13 @@ static void tpci200_unregister(struct tpci200_board *tpci200) + free_irq(tpci200->info->pdev->irq, (void *) tpci200); + + pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs); +- pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); + + pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR); + pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR); + 
pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR); + pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR); +- pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR); + + pci_disable_device(tpci200->info->pdev); +- pci_dev_put(tpci200->info->pdev); + } + + static void tpci200_enable_irq(struct tpci200_board *tpci200, +@@ -524,7 +521,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, + tpci200->info = kzalloc(sizeof(struct tpci200_infos), GFP_KERNEL); + if (!tpci200->info) { + ret = -ENOMEM; +- goto out_err_info; ++ goto err_tpci200; + } + + pci_dev_get(pdev); +@@ -535,7 +532,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, + if (ret) { + dev_err(&pdev->dev, "Failed to allocate PCI Configuration Memory"); + ret = -EBUSY; +- goto out_err_pci_request; ++ goto err_tpci200_info; + } + tpci200->info->cfg_regs = ioremap_nocache( + pci_resource_start(pdev, TPCI200_CFG_MEM_BAR), +@@ -543,7 +540,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, + if (!tpci200->info->cfg_regs) { + dev_err(&pdev->dev, "Failed to map PCI Configuration Memory"); + ret = -EFAULT; +- goto out_err_ioremap; ++ goto err_request_region; + } + + /* Disable byte swapping for 16 bit IP module access. This will ensure +@@ -566,7 +563,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, + if (ret) { + dev_err(&pdev->dev, "error during tpci200 install\n"); + ret = -ENODEV; +- goto out_err_install; ++ goto err_cfg_regs; + } + + /* Register the carrier in the industry pack bus driver */ +@@ -578,7 +575,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, + dev_err(&pdev->dev, + "error registering the carrier on ipack driver\n"); + ret = -EFAULT; +- goto out_err_bus_register; ++ goto err_tpci200_install; + } + + /* save the bus number given by ipack to logging purpose */ +@@ -589,16 +586,16 @@ static int tpci200_pci_probe(struct pci_dev *pdev, + tpci200_create_device(tpci200, i); + return 0; + +-out_err_bus_register: ++err_tpci200_install: + tpci200_uninstall(tpci200); +-out_err_install: +- iounmap(tpci200->info->cfg_regs); +-out_err_ioremap: ++err_cfg_regs: ++ pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); ++err_request_region: + pci_release_region(pdev, TPCI200_CFG_MEM_BAR); +-out_err_pci_request: +- pci_dev_put(pdev); ++err_tpci200_info: + kfree(tpci200->info); +-out_err_info: ++ pci_dev_put(pdev); ++err_tpci200: + kfree(tpci200); + return ret; + } +@@ -608,6 +605,12 @@ static void __tpci200_pci_remove(struct tpci200_board *tpci200) + ipack_bus_unregister(tpci200->info->ipack_bus); + tpci200_uninstall(tpci200); + ++ pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); ++ ++ pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR); ++ ++ pci_dev_put(tpci200->info->pdev); ++ + kfree(tpci200->info); + kfree(tpci200); + } +diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c +index 63d980995d17..ac431697ebe1 100644 +--- a/drivers/irqchip/irq-alpine-msi.c ++++ b/drivers/irqchip/irq-alpine-msi.c +@@ -165,8 +165,7 @@ static int alpine_msix_middle_domain_alloc(struct irq_domain *domain, + return 0; + + err_sgi: +- while (--i >= 0) +- irq_domain_free_irqs_parent(domain, virq, i); ++ irq_domain_free_irqs_parent(domain, virq, i - 1); + alpine_msix_free_sgi(priv, sgi, nr_irqs); + return err; + } +diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c +old mode 100644 +new mode 100755 +index 8f3aa54ee873..4e0952290bd1 +--- a/drivers/irqchip/irq-gic.c ++++ b/drivers/irqchip/irq-gic.c +@@ -326,10 +326,8 @@ static int 
gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) + static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) + { +- void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); +- unsigned int cpu, shift = (gic_irq(d) % 4) * 8; +- u32 val, mask, bit; +- unsigned long flags; ++ void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d); ++ unsigned int cpu; + + if (!force) + cpu = cpumask_any_and(mask_val, cpu_online_mask); +@@ -339,12 +337,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) + return -EINVAL; + +- gic_lock_irqsave(flags); +- mask = 0xff << shift; +- bit = gic_cpu_map[cpu] << shift; +- val = readl_relaxed_no_log(reg) & ~mask; +- writel_relaxed_no_log(val | bit, reg); +- gic_unlock_irqrestore(flags); ++ writeb_relaxed_no_log(gic_cpu_map[cpu], reg); + + return IRQ_SET_MASK_OK_DONE; + } +diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c +index d15347de415a..9de62c3b8bf9 100644 +--- a/drivers/isdn/capi/kcapi.c ++++ b/drivers/isdn/capi/kcapi.c +@@ -845,7 +845,7 @@ EXPORT_SYMBOL(capi20_put_message); + * Return value: CAPI result code + */ + +-u16 capi20_get_manufacturer(u32 contr, u8 *buf) ++u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]) + { + struct capi_ctr *ctr; + u16 ret; +@@ -915,7 +915,7 @@ EXPORT_SYMBOL(capi20_get_version); + * Return value: CAPI result code + */ + +-u16 capi20_get_serial(u32 contr, u8 *serial) ++u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]) + { + struct capi_ctr *ctr; + u16 ret; +diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c +index ff48da61c94c..89cf1d695a01 100644 +--- a/drivers/isdn/hardware/mISDN/hfcpci.c ++++ b/drivers/isdn/hardware/mISDN/hfcpci.c +@@ -2352,7 +2352,7 @@ static void __exit + HFC_cleanup(void) + { + if (timer_pending(&hfc_tl)) +- del_timer(&hfc_tl); ++ del_timer_sync(&hfc_tl); + + pci_unregister_driver(&hfc_driver); + } +diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c +index 1fc290659e94..64060429aa50 100644 +--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c ++++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c +@@ -645,17 +645,19 @@ static void + release_io(struct inf_hw *hw) + { + if (hw->cfg.mode) { +- if (hw->cfg.p) { ++ if (hw->cfg.mode == AM_MEMIO) { + release_mem_region(hw->cfg.start, hw->cfg.size); +- iounmap(hw->cfg.p); ++ if (hw->cfg.p) ++ iounmap(hw->cfg.p); + } else + release_region(hw->cfg.start, hw->cfg.size); + hw->cfg.mode = AM_NONE; + } + if (hw->addr.mode) { +- if (hw->addr.p) { ++ if (hw->addr.mode == AM_MEMIO) { + release_mem_region(hw->addr.start, hw->addr.size); +- iounmap(hw->addr.p); ++ if (hw->addr.p) ++ iounmap(hw->addr.p); + } else + release_region(hw->addr.start, hw->addr.size); + hw->addr.mode = AM_NONE; +@@ -685,9 +687,12 @@ setup_io(struct inf_hw *hw) + (ulong)hw->cfg.start, (ulong)hw->cfg.size); + return err; + } +- if (hw->ci->cfg_mode == AM_MEMIO) +- hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size); + hw->cfg.mode = hw->ci->cfg_mode; ++ if (hw->ci->cfg_mode == AM_MEMIO) { ++ hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size); ++ if (!hw->cfg.p) ++ return -ENOMEM; ++ } + if (debug & DEBUG_HW) + pr_notice("%s: IO cfg %lx (%lu bytes) mode%d\n", + hw->name, (ulong)hw->cfg.start, +@@ -712,9 +717,12 @@ setup_io(struct inf_hw *hw) + (ulong)hw->addr.start, (ulong)hw->addr.size); + return err; + } +- if 
(hw->ci->addr_mode == AM_MEMIO) +- hw->addr.p = ioremap(hw->addr.start, hw->addr.size); + hw->addr.mode = hw->ci->addr_mode; ++ if (hw->ci->addr_mode == AM_MEMIO) { ++ hw->addr.p = ioremap(hw->addr.start, hw->addr.size); ++ if (!hw->addr.p) ++ return -ENOMEM; ++ } + if (debug & DEBUG_HW) + pr_notice("%s: IO addr %lx (%lu bytes) mode%d\n", + hw->name, (ulong)hw->addr.start, +diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c +index 8d338ba366d0..01a1afde5d3c 100644 +--- a/drivers/isdn/hardware/mISDN/mISDNipac.c ++++ b/drivers/isdn/hardware/mISDN/mISDNipac.c +@@ -711,7 +711,7 @@ isac_release(struct isac_hw *isac) + { + if (isac->type & IPAC_TYPE_ISACX) + WriteISAC(isac, ISACX_MASK, 0xff); +- else ++ else if (isac->type != 0) + WriteISAC(isac, ISAC_MASK, 0xff); + if (isac->dch.timer.function != NULL) { + del_timer(&isac->dch.timer); +diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c +index e9fcae4569af..6656fcf502aa 100644 +--- a/drivers/isdn/hardware/mISDN/netjet.c ++++ b/drivers/isdn/hardware/mISDN/netjet.c +@@ -1114,7 +1114,6 @@ nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + card->typ = NETJET_S_TJ300; + + card->base = pci_resource_start(pdev, 0); +- card->irq = pdev->irq; + pci_set_drvdata(pdev, card); + err = setup_instance(card); + if (err) +diff --git a/drivers/isdn/mISDN/Kconfig b/drivers/isdn/mISDN/Kconfig +index c0730d5c734d..fb61181a5c4f 100644 +--- a/drivers/isdn/mISDN/Kconfig ++++ b/drivers/isdn/mISDN/Kconfig +@@ -12,6 +12,7 @@ if MISDN != n + config MISDN_DSP + tristate "Digital Audio Processing of transparent data" + depends on MISDN ++ select BITREVERSE + help + Enable support for digital audio processing capability. + +diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c +index 2522a3dc7035..91af585430d3 100644 +--- a/drivers/leds/led-class.c ++++ b/drivers/leds/led-class.c +@@ -128,6 +128,7 @@ void led_classdev_suspend(struct led_classdev *led_cdev) + { + led_cdev->flags |= LED_SUSPENDED; + led_set_brightness_nopm(led_cdev, 0); ++ flush_work(&led_cdev->set_brightness_work); + } + EXPORT_SYMBOL_GPL(led_classdev_suspend); + +diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c +index dac7a71aad81..e5010c974307 100644 +--- a/drivers/leds/led-triggers.c ++++ b/drivers/leds/led-triggers.c +@@ -284,14 +284,15 @@ void led_trigger_event(struct led_trigger *trig, + enum led_brightness brightness) + { + struct led_classdev *led_cdev; ++ unsigned long flags; + + if (!trig) + return; + +- read_lock(&trig->leddev_list_lock); ++ read_lock_irqsave(&trig->leddev_list_lock, flags); + list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) + led_set_brightness(led_cdev, brightness); +- read_unlock(&trig->leddev_list_lock); ++ read_unlock_irqrestore(&trig->leddev_list_lock, flags); + } + EXPORT_SYMBOL_GPL(led_trigger_event); + +@@ -302,11 +303,12 @@ static void led_trigger_blink_setup(struct led_trigger *trig, + int invert) + { + struct led_classdev *led_cdev; ++ unsigned long flags; + + if (!trig) + return; + +- read_lock(&trig->leddev_list_lock); ++ read_lock_irqsave(&trig->leddev_list_lock, flags); + list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) { + if (oneshot) + led_blink_set_oneshot(led_cdev, delay_on, delay_off, +@@ -314,7 +316,7 @@ static void led_trigger_blink_setup(struct led_trigger *trig, + else + led_blink_set(led_cdev, delay_on, delay_off); + } +- read_unlock(&trig->leddev_list_lock); ++ read_unlock_irqrestore(&trig->leddev_list_lock, 
flags); + } + + void led_trigger_blink(struct led_trigger *trig, +diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c +index 77a104d2b124..13f414ff6fd0 100644 +--- a/drivers/leds/leds-88pm860x.c ++++ b/drivers/leds/leds-88pm860x.c +@@ -207,21 +207,33 @@ static int pm860x_led_probe(struct platform_device *pdev) + data->cdev.brightness_set_blocking = pm860x_led_set; + mutex_init(&data->lock); + +- ret = devm_led_classdev_register(chip->dev, &data->cdev); ++ ret = led_classdev_register(chip->dev, &data->cdev); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to register LED: %d\n", ret); + return ret; + } + pm860x_led_set(&data->cdev, 0); ++ ++ platform_set_drvdata(pdev, data); ++ + return 0; + } + ++static int pm860x_led_remove(struct platform_device *pdev) ++{ ++ struct pm860x_led *data = platform_get_drvdata(pdev); ++ ++ led_classdev_unregister(&data->cdev); ++ ++ return 0; ++} + + static struct platform_driver pm860x_led_driver = { + .driver = { + .name = "88pm860x-led", + }, + .probe = pm860x_led_probe, ++ .remove = pm860x_led_remove, + }; + + module_platform_driver(pm860x_led_driver); +diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c +index 1548259297c1..d6d6fb0622b4 100644 +--- a/drivers/leds/leds-bcm6328.c ++++ b/drivers/leds/leds-bcm6328.c +@@ -336,7 +336,7 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg, + led->cdev.brightness_set = bcm6328_led_set; + led->cdev.blink_set = bcm6328_blink_set; + +- rc = led_classdev_register(dev, &led->cdev); ++ rc = devm_led_classdev_register(dev, &led->cdev); + if (rc < 0) + return rc; + +diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c +index b2cc06618abe..a86ab6197a4e 100644 +--- a/drivers/leds/leds-bcm6358.c ++++ b/drivers/leds/leds-bcm6358.c +@@ -141,7 +141,7 @@ static int bcm6358_led(struct device *dev, struct device_node *nc, u32 reg, + + led->cdev.brightness_set = bcm6358_led_set; + +- rc = led_classdev_register(dev, &led->cdev); ++ rc = devm_led_classdev_register(dev, &led->cdev); + if (rc < 0) + return rc; + +diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c +index 5ff7d72f73aa..ecc265bb69a0 100644 +--- a/drivers/leds/leds-da903x.c ++++ b/drivers/leds/leds-da903x.c +@@ -113,12 +113,23 @@ static int da903x_led_probe(struct platform_device *pdev) + led->flags = pdata->flags; + led->master = pdev->dev.parent; + +- ret = devm_led_classdev_register(led->master, &led->cdev); ++ ret = led_classdev_register(led->master, &led->cdev); + if (ret) { + dev_err(&pdev->dev, "failed to register LED %d\n", id); + return ret; + } + ++ platform_set_drvdata(pdev, led); ++ ++ return 0; ++} ++ ++static int da903x_led_remove(struct platform_device *pdev) ++{ ++ struct da903x_led *led = platform_get_drvdata(pdev); ++ ++ led_classdev_unregister(&led->cdev); ++ + return 0; + } + +@@ -127,6 +138,7 @@ static struct platform_driver da903x_led_driver = { + .name = "da903x-led", + }, + .probe = da903x_led_probe, ++ .remove = da903x_led_remove, + }; + + module_platform_driver(da903x_led_driver); +diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c +index 45296aaca9da..02738b5b1dbf 100644 +--- a/drivers/leds/leds-ktd2692.c ++++ b/drivers/leds/leds-ktd2692.c +@@ -259,6 +259,17 @@ static void ktd2692_setup(struct ktd2692_context *led) + | KTD2692_REG_FLASH_CURRENT_BASE); + } + ++static void regulator_disable_action(void *_data) ++{ ++ struct device *dev = _data; ++ struct ktd2692_context *led = dev_get_drvdata(dev); ++ int ret; ++ ++ ret = 
regulator_disable(led->regulator); ++ if (ret) ++ dev_err(dev, "Failed to disable supply: %d\n", ret); ++} ++ + static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev, + struct ktd2692_led_config_data *cfg) + { +@@ -289,8 +300,14 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev, + + if (led->regulator) { + ret = regulator_enable(led->regulator); +- if (ret) ++ if (ret) { + dev_err(dev, "Failed to enable supply: %d\n", ret); ++ } else { ++ ret = devm_add_action_or_reset(dev, ++ regulator_disable_action, dev); ++ if (ret) ++ return ret; ++ } + } + + child_node = of_get_next_available_child(np, NULL); +@@ -380,17 +397,9 @@ static int ktd2692_probe(struct platform_device *pdev) + static int ktd2692_remove(struct platform_device *pdev) + { + struct ktd2692_context *led = platform_get_drvdata(pdev); +- int ret; + + led_classdev_flash_unregister(&led->fled_cdev); + +- if (led->regulator) { +- ret = regulator_disable(led->regulator); +- if (ret) +- dev_err(&pdev->dev, +- "Failed to disable supply: %d\n", ret); +- } +- + mutex_destroy(&led->lock); + + return 0; +diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c +index 5b529dc013d2..3d147489fc45 100644 +--- a/drivers/leds/leds-lm3533.c ++++ b/drivers/leds/leds-lm3533.c +@@ -698,7 +698,7 @@ static int lm3533_led_probe(struct platform_device *pdev) + + platform_set_drvdata(pdev, led); + +- ret = devm_led_classdev_register(pdev->dev.parent, &led->cdev); ++ ret = led_classdev_register(pdev->dev.parent, &led->cdev); + if (ret) { + dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id); + return ret; +@@ -708,13 +708,18 @@ static int lm3533_led_probe(struct platform_device *pdev) + + ret = lm3533_led_setup(led, pdata); + if (ret) +- return ret; ++ goto err_deregister; + + ret = lm3533_ctrlbank_enable(&led->cb); + if (ret) +- return ret; ++ goto err_deregister; + + return 0; ++ ++err_deregister: ++ led_classdev_unregister(&led->cdev); ++ ++ return ret; + } + + static int lm3533_led_remove(struct platform_device *pdev) +@@ -724,6 +729,7 @@ static int lm3533_led_remove(struct platform_device *pdev) + dev_dbg(&pdev->dev, "%s\n", __func__); + + lm3533_ctrlbank_disable(&led->cb); ++ led_classdev_unregister(&led->cdev); + + return 0; + } +diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c +index 6cb94f9a2f3f..b9c60dd2b132 100644 +--- a/drivers/leds/leds-lm355x.c ++++ b/drivers/leds/leds-lm355x.c +@@ -168,18 +168,19 @@ static int lm355x_chip_init(struct lm355x_chip_data *chip) + /* input and output pins configuration */ + switch (chip->type) { + case CHIP_LM3554: +- reg_val = pdata->pin_tx2 | pdata->ntc_pin; ++ reg_val = (u32)pdata->pin_tx2 | (u32)pdata->ntc_pin; + ret = regmap_update_bits(chip->regmap, 0xE0, 0x28, reg_val); + if (ret < 0) + goto out; +- reg_val = pdata->pass_mode; ++ reg_val = (u32)pdata->pass_mode; + ret = regmap_update_bits(chip->regmap, 0xA0, 0x04, reg_val); + if (ret < 0) + goto out; + break; + + case CHIP_LM3556: +- reg_val = pdata->pin_tx2 | pdata->ntc_pin | pdata->pass_mode; ++ reg_val = (u32)pdata->pin_tx2 | (u32)pdata->ntc_pin | ++ (u32)pdata->pass_mode; + ret = regmap_update_bits(chip->regmap, 0x0A, 0xC4, reg_val); + if (ret < 0) + goto out; +diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c +index 44ceed7ac3c5..7cebcd458943 100644 +--- a/drivers/leds/leds-lp5523.c ++++ b/drivers/leds/leds-lp5523.c +@@ -320,7 +320,7 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip) + usleep_range(3000, 6000); + ret = 
lp55xx_read(chip, LP5523_REG_STATUS, &status); + if (ret) +- return ret; ++ goto out; + status &= LP5523_ENG_STATUS_MASK; + + if (status != LP5523_ENG_STATUS_MASK) { +diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c +index be93b20e792a..4f0ba19c3577 100644 +--- a/drivers/leds/leds-wm831x-status.c ++++ b/drivers/leds/leds-wm831x-status.c +@@ -283,12 +283,23 @@ static int wm831x_status_probe(struct platform_device *pdev) + drvdata->cdev.blink_set = wm831x_status_blink_set; + drvdata->cdev.groups = wm831x_status_groups; + +- ret = devm_led_classdev_register(wm831x->dev, &drvdata->cdev); ++ ret = led_classdev_register(wm831x->dev, &drvdata->cdev); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to register LED: %d\n", ret); + return ret; + } + ++ platform_set_drvdata(pdev, drvdata); ++ ++ return 0; ++} ++ ++static int wm831x_status_remove(struct platform_device *pdev) ++{ ++ struct wm831x_status *drvdata = platform_get_drvdata(pdev); ++ ++ led_classdev_unregister(&drvdata->cdev); ++ + return 0; + } + +@@ -297,6 +308,7 @@ static struct platform_driver wm831x_status_driver = { + .name = "wm831x-status", + }, + .probe = wm831x_status_probe, ++ .remove = wm831x_status_remove, + }; + + module_platform_driver(wm831x_status_driver); +diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c +index 96d16fca68b2..24e7152cd2bf 100644 +--- a/drivers/macintosh/windfarm_pm112.c ++++ b/drivers/macintosh/windfarm_pm112.c +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -133,14 +134,6 @@ static int create_cpu_loop(int cpu) + s32 tmax; + int fmin; + +- /* Get PID params from the appropriate SAT */ +- hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL); +- if (hdr == NULL) { +- printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n"); +- return -EINVAL; +- } +- piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; +- + /* Get FVT params to get Tmax; if not found, assume default */ + hdr = smu_sat_get_sdb_partition(chip, 0xC4 + core, NULL); + if (hdr) { +@@ -153,6 +146,16 @@ static int create_cpu_loop(int cpu) + if (tmax < cpu_all_tmax) + cpu_all_tmax = tmax; + ++ kfree(hdr); ++ ++ /* Get PID params from the appropriate SAT */ ++ hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL); ++ if (hdr == NULL) { ++ printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n"); ++ return -EINVAL; ++ } ++ piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; ++ + /* + * Darwin has a minimum fan speed of 1000 rpm for the 4-way and + * 515 for the 2-way. 
That appears to be overkill, so for now, +@@ -175,6 +178,9 @@ static int create_cpu_loop(int cpu) + pid.min = fmin; + + wf_cpu_pid_init(&cpu_pid[cpu], &pid); ++ ++ kfree(hdr); ++ + return 0; + } + +diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h +index 7fe7df56fa33..f0939fc1cfe5 100644 +--- a/drivers/md/bcache/bcache.h ++++ b/drivers/md/bcache/bcache.h +@@ -547,6 +547,7 @@ struct cache_set { + */ + wait_queue_head_t btree_cache_wait; + struct task_struct *btree_cache_alloc_lock; ++ spinlock_t btree_cannibalize_lock; + + /* + * When we free a btree node, we increment the gen of the bucket the +diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c +index 158eae17031c..144043621429 100644 +--- a/drivers/md/bcache/bset.c ++++ b/drivers/md/bcache/bset.c +@@ -317,7 +317,7 @@ int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp) + + b->page_order = page_order; + +- t->data = (void *) __get_free_pages(gfp, b->page_order); ++ t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order); + if (!t->data) + goto err; + +diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c +index c8c5e3368b8b..26e56a9952d0 100644 +--- a/drivers/md/bcache/btree.c ++++ b/drivers/md/bcache/btree.c +@@ -790,7 +790,7 @@ int bch_btree_cache_alloc(struct cache_set *c) + mutex_init(&c->verify_lock); + + c->verify_ondisk = (void *) +- __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c))); ++ __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(bucket_pages(c))); + + c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL); + +@@ -836,15 +836,17 @@ static struct btree *mca_find(struct cache_set *c, struct bkey *k) + + static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) + { +- struct task_struct *old; +- +- old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current); +- if (old && old != current) { ++ spin_lock(&c->btree_cannibalize_lock); ++ if (likely(c->btree_cache_alloc_lock == NULL)) { ++ c->btree_cache_alloc_lock = current; ++ } else if (c->btree_cache_alloc_lock != current) { + if (op) + prepare_to_wait(&c->btree_cache_wait, &op->wait, + TASK_UNINTERRUPTIBLE); ++ spin_unlock(&c->btree_cannibalize_lock); + return -EINTR; + } ++ spin_unlock(&c->btree_cannibalize_lock); + + return 0; + } +@@ -879,10 +881,12 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, + */ + static void bch_cannibalize_unlock(struct cache_set *c) + { ++ spin_lock(&c->btree_cannibalize_lock); + if (c->btree_cache_alloc_lock == current) { + c->btree_cache_alloc_lock = NULL; + wake_up(&c->btree_cache_wait); + } ++ spin_unlock(&c->btree_cannibalize_lock); + } + + static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, +@@ -1370,7 +1374,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, + if (__set_blocks(n1, n1->keys + n2->keys, + block_bytes(b->c)) > + btree_blocks(new_nodes[i])) +- goto out_nocoalesce; ++ goto out_unlock_nocoalesce; + + keys = n2->keys; + /* Take the key of the node we're getting rid of */ +@@ -1399,7 +1403,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, + + if (__bch_keylist_realloc(&keylist, + bkey_u64s(&new_nodes[i]->key))) +- goto out_nocoalesce; ++ goto out_unlock_nocoalesce; + + bch_btree_node_write(new_nodes[i], &cl); + bch_keylist_add(&keylist, &new_nodes[i]->key); +@@ -1445,6 +1449,10 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, + /* Invalidated our iterator */ + return -EINTR; + ++out_unlock_nocoalesce: ++ for (i = 0; i < 
nodes; i++) ++ mutex_unlock(&new_nodes[i]->write_lock); ++ + out_nocoalesce: + closure_sync(&cl); + bch_keylist_free(&keylist); +diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c +index 6ee5370eb916..423cb1a310d6 100644 +--- a/drivers/md/bcache/journal.c ++++ b/drivers/md/bcache/journal.c +@@ -839,8 +839,8 @@ int bch_journal_alloc(struct cache_set *c) + j->w[1].c = c; + + if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) || +- !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) || +- !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS))) ++ !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) || ++ !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS))) + return -ENOMEM; + + return 0; +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c +index 86a6dbdb6d02..114413ef7242 100644 +--- a/drivers/md/bcache/super.c ++++ b/drivers/md/bcache/super.c +@@ -1468,7 +1468,7 @@ void bch_cache_set_unregister(struct cache_set *c) + } + + #define alloc_bucket_pages(gfp, c) \ +- ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c)))) ++ ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c)))) + + struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) + { +@@ -1510,6 +1510,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) + sema_init(&c->sb_write_mutex, 1); + mutex_init(&c->bucket_lock); + init_waitqueue_head(&c->btree_cache_wait); ++ spin_lock_init(&c->btree_cannibalize_lock); + init_waitqueue_head(&c->bucket_wait); + init_waitqueue_head(&c->gc_wait); + sema_init(&c->uuid_write_mutex, 1); +@@ -1778,7 +1779,14 @@ static const char *register_cache_set(struct cache *ca) + sysfs_create_link(&c->kobj, &ca->kobj, buf)) + goto err; + +- if (ca->sb.seq > c->sb.seq) { ++ /* ++ * A special case is both ca->sb.seq and c->sb.seq are 0, ++ * such condition happens on a new created cache device whose ++ * super block is never flushed yet. In this case c->sb.version ++ * and other members should be updated too, otherwise we will ++ * have a mistaken super block version in cache set. 
++ */ ++ if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) { + c->sb.version = ca->sb.version; + memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); + c->sb.flags = ca->sb.flags; +diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c +index 63bff4cc7098..863fe19e906e 100644 +--- a/drivers/md/bitmap.c ++++ b/drivers/md/bitmap.c +@@ -1339,7 +1339,7 @@ __acquires(bitmap->lock) + if (bitmap->bp[page].hijacked || + bitmap->bp[page].map == NULL) + csize = ((sector_t)1) << (bitmap->chunkshift + +- PAGE_COUNTER_SHIFT - 1); ++ PAGE_COUNTER_SHIFT); + else + csize = ((sector_t)1) << bitmap->chunkshift; + *blocks = csize - (offset & (csize - 1)); +diff --git a/drivers/md/dm-bow.c b/drivers/md/dm-bow.c +index b45f0b2fcc7c..88c8611daf61 100644 +--- a/drivers/md/dm-bow.c ++++ b/drivers/md/dm-bow.c +@@ -8,11 +8,11 @@ + #include "dm-bufio.h" + #include "dm-core.h" + ++#include + #include + #include + + #define DM_MSG_PREFIX "bow" +-#define SECTOR_SIZE 512 + + struct log_entry { + u64 source; +diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c +index 62eb4b7caff3..a9208ab12708 100644 +--- a/drivers/md/dm-cache-metadata.c ++++ b/drivers/md/dm-cache-metadata.c +@@ -508,12 +508,16 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd, + CACHE_MAX_CONCURRENT_LOCKS); + if (IS_ERR(cmd->bm)) { + DMERR("could not create block manager"); +- return PTR_ERR(cmd->bm); ++ r = PTR_ERR(cmd->bm); ++ cmd->bm = NULL; ++ return r; + } + + r = __open_or_format_metadata(cmd, may_format_device); +- if (r) ++ if (r) { + dm_block_manager_destroy(cmd->bm); ++ cmd->bm = NULL; ++ } + + return r; + } +diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c +index 68d4084377ad..cb66ed903000 100644 +--- a/drivers/md/dm-era-target.c ++++ b/drivers/md/dm-era-target.c +@@ -46,6 +46,7 @@ struct writeset { + static void writeset_free(struct writeset *ws) + { + vfree(ws->bits); ++ ws->bits = NULL; + } + + static int setup_on_disk_bitset(struct dm_disk_bitset *info, +@@ -70,8 +71,6 @@ static size_t bitset_size(unsigned nr_bits) + */ + static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks) + { +- ws->md.nr_bits = nr_blocks; +- ws->md.root = INVALID_WRITESET_ROOT; + ws->bits = vzalloc(bitset_size(nr_blocks)); + if (!ws->bits) { + DMERR("%s: couldn't allocate in memory bitset", __func__); +@@ -84,12 +83,14 @@ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks) + /* + * Wipes the in-core bitset, and creates a new on disk bitset. 
+ */ +-static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws) ++static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws, ++ dm_block_t nr_blocks) + { + int r; + +- memset(ws->bits, 0, bitset_size(ws->md.nr_bits)); ++ memset(ws->bits, 0, bitset_size(nr_blocks)); + ++ ws->md.nr_bits = nr_blocks; + r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root); + if (r) { + DMERR("%s: setup_on_disk_bitset failed", __func__); +@@ -133,7 +134,7 @@ static int writeset_test_and_set(struct dm_disk_bitset *info, + { + int r; + +- if (!test_and_set_bit(block, ws->bits)) { ++ if (!test_bit(block, ws->bits)) { + r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root); + if (r) { + /* FIXME: fail mode */ +@@ -388,7 +389,7 @@ static void ws_dec(void *context, const void *value) + + static int ws_eq(void *context, const void *value1, const void *value2) + { +- return !memcmp(value1, value2, sizeof(struct writeset_metadata)); ++ return !memcmp(value1, value2, sizeof(struct writeset_disk)); + } + + /*----------------------------------------------------------------*/ +@@ -564,6 +565,15 @@ static int open_metadata(struct era_metadata *md) + } + + disk = dm_block_data(sblock); ++ ++ /* Verify the data block size hasn't changed */ ++ if (le32_to_cpu(disk->data_block_size) != md->block_size) { ++ DMERR("changing the data block size (from %u to %llu) is not supported", ++ le32_to_cpu(disk->data_block_size), md->block_size); ++ r = -EINVAL; ++ goto bad; ++ } ++ + r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION, + disk->metadata_space_map_root, + sizeof(disk->metadata_space_map_root), +@@ -575,10 +585,10 @@ static int open_metadata(struct era_metadata *md) + + setup_infos(md); + +- md->block_size = le32_to_cpu(disk->data_block_size); + md->nr_blocks = le32_to_cpu(disk->nr_blocks); + md->current_era = le32_to_cpu(disk->current_era); + ++ ws_unpack(&disk->current_writeset, &md->current_writeset->md); + md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root); + md->era_array_root = le64_to_cpu(disk->era_array_root); + md->metadata_snap = le64_to_cpu(disk->metadata_snap); +@@ -747,6 +757,12 @@ static int metadata_digest_lookup_writeset(struct era_metadata *md, + ws_unpack(&disk, &d->writeset); + d->value = cpu_to_le32(key); + ++ /* ++ * We initialise another bitset info to avoid any caching side effects ++ * with the previous one. ++ */ ++ dm_disk_bitset_init(md->tm, &d->info); ++ + d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks); + d->current_bit = 0; + d->step = metadata_digest_transcribe_writeset; +@@ -760,12 +776,6 @@ static int metadata_digest_start(struct era_metadata *md, struct digest *d) + return 0; + + memset(d, 0, sizeof(*d)); +- +- /* +- * We initialise another bitset info to avoid any caching side +- * effects with the previous one. 
+- */ +- dm_disk_bitset_init(md->tm, &d->info); + d->step = metadata_digest_lookup_writeset; + + return 0; +@@ -803,6 +813,8 @@ static struct era_metadata *metadata_open(struct block_device *bdev, + + static void metadata_close(struct era_metadata *md) + { ++ writeset_free(&md->writesets[0]); ++ writeset_free(&md->writesets[1]); + destroy_persistent_data_objects(md); + kfree(md); + } +@@ -840,6 +852,7 @@ static int metadata_resize(struct era_metadata *md, void *arg) + r = writeset_alloc(&md->writesets[1], *new_size); + if (r) { + DMERR("%s: writeset_alloc failed for writeset 1", __func__); ++ writeset_free(&md->writesets[0]); + return r; + } + +@@ -850,6 +863,8 @@ static int metadata_resize(struct era_metadata *md, void *arg) + &value, &md->era_array_root); + if (r) { + DMERR("%s: dm_array_resize failed", __func__); ++ writeset_free(&md->writesets[0]); ++ writeset_free(&md->writesets[1]); + return r; + } + +@@ -871,7 +886,6 @@ static int metadata_era_archive(struct era_metadata *md) + } + + ws_pack(&md->current_writeset->md, &value); +- md->current_writeset->md.root = INVALID_WRITESET_ROOT; + + keys[0] = md->current_era; + __dm_bless_for_disk(&value); +@@ -883,6 +897,7 @@ static int metadata_era_archive(struct era_metadata *md) + return r; + } + ++ md->current_writeset->md.root = INVALID_WRITESET_ROOT; + md->archived_writesets = true; + + return 0; +@@ -899,7 +914,7 @@ static int metadata_new_era(struct era_metadata *md) + int r; + struct writeset *new_writeset = next_writeset(md); + +- r = writeset_init(&md->bitset_info, new_writeset); ++ r = writeset_init(&md->bitset_info, new_writeset, md->nr_blocks); + if (r) { + DMERR("%s: writeset_init failed", __func__); + return r; +@@ -952,7 +967,7 @@ static int metadata_commit(struct era_metadata *md) + int r; + struct dm_block *sblock; + +- if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) { ++ if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) { + r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root, + &md->current_writeset->md.root); + if (r) { +@@ -1227,8 +1242,10 @@ static void process_deferred_bios(struct era *era) + int r; + struct bio_list deferred_bios, marked_bios; + struct bio *bio; ++ struct blk_plug plug; + bool commit_needed = false; + bool failed = false; ++ struct writeset *ws = era->md->current_writeset; + + bio_list_init(&deferred_bios); + bio_list_init(&marked_bios); +@@ -1238,9 +1255,11 @@ static void process_deferred_bios(struct era *era) + bio_list_init(&era->deferred_bios); + spin_unlock(&era->deferred_lock); + ++ if (bio_list_empty(&deferred_bios)) ++ return; ++ + while ((bio = bio_list_pop(&deferred_bios))) { +- r = writeset_test_and_set(&era->md->bitset_info, +- era->md->current_writeset, ++ r = writeset_test_and_set(&era->md->bitset_info, ws, + get_block(era, bio)); + if (r < 0) { + /* +@@ -1248,7 +1267,6 @@ static void process_deferred_bios(struct era *era) + * FIXME: finish. + */ + failed = true; +- + } else if (r == 0) + commit_needed = true; + +@@ -1264,9 +1282,19 @@ static void process_deferred_bios(struct era *era) + if (failed) + while ((bio = bio_list_pop(&marked_bios))) + bio_io_error(bio); +- else +- while ((bio = bio_list_pop(&marked_bios))) ++ else { ++ blk_start_plug(&plug); ++ while ((bio = bio_list_pop(&marked_bios))) { ++ /* ++ * Only update the in-core writeset if the on-disk one ++ * was updated too. 
++ */ ++ if (commit_needed) ++ set_bit(get_block(era, bio), ws->bits); + generic_make_request(bio); ++ } ++ blk_finish_plug(&plug); ++ } + } + + static void process_rpc_calls(struct era *era) +@@ -1487,15 +1515,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv) + } + era->md = md; + +- era->nr_blocks = calc_nr_blocks(era); +- +- r = metadata_resize(era->md, &era->nr_blocks); +- if (r) { +- ti->error = "couldn't resize metadata"; +- era_destroy(era); +- return -ENOMEM; +- } +- + era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); + if (!era->wq) { + ti->error = "could not create workqueue for metadata object"; +@@ -1573,16 +1592,24 @@ static int era_preresume(struct dm_target *ti) + dm_block_t new_size = calc_nr_blocks(era); + + if (era->nr_blocks != new_size) { +- r = in_worker1(era, metadata_resize, &new_size); +- if (r) ++ r = metadata_resize(era->md, &new_size); ++ if (r) { ++ DMERR("%s: metadata_resize failed", __func__); ++ return r; ++ } ++ ++ r = metadata_commit(era->md); ++ if (r) { ++ DMERR("%s: metadata_commit failed", __func__); + return r; ++ } + + era->nr_blocks = new_size; + } + + start_worker(era); + +- r = in_worker0(era, metadata_new_era); ++ r = in_worker0(era, metadata_era_rollover); + if (r) { + DMERR("%s: metadata_era_rollover failed", __func__); + return r; +diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c +index 446d76e14c58..49b39e9d9830 100644 +--- a/drivers/md/dm-ioctl.c ++++ b/drivers/md/dm-ioctl.c +@@ -524,7 +524,7 @@ static int list_devices(struct dm_ioctl *param, size_t param_size) + * Grab our output buffer. + */ + nl = get_result_buffer(param, param_size, &len); +- if (len < needed) { ++ if (len < needed || len < sizeof(nl->dev)) { + param->flags |= DM_BUFFER_FULL_FLAG; + goto out; + } +@@ -1549,6 +1549,7 @@ static int target_message(struct dm_ioctl *param, size_t param_size) + + if (!argc) { + DMWARN("Empty message received."); ++ r = -EINVAL; + goto out_argv; + } + +diff --git a/drivers/md/dm-req-crypt.c b/drivers/md/dm-req-crypt.c +index b4bdbc07127a..4030c716413f 100644 +--- a/drivers/md/dm-req-crypt.c ++++ b/drivers/md/dm-req-crypt.c +@@ -51,7 +51,6 @@ + #define KEY_SIZE_XTS 32 + #define AES_XTS_IV_LEN 16 + #define MAX_MSM_ICE_KEY_LUT_SIZE 32 +-#define SECTOR_SIZE 512 + #define MIN_CRYPTO_TRANSFER_SIZE (4 * 1024) + + #define DM_REQ_CRYPT_ERROR -1 +diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c +index e289aae3e4d7..d47c0edfc16a 100644 +--- a/drivers/md/dm-rq.c ++++ b/drivers/md/dm-rq.c +@@ -1020,6 +1020,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t) + blk_mq_free_tag_set(md->tag_set); + out_kfree_tag_set: + kfree(md->tag_set); ++ md->tag_set = NULL; + + return err; + } +@@ -1029,6 +1030,7 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md) + if (md->tag_set) { + blk_mq_free_tag_set(md->tag_set); + kfree(md->tag_set); ++ md->tag_set = NULL; + } + } + +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c +index c04d9f22d160..d85d13a4c57a 100644 +--- a/drivers/md/dm-snap.c ++++ b/drivers/md/dm-snap.c +@@ -788,7 +788,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new) + static uint32_t __minimum_chunk_size(struct origin *o) + { + struct dm_snapshot *snap; +- unsigned chunk_size = 0; ++ unsigned chunk_size = rounddown_pow_of_two(UINT_MAX); + + if (o) + list_for_each_entry(snap, &o->snapshots, list) +@@ -1264,6 +1264,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) + + if (!s->store->chunk_size) { + 
ti->error = "Chunk size not set"; ++ r = -EINVAL; + goto bad_read_metadata; + } + +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c +index 9da9a4c816f2..75c1599c7a9e 100644 +--- a/drivers/md/dm-table.c ++++ b/drivers/md/dm-table.c +@@ -397,14 +397,23 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, + { + int r; + dev_t dev; ++ unsigned int major, minor; ++ char dummy; + struct dm_dev_internal *dd; + struct dm_table *t = ti->table; + + BUG_ON(!t); + +- dev = dm_get_dev_t(path); +- if (!dev) +- return -ENODEV; ++ if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) { ++ /* Extract the major/minor numbers */ ++ dev = MKDEV(major, minor); ++ if (MAJOR(dev) != major || MINOR(dev) != minor) ++ return -EOVERFLOW; ++ } else { ++ dev = dm_get_dev_t(path); ++ if (!dev) ++ return -ENODEV; ++ } + + dd = find_device(&t->devices, dev); + if (!dd) { +@@ -840,12 +849,12 @@ void dm_table_set_type(struct dm_table *t, unsigned type) + } + EXPORT_SYMBOL_GPL(dm_table_set_type); + +-static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, +- sector_t start, sector_t len, void *data) ++static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev, ++ sector_t start, sector_t len, void *data) + { + struct request_queue *q = bdev_get_queue(dev->bdev); + +- return q && blk_queue_dax(q); ++ return q && !blk_queue_dax(q); + } + + static bool dm_table_supports_dax(struct dm_table *t) +@@ -861,7 +870,7 @@ static bool dm_table_supports_dax(struct dm_table *t) + return false; + + if (!ti->type->iterate_devices || +- !ti->type->iterate_devices(ti, device_supports_dax, NULL)) ++ ti->type->iterate_devices(ti, device_not_dax_capable, NULL)) + return false; + } + +@@ -1251,12 +1260,6 @@ void dm_table_event_callback(struct dm_table *t, + + void dm_table_event(struct dm_table *t) + { +- /* +- * You can no longer call dm_table_event() from interrupt +- * context, use a bottom half instead. +- */ +- BUG_ON(in_interrupt()); +- + mutex_lock(&_event_lock); + if (t->event_fn) + t->event_fn(t->event_context); +@@ -1304,6 +1307,46 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) + return &t->targets[(KEYS_PER_NODE * n) + k]; + } + ++/* ++ * type->iterate_devices() should be called when the sanity check needs to ++ * iterate and check all underlying data devices. iterate_devices() will ++ * iterate all underlying data devices until it encounters a non-zero return ++ * code, returned by whether the input iterate_devices_callout_fn, or ++ * iterate_devices() itself internally. ++ * ++ * For some target type (e.g. dm-stripe), one call of iterate_devices() may ++ * iterate multiple underlying devices internally, in which case a non-zero ++ * return code returned by iterate_devices_callout_fn will stop the iteration ++ * in advance. ++ * ++ * Cases requiring _any_ underlying device supporting some kind of attribute, ++ * should use the iteration structure like dm_table_any_dev_attr(), or call ++ * it directly. @func should handle semantics of positive examples, e.g. ++ * capable of something. ++ * ++ * Cases requiring _all_ underlying devices supporting some kind of attribute, ++ * should use the iteration structure like dm_table_supports_nowait() or ++ * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that ++ * uses an @anti_func that handle semantics of counter examples, e.g. not ++ * capable of something. 
So: return !dm_table_any_dev_attr(t, anti_func); ++ */ ++static bool dm_table_any_dev_attr(struct dm_table *t, ++ iterate_devices_callout_fn func) ++{ ++ struct dm_target *ti; ++ unsigned int i; ++ ++ for (i = 0; i < dm_table_get_num_targets(t); i++) { ++ ti = dm_table_get_target(t, i); ++ ++ if (ti->type->iterate_devices && ++ ti->type->iterate_devices(ti, func, NULL)) ++ return true; ++ } ++ ++ return false; ++} ++ + static int count_device(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) + { +@@ -1474,12 +1517,12 @@ static bool dm_table_discard_zeroes_data(struct dm_table *t) + return true; + } + +-static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, +- sector_t start, sector_t len, void *data) ++static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev, ++ sector_t start, sector_t len, void *data) + { + struct request_queue *q = bdev_get_queue(dev->bdev); + +- return q && blk_queue_nonrot(q); ++ return q && !blk_queue_nonrot(q); + } + + static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, +@@ -1490,39 +1533,12 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, + return q && !blk_queue_add_random(q); + } + +-static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev, +- sector_t start, sector_t len, void *data) +-{ +- struct request_queue *q = bdev_get_queue(dev->bdev); +- +- return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); +-} +- +-static int queue_supports_inline_encryption(struct dm_target *ti, +- struct dm_dev *dev, +- sector_t start, sector_t len, +- void *data) ++static int queue_no_sg_merge(struct dm_target *ti, struct dm_dev *dev, ++ sector_t start, sector_t len, void *data) + { + struct request_queue *q = bdev_get_queue(dev->bdev); + +- return q && blk_queue_inlinecrypt(q); +-} +- +-static bool dm_table_all_devices_attribute(struct dm_table *t, +- iterate_devices_callout_fn func) +-{ +- struct dm_target *ti; +- unsigned i = 0; +- +- while (i < dm_table_get_num_targets(t)) { +- ti = dm_table_get_target(t, i++); +- +- if (!ti->type->iterate_devices || +- !ti->type->iterate_devices(ti, func, NULL)) +- return false; +- } +- +- return true; ++ return q && test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); + } + + static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev, +@@ -1615,23 +1631,18 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, + q->limits.discard_zeroes_data = 0; + + /* Ensure that all underlying devices are non-rotational. 
*/ +- if (dm_table_all_devices_attribute(t, device_is_nonrot)) +- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); +- else ++ if (dm_table_any_dev_attr(t, device_is_rotational)) + queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q); ++ else ++ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + + if (!dm_table_supports_write_same(t)) + q->limits.max_write_same_sectors = 0; + +- if (dm_table_all_devices_attribute(t, queue_supports_sg_merge)) +- queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); +- else ++ if (dm_table_any_dev_attr(t, queue_no_sg_merge)) + queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); +- +- if (dm_table_all_devices_attribute(t, queue_supports_inline_encryption)) +- queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q); + else +- queue_flag_clear_unlocked(QUEUE_FLAG_INLINECRYPT, q); ++ queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); + + dm_table_verify_integrity(t); + +@@ -1641,7 +1652,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, + * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not + * have it set. + */ +- if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random)) ++ if (blk_queue_add_random(q) && dm_table_any_dev_attr(t, device_is_not_random)) + queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); + + /* +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c +index d20f4023f6c1..b5bf2ecfaf91 100644 +--- a/drivers/md/dm-thin-metadata.c ++++ b/drivers/md/dm-thin-metadata.c +@@ -700,12 +700,16 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f + THIN_MAX_CONCURRENT_LOCKS); + if (IS_ERR(pmd->bm)) { + DMERR("could not create block manager"); +- return PTR_ERR(pmd->bm); ++ r = PTR_ERR(pmd->bm); ++ pmd->bm = NULL; ++ return r; + } + + r = __open_or_format_metadata(pmd, format_device); +- if (r) ++ if (r) { + dm_block_manager_destroy(pmd->bm); ++ pmd->bm = NULL; ++ } + + return r; + } +diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c +index 8f644879f277..ec253175afd7 100644 +--- a/drivers/md/dm-verity-target.c ++++ b/drivers/md/dm-verity-target.c +@@ -21,6 +21,8 @@ + #include + #include + #include ++#include ++#include + + #define DM_MSG_PREFIX "verity" + +@@ -254,6 +256,13 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type, + kernel_restart("dm-verity device corrupted"); + } + ++ if (v->mode == DM_VERITY_MODE_EIO) { ++#ifdef CONFIG_DM_VERITY_AVB ++ dm_verity_avb_error_handler(); ++#endif ++ qpnp_pon_set_restart_reason( ++ PON_RESTART_REASON_DMVERITY_CORRUPTED); ++ } + return 1; + } + +diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c +index e870b09b2c84..d08c63aaf10b 100644 +--- a/drivers/md/md-cluster.c ++++ b/drivers/md/md-cluster.c +@@ -1234,6 +1234,7 @@ static void unlock_all_bitmaps(struct mddev *mddev) + } + } + kfree(cinfo->other_bitmap_lockres); ++ cinfo->other_bitmap_lockres = NULL; + } + } + +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 1a7b3f088fd4..8ef293ab0af0 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -508,6 +508,17 @@ void mddev_init(struct mddev *mddev) + } + EXPORT_SYMBOL_GPL(mddev_init); + ++static struct mddev *mddev_find_locked(dev_t unit) ++{ ++ struct mddev *mddev; ++ ++ list_for_each_entry(mddev, &all_mddevs, all_mddevs) ++ if (mddev->unit == unit) ++ return mddev; ++ ++ return NULL; ++} ++ + static struct mddev *mddev_find(dev_t unit) + { + struct mddev *mddev, *new = NULL; +@@ -519,13 +530,13 @@ static struct mddev *mddev_find(dev_t 
unit) + spin_lock(&all_mddevs_lock); + + if (unit) { +- list_for_each_entry(mddev, &all_mddevs, all_mddevs) +- if (mddev->unit == unit) { +- mddev_get(mddev); +- spin_unlock(&all_mddevs_lock); +- kfree(new); +- return mddev; +- } ++ mddev = mddev_find_locked(unit); ++ if (mddev) { ++ mddev_get(mddev); ++ spin_unlock(&all_mddevs_lock); ++ kfree(new); ++ return mddev; ++ } + + if (new) { + list_add(&new->all_mddevs, &all_mddevs); +@@ -551,12 +562,7 @@ static struct mddev *mddev_find(dev_t unit) + return NULL; + } + +- is_free = 1; +- list_for_each_entry(mddev, &all_mddevs, all_mddevs) +- if (mddev->unit == dev) { +- is_free = 0; +- break; +- } ++ is_free = !mddev_find_locked(dev); + } + new->unit = dev; + new->md_minor = MINOR(dev); +@@ -6857,8 +6863,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, + err = -EBUSY; + goto out; + } +- WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags)); +- set_bit(MD_CLOSING, &mddev->flags); ++ if (test_and_set_bit(MD_CLOSING, &mddev->flags)) { ++ mutex_unlock(&mddev->open_mutex); ++ err = -EBUSY; ++ goto out; ++ } + did_set_md_closing = true; + mutex_unlock(&mddev->open_mutex); + sync_blockdev(bdev); +@@ -7101,9 +7110,9 @@ static int md_open(struct block_device *bdev, fmode_t mode) + */ + mddev_put(mddev); + /* Wait until bdev->bd_disk is definitely gone */ +- flush_workqueue(md_misc_wq); +- /* Then retry the open from the top */ +- return -ERESTARTSYS; ++ if (work_pending(&mddev->del_work)) ++ flush_workqueue(md_misc_wq); ++ return -EBUSY; + } + BUG_ON(mddev != bdev->bd_disk->private_data); + +@@ -8458,11 +8467,11 @@ void md_check_recovery(struct mddev *mddev) + } + + if (mddev_is_clustered(mddev)) { +- struct md_rdev *rdev; ++ struct md_rdev *rdev, *tmp; + /* kick the device if another node issued a + * remove disk. 
+ */ +- rdev_for_each(rdev, mddev) { ++ rdev_for_each_safe(rdev, tmp, mddev) { + if (test_and_clear_bit(ClusterRemove, &rdev->flags) && + rdev->raid_disk < 0) + md_kick_rdev_from_array(rdev); +@@ -8771,12 +8780,12 @@ static int __init md_init(void) + static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) + { + struct mdp_superblock_1 *sb = page_address(rdev->sb_page); +- struct md_rdev *rdev2; ++ struct md_rdev *rdev2, *tmp; + int role, ret; + char b[BDEVNAME_SIZE]; + + /* Check for change of roles in the active devices */ +- rdev_for_each(rdev2, mddev) { ++ rdev_for_each_safe(rdev2, tmp, mddev) { + if (test_bit(Faulty, &rdev2->flags)) + continue; + +diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h +index a240990a7f33..5673f8eb5f88 100644 +--- a/drivers/md/persistent-data/dm-btree-internal.h ++++ b/drivers/md/persistent-data/dm-btree-internal.h +@@ -34,12 +34,12 @@ struct node_header { + __le32 max_entries; + __le32 value_size; + __le32 padding; +-} __packed; ++} __attribute__((packed, aligned(8))); + + struct btree_node { + struct node_header header; + __le64 keys[0]; +-} __packed; ++} __attribute__((packed, aligned(8))); + + + /* +diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c +index eff04fa23dfa..9e4d1212f4c1 100644 +--- a/drivers/md/persistent-data/dm-btree-remove.c ++++ b/drivers/md/persistent-data/dm-btree-remove.c +@@ -549,7 +549,8 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, + delete_at(n, index); + } + +- *new_root = shadow_root(&spine); ++ if (!r) ++ *new_root = shadow_root(&spine); + exit_shadow_spine(&spine); + + return r; +diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c +index 22729fd92a1b..ca09ad2a639c 100644 +--- a/drivers/md/persistent-data/dm-space-map-common.c ++++ b/drivers/md/persistent-data/dm-space-map-common.c +@@ -337,6 +337,8 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, + */ + begin = do_div(index_begin, ll->entries_per_block); + end = do_div(end, ll->entries_per_block); ++ if (end == 0) ++ end = ll->entries_per_block; + + for (i = index_begin; i < index_end; i++, begin = 0) { + struct dm_block *blk; +diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h +index 8de63ce39bdd..87e17909ef52 100644 +--- a/drivers/md/persistent-data/dm-space-map-common.h ++++ b/drivers/md/persistent-data/dm-space-map-common.h +@@ -33,7 +33,7 @@ struct disk_index_entry { + __le64 blocknr; + __le32 nr_free; + __le32 none_free_before; +-} __packed; ++} __attribute__ ((packed, aligned(8))); + + + #define MAX_METADATA_BITMAPS 255 +@@ -43,7 +43,7 @@ struct disk_metadata_index { + __le64 blocknr; + + struct disk_index_entry index[MAX_METADATA_BITMAPS]; +-} __packed; ++} __attribute__ ((packed, aligned(8))); + + struct ll_disk; + +@@ -86,7 +86,7 @@ struct disk_sm_root { + __le64 nr_allocated; + __le64 bitmap_root; + __le64 ref_count_root; +-} __packed; ++} __attribute__ ((packed, aligned(8))); + + #define ENTRIES_PER_BYTE 4 + +@@ -94,7 +94,7 @@ struct disk_bitmap_header { + __le32 csum; + __le32 not_used; + __le64 blocknr; +-} __packed; ++} __attribute__ ((packed, aligned(8))); + + enum allocation_event { + SM_NONE, +diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c +index bf4c5e2ccb6f..e0acae7a3815 100644 +--- 
a/drivers/md/persistent-data/dm-space-map-disk.c ++++ b/drivers/md/persistent-data/dm-space-map-disk.c +@@ -171,6 +171,14 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b) + * Any block we allocate has to be free in both the old and current ll. + */ + r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b); ++ if (r == -ENOSPC) { ++ /* ++ * There's no free block between smd->begin and the end of the metadata device. ++ * We search before smd->begin in case something has been freed. ++ */ ++ r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, 0, smd->begin, b); ++ } ++ + if (r) + return r; + +@@ -199,7 +207,6 @@ static int sm_disk_commit(struct dm_space_map *sm) + return r; + + memcpy(&smd->old_ll, &smd->ll, sizeof(smd->old_ll)); +- smd->begin = 0; + smd->nr_allocated_this_transaction = 0; + + r = sm_disk_get_nr_free(sm, &nr_free); +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c +index 967d8f2a731f..62a4d7da9bd9 100644 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c +@@ -451,6 +451,14 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b) + * Any block we allocate has to be free in both the old and current ll. + */ + r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b); ++ if (r == -ENOSPC) { ++ /* ++ * There's no free block between smm->begin and the end of the metadata device. ++ * We search before smm->begin in case something has been freed. ++ */ ++ r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, 0, smm->begin, b); ++ } ++ + if (r) + return r; + +@@ -502,7 +510,6 @@ static int sm_metadata_commit(struct dm_space_map *sm) + return r; + + memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll)); +- smm->begin = 0; + smm->allocated_this_transaction = 0; + + return 0; +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 0c926c725828..e8f04900870a 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -2259,8 +2259,6 @@ static int resize_stripes(struct r5conf *conf, int newsize) + } else + err = -ENOMEM; + +- mutex_unlock(&conf->cache_size_mutex); +- + conf->slab_cache = sc; + conf->active_name = 1-conf->active_name; + +@@ -2283,6 +2281,8 @@ static int resize_stripes(struct r5conf *conf, int newsize) + + if (!err) + conf->pool_size = newsize; ++ mutex_unlock(&conf->cache_size_mutex); ++ + return err; + } + +@@ -3364,6 +3364,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, + * is missing/faulty, then we need to read everything we can. + */ + if (sh->raid_conf->level != 6 && ++ sh->raid_conf->rmw_level != PARITY_DISABLE_RMW && + sh->sector < sh->raid_conf->mddev->recovery_cp) + /* reconstruct-write isn't being forced */ + return 0; +@@ -4498,7 +4499,7 @@ static void handle_stripe(struct stripe_head *sh) + * or to load a block that is being partially written. 
+ */ + if (s.to_read || s.non_overwrite +- || (conf->level == 6 && s.to_write && s.failed) ++ || (s.to_write && s.failed) + || (s.syncing && (s.uptodate + s.compute < disks)) + || s.replacing + || s.expanding) +diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c +index f3a42834d7d6..b10bd45da9c5 100644 +--- a/drivers/media/common/siano/smscoreapi.c ++++ b/drivers/media/common/siano/smscoreapi.c +@@ -914,7 +914,7 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev, + void *buffer, size_t size) + { + struct sms_firmware *firmware = (struct sms_firmware *) buffer; +- struct sms_msg_data4 *msg; ++ struct sms_msg_data5 *msg; + u32 mem_address, calc_checksum = 0; + u32 i, *ptr; + u8 *payload = firmware->payload; +@@ -995,24 +995,20 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev, + goto exit_fw_download; + + if (coredev->mode == DEVICE_MODE_NONE) { +- struct sms_msg_data *trigger_msg = +- (struct sms_msg_data *) msg; +- + pr_debug("sending MSG_SMS_SWDOWNLOAD_TRIGGER_REQ\n"); + SMS_INIT_MSG(&msg->x_msg_header, + MSG_SMS_SWDOWNLOAD_TRIGGER_REQ, +- sizeof(struct sms_msg_hdr) + +- sizeof(u32) * 5); ++ sizeof(*msg)); + +- trigger_msg->msg_data[0] = firmware->start_address; ++ msg->msg_data[0] = firmware->start_address; + /* Entry point */ +- trigger_msg->msg_data[1] = 6; /* Priority */ +- trigger_msg->msg_data[2] = 0x200; /* Stack size */ +- trigger_msg->msg_data[3] = 0; /* Parameter */ +- trigger_msg->msg_data[4] = 4; /* Task ID */ ++ msg->msg_data[1] = 6; /* Priority */ ++ msg->msg_data[2] = 0x200; /* Stack size */ ++ msg->msg_data[3] = 0; /* Parameter */ ++ msg->msg_data[4] = 4; /* Task ID */ + +- rc = smscore_sendrequest_and_wait(coredev, trigger_msg, +- trigger_msg->x_msg_header.msg_length, ++ rc = smscore_sendrequest_and_wait(coredev, msg, ++ msg->x_msg_header.msg_length, + &coredev->trigger_done); + } else { + SMS_INIT_MSG(&msg->x_msg_header, MSG_SW_RELOAD_EXEC_REQ, +diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h +index 4cc39e4a8318..55d02c27f124 100644 +--- a/drivers/media/common/siano/smscoreapi.h ++++ b/drivers/media/common/siano/smscoreapi.h +@@ -636,9 +636,9 @@ struct sms_msg_data2 { + u32 msg_data[2]; + }; + +-struct sms_msg_data4 { ++struct sms_msg_data5 { + struct sms_msg_hdr x_msg_header; +- u32 msg_data[4]; ++ u32 msg_data[5]; + }; + + struct sms_data_download { +diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c +index 9148e14c9d07..3a5b5f94398a 100644 +--- a/drivers/media/common/siano/smsdvb-main.c ++++ b/drivers/media/common/siano/smsdvb-main.c +@@ -1180,12 +1180,19 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev, + rc = dvb_create_media_graph(&client->adapter, true); + if (rc < 0) { + pr_err("dvb_create_media_graph failed %d\n", rc); +- goto client_error; ++ goto media_graph_error; + } + + pr_info("DVB interface registered.\n"); + return 0; + ++media_graph_error: ++ mutex_lock(&g_smsdvb_clientslock); ++ list_del(&client->entry); ++ mutex_unlock(&g_smsdvb_clientslock); ++ ++ smsdvb_debugfs_release(client); ++ + client_error: + dvb_unregister_frontend(&client->frontend); + +diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c +index 2f054db8807b..cd45b3894661 100644 +--- a/drivers/media/dvb-core/dvb_frontend.c ++++ b/drivers/media/dvb-core/dvb_frontend.c +@@ -41,6 +41,7 @@ + #include + #include + #include ++#include + #include + + #include 
"dvb_frontend.h" +@@ -458,7 +459,7 @@ static int dvb_frontend_swzigzag_autotune(struct dvb_frontend *fe, int check_wra + + static void dvb_frontend_swzigzag(struct dvb_frontend *fe) + { +- enum fe_status s = 0; ++ enum fe_status s = FE_NONE; + int retval = 0; + struct dvb_frontend_private *fepriv = fe->frontend_priv; + struct dtv_frontend_properties *c = &fe->dtv_property_cache, tmp; +@@ -629,7 +630,7 @@ static int dvb_frontend_thread(void *data) + struct dvb_frontend *fe = data; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + struct dvb_frontend_private *fepriv = fe->frontend_priv; +- enum fe_status s; ++ enum fe_status s = FE_NONE; + enum dvbfe_algo algo; + bool re_tune = false; + bool semheld = false; +@@ -932,8 +933,6 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe) + memset(c, 0, offsetof(struct dtv_frontend_properties, strength)); + c->delivery_system = delsys; + +- c->state = DTV_CLEAR; +- + dev_dbg(fe->dvb->device, "%s: Clearing cache for delivery system %d\n", + __func__, c->delivery_system); + +@@ -998,6 +997,17 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe) + .buffer = b \ + } + ++struct dtv_cmds_h { ++ char *name; /* A display name for debugging purposes */ ++ ++ __u32 cmd; /* A unique ID */ ++ ++ /* Flags */ ++ __u32 set:1; /* Either a set or get property */ ++ __u32 buffer:1; /* Does this property use the buffer? */ ++ __u32 reserved:30; /* Align */ ++}; ++ + static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = { + _DTV_CMD(DTV_TUNE, 1, 0), + _DTV_CMD(DTV_CLEAR, 1, 0), +@@ -1079,22 +1089,19 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = { + _DTV_CMD(DTV_STAT_TOTAL_BLOCK_COUNT, 0, 0), + }; + +-static void dtv_property_dump(struct dvb_frontend *fe, +- bool is_set, ++static void dtv_get_property_dump(struct dvb_frontend *fe, + struct dtv_property *tvp) + { + int i; + + if (tvp->cmd <= 0 || tvp->cmd > DTV_MAX_COMMAND) { +- dev_warn(fe->dvb->device, "%s: %s tvp.cmd = 0x%08x undefined\n", +- __func__, +- is_set ? "SET" : "GET", ++ dev_warn(fe->dvb->device, "%s: GET tvp.cmd = 0x%08x undefined\n" ++ , __func__, + tvp->cmd); + return; + } + +- dev_dbg(fe->dvb->device, "%s: %s tvp.cmd = 0x%08x (%s)\n", __func__, +- is_set ? 
"SET" : "GET", ++ dev_dbg(fe->dvb->device, "%s: GET tvp.cmd = 0x%08x (%s)\n", __func__, + tvp->cmd, + dtv_cmds[tvp->cmd].name); + +@@ -1285,17 +1292,15 @@ static int dtv_get_frontend(struct dvb_frontend *fe, + return 0; + } + +-static int dvb_frontend_ioctl_legacy(struct file *file, +- unsigned int cmd, void *parg); +-static int dvb_frontend_ioctl_properties(struct file *file, +- unsigned int cmd, void *parg); ++static int dvb_frontend_handle_ioctl(struct file *file, ++ unsigned int cmd, void *parg); + + static int dtv_property_process_get(struct dvb_frontend *fe, + const struct dtv_frontend_properties *c, + struct dtv_property *tvp, + struct file *file) + { +- int r, ncaps; ++ int ncaps; + + switch(tvp->cmd) { + case DTV_ENUM_DELSYS: +@@ -1506,14 +1511,7 @@ static int dtv_property_process_get(struct dvb_frontend *fe, + return -EINVAL; + } + +- /* Allow the frontend to override outgoing properties */ +- if (fe->ops.get_property) { +- r = fe->ops.get_property(fe, tvp); +- if (r < 0) +- return r; +- } +- +- dtv_property_dump(fe, false, tvp); ++ dtv_get_property_dump(fe, tvp); + + return 0; + } +@@ -1740,23 +1738,36 @@ static int dvbv3_set_delivery_system(struct dvb_frontend *fe) + return emulate_delivery_system(fe, delsys); + } + ++/** ++ * dtv_property_process_set - Sets a single DTV property ++ * @fe: Pointer to &struct dvb_frontend ++ * @file: Pointer to &struct file ++ * @cmd: Digital TV command ++ * @data: An unsigned 32-bits number ++ * ++ * This routine assigns the property ++ * value to the corresponding member of ++ * &struct dtv_frontend_properties ++ * ++ * Returns: ++ * Zero on success, negative errno on failure. ++ */ + static int dtv_property_process_set(struct dvb_frontend *fe, +- struct dtv_property *tvp, +- struct file *file) ++ struct file *file, ++ u32 cmd, u32 data) + { + int r = 0; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + +- /* Allow the frontend to validate incoming properties */ +- if (fe->ops.set_property) { +- r = fe->ops.set_property(fe, tvp); +- if (r < 0) +- return r; +- } +- +- dtv_property_dump(fe, true, tvp); +- +- switch(tvp->cmd) { ++ /** Dump DTV command name and value*/ ++ if (!cmd || cmd > DTV_MAX_COMMAND) ++ dev_warn(fe->dvb->device, "%s: SET cmd 0x%08x undefined\n", ++ __func__, cmd); ++ else ++ dev_dbg(fe->dvb->device, ++ "%s: SET cmd 0x%08x (%s) to 0x%08x\n", ++ __func__, cmd, dtv_cmds[cmd].name, data); ++ switch (cmd) { + case DTV_CLEAR: + /* + * Reset a cache of data specific to the frontend here. This does +@@ -1765,144 +1776,144 @@ static int dtv_property_process_set(struct dvb_frontend *fe, + dvb_frontend_clear_cache(fe); + break; + case DTV_TUNE: +- /* interpret the cache of data, build either a traditional frontend +- * tunerequest so we can pass validation in the FE_SET_FRONTEND +- * ioctl. 
++ /* ++ * Use the cached Digital TV properties to tune the ++ * frontend + */ +- c->state = tvp->cmd; +- dev_dbg(fe->dvb->device, "%s: Finalised property cache\n", +- __func__); ++ dev_dbg(fe->dvb->device, ++ "%s: Setting the frontend from property cache\n", ++ __func__); + + r = dtv_set_frontend(fe); + break; + case DTV_FREQUENCY: +- c->frequency = tvp->u.data; ++ c->frequency = data; + break; + case DTV_MODULATION: +- c->modulation = tvp->u.data; ++ c->modulation = data; + break; + case DTV_BANDWIDTH_HZ: +- c->bandwidth_hz = tvp->u.data; ++ c->bandwidth_hz = data; + break; + case DTV_INVERSION: +- c->inversion = tvp->u.data; ++ c->inversion = data; + break; + case DTV_SYMBOL_RATE: +- c->symbol_rate = tvp->u.data; ++ c->symbol_rate = data; + break; + case DTV_INNER_FEC: +- c->fec_inner = tvp->u.data; ++ c->fec_inner = data; + break; + case DTV_PILOT: +- c->pilot = tvp->u.data; ++ c->pilot = data; + break; + case DTV_ROLLOFF: +- c->rolloff = tvp->u.data; ++ c->rolloff = data; + break; + case DTV_DELIVERY_SYSTEM: +- r = dvbv5_set_delivery_system(fe, tvp->u.data); ++ r = dvbv5_set_delivery_system(fe, data); + break; + case DTV_VOLTAGE: +- c->voltage = tvp->u.data; +- r = dvb_frontend_ioctl_legacy(file, FE_SET_VOLTAGE, ++ c->voltage = data; ++ r = dvb_frontend_handle_ioctl(file, FE_SET_VOLTAGE, + (void *)c->voltage); + break; + case DTV_TONE: +- c->sectone = tvp->u.data; +- r = dvb_frontend_ioctl_legacy(file, FE_SET_TONE, ++ c->sectone = data; ++ r = dvb_frontend_handle_ioctl(file, FE_SET_TONE, + (void *)c->sectone); + break; + case DTV_CODE_RATE_HP: +- c->code_rate_HP = tvp->u.data; ++ c->code_rate_HP = data; + break; + case DTV_CODE_RATE_LP: +- c->code_rate_LP = tvp->u.data; ++ c->code_rate_LP = data; + break; + case DTV_GUARD_INTERVAL: +- c->guard_interval = tvp->u.data; ++ c->guard_interval = data; + break; + case DTV_TRANSMISSION_MODE: +- c->transmission_mode = tvp->u.data; ++ c->transmission_mode = data; + break; + case DTV_HIERARCHY: +- c->hierarchy = tvp->u.data; ++ c->hierarchy = data; + break; + case DTV_INTERLEAVING: +- c->interleaving = tvp->u.data; ++ c->interleaving = data; + break; + + /* ISDB-T Support here */ + case DTV_ISDBT_PARTIAL_RECEPTION: +- c->isdbt_partial_reception = tvp->u.data; ++ c->isdbt_partial_reception = data; + break; + case DTV_ISDBT_SOUND_BROADCASTING: +- c->isdbt_sb_mode = tvp->u.data; ++ c->isdbt_sb_mode = data; + break; + case DTV_ISDBT_SB_SUBCHANNEL_ID: +- c->isdbt_sb_subchannel = tvp->u.data; ++ c->isdbt_sb_subchannel = data; + break; + case DTV_ISDBT_SB_SEGMENT_IDX: +- c->isdbt_sb_segment_idx = tvp->u.data; ++ c->isdbt_sb_segment_idx = data; + break; + case DTV_ISDBT_SB_SEGMENT_COUNT: +- c->isdbt_sb_segment_count = tvp->u.data; ++ c->isdbt_sb_segment_count = data; + break; + case DTV_ISDBT_LAYER_ENABLED: +- c->isdbt_layer_enabled = tvp->u.data; ++ c->isdbt_layer_enabled = data; + break; + case DTV_ISDBT_LAYERA_FEC: +- c->layer[0].fec = tvp->u.data; ++ c->layer[0].fec = data; + break; + case DTV_ISDBT_LAYERA_MODULATION: +- c->layer[0].modulation = tvp->u.data; ++ c->layer[0].modulation = data; + break; + case DTV_ISDBT_LAYERA_SEGMENT_COUNT: +- c->layer[0].segment_count = tvp->u.data; ++ c->layer[0].segment_count = data; + break; + case DTV_ISDBT_LAYERA_TIME_INTERLEAVING: +- c->layer[0].interleaving = tvp->u.data; ++ c->layer[0].interleaving = data; + break; + case DTV_ISDBT_LAYERB_FEC: +- c->layer[1].fec = tvp->u.data; ++ c->layer[1].fec = data; + break; + case DTV_ISDBT_LAYERB_MODULATION: +- c->layer[1].modulation = tvp->u.data; ++ 
c->layer[1].modulation = data; + break; + case DTV_ISDBT_LAYERB_SEGMENT_COUNT: +- c->layer[1].segment_count = tvp->u.data; ++ c->layer[1].segment_count = data; + break; + case DTV_ISDBT_LAYERB_TIME_INTERLEAVING: +- c->layer[1].interleaving = tvp->u.data; ++ c->layer[1].interleaving = data; + break; + case DTV_ISDBT_LAYERC_FEC: +- c->layer[2].fec = tvp->u.data; ++ c->layer[2].fec = data; + break; + case DTV_ISDBT_LAYERC_MODULATION: +- c->layer[2].modulation = tvp->u.data; ++ c->layer[2].modulation = data; + break; + case DTV_ISDBT_LAYERC_SEGMENT_COUNT: +- c->layer[2].segment_count = tvp->u.data; ++ c->layer[2].segment_count = data; + break; + case DTV_ISDBT_LAYERC_TIME_INTERLEAVING: +- c->layer[2].interleaving = tvp->u.data; ++ c->layer[2].interleaving = data; + break; + + /* Multistream support */ + case DTV_STREAM_ID: + case DTV_DVBT2_PLP_ID_LEGACY: +- c->stream_id = tvp->u.data; ++ c->stream_id = data; + break; + + /* ATSC-MH */ + case DTV_ATSCMH_PARADE_ID: +- fe->dtv_property_cache.atscmh_parade_id = tvp->u.data; ++ fe->dtv_property_cache.atscmh_parade_id = data; + break; + case DTV_ATSCMH_RS_FRAME_ENSEMBLE: +- fe->dtv_property_cache.atscmh_rs_frame_ensemble = tvp->u.data; ++ fe->dtv_property_cache.atscmh_rs_frame_ensemble = data; + break; + + case DTV_LNA: +- c->lna = tvp->u.data; ++ c->lna = data; + if (fe->ops.set_lna) + r = fe->ops.set_lna(fe); + if (r < 0) +@@ -1916,14 +1927,13 @@ static int dtv_property_process_set(struct dvb_frontend *fe, + return r; + } + +-static int dvb_frontend_ioctl(struct file *file, +- unsigned int cmd, void *parg) ++static int dvb_frontend_do_ioctl(struct file *file, unsigned int cmd, ++ void *parg) + { + struct dvb_device *dvbdev = file->private_data; + struct dvb_frontend *fe = dvbdev->priv; +- struct dtv_frontend_properties *c = &fe->dtv_property_cache; + struct dvb_frontend_private *fepriv = fe->frontend_priv; +- int err = -EOPNOTSUPP; ++ int err; + + dev_dbg(fe->dvb->device, "%s: (%d)\n", __func__, _IOC_NR(cmd)); + if (down_interruptible(&fepriv->sem)) +@@ -1934,74 +1944,125 @@ static int dvb_frontend_ioctl(struct file *file, + return -ENODEV; + } + +- if ((file->f_flags & O_ACCMODE) == O_RDONLY && +- (_IOC_DIR(cmd) != _IOC_READ || cmd == FE_GET_EVENT || +- cmd == FE_DISEQC_RECV_SLAVE_REPLY)) { ++ /* ++ * If the frontend is opened in read-only mode, only the ioctls ++ * that don't interfere with the tune logic should be accepted. ++ * That allows an external application to monitor the DVB QoS and ++ * statistics parameters. 
++ * ++ * That matches all _IOR() ioctls, except for two special cases: ++ * - FE_GET_EVENT is part of the tuning logic on a DVB application; ++ * - FE_DISEQC_RECV_SLAVE_REPLY is part of DiSEqC 2.0 ++ * setup ++ * So, those two ioctls should also return -EPERM, as otherwise ++ * reading from them would interfere with a DVB tune application ++ */ ++ if ((file->f_flags & O_ACCMODE) == O_RDONLY ++ && (_IOC_DIR(cmd) != _IOC_READ ++ || cmd == FE_GET_EVENT ++ || cmd == FE_DISEQC_RECV_SLAVE_REPLY)) { + up(&fepriv->sem); + return -EPERM; + } + +- if ((cmd == FE_SET_PROPERTY) || (cmd == FE_GET_PROPERTY)) +- err = dvb_frontend_ioctl_properties(file, cmd, parg); +- else { +- c->state = DTV_UNDEFINED; +- err = dvb_frontend_ioctl_legacy(file, cmd, parg); +- } ++ err = dvb_frontend_handle_ioctl(file, cmd, parg); + + up(&fepriv->sem); + return err; + } + +-static int dvb_frontend_ioctl_properties(struct file *file, +- unsigned int cmd, void *parg) ++static long dvb_frontend_ioctl(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ struct dvb_device *dvbdev = file->private_data; ++ ++ if (!dvbdev) ++ return -ENODEV; ++ ++ return dvb_usercopy(file, cmd, arg, dvb_frontend_do_ioctl); ++} ++ ++#ifdef CONFIG_COMPAT ++struct compat_dtv_property { ++ __u32 cmd; ++ __u32 reserved[3]; ++ union { ++ __u32 data; ++ struct dtv_fe_stats st; ++ struct { ++ __u8 data[32]; ++ __u32 len; ++ __u32 reserved1[3]; ++ compat_uptr_t reserved2; ++ } buffer; ++ } u; ++ int result; ++} __attribute__ ((packed)); ++ ++struct compat_dtv_properties { ++ __u32 num; ++ compat_uptr_t props; ++}; ++ ++#define COMPAT_FE_SET_PROPERTY _IOW('o', 82, struct compat_dtv_properties) ++#define COMPAT_FE_GET_PROPERTY _IOR('o', 83, struct compat_dtv_properties) ++ ++static int dvb_frontend_handle_compat_ioctl(struct file *file, unsigned int cmd, ++ unsigned long arg) + { + struct dvb_device *dvbdev = file->private_data; + struct dvb_frontend *fe = dvbdev->priv; + struct dvb_frontend_private *fepriv = fe->frontend_priv; +- struct dtv_frontend_properties *c = &fe->dtv_property_cache; +- int err = 0; ++ int i, err = 0; + +- struct dtv_properties *tvps = parg; +- struct dtv_property *tvp = NULL; +- int i; ++ if (cmd == COMPAT_FE_SET_PROPERTY) { ++ struct compat_dtv_properties prop, *tvps = NULL; ++ struct compat_dtv_property *tvp = NULL; + +- dev_dbg(fe->dvb->device, "%s:\n", __func__); ++ if (copy_from_user(&prop, compat_ptr(arg), sizeof(prop))) ++ return -EFAULT; + +- if (cmd == FE_SET_PROPERTY) { +- dev_dbg(fe->dvb->device, "%s: properties.num = %d\n", __func__, tvps->num); +- dev_dbg(fe->dvb->device, "%s: properties.props = %p\n", __func__, tvps->props); ++ tvps = ∝ + +- /* Put an arbitrary limit on the number of messages that can +- * be sent at once */ +- if ((tvps->num == 0) || (tvps->num > DTV_IOCTL_MAX_MSGS)) ++ /* ++ * Put an arbitrary limit on the number of messages that can ++ * be sent at once ++ */ ++ if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS)) + return -EINVAL; + +- tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp)); ++ tvp = memdup_user(compat_ptr(tvps->props), tvps->num * sizeof(*tvp)); + if (IS_ERR(tvp)) + return PTR_ERR(tvp); + + for (i = 0; i < tvps->num; i++) { +- err = dtv_property_process_set(fe, tvp + i, file); +- if (err < 0) +- goto out; +- (tvp + i)->result = err; ++ err = dtv_property_process_set(fe, file, ++ (tvp + i)->cmd, ++ (tvp + i)->u.data); ++ if (err < 0) { ++ kfree(tvp); ++ return err; ++ } + } +- +- if (c->state == DTV_TUNE) +- dev_dbg(fe->dvb->device, "%s: Property cache is full, 
tuning\n", __func__); +- +- } else if (cmd == FE_GET_PROPERTY) { ++ kfree(tvp); ++ } else if (cmd == COMPAT_FE_GET_PROPERTY) { ++ struct compat_dtv_properties prop, *tvps = NULL; ++ struct compat_dtv_property *tvp = NULL; + struct dtv_frontend_properties getp = fe->dtv_property_cache; + +- dev_dbg(fe->dvb->device, "%s: properties.num = %d\n", __func__, tvps->num); +- dev_dbg(fe->dvb->device, "%s: properties.props = %p\n", __func__, tvps->props); ++ if (copy_from_user(&prop, compat_ptr(arg), sizeof(prop))) ++ return -EFAULT; ++ ++ tvps = ∝ + +- /* Put an arbitrary limit on the number of messages that can +- * be sent at once */ +- if ((tvps->num == 0) || (tvps->num > DTV_IOCTL_MAX_MSGS)) ++ /* ++ * Put an arbitrary limit on the number of messages that can ++ * be sent at once ++ */ ++ if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS)) + return -EINVAL; + +- tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp)); ++ tvp = memdup_user(compat_ptr(tvps->props), tvps->num * sizeof(*tvp)); + if (IS_ERR(tvp)) + return PTR_ERR(tvp); + +@@ -2013,30 +2074,53 @@ static int dvb_frontend_ioctl_properties(struct file *file, + */ + if (fepriv->state != FESTATE_IDLE) { + err = dtv_get_frontend(fe, &getp, NULL); +- if (err < 0) +- goto out; ++ if (err < 0) { ++ kfree(tvp); ++ return err; ++ } + } + for (i = 0; i < tvps->num; i++) { +- err = dtv_property_process_get(fe, &getp, tvp + i, file); +- if (err < 0) +- goto out; +- (tvp + i)->result = err; ++ err = dtv_property_process_get( ++ fe, &getp, (struct dtv_property *)(tvp + i), file); ++ if (err < 0) { ++ kfree(tvp); ++ return err; ++ } + } + +- if (copy_to_user((void __user *)tvps->props, tvp, +- tvps->num * sizeof(struct dtv_property))) { +- err = -EFAULT; +- goto out; ++ if (copy_to_user((void __user *)compat_ptr(tvps->props), tvp, ++ tvps->num * sizeof(struct compat_dtv_property))) { ++ kfree(tvp); ++ return -EFAULT; + } ++ kfree(tvp); ++ } + +- } else +- err = -EOPNOTSUPP; +- +-out: +- kfree(tvp); + return err; + } + ++static long dvb_frontend_compat_ioctl(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ struct dvb_device *dvbdev = file->private_data; ++ struct dvb_frontend *fe = dvbdev->priv; ++ struct dvb_frontend_private *fepriv = fe->frontend_priv; ++ int err; ++ ++ if (cmd == COMPAT_FE_SET_PROPERTY || cmd == COMPAT_FE_GET_PROPERTY) { ++ if (down_interruptible(&fepriv->sem)) ++ return -ERESTARTSYS; ++ ++ err = dvb_frontend_handle_compat_ioctl(file, cmd, arg); ++ ++ up(&fepriv->sem); ++ return err; ++ } ++ ++ return dvb_frontend_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); ++} ++#endif ++ + static int dtv_set_frontend(struct dvb_frontend *fe) + { + struct dvb_frontend_private *fepriv = fe->frontend_priv; +@@ -2174,16 +2258,103 @@ static int dtv_set_frontend(struct dvb_frontend *fe) + } + + +-static int dvb_frontend_ioctl_legacy(struct file *file, +- unsigned int cmd, void *parg) ++static int dvb_frontend_handle_ioctl(struct file *file, ++ unsigned int cmd, void *parg) + { + struct dvb_device *dvbdev = file->private_data; + struct dvb_frontend *fe = dvbdev->priv; + struct dvb_frontend_private *fepriv = fe->frontend_priv; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; +- int err = -EOPNOTSUPP; ++ int i, err = -ENOTSUPP; ++ ++ dev_dbg(fe->dvb->device, "%s:\n", __func__); ++ ++ switch(cmd) { ++ case FE_SET_PROPERTY: { ++ struct dtv_properties *tvps = parg; ++ struct dtv_property *tvp = NULL; ++ ++ dev_dbg(fe->dvb->device, "%s: properties.num = %d\n", ++ __func__, tvps->num); ++ dev_dbg(fe->dvb->device, "%s: 
properties.props = %p\n", ++ __func__, tvps->props); ++ ++ /* ++ * Put an arbitrary limit on the number of messages that can ++ * be sent at once ++ */ ++ if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS)) ++ return -EINVAL; ++ ++ tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp)); ++ if (IS_ERR(tvp)) ++ return PTR_ERR(tvp); ++ ++ for (i = 0; i < tvps->num; i++) { ++ err = dtv_property_process_set(fe, file, ++ (tvp + i)->cmd, ++ (tvp + i)->u.data); ++ if (err < 0) { ++ kfree(tvp); ++ return err; ++ } ++ } ++ kfree(tvp); ++ err = 0; ++ break; ++ } ++ case FE_GET_PROPERTY: { ++ struct dtv_properties *tvps = parg; ++ struct dtv_property *tvp = NULL; ++ struct dtv_frontend_properties getp = fe->dtv_property_cache; ++ ++ dev_dbg(fe->dvb->device, "%s: properties.num = %d\n", ++ __func__, tvps->num); ++ dev_dbg(fe->dvb->device, "%s: properties.props = %p\n", ++ __func__, tvps->props); ++ ++ /* ++ * Put an arbitrary limit on the number of messages that can ++ * be sent at once ++ */ ++ if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS)) ++ return -EINVAL; ++ ++ tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp)); ++ if (IS_ERR(tvp)) ++ return PTR_ERR(tvp); ++ ++ /* ++ * Let's use our own copy of property cache, in order to ++ * avoid mangling with DTV zigzag logic, as drivers might ++ * return crap, if they don't check if the data is available ++ * before updating the properties cache. ++ */ ++ if (fepriv->state != FESTATE_IDLE) { ++ err = dtv_get_frontend(fe, &getp, NULL); ++ if (err < 0) { ++ kfree(tvp); ++ return err; ++ } ++ } ++ for (i = 0; i < tvps->num; i++) { ++ err = dtv_property_process_get(fe, &getp, tvp + i, file); ++ if (err < 0) { ++ kfree(tvp); ++ return err; ++ } ++ } ++ ++ if (copy_to_user((void __user *)tvps->props, tvp, ++ tvps->num * sizeof(struct dtv_property))) { ++ kfree(tvp); ++ return -EFAULT; ++ } ++ kfree(tvp); ++ err = 0; ++ break; ++ } + +- switch (cmd) { + case FE_GET_INFO: { + struct dvb_frontend_info* info = parg; + +@@ -2247,42 +2418,6 @@ static int dvb_frontend_ioctl_legacy(struct file *file, + break; + } + +- case FE_READ_BER: +- if (fe->ops.read_ber) { +- if (fepriv->thread) +- err = fe->ops.read_ber(fe, (__u32 *) parg); +- else +- err = -EAGAIN; +- } +- break; +- +- case FE_READ_SIGNAL_STRENGTH: +- if (fe->ops.read_signal_strength) { +- if (fepriv->thread) +- err = fe->ops.read_signal_strength(fe, (__u16 *) parg); +- else +- err = -EAGAIN; +- } +- break; +- +- case FE_READ_SNR: +- if (fe->ops.read_snr) { +- if (fepriv->thread) +- err = fe->ops.read_snr(fe, (__u16 *) parg); +- else +- err = -EAGAIN; +- } +- break; +- +- case FE_READ_UNCORRECTED_BLOCKS: +- if (fe->ops.read_ucblocks) { +- if (fepriv->thread) +- err = fe->ops.read_ucblocks(fe, (__u32 *) parg); +- else +- err = -EAGAIN; +- } +- break; +- + case FE_DISEQC_RESET_OVERLOAD: + if (fe->ops.diseqc_reset_overload) { + err = fe->ops.diseqc_reset_overload(fe); +@@ -2334,6 +2469,23 @@ static int dvb_frontend_ioctl_legacy(struct file *file, + } + break; + ++ case FE_DISEQC_RECV_SLAVE_REPLY: ++ if (fe->ops.diseqc_recv_slave_reply) ++ err = fe->ops.diseqc_recv_slave_reply(fe, (struct dvb_diseqc_slave_reply*) parg); ++ break; ++ ++ case FE_ENABLE_HIGH_LNB_VOLTAGE: ++ if (fe->ops.enable_high_lnb_voltage) ++ err = fe->ops.enable_high_lnb_voltage(fe, (long) parg); ++ break; ++ ++ case FE_SET_FRONTEND_TUNE_MODE: ++ fepriv->tune_mode_flags = (unsigned long) parg; ++ err = 0; ++ break; ++ ++ /* DEPRECATED dish control ioctls */ ++ + case FE_DISHNETWORK_SEND_LEGACY_CMD: + if 
(fe->ops.dishnetwork_send_legacy_command) { + err = fe->ops.dishnetwork_send_legacy_command(fe, +@@ -2398,16 +2550,46 @@ static int dvb_frontend_ioctl_legacy(struct file *file, + } + break; + +- case FE_DISEQC_RECV_SLAVE_REPLY: +- if (fe->ops.diseqc_recv_slave_reply) +- err = fe->ops.diseqc_recv_slave_reply(fe, (struct dvb_diseqc_slave_reply*) parg); ++ /* DEPRECATED statistics ioctls */ ++ ++ case FE_READ_BER: ++ if (fe->ops.read_ber) { ++ if (fepriv->thread) ++ err = fe->ops.read_ber(fe, (__u32 *) parg); ++ else ++ err = -EAGAIN; ++ } + break; + +- case FE_ENABLE_HIGH_LNB_VOLTAGE: +- if (fe->ops.enable_high_lnb_voltage) +- err = fe->ops.enable_high_lnb_voltage(fe, (long) parg); ++ case FE_READ_SIGNAL_STRENGTH: ++ if (fe->ops.read_signal_strength) { ++ if (fepriv->thread) ++ err = fe->ops.read_signal_strength(fe, (__u16 *) parg); ++ else ++ err = -EAGAIN; ++ } ++ break; ++ ++ case FE_READ_SNR: ++ if (fe->ops.read_snr) { ++ if (fepriv->thread) ++ err = fe->ops.read_snr(fe, (__u16 *) parg); ++ else ++ err = -EAGAIN; ++ } ++ break; ++ ++ case FE_READ_UNCORRECTED_BLOCKS: ++ if (fe->ops.read_ucblocks) { ++ if (fepriv->thread) ++ err = fe->ops.read_ucblocks(fe, (__u32 *) parg); ++ else ++ err = -EAGAIN; ++ } + break; + ++ /* DEPRECATED DVBv3 ioctls */ ++ + case FE_SET_FRONTEND: + err = dvbv3_set_delivery_system(fe); + if (err) +@@ -2434,11 +2616,10 @@ static int dvb_frontend_ioctl_legacy(struct file *file, + err = dtv_get_frontend(fe, &getp, parg); + break; + } +- case FE_SET_FRONTEND_TUNE_MODE: +- fepriv->tune_mode_flags = (unsigned long) parg; +- err = 0; +- break; +- } ++ ++ default: ++ return -ENOTSUPP; ++ } /* switch */ + + return err; + } +@@ -2609,7 +2790,10 @@ static int dvb_frontend_release(struct inode *inode, struct file *file) + + static const struct file_operations dvb_frontend_fops = { + .owner = THIS_MODULE, +- .unlocked_ioctl = dvb_generic_ioctl, ++ .unlocked_ioctl = dvb_frontend_ioctl, ++#ifdef CONFIG_COMPAT ++ .compat_ioctl = dvb_frontend_compat_ioctl, ++#endif + .poll = dvb_frontend_poll, + .open = dvb_frontend_open, + .release = dvb_frontend_release, +@@ -2677,7 +2861,6 @@ int dvb_register_frontend(struct dvb_adapter* dvb, + #if defined(CONFIG_MEDIA_CONTROLLER_DVB) + .name = fe->ops.info.name, + #endif +- .kernel_ioctl = dvb_frontend_ioctl + }; + + dev_dbg(dvb->device, "%s:\n", __func__); +diff --git a/drivers/media/dvb-core/dvb_frontend.h b/drivers/media/dvb-core/dvb_frontend.h +index fb6e84811504..8a6267ad56d6 100644 +--- a/drivers/media/dvb-core/dvb_frontend.h ++++ b/drivers/media/dvb-core/dvb_frontend.h +@@ -397,13 +397,8 @@ struct dtv_frontend_properties; + * @search: callback function used on some custom algo search algos. + * @tuner_ops: pointer to struct dvb_tuner_ops + * @analog_ops: pointer to struct analog_demod_ops +- * @set_property: callback function to allow the frontend to validade +- * incoming properties. Should not be used on new drivers. +- * @get_property: callback function to allow the frontend to override +- * outcoming properties. Should not be used on new drivers. 
+ */ + struct dvb_frontend_ops { +- + struct dvb_frontend_info info; + + u8 delsys[MAX_DELSYS]; +@@ -461,9 +456,6 @@ struct dvb_frontend_ops { + + struct dvb_tuner_ops tuner_ops; + struct analog_demod_ops analog_ops; +- +- int (*set_property)(struct dvb_frontend* fe, struct dtv_property* tvp); +- int (*get_property)(struct dvb_frontend* fe, struct dtv_property* tvp); + }; + + #ifdef __DVB_CORE__ +@@ -623,11 +615,6 @@ struct dtv_frontend_properties { + struct dtv_fe_stats post_bit_count; + struct dtv_fe_stats block_error; + struct dtv_fe_stats block_count; +- +- /* private: */ +- /* Cache State */ +- u32 state; +- + }; + + #define DVB_FE_NO_EXIT 0 +diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c +index efb7d5284946..99a225bbc77a 100644 +--- a/drivers/media/dvb-core/dvb_net.c ++++ b/drivers/media/dvb-core/dvb_net.c +@@ -57,6 +57,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -1350,14 +1351,20 @@ static int dvb_net_do_ioctl(struct file *file, + struct net_device *netdev; + struct dvb_net_priv *priv_data; + struct dvb_net_if *dvbnetif = parg; ++ int if_num = dvbnetif->if_num; + +- if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX || +- !dvbnet->state[dvbnetif->if_num]) { ++ if (if_num >= DVB_NET_DEVICES_MAX) { + ret = -EINVAL; + goto ioctl_error; + } ++ if_num = array_index_nospec(if_num, DVB_NET_DEVICES_MAX); + +- netdev = dvbnet->device[dvbnetif->if_num]; ++ if (!dvbnet->state[if_num]) { ++ ret = -EINVAL; ++ goto ioctl_error; ++ } ++ ++ netdev = dvbnet->device[if_num]; + + priv_data = netdev_priv(netdev); + dvbnetif->pid=priv_data->pid; +@@ -1410,14 +1417,20 @@ static int dvb_net_do_ioctl(struct file *file, + struct net_device *netdev; + struct dvb_net_priv *priv_data; + struct __dvb_net_if_old *dvbnetif = parg; ++ int if_num = dvbnetif->if_num; ++ ++ if (if_num >= DVB_NET_DEVICES_MAX) { ++ ret = -EINVAL; ++ goto ioctl_error; ++ } ++ if_num = array_index_nospec(if_num, DVB_NET_DEVICES_MAX); + +- if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX || +- !dvbnet->state[dvbnetif->if_num]) { ++ if (!dvbnet->state[if_num]) { + ret = -EINVAL; + goto ioctl_error; + } + +- netdev = dvbnet->device[dvbnetif->if_num]; ++ netdev = dvbnet->device[if_num]; + + priv_data = netdev_priv(netdev); + dvbnetif->pid=priv_data->pid; +diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c +index a1cc1c1e5318..fb1284051d05 100644 +--- a/drivers/media/dvb-core/dvbdev.c ++++ b/drivers/media/dvb-core/dvbdev.c +@@ -216,6 +216,7 @@ static void dvb_media_device_free(struct dvb_device *dvbdev) + + if (dvbdev->adapter->conn) { + media_device_unregister_entity(dvbdev->adapter->conn); ++ kfree(dvbdev->adapter->conn); + dvbdev->adapter->conn = NULL; + kfree(dvbdev->adapter->conn_pads); + dvbdev->adapter->conn_pads = NULL; +diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c +index f51a3a0b3949..1b640651531d 100644 +--- a/drivers/media/dvb-frontends/lg2160.c ++++ b/drivers/media/dvb-frontends/lg2160.c +@@ -1052,16 +1052,6 @@ static int lg216x_get_frontend(struct dvb_frontend *fe, + return ret; + } + +-static int lg216x_get_property(struct dvb_frontend *fe, +- struct dtv_property *tvp) +-{ +- struct dtv_frontend_properties *c = &fe->dtv_property_cache; +- +- return (DTV_ATSCMH_FIC_VER == tvp->cmd) ? 
+- lg216x_get_frontend(fe, c) : 0; +-} +- +- + static int lg2160_set_frontend(struct dvb_frontend *fe) + { + struct lg216x_state *state = fe->demodulator_priv; +@@ -1372,8 +1362,6 @@ static struct dvb_frontend_ops lg2160_ops = { + .init = lg216x_init, + .sleep = lg216x_sleep, + #endif +- .get_property = lg216x_get_property, +- + .set_frontend = lg2160_set_frontend, + .get_frontend = lg216x_get_frontend, + .get_tune_settings = lg216x_get_tune_settings, +@@ -1400,8 +1388,6 @@ static struct dvb_frontend_ops lg2161_ops = { + .init = lg216x_init, + .sleep = lg216x_sleep, + #endif +- .get_property = lg216x_get_property, +- + .set_frontend = lg2160_set_frontend, + .get_frontend = lg216x_get_frontend, + .get_tune_settings = lg216x_get_tune_settings, +diff --git a/drivers/media/dvb-frontends/sp8870.c b/drivers/media/dvb-frontends/sp8870.c +index e87ac30d7fb8..b43135c5a960 100644 +--- a/drivers/media/dvb-frontends/sp8870.c ++++ b/drivers/media/dvb-frontends/sp8870.c +@@ -293,7 +293,9 @@ static int sp8870_set_frontend_parameters(struct dvb_frontend *fe) + sp8870_writereg(state, 0xc05, reg0xc05); + + // read status reg in order to clear pending irqs +- sp8870_readreg(state, 0x200); ++ err = sp8870_readreg(state, 0x200); ++ if (err < 0) ++ return err; + + // system controller start + sp8870_microcontroller_start(state); +diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c +index c93d9a45f7f7..2b8c75f28d2e 100644 +--- a/drivers/media/dvb-frontends/stv0288.c ++++ b/drivers/media/dvb-frontends/stv0288.c +@@ -447,12 +447,6 @@ static int stv0288_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) + return 0; + } + +-static int stv0288_set_property(struct dvb_frontend *fe, struct dtv_property *p) +-{ +- dprintk("%s(..)\n", __func__); +- return 0; +-} +- + static int stv0288_set_frontend(struct dvb_frontend *fe) + { + struct stv0288_state *state = fe->demodulator_priv; +@@ -568,7 +562,6 @@ static struct dvb_frontend_ops stv0288_ops = { + .set_tone = stv0288_set_tone, + .set_voltage = stv0288_set_voltage, + +- .set_property = stv0288_set_property, + .set_frontend = stv0288_set_frontend, + }; + +diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c +index 66a5a7f2295c..93262b13c644 100644 +--- a/drivers/media/dvb-frontends/stv6110.c ++++ b/drivers/media/dvb-frontends/stv6110.c +@@ -263,11 +263,9 @@ static int stv6110_get_frequency(struct dvb_frontend *fe, u32 *frequency) + static int stv6110_set_frequency(struct dvb_frontend *fe, u32 frequency) + { + struct stv6110_priv *priv = fe->tuner_priv; +- struct dtv_frontend_properties *c = &fe->dtv_property_cache; + u8 ret = 0x04; + u32 divider, ref, p, presc, i, result_freq, vco_freq; + s32 p_calc, p_calc_opt = 1000, r_div, r_div_opt = 0, p_val; +- s32 srate; + + dprintk("%s, freq=%d kHz, mclk=%d Hz\n", __func__, + frequency, priv->mclk); +@@ -278,13 +276,6 @@ static int stv6110_set_frequency(struct dvb_frontend *fe, u32 frequency) + ((((priv->mclk / 1000000) - 16) & 0x1f) << 3); + + /* BB_GAIN = db/2 */ +- if (fe->ops.set_property && fe->ops.get_property) { +- srate = c->symbol_rate; +- dprintk("%s: Get Frontend parameters: srate=%d\n", +- __func__, srate); +- } else +- srate = 15000000; +- + priv->regs[RSTV6110_CTRL2] &= ~0x0f; + priv->regs[RSTV6110_CTRL2] |= (priv->gain & 0x0f); + +diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c +index 37ebeef2bbd0..43343091ea93 100644 +--- a/drivers/media/dvb-frontends/tda10071.c ++++ 
b/drivers/media/dvb-frontends/tda10071.c +@@ -483,10 +483,11 @@ static int tda10071_read_status(struct dvb_frontend *fe, enum fe_status *status) + goto error; + + if (dev->delivery_system == SYS_DVBS) { +- dev->dvbv3_ber = buf[0] << 24 | buf[1] << 16 | +- buf[2] << 8 | buf[3] << 0; +- dev->post_bit_error += buf[0] << 24 | buf[1] << 16 | +- buf[2] << 8 | buf[3] << 0; ++ u32 bit_error = buf[0] << 24 | buf[1] << 16 | ++ buf[2] << 8 | buf[3] << 0; ++ ++ dev->dvbv3_ber = bit_error; ++ dev->post_bit_error += bit_error; + c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; + c->post_bit_error.stat[0].uvalue = dev->post_bit_error; + dev->block_error += buf[4] << 8 | buf[5] << 0; +diff --git a/drivers/media/firewire/firedtv-fw.c b/drivers/media/firewire/firedtv-fw.c +index 247f0e7cb5f7..382f290c3f4d 100644 +--- a/drivers/media/firewire/firedtv-fw.c ++++ b/drivers/media/firewire/firedtv-fw.c +@@ -271,6 +271,10 @@ static int node_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) + + name_len = fw_csr_string(unit->directory, CSR_MODEL, + name, sizeof(name)); ++ if (name_len < 0) { ++ err = name_len; ++ goto fail_free; ++ } + for (i = ARRAY_SIZE(model_names); --i; ) + if (strlen(model_names[i]) <= name_len && + strncmp(name, model_names[i], name_len) == 0) +diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c +index b87c9e7ff146..e81c4cca50c6 100644 +--- a/drivers/media/i2c/adv7511-v4l2.c ++++ b/drivers/media/i2c/adv7511-v4l2.c +@@ -1976,7 +1976,7 @@ static int adv7511_remove(struct i2c_client *client) + + adv7511_set_isr(sd, false); + adv7511_init_setup(sd); +- cancel_delayed_work(&state->edid_handler); ++ cancel_delayed_work_sync(&state->edid_handler); + i2c_unregister_device(state->i2c_edid); + if (state->i2c_cec) + i2c_unregister_device(state->i2c_cec); +diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c +index ce6f93074ae0..56b8b7bf759e 100644 +--- a/drivers/media/i2c/adv7604.c ++++ b/drivers/media/i2c/adv7604.c +@@ -3541,7 +3541,7 @@ static int adv76xx_remove(struct i2c_client *client) + io_write(sd, 0x6e, 0); + io_write(sd, 0x73, 0); + +- cancel_delayed_work(&state->delayed_work_enable_hotplug); ++ cancel_delayed_work_sync(&state->delayed_work_enable_hotplug); + v4l2_async_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + adv76xx_unregister_clients(to_state(sd)); +diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c +index cf3b42c9417e..d7af4fbcb84b 100644 +--- a/drivers/media/i2c/adv7842.c ++++ b/drivers/media/i2c/adv7842.c +@@ -3598,7 +3598,7 @@ static int adv7842_remove(struct i2c_client *client) + struct adv7842_state *state = to_state(sd); + + adv7842_irq_enable(sd, false); +- cancel_delayed_work(&state->delayed_work_enable_hotplug); ++ cancel_delayed_work_sync(&state->delayed_work_enable_hotplug); + v4l2_device_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + adv7842_unregister_clients(sd); +diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c +index acb804bceccb..e1736777e6cc 100644 +--- a/drivers/media/i2c/m5mols/m5mols_core.c ++++ b/drivers/media/i2c/m5mols/m5mols_core.c +@@ -754,7 +754,8 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable) + + ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies); + if (ret) { +- info->set_power(&client->dev, 0); ++ if (info->set_power) ++ info->set_power(&client->dev, 0); + return ret; + } + +diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c 
b/drivers/media/i2c/s5c73m3/s5c73m3-core.c +index 3844853ab0a0..9d55abfe070f 100644 +--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c ++++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c +@@ -1393,7 +1393,7 @@ static int __s5c73m3_power_on(struct s5c73m3 *state) + s5c73m3_gpio_deassert(state, STBY); + usleep_range(100, 200); + +- s5c73m3_gpio_deassert(state, RST); ++ s5c73m3_gpio_deassert(state, RSET); + usleep_range(50, 100); + + return 0; +@@ -1408,7 +1408,7 @@ static int __s5c73m3_power_off(struct s5c73m3 *state) + { + int i, ret; + +- if (s5c73m3_gpio_assert(state, RST)) ++ if (s5c73m3_gpio_assert(state, RSET)) + usleep_range(10, 50); + + if (s5c73m3_gpio_assert(state, STBY)) +@@ -1613,7 +1613,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state) + + state->mclk_frequency = pdata->mclk_frequency; + state->gpio[STBY] = pdata->gpio_stby; +- state->gpio[RST] = pdata->gpio_reset; ++ state->gpio[RSET] = pdata->gpio_reset; + return 0; + } + +diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h +index 653f68e7ea07..e267b2522149 100644 +--- a/drivers/media/i2c/s5c73m3/s5c73m3.h ++++ b/drivers/media/i2c/s5c73m3/s5c73m3.h +@@ -361,7 +361,7 @@ struct s5c73m3_ctrls { + + enum s5c73m3_gpio_id { + STBY, +- RST, ++ RSET, + GPIO_NUM, + }; + +diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c +index 6ebcf254989a..75fb13a33eab 100644 +--- a/drivers/media/i2c/s5k4ecgx.c ++++ b/drivers/media/i2c/s5k4ecgx.c +@@ -177,7 +177,7 @@ static const char * const s5k4ecgx_supply_names[] = { + + enum s5k4ecgx_gpio_id { + STBY, +- RST, ++ RSET, + GPIO_NUM, + }; + +@@ -482,7 +482,7 @@ static int __s5k4ecgx_power_on(struct s5k4ecgx *priv) + if (s5k4ecgx_gpio_set_value(priv, STBY, priv->gpio[STBY].level)) + usleep_range(30, 50); + +- if (s5k4ecgx_gpio_set_value(priv, RST, priv->gpio[RST].level)) ++ if (s5k4ecgx_gpio_set_value(priv, RSET, priv->gpio[RSET].level)) + usleep_range(30, 50); + + return 0; +@@ -490,7 +490,7 @@ static int __s5k4ecgx_power_on(struct s5k4ecgx *priv) + + static int __s5k4ecgx_power_off(struct s5k4ecgx *priv) + { +- if (s5k4ecgx_gpio_set_value(priv, RST, !priv->gpio[RST].level)) ++ if (s5k4ecgx_gpio_set_value(priv, RSET, !priv->gpio[RSET].level)) + usleep_range(30, 50); + + if (s5k4ecgx_gpio_set_value(priv, STBY, !priv->gpio[STBY].level)) +@@ -878,7 +878,7 @@ static int s5k4ecgx_config_gpios(struct s5k4ecgx *priv, + int ret; + + priv->gpio[STBY].gpio = -EINVAL; +- priv->gpio[RST].gpio = -EINVAL; ++ priv->gpio[RSET].gpio = -EINVAL; + + ret = s5k4ecgx_config_gpio(gpio->gpio, gpio->level, "S5K4ECGX_STBY"); + +@@ -897,7 +897,7 @@ static int s5k4ecgx_config_gpios(struct s5k4ecgx *priv, + s5k4ecgx_free_gpios(priv); + return ret; + } +- priv->gpio[RST] = *gpio; ++ priv->gpio[RSET] = *gpio; + if (gpio_is_valid(gpio->gpio)) + gpio_set_value(gpio->gpio, 0); + +diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c +index db82ed05792e..e8c820cb418b 100644 +--- a/drivers/media/i2c/s5k5baf.c ++++ b/drivers/media/i2c/s5k5baf.c +@@ -238,7 +238,7 @@ struct s5k5baf_gpio { + + enum s5k5baf_gpio_id { + STBY, +- RST, ++ RSET, + NUM_GPIOS, + }; + +@@ -973,7 +973,7 @@ static int s5k5baf_power_on(struct s5k5baf *state) + + s5k5baf_gpio_deassert(state, STBY); + usleep_range(50, 100); +- s5k5baf_gpio_deassert(state, RST); ++ s5k5baf_gpio_deassert(state, RSET); + return 0; + + err_reg_dis: +@@ -991,7 +991,7 @@ static int s5k5baf_power_off(struct s5k5baf *state) + state->apply_cfg = 0; + state->apply_crop = 0; + +- s5k5baf_gpio_assert(state, RST); ++ 
s5k5baf_gpio_assert(state, RSET); + s5k5baf_gpio_assert(state, STBY); + + if (!IS_ERR(state->clock)) +diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c +index 4b615b4b0463..4d92aac279dc 100644 +--- a/drivers/media/i2c/s5k6aa.c ++++ b/drivers/media/i2c/s5k6aa.c +@@ -181,7 +181,7 @@ static const char * const s5k6aa_supply_names[] = { + + enum s5k6aa_gpio_id { + STBY, +- RST, ++ RSET, + GPIO_NUM, + }; + +@@ -845,7 +845,7 @@ static int __s5k6aa_power_on(struct s5k6aa *s5k6aa) + ret = s5k6aa->s_power(1); + usleep_range(4000, 4000); + +- if (s5k6aa_gpio_deassert(s5k6aa, RST)) ++ if (s5k6aa_gpio_deassert(s5k6aa, RSET)) + msleep(20); + + return ret; +@@ -855,7 +855,7 @@ static int __s5k6aa_power_off(struct s5k6aa *s5k6aa) + { + int ret; + +- if (s5k6aa_gpio_assert(s5k6aa, RST)) ++ if (s5k6aa_gpio_assert(s5k6aa, RSET)) + usleep_range(100, 150); + + if (s5k6aa->s_power) { +@@ -1514,7 +1514,7 @@ static int s5k6aa_configure_gpios(struct s5k6aa *s5k6aa, + int ret; + + s5k6aa->gpio[STBY].gpio = -EINVAL; +- s5k6aa->gpio[RST].gpio = -EINVAL; ++ s5k6aa->gpio[RSET].gpio = -EINVAL; + + gpio = &pdata->gpio_stby; + if (gpio_is_valid(gpio->gpio)) { +@@ -1537,7 +1537,7 @@ static int s5k6aa_configure_gpios(struct s5k6aa *s5k6aa, + if (ret < 0) + return ret; + +- s5k6aa->gpio[RST] = *gpio; ++ s5k6aa->gpio[RSET] = *gpio; + } + + return 0; +diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c +index 7ebcb9473956..c799071be66f 100644 +--- a/drivers/media/i2c/tc358743.c ++++ b/drivers/media/i2c/tc358743.c +@@ -1321,7 +1321,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled) + static irqreturn_t tc358743_irq_handler(int irq, void *dev_id) + { + struct tc358743_state *state = dev_id; +- bool handled; ++ bool handled = false; + + tc358743_isr(&state->sd, 0, &handled); + +@@ -1763,6 +1763,7 @@ static int tc358743_probe_of(struct tc358743_state *state) + bps_pr_lane = 2 * endpoint->link_frequencies[0]; + if (bps_pr_lane < 62500000U || bps_pr_lane > 1000000000U) { + dev_err(dev, "unsupported bps per lane: %u bps\n", bps_pr_lane); ++ ret = -EINVAL; + goto disable_clk; + } + +diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c +index 90fcccc05b56..c678d7120727 100644 +--- a/drivers/media/pci/bt8xx/bt878.c ++++ b/drivers/media/pci/bt8xx/bt878.c +@@ -494,6 +494,9 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) + btwrite(0, BT878_AINT_MASK); + bt878_num++; + ++ if (!bt->tasklet.func) ++ tasklet_disable(&bt->tasklet); ++ + return 0; + + fail2: +diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c +index 97b91a9f9fa9..1d6173998a29 100644 +--- a/drivers/media/pci/bt8xx/bttv-driver.c ++++ b/drivers/media/pci/bt8xx/bttv-driver.c +@@ -4059,11 +4059,13 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) + btv->id = dev->device; + if (pci_enable_device(dev)) { + pr_warn("%d: Can't enable device\n", btv->c.nr); +- return -EIO; ++ result = -EIO; ++ goto free_mem; + } + if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) { + pr_warn("%d: No suitable DMA available\n", btv->c.nr); +- return -EIO; ++ result = -EIO; ++ goto free_mem; + } + if (!request_mem_region(pci_resource_start(dev,0), + pci_resource_len(dev,0), +@@ -4071,7 +4073,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) + pr_warn("%d: can't request iomem (0x%llx)\n", + btv->c.nr, + (unsigned long long)pci_resource_start(dev, 0)); +- return -EBUSY; ++ result = 
-EBUSY; ++ goto free_mem; + } + pci_set_master(dev); + pci_set_command(dev); +@@ -4257,6 +4260,10 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) + release_mem_region(pci_resource_start(btv->c.pci,0), + pci_resource_len(btv->c.pci,0)); + pci_disable_device(btv->c.pci); ++ ++free_mem: ++ bttvs[btv->c.nr] = NULL; ++ kfree(btv); + return result; + } + +diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c +index 979634000597..17b717a1c7fa 100644 +--- a/drivers/media/pci/cobalt/cobalt-driver.c ++++ b/drivers/media/pci/cobalt/cobalt-driver.c +@@ -689,6 +689,7 @@ static int cobalt_probe(struct pci_dev *pci_dev, + return -ENOMEM; + cobalt->pci_dev = pci_dev; + cobalt->instance = i; ++ mutex_init(&cobalt->pci_lock); + + retval = v4l2_device_register(&pci_dev->dev, &cobalt->v4l2_dev); + if (retval) { +diff --git a/drivers/media/pci/cobalt/cobalt-driver.h b/drivers/media/pci/cobalt/cobalt-driver.h +index ed00dc9d9399..8f9454d30b95 100644 +--- a/drivers/media/pci/cobalt/cobalt-driver.h ++++ b/drivers/media/pci/cobalt/cobalt-driver.h +@@ -262,6 +262,8 @@ struct cobalt { + int instance; + struct pci_dev *pci_dev; + struct v4l2_device v4l2_dev; ++ /* serialize PCI access in cobalt_s_bit_sysctrl() */ ++ struct mutex pci_lock; + + void __iomem *bar0, *bar1; + +@@ -333,10 +335,13 @@ static inline u32 cobalt_g_sysctrl(struct cobalt *cobalt) + static inline void cobalt_s_bit_sysctrl(struct cobalt *cobalt, + int bit, int val) + { +- u32 ctrl = cobalt_read_bar1(cobalt, COBALT_SYS_CTRL_BASE); ++ u32 ctrl; + ++ mutex_lock(&cobalt->pci_lock); ++ ctrl = cobalt_read_bar1(cobalt, COBALT_SYS_CTRL_BASE); + cobalt_write_bar1(cobalt, COBALT_SYS_CTRL_BASE, + (ctrl & ~(1UL << bit)) | (val << bit)); ++ mutex_unlock(&cobalt->pci_lock); + } + + static inline u32 cobalt_g_sysstat(struct cobalt *cobalt) +diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c +index c1aa888af705..83864a99d3a6 100644 +--- a/drivers/media/pci/cx23885/cx23888-ir.c ++++ b/drivers/media/pci/cx23885/cx23888-ir.c +@@ -1179,8 +1179,11 @@ int cx23888_ir_probe(struct cx23885_dev *dev) + return -ENOMEM; + + spin_lock_init(&state->rx_kfifo_lock); +- if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL)) ++ if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, ++ GFP_KERNEL)) { ++ kfree(state); + return -ENOMEM; ++ } + + state->dev = dev; + sd = &state->sd; +diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c +index 0d4cacb93664..d58c58e61bde 100644 +--- a/drivers/media/pci/cx25821/cx25821-core.c ++++ b/drivers/media/pci/cx25821/cx25821-core.c +@@ -990,8 +990,10 @@ int cx25821_riscmem_alloc(struct pci_dev *pci, + __le32 *cpu; + dma_addr_t dma = 0; + +- if (NULL != risc->cpu && risc->size < size) ++ if (risc->cpu && risc->size < size) { + pci_free_consistent(pci, risc->size, risc->cpu, risc->dma); ++ risc->cpu = NULL; ++ } + if (NULL == risc->cpu) { + cpu = pci_zalloc_consistent(pci, size, &dma); + if (NULL == cpu) +diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c +index f33c0de3e849..019bbc18cede 100644 +--- a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c ++++ b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c +@@ -184,7 +184,7 @@ int netup_spi_init(struct netup_unidvb_dev *ndev) + struct spi_master *master; + struct netup_spi *nspi; + +- master = spi_alloc_master(&ndev->pci_dev->dev, ++ master = 
devm_spi_alloc_master(&ndev->pci_dev->dev, + sizeof(struct netup_spi)); + if (!master) { + dev_err(&ndev->pci_dev->dev, +@@ -217,6 +217,7 @@ int netup_spi_init(struct netup_unidvb_dev *ndev) + ndev->pci_slot, + ndev->pci_func); + if (!spi_new_device(master, &netup_spi_board)) { ++ spi_unregister_master(master); + ndev->spi = NULL; + dev_err(&ndev->pci_dev->dev, + "%s(): unable to create SPI device\n", __func__); +@@ -235,13 +236,13 @@ void netup_spi_release(struct netup_unidvb_dev *ndev) + if (!spi) + return; + ++ spi_unregister_master(spi->master); + spin_lock_irqsave(&spi->lock, flags); + reg = readw(&spi->regs->control_stat); + writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat); + reg = readw(&spi->regs->control_stat); + writew(reg & ~NETUP_SPI_CTRL_IMASK, &spi->regs->control_stat); + spin_unlock_irqrestore(&spi->lock, flags); +- spi_unregister_master(spi->master); + ndev->spi = NULL; + } + +diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c +index 4e924e2d1638..fffa3630ba2d 100644 +--- a/drivers/media/pci/ngene/ngene-core.c ++++ b/drivers/media/pci/ngene/ngene-core.c +@@ -402,7 +402,7 @@ static int ngene_command_config_free_buf(struct ngene *dev, u8 *config) + + com.cmd.hdr.Opcode = CMD_CONFIGURE_FREE_BUFFER; + com.cmd.hdr.Length = 6; +- memcpy(&com.cmd.ConfigureBuffers.config, config, 6); ++ memcpy(&com.cmd.ConfigureFreeBuffers.config, config, 6); + com.in_len = 6; + com.out_len = 0; + +diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h +index fa30930d7047..da154c406545 100644 +--- a/drivers/media/pci/ngene/ngene.h ++++ b/drivers/media/pci/ngene/ngene.h +@@ -407,12 +407,14 @@ enum _BUFFER_CONFIGS { + + struct FW_CONFIGURE_FREE_BUFFERS { + struct FW_HEADER hdr; +- u8 UVI1_BufferLength; +- u8 UVI2_BufferLength; +- u8 TVO_BufferLength; +- u8 AUD1_BufferLength; +- u8 AUD2_BufferLength; +- u8 TVA_BufferLength; ++ struct { ++ u8 UVI1_BufferLength; ++ u8 UVI2_BufferLength; ++ u8 TVO_BufferLength; ++ u8 AUD1_BufferLength; ++ u8 AUD2_BufferLength; ++ u8 TVA_BufferLength; ++ } __packed config; + } __attribute__ ((__packed__)); + + struct FW_CONFIGURE_UART { +diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c +index f0fe2524259f..522e1e18850f 100644 +--- a/drivers/media/pci/saa7134/saa7134-empress.c ++++ b/drivers/media/pci/saa7134/saa7134-empress.c +@@ -297,8 +297,11 @@ static int empress_init(struct saa7134_dev *dev) + q->lock = &dev->lock; + q->dev = &dev->pci->dev; + err = vb2_queue_init(q); +- if (err) ++ if (err) { ++ video_device_release(dev->empress_dev); ++ dev->empress_dev = NULL; + return err; ++ } + dev->empress_dev->queue = q; + + video_set_drvdata(dev->empress_dev, dev); +diff --git a/drivers/media/pci/saa7134/saa7134-tvaudio.c b/drivers/media/pci/saa7134/saa7134-tvaudio.c +index 38f94b742e28..0b5d6f499457 100644 +--- a/drivers/media/pci/saa7134/saa7134-tvaudio.c ++++ b/drivers/media/pci/saa7134/saa7134-tvaudio.c +@@ -697,7 +697,8 @@ int saa_dsp_writel(struct saa7134_dev *dev, int reg, u32 value) + { + int err; + +- audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n", reg << 2, value); ++ audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n", ++ (reg << 2) & 0xffffffff, value); + err = saa_dsp_wait_bit(dev,SAA7135_DSP_RWSTATE_WRR); + if (err < 0) + return err; +diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c +index 504d78807639..3e8753c9e1e4 100644 +--- a/drivers/media/pci/saa7146/mxb.c ++++ b/drivers/media/pci/saa7146/mxb.c +@@ -652,16 
+652,17 @@ static int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio * + struct mxb *mxb = (struct mxb *)dev->ext_priv; + + DEB_D("VIDIOC_S_AUDIO %d\n", a->index); +- if (mxb_inputs[mxb->cur_input].audioset & (1 << a->index)) { +- if (mxb->cur_audinput != a->index) { +- mxb->cur_audinput = a->index; +- tea6420_route(mxb, a->index); +- if (mxb->cur_audinput == 0) +- mxb_update_audmode(mxb); +- } +- return 0; ++ if (a->index >= 32 || ++ !(mxb_inputs[mxb->cur_input].audioset & (1 << a->index))) ++ return -EINVAL; ++ ++ if (mxb->cur_audinput != a->index) { ++ mxb->cur_audinput = a->index; ++ tea6420_route(mxb, a->index); ++ if (mxb->cur_audinput == 0) ++ mxb_update_audmode(mxb); + } +- return -EINVAL; ++ return 0; + } + + #ifdef CONFIG_VIDEO_ADV_DEBUG +diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c +index 32a353d162e7..f2e6fbfa019f 100644 +--- a/drivers/media/pci/saa7164/saa7164-encoder.c ++++ b/drivers/media/pci/saa7164/saa7164-encoder.c +@@ -1030,7 +1030,7 @@ int saa7164_encoder_register(struct saa7164_port *port) + "(errno = %d), NO PCI configuration\n", + __func__, result); + result = -ENOMEM; +- goto failed; ++ goto fail_pci; + } + + /* Establish encoder defaults here */ +@@ -1084,7 +1084,7 @@ int saa7164_encoder_register(struct saa7164_port *port) + 100000, ENCODER_DEF_BITRATE); + if (hdl->error) { + result = hdl->error; +- goto failed; ++ goto fail_hdl; + } + + port->std = V4L2_STD_NTSC_M; +@@ -1102,7 +1102,7 @@ int saa7164_encoder_register(struct saa7164_port *port) + printk(KERN_INFO "%s: can't allocate mpeg device\n", + dev->name); + result = -ENOMEM; +- goto failed; ++ goto fail_hdl; + } + + port->v4l_device->ctrl_handler = hdl; +@@ -1113,10 +1113,7 @@ int saa7164_encoder_register(struct saa7164_port *port) + if (result < 0) { + printk(KERN_INFO "%s: can't register mpeg device\n", + dev->name); +- /* TODO: We're going to leak here if we don't dealloc +- The buffers above. The unreg function can't deal wit it. 
+- */ +- goto failed; ++ goto fail_reg; + } + + printk(KERN_INFO "%s: registered device video%d [mpeg]\n", +@@ -1138,9 +1135,14 @@ int saa7164_encoder_register(struct saa7164_port *port) + + saa7164_api_set_encoder(port); + saa7164_api_get_encoder(port); ++ return 0; + +- result = 0; +-failed: ++fail_reg: ++ video_device_release(port->v4l_device); ++ port->v4l_device = NULL; ++fail_hdl: ++ v4l2_ctrl_handler_free(hdl); ++fail_pci: + return result; + } + +diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c +index 6a35107aca25..0fe69e71331d 100644 +--- a/drivers/media/pci/solo6x10/solo6x10-g723.c ++++ b/drivers/media/pci/solo6x10/solo6x10-g723.c +@@ -385,7 +385,7 @@ int solo_g723_init(struct solo_dev *solo_dev) + + ret = snd_ctl_add(card, snd_ctl_new1(&kctl, solo_dev)); + if (ret < 0) +- return ret; ++ goto snd_error; + + ret = solo_snd_pcm_init(solo_dev); + if (ret < 0) +diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c +index 382caf200ba1..c313f51688f4 100644 +--- a/drivers/media/pci/ttpci/av7110.c ++++ b/drivers/media/pci/ttpci/av7110.c +@@ -426,14 +426,15 @@ static void debiirq(unsigned long cookie) + case DATA_CI_GET: + { + u8 *data = av7110->debi_virt; ++ u8 data_0 = data[0]; + +- if ((data[0] < 2) && data[2] == 0xff) { ++ if (data_0 < 2 && data[2] == 0xff) { + int flags = 0; + if (data[5] > 0) + flags |= CA_CI_MODULE_PRESENT; + if (data[5] > 5) + flags |= CA_CI_MODULE_READY; +- av7110->ci_slot[data[0]].flags = flags; ++ av7110->ci_slot[data_0].flags = flags; + } else + ci_get_data(&av7110->ci_rbuffer, + av7110->debi_virt, +diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c +index 6d42dcfd4825..e7bdfc4e4aa8 100644 +--- a/drivers/media/pci/ttpci/budget-core.c ++++ b/drivers/media/pci/ttpci/budget-core.c +@@ -386,20 +386,25 @@ static int budget_register(struct budget *budget) + ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend); + + if (ret < 0) +- return ret; ++ goto err_release_dmx; + + budget->mem_frontend.source = DMX_MEMORY_FE; + ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend); + if (ret < 0) +- return ret; ++ goto err_release_dmx; + + ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend); + if (ret < 0) +- return ret; ++ goto err_release_dmx; + + dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx); + + return 0; ++ ++err_release_dmx: ++ dvb_dmxdev_release(&budget->dmxdev); ++ dvb_dmx_release(&budget->demux); ++ return ret; + } + + static void budget_unregister(struct budget *budget) +diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c +index 27ff6e0d9845..023ff9ebde5e 100644 +--- a/drivers/media/pci/tw5864/tw5864-video.c ++++ b/drivers/media/pci/tw5864/tw5864-video.c +@@ -767,6 +767,9 @@ static int tw5864_enum_frameintervals(struct file *file, void *priv, + fintv->type = V4L2_FRMIVAL_TYPE_STEPWISE; + + ret = tw5864_frameinterval_get(input, &frameinterval); ++ if (ret) ++ return ret; ++ + fintv->stepwise.step = frameinterval; + fintv->stepwise.min = frameinterval; + fintv->stepwise.max = frameinterval; +@@ -785,6 +788,9 @@ static int tw5864_g_parm(struct file *file, void *priv, + cp->capability = V4L2_CAP_TIMEPERFRAME; + + ret = tw5864_frameinterval_get(input, &cp->timeperframe); ++ if (ret) ++ return ret; ++ + cp->timeperframe.numerator *= input->frame_interval; + cp->capturemode = 0; + cp->readbuffers = 2; +diff --git 
a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c +index 5104cc0ee40e..a3c4cc025ed6 100644 +--- a/drivers/media/platform/davinci/vpif_capture.c ++++ b/drivers/media/platform/davinci/vpif_capture.c +@@ -1408,8 +1408,6 @@ static int vpif_probe_complete(void) + /* Unregister video device */ + video_unregister_device(&ch->video_dev); + } +- kfree(vpif_obj.sd); +- v4l2_device_unregister(&vpif_obj.v4l2_dev); + + return err; + } +diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c +index c2c68988e38a..9884b34d6f40 100644 +--- a/drivers/media/platform/davinci/vpss.c ++++ b/drivers/media/platform/davinci/vpss.c +@@ -519,19 +519,31 @@ static void vpss_exit(void) + + static int __init vpss_init(void) + { ++ int ret; ++ + if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control")) + return -EBUSY; + + oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4); + if (unlikely(!oper_cfg.vpss_regs_base2)) { +- release_mem_region(VPSS_CLK_CTRL, 4); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto err_ioremap; + } + + writel(VPSS_CLK_CTRL_VENCCLKEN | +- VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2); ++ VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2); ++ ++ ret = platform_driver_register(&vpss_driver); ++ if (ret) ++ goto err_pd_register; ++ ++ return 0; + +- return platform_driver_register(&vpss_driver); ++err_pd_register: ++ iounmap(oper_cfg.vpss_regs_base2); ++err_ioremap: ++ release_mem_region(VPSS_CLK_CTRL, 4); ++ return ret; + } + subsys_initcall(vpss_init); + module_exit(vpss_exit); +diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c +index 8efe9160ab34..dbc4f57f34a5 100644 +--- a/drivers/media/platform/exynos4-is/fimc-isp.c ++++ b/drivers/media/platform/exynos4-is/fimc-isp.c +@@ -311,8 +311,10 @@ static int fimc_isp_subdev_s_power(struct v4l2_subdev *sd, int on) + + if (on) { + ret = pm_runtime_get_sync(&is->pdev->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put(&is->pdev->dev); + return ret; ++ } + set_bit(IS_ST_PWR_ON, &is->state); + + ret = fimc_is_start_firmware(is); +diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c +index b91abf1c4d43..f1921e06ffe1 100644 +--- a/drivers/media/platform/exynos4-is/fimc-lite.c ++++ b/drivers/media/platform/exynos4-is/fimc-lite.c +@@ -480,7 +480,7 @@ static int fimc_lite_open(struct file *file) + set_bit(ST_FLITE_IN_USE, &fimc->state); + ret = pm_runtime_get_sync(&fimc->pdev->dev); + if (ret < 0) +- goto unlock; ++ goto err_pm; + + ret = v4l2_fh_open(file); + if (ret < 0) +diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c +index ef6ccb5b8952..a1599659b88b 100644 +--- a/drivers/media/platform/exynos4-is/media-dev.c ++++ b/drivers/media/platform/exynos4-is/media-dev.c +@@ -477,8 +477,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd) + return -ENXIO; + + ret = pm_runtime_get_sync(fmd->pmf); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put(fmd->pmf); + return ret; ++ } + + fmd->num_sensors = 0; + +@@ -1255,6 +1257,7 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd) + if (IS_ERR(pctl->state_default)) + return PTR_ERR(pctl->state_default); + ++ /* PINCTRL_STATE_IDLE is optional */ + pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl, + PINCTRL_STATE_IDLE); + return 0; +diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c +index 
befd9fc0adc4..dc87c9cfa52f 100644 +--- a/drivers/media/platform/exynos4-is/mipi-csis.c ++++ b/drivers/media/platform/exynos4-is/mipi-csis.c +@@ -513,8 +513,10 @@ static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable) + if (enable) { + s5pcsis_clear_counters(state); + ret = pm_runtime_get_sync(&state->pdev->dev); +- if (ret && ret != 1) ++ if (ret && ret != 1) { ++ pm_runtime_put_noidle(&state->pdev->dev); + return ret; ++ } + } + + mutex_lock(&state->lock); +diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c +index 1ad563399308..342b02f783a4 100644 +--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c ++++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2019, 2021 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -46,6 +46,112 @@ static struct cam_jpeg_hw_mgr g_jpeg_hw_mgr; + static int32_t cam_jpeg_hw_mgr_cb(uint32_t irq_status, + int32_t result_size, void *data); + static int cam_jpeg_mgr_process_cmd(void *priv, void *data); ++static int cam_jpeg_insert_cdm_change_base( ++ struct cam_hw_config_args *config_args, ++ struct cam_jpeg_hw_ctx_data *ctx_data, ++ struct cam_jpeg_hw_mgr *hw_mgr); ++ ++static int cam_jpeg_process_next_hw_update(void *priv, void *data) ++{ ++ int rc; ++ int i = 0; ++ struct cam_jpeg_hw_mgr *hw_mgr = priv; ++ struct cam_hw_update_entry *cmd; ++ struct cam_cdm_bl_request *cdm_cmd; ++ struct cam_hw_config_args *config_args = NULL; ++ struct cam_jpeg_hw_ctx_data *ctx_data = NULL; ++ uint32_t dev_type; ++ struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL; ++ uint32_t cdm_cfg_to_insert = 0; ++ ++ if (!data || !priv) { ++ CAM_ERR(CAM_JPEG, "Invalid data"); ++ return -EINVAL; ++ } ++ ++ ctx_data = (struct cam_jpeg_hw_ctx_data *)data; ++ dev_type = ctx_data->jpeg_dev_acquire_info.dev_type; ++ p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0]; ++ config_args = (struct cam_hw_config_args *)&p_cfg_req->hw_cfg_args; ++ ++ if (!hw_mgr->devices[dev_type][0]->hw_ops.reset) { ++ CAM_ERR(CAM_JPEG, "op reset null "); ++ rc = -EFAULT; ++ goto end_error; ++ } ++ rc = hw_mgr->devices[dev_type][0]->hw_ops.reset( ++ hw_mgr->devices[dev_type][0]->hw_priv, ++ NULL, 0); ++ if (rc) { ++ CAM_ERR(CAM_JPEG, "jpeg hw reset failed %d", rc); ++ goto end_error; ++ } ++ ++ cdm_cmd = ctx_data->cdm_cmd; ++ cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE; ++ cdm_cmd->flag = false; ++ cdm_cmd->userdata = NULL; ++ cdm_cmd->cookie = 0; ++ cdm_cmd->cmd_arrary_count = 0; ++ ++ /* insert cdm chage base cmd */ ++ rc = cam_jpeg_insert_cdm_change_base(config_args, ++ ctx_data, hw_mgr); ++ if (rc) { ++ CAM_ERR(CAM_JPEG, "insert change base failed %d", rc); ++ goto end_error; ++ } ++ ++ /* insert next cdm payload at index */ ++ /* for enc or dma 1st pass at index 1 */ ++ /* for dma 2nd pass at index 2, for 3rd at 4 */ ++ if (p_cfg_req->num_hw_entry_processed == 0) ++ cdm_cfg_to_insert = CAM_JPEG_CFG; ++ else ++ cdm_cfg_to_insert = p_cfg_req->num_hw_entry_processed + 2; ++ ++ CAM_DBG(CAM_JPEG, "processed %d total %d using cfg entry %d for %pK", ++ p_cfg_req->num_hw_entry_processed, ++ config_args->num_hw_update_entries, ++ cdm_cfg_to_insert, ++ p_cfg_req); ++ ++ cmd = (config_args->hw_update_entries + 
cdm_cfg_to_insert); ++ cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].bl_addr.mem_handle = ++ cmd->handle; ++ cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].offset = ++ cmd->offset; ++ cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].len = ++ cmd->len; ++ CAM_DBG(CAM_JPEG, "i %d entry h %d o %d l %d", ++ i, cmd->handle, cmd->offset, cmd->len); ++ cdm_cmd->cmd_arrary_count++; ++ ++ rc = cam_cdm_submit_bls( ++ hw_mgr->cdm_info[dev_type][0].cdm_handle, ++ cdm_cmd); ++ if (rc) { ++ CAM_ERR(CAM_JPEG, "Failed to apply the configs %d", rc); ++ goto end_error; ++ } ++ ++ if (!hw_mgr->devices[dev_type][0]->hw_ops.start) { ++ CAM_ERR(CAM_JPEG, "op start null "); ++ rc = -EINVAL; ++ goto end_error; ++ } ++ rc = hw_mgr->devices[dev_type][0]->hw_ops.start( ++ hw_mgr->devices[dev_type][0]->hw_priv, NULL, 0); ++ if (rc) { ++ CAM_ERR(CAM_JPEG, "Failed to apply the configs %d", ++ rc); ++ goto end_error; ++ } ++ ++ return 0; ++end_error: ++ return rc; ++} + + static int cam_jpeg_mgr_process_irq(void *priv, void *data) + { +@@ -92,6 +198,21 @@ static int cam_jpeg_mgr_process_irq(void *priv, void *data) + return -EINVAL; + } + ++ p_cfg_req->num_hw_entry_processed++; ++ CAM_DBG(CAM_JPEG, "hw entry processed %d", ++ p_cfg_req->num_hw_entry_processed); ++ ++ if ((task_data->result_size > 0) && ++ (p_cfg_req->num_hw_entry_processed < ++ p_cfg_req->hw_cfg_args.num_hw_update_entries - 2)) { ++ /* start processing next entry before marking device free */ ++ rc = cam_jpeg_process_next_hw_update(priv, ctx_data); ++ if (!rc) { ++ mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex); ++ return 0; ++ } ++ } ++ + irq_cb.jpeg_hw_mgr_cb = cam_jpeg_hw_mgr_cb; + irq_cb.data = NULL; + irq_cb.b_set_cb = false; +@@ -278,7 +399,9 @@ static int cam_jpeg_insert_cdm_change_base( + + if (config_args->hw_update_entries[CAM_JPEG_CHBASE].offset >= + ch_base_len) { +- CAM_ERR(CAM_JPEG, "Not enough buf"); ++ CAM_ERR(CAM_JPEG, "Not enough buf offset %d len %d", ++ config_args->hw_update_entries[CAM_JPEG_CHBASE].offset, ++ ch_base_len); + return -EINVAL; + } + CAM_DBG(CAM_JPEG, "iova %pK len %zu offset %d", +@@ -317,8 +440,6 @@ static int cam_jpeg_mgr_process_cmd(void *priv, void *data) + int rc; + int i = 0; + struct cam_jpeg_hw_mgr *hw_mgr = priv; +- struct cam_hw_update_entry *cmd; +- struct cam_cdm_bl_request *cdm_cmd; + struct cam_hw_config_args *config_args = NULL; + struct cam_jpeg_hw_ctx_data *ctx_data = NULL; + uintptr_t request_id = 0; +@@ -420,66 +541,10 @@ static int cam_jpeg_mgr_process_cmd(void *priv, void *data) + goto end_callcb; + } + +- if (!hw_mgr->devices[dev_type][0]->hw_ops.reset) { +- CAM_ERR(CAM_JPEG, "op reset null "); +- rc = -EFAULT; +- goto end_callcb; +- } +- rc = hw_mgr->devices[dev_type][0]->hw_ops.reset( +- hw_mgr->devices[dev_type][0]->hw_priv, +- NULL, 0); +- if (rc) { +- CAM_ERR(CAM_JPEG, "jpeg hw reset failed %d", rc); +- goto end_callcb; +- } +- +- cdm_cmd = ctx_data->cdm_cmd; +- cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE; +- cdm_cmd->flag = false; +- cdm_cmd->userdata = NULL; +- cdm_cmd->cookie = 0; +- cdm_cmd->cmd_arrary_count = 0; +- +- rc = cam_jpeg_insert_cdm_change_base(config_args, +- ctx_data, hw_mgr); +- if (rc) { +- CAM_ERR(CAM_JPEG, "insert change base failed %d", rc); +- goto end_callcb; +- } +- +- CAM_DBG(CAM_JPEG, "num hw up %d", config_args->num_hw_update_entries); +- for (i = CAM_JPEG_CFG; i < (config_args->num_hw_update_entries - 1); +- i++) { +- cmd = (config_args->hw_update_entries + i); +- cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].bl_addr.mem_handle +- = cmd->handle; +- 
cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].offset = +- cmd->offset; +- cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].len = +- cmd->len; +- CAM_DBG(CAM_JPEG, "i %d entry h %d o %d l %d", +- i, cmd->handle, cmd->offset, cmd->len); +- cdm_cmd->cmd_arrary_count++; +- } +- +- rc = cam_cdm_submit_bls( +- hw_mgr->cdm_info[dev_type][0].cdm_handle, cdm_cmd); +- if (rc) { +- CAM_ERR(CAM_JPEG, "Failed to apply the configs %d", rc); +- goto end_callcb; +- } +- +- if (!hw_mgr->devices[dev_type][0]->hw_ops.start) { +- CAM_ERR(CAM_JPEG, "op start null "); +- rc = -EINVAL; +- goto end_callcb; +- } +- rc = hw_mgr->devices[dev_type][0]->hw_ops.start( +- hw_mgr->devices[dev_type][0]->hw_priv, +- NULL, 0); ++ /* insert one of the cdm payloads */ ++ rc = cam_jpeg_process_next_hw_update(priv, ctx_data); + if (rc) { +- CAM_ERR(CAM_JPEG, "Failed to start hw %d", +- rc); ++ CAM_ERR(CAM_JPEG, "next hw update failed %d", rc); + goto end_callcb; + } + +@@ -559,6 +624,7 @@ static int cam_jpeg_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args) + + request_id = (uintptr_t)config_args->priv; + p_cfg_req->req_id = request_id; ++ p_cfg_req->num_hw_entry_processed = 0; + hw_update_entries = config_args->hw_update_entries; + CAM_DBG(CAM_JPEG, "ctx_data = %pK req_id = %lld %zd", + ctx_data, request_id, (uintptr_t)config_args->priv); +@@ -769,6 +835,7 @@ static int cam_jpeg_mgr_prepare_hw_update(void *hw_mgr_priv, + i, io_cfg_ptr[i].direction, io_cfg_ptr[i].fence); + } + ++ CAM_DBG(CAM_JPEG, "received num cmd buf %d", packet->num_cmd_buf); + + j = prepare_args->num_hw_update_entries; + rc = cam_packet_util_get_kmd_buffer(packet, &kmd_buf); +diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h +index 82022ec24a77..f6d40188d051 100644 +--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h ++++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2018, 2020-2021 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -76,12 +76,14 @@ struct cam_jpeg_hw_cdm_info_t { + * @hw_cfg_args: Hw config args + * @dev_type: Dev type for cfg request + * @req_id: Request Id ++ * @num_hw_entry_processed: Cdm payloads already processed + */ + struct cam_jpeg_hw_cfg_req { + struct list_head list; + struct cam_hw_config_args hw_cfg_args; + uint32_t dev_type; + uintptr_t req_id; ++ uint32_t num_hw_entry_processed; + }; + + /** +diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_hw_info_ver_4_2_0.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_hw_info_ver_4_2_0.h +new file mode 100644 +index 000000000000..ff8997bd4615 +--- /dev/null ++++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/cam_jpeg_dma_hw_info_ver_4_2_0.h +@@ -0,0 +1,53 @@ ++/* Copyright (c) 2020 The Linux Foundation. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. 
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ */ ++ ++#ifndef CAM_JPEG_DMA_HW_INFO_VER_4_2_0_H ++#define CAM_JPEG_DMA_HW_INFO_VER_4_2_0_H ++ ++#define CAM_JPEGDMA_HW_IRQ_STATUS_SESSION_DONE (1 << 0) ++#define CAM_JPEGDMA_HW_IRQ_STATUS_RD_BUF_DONE (1 << 1) ++#define CAM_JPEGDMA_HW_IRQ_STATUS_WR_BUF_DONE (1 << 5) ++#define CAM_JPEGDMA_HW_IRQ_STATUS_AXI_HALT (1 << 9) ++#define CAM_JPEGDMA_HW_IRQ_STATUS_RST_DONE (1 << 10) ++ ++#define CAM_JPEGDMA_HW_MASK_COMP_FRAMEDONE \ ++ CAM_JPEGDMA_HW_IRQ_STATUS_SESSION_DONE ++#define CAM_JPEGDMA_HW_MASK_COMP_RESET_ACK \ ++ CAM_JPEGDMA_HW_IRQ_STATUS_RST_DONE ++ ++static struct cam_jpeg_dma_device_hw_info cam_jpeg_dma_hw_info = { ++ .reg_offset = { ++ .hw_version = 0x0, ++ .int_clr = 0x14, ++ .int_status = 0x10, ++ .int_mask = 0x0C, ++ .hw_cmd = 0x1C, ++ .reset_cmd = 0x08, ++ .encode_size = 0x180, ++ }, ++ .reg_val = { ++ .int_clr_clearall = 0xFFFFFFFF, ++ .int_mask_disable_all = 0x00000000, ++ .int_mask_enable_all = 0xFFFFFFFF, ++ .hw_cmd_start = 0x00000001, ++ .reset_cmd = 0x32083, ++ .hw_cmd_stop = 0x00000004, ++ }, ++ .int_status = { ++ .framedone = CAM_JPEGDMA_HW_MASK_COMP_FRAMEDONE, ++ .resetdone = CAM_JPEGDMA_HW_MASK_COMP_RESET_ACK, ++ .iserror = 0x0, ++ .stopdone = CAM_JPEGDMA_HW_IRQ_STATUS_AXI_HALT, ++ } ++}; ++ ++#endif /* CAM_JPEG_DMA_HW_INFO_VER_4_2_0_H */ +diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c +index bc6dce9f408e..9ec47e67978f 100644 +--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c ++++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2018, 2020-2021 The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -31,6 +31,15 @@ + #include "cam_cpas_api.h" + #include "cam_debug_util.h" + ++#define CAM_JPEG_HW_IRQ_IS_FRAME_DONE(jpeg_irq_status, hi) \ ++ ((jpeg_irq_status) & (hi)->int_status.framedone) ++#define CAM_JPEG_HW_IRQ_IS_RESET_ACK(jpeg_irq_status, hi) \ ++ ((jpeg_irq_status) & (hi)->int_status.resetdone) ++#define CAM_JPEG_HW_IRQ_IS_STOP_DONE(jpeg_irq_status, hi) \ ++ ((jpeg_irq_status) & (hi)->int_status.stopdone) ++ ++#define CAM_JPEG_DMA_RESET_TIMEOUT msecs_to_jiffies(500) ++ + int cam_jpeg_dma_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size) + { +@@ -130,7 +139,7 @@ int cam_jpeg_dma_deinit_hw(void *device_priv, + + rc = cam_jpeg_dma_disable_soc_resources(soc_info); + if (rc) +- CAM_ERR(CAM_JPEG, "soc enable failed %d", rc); ++ CAM_ERR(CAM_JPEG, "soc disable failed %d", rc); + + rc = cam_cpas_stop(core_info->cpas_handle); + if (rc) +@@ -141,6 +150,226 @@ int cam_jpeg_dma_deinit_hw(void *device_priv, + return 0; + } + ++irqreturn_t cam_jpeg_dma_irq(int irq_num, void *data) ++{ ++ struct cam_hw_info *jpeg_dma_dev = data; ++ struct cam_jpeg_dma_device_core_info *core_info = NULL; ++ uint32_t irq_status = 0; ++ struct cam_hw_soc_info *soc_info = NULL; ++ struct cam_jpeg_dma_device_hw_info *hw_info = NULL; ++ void __iomem *mem_base; ++ ++ if (!jpeg_dma_dev) { ++ CAM_ERR(CAM_JPEG, "Invalid args"); ++ return IRQ_HANDLED; ++ } ++ soc_info = &jpeg_dma_dev->soc_info; ++ core_info = (struct cam_jpeg_dma_device_core_info *) ++ jpeg_dma_dev->core_info; ++ hw_info = core_info->jpeg_dma_hw_info; ++ mem_base = soc_info->reg_map[0].mem_base; ++ ++ irq_status = cam_io_r_mb(mem_base + ++ core_info->jpeg_dma_hw_info->reg_offset.int_status); ++ cam_io_w_mb(irq_status, ++ soc_info->reg_map[0].mem_base + ++ core_info->jpeg_dma_hw_info->reg_offset.int_clr); ++ CAM_DBG(CAM_JPEG, "irq_num %d irq_status = %x , core_state %d", ++ irq_num, irq_status, core_info->core_state); ++ if (CAM_JPEG_HW_IRQ_IS_FRAME_DONE(irq_status, hw_info)) { ++ spin_lock(&jpeg_dma_dev->hw_lock); ++ if (core_info->core_state == CAM_JPEG_DMA_CORE_READY) { ++ core_info->result_size = 1; ++ CAM_DBG(CAM_JPEG, "result_size %d", ++ core_info->result_size); ++ core_info->core_state = ++ CAM_JPEG_DMA_CORE_RESETTING_ON_DONE; ++ cam_io_w_mb(hw_info->reg_val.reset_cmd, ++ mem_base + hw_info->reg_offset.reset_cmd); ++ } else { ++ CAM_WARN(CAM_JPEG, "unexpected frame done "); ++ core_info->core_state = CAM_JPEG_DMA_CORE_NOT_READY; ++ } ++ spin_unlock(&jpeg_dma_dev->hw_lock); ++ } ++ ++ if (CAM_JPEG_HW_IRQ_IS_RESET_ACK(irq_status, hw_info)) { ++ spin_lock(&jpeg_dma_dev->hw_lock); ++ if (core_info->core_state == CAM_JPEG_DMA_CORE_RESETTING) { ++ core_info->core_state = CAM_JPEG_DMA_CORE_READY; ++ core_info->result_size = -1; ++ complete(&jpeg_dma_dev->hw_complete); ++ } else if (core_info->core_state == ++ CAM_JPEG_DMA_CORE_RESETTING_ON_DONE) { ++ if (core_info->irq_cb.jpeg_hw_mgr_cb) { ++ core_info->irq_cb.jpeg_hw_mgr_cb(irq_status, ++ core_info->result_size, ++ core_info->irq_cb.data); ++ } else { ++ CAM_WARN(CAM_JPEG, "unexpected frame done"); ++ } ++ core_info->result_size = -1; ++ core_info->core_state = CAM_JPEG_DMA_CORE_NOT_READY; ++ } else { ++ CAM_ERR(CAM_JPEG, "unexpected reset irq"); ++ } ++ spin_unlock(&jpeg_dma_dev->hw_lock); ++ } ++ if (CAM_JPEG_HW_IRQ_IS_STOP_DONE(irq_status, hw_info)) { ++ spin_lock(&jpeg_dma_dev->hw_lock); ++ if (core_info->core_state == 
CAM_JPEG_DMA_CORE_ABORTING) { ++ core_info->core_state = CAM_JPEG_DMA_CORE_NOT_READY; ++ complete(&jpeg_dma_dev->hw_complete); ++ if (core_info->irq_cb.jpeg_hw_mgr_cb) { ++ core_info->irq_cb.jpeg_hw_mgr_cb(irq_status, ++ -1, ++ core_info->irq_cb.data); ++ } ++ } else { ++ CAM_ERR(CAM_JPEG, "unexpected abort irq"); ++ } ++ spin_unlock(&jpeg_dma_dev->hw_lock); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++int cam_jpeg_dma_reset_hw(void *data, ++ void *start_args, uint32_t arg_size) ++{ ++ struct cam_hw_info *jpeg_dma_dev = data; ++ struct cam_jpeg_dma_device_core_info *core_info = NULL; ++ struct cam_hw_soc_info *soc_info = NULL; ++ struct cam_jpeg_dma_device_hw_info *hw_info = NULL; ++ void __iomem *mem_base; ++ unsigned long rem_jiffies; ++ ++ if (!jpeg_dma_dev) { ++ CAM_ERR(CAM_JPEG, "Invalid args"); ++ return -EINVAL; ++ } ++ /* maskdisable.clrirq.maskenable.resetcmd */ ++ soc_info = &jpeg_dma_dev->soc_info; ++ core_info = (struct cam_jpeg_dma_device_core_info *) ++ jpeg_dma_dev->core_info; ++ hw_info = core_info->jpeg_dma_hw_info; ++ mem_base = soc_info->reg_map[0].mem_base; ++ ++ mutex_lock(&core_info->core_mutex); ++ spin_lock(&jpeg_dma_dev->hw_lock); ++ if (core_info->core_state == CAM_JPEG_DMA_CORE_RESETTING) { ++ CAM_ERR(CAM_JPEG, "dma alrady resetting"); ++ spin_unlock(&jpeg_dma_dev->hw_lock); ++ mutex_unlock(&core_info->core_mutex); ++ return 0; ++ } ++ ++ reinit_completion(&jpeg_dma_dev->hw_complete); ++ ++ core_info->core_state = CAM_JPEG_DMA_CORE_RESETTING; ++ spin_unlock(&jpeg_dma_dev->hw_lock); ++ ++ cam_io_w_mb(hw_info->reg_val.int_mask_disable_all, ++ mem_base + hw_info->reg_offset.int_mask); ++ cam_io_w_mb(hw_info->reg_val.int_clr_clearall, ++ mem_base + hw_info->reg_offset.int_clr); ++ cam_io_w_mb(hw_info->reg_val.int_mask_enable_all, ++ mem_base + hw_info->reg_offset.int_mask); ++ cam_io_w_mb(hw_info->reg_val.reset_cmd, ++ mem_base + hw_info->reg_offset.reset_cmd); ++ ++ rem_jiffies = wait_for_completion_timeout(&jpeg_dma_dev->hw_complete, ++ CAM_JPEG_DMA_RESET_TIMEOUT); ++ if (!rem_jiffies) { ++ CAM_ERR(CAM_JPEG, "dma error Reset Timeout"); ++ core_info->core_state = CAM_JPEG_DMA_CORE_NOT_READY; ++ } ++ ++ mutex_unlock(&core_info->core_mutex); ++ return 0; ++} ++ ++int cam_jpeg_dma_start_hw(void *data, ++ void *start_args, uint32_t arg_size) ++{ ++ struct cam_hw_info *jpeg_dma_dev = data; ++ struct cam_jpeg_dma_device_core_info *core_info = NULL; ++ struct cam_hw_soc_info *soc_info = NULL; ++ struct cam_jpeg_dma_device_hw_info *hw_info = NULL; ++ void __iomem *mem_base; ++ ++ if (!jpeg_dma_dev) { ++ CAM_ERR(CAM_JPEG, "Invalid args"); ++ return -EINVAL; ++ } ++ ++ soc_info = &jpeg_dma_dev->soc_info; ++ core_info = (struct cam_jpeg_dma_device_core_info *) ++ jpeg_dma_dev->core_info; ++ hw_info = core_info->jpeg_dma_hw_info; ++ mem_base = soc_info->reg_map[0].mem_base; ++ ++ if (core_info->core_state != CAM_JPEG_DMA_CORE_READY) { ++ CAM_ERR(CAM_JPEG, "Error not ready"); ++ return -EINVAL; ++ } ++ ++ CAM_DBG(CAM_JPEG, "Starting DMA"); ++ cam_io_w_mb(0x00000601, ++ mem_base + hw_info->reg_offset.int_mask); ++ cam_io_w_mb(hw_info->reg_val.hw_cmd_start, ++ mem_base + hw_info->reg_offset.hw_cmd); ++ ++ return 0; ++} ++ ++int cam_jpeg_dma_stop_hw(void *data, ++ void *stop_args, uint32_t arg_size) ++{ ++ struct cam_hw_info *jpeg_dma_dev = data; ++ struct cam_jpeg_dma_device_core_info *core_info = NULL; ++ struct cam_hw_soc_info *soc_info = NULL; ++ struct cam_jpeg_dma_device_hw_info *hw_info = NULL; ++ void __iomem *mem_base; ++ unsigned long rem_jiffies; ++ ++ if (!jpeg_dma_dev) { ++ 
CAM_ERR(CAM_JPEG, "Invalid args"); ++ return -EINVAL; ++ } ++ soc_info = &jpeg_dma_dev->soc_info; ++ core_info = (struct cam_jpeg_dma_device_core_info *) ++ jpeg_dma_dev->core_info; ++ hw_info = core_info->jpeg_dma_hw_info; ++ mem_base = soc_info->reg_map[0].mem_base; ++ ++ mutex_lock(&core_info->core_mutex); ++ spin_lock(&jpeg_dma_dev->hw_lock); ++ if (core_info->core_state == CAM_JPEG_DMA_CORE_ABORTING) { ++ CAM_ERR(CAM_JPEG, "alrady stopping"); ++ spin_unlock(&jpeg_dma_dev->hw_lock); ++ mutex_unlock(&core_info->core_mutex); ++ return 0; ++ } ++ ++ reinit_completion(&jpeg_dma_dev->hw_complete); ++ core_info->core_state = CAM_JPEG_DMA_CORE_ABORTING; ++ spin_unlock(&jpeg_dma_dev->hw_lock); ++ ++ cam_io_w_mb(hw_info->reg_val.hw_cmd_stop, ++ mem_base + hw_info->reg_offset.hw_cmd); ++ ++ rem_jiffies = wait_for_completion_timeout(&jpeg_dma_dev->hw_complete, ++ CAM_JPEG_DMA_RESET_TIMEOUT); ++ if (!rem_jiffies) { ++ CAM_ERR(CAM_JPEG, "error Reset Timeout"); ++ core_info->core_state = CAM_JPEG_DMA_CORE_NOT_READY; ++ } ++ ++ mutex_unlock(&core_info->core_mutex); ++ return 0; ++} ++ + int cam_jpeg_dma_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size) + { +@@ -185,12 +414,7 @@ int cam_jpeg_dma_process_cmd(void *device_priv, uint32_t cmd_type, + rc = -EINVAL; + break; + } +- ++ if (rc) ++ CAM_ERR(CAM_JPEG, "error cmdtype %d rc = %d", cmd_type, rc); + return rc; + } +- +-irqreturn_t cam_jpeg_dma_irq(int irq_num, void *data) +-{ +- return IRQ_HANDLED; +-} +- +diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h +index a4d5d89e705e..7da4571432cc 100644 +--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h ++++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017, 2020-2021 The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -21,14 +21,44 @@ + + #include "cam_jpeg_hw_intf.h" + ++struct cam_jpeg_dma_reg_offsets { ++ uint32_t hw_version; ++ uint32_t int_status; ++ uint32_t int_clr; ++ uint32_t int_mask; ++ uint32_t hw_cmd; ++ uint32_t reset_cmd; ++ uint32_t encode_size; ++}; ++ ++struct cam_jpeg_dma_regval { ++ uint32_t int_clr_clearall; ++ uint32_t int_mask_disable_all; ++ uint32_t int_mask_enable_all; ++ uint32_t hw_cmd_start; ++ uint32_t reset_cmd; ++ uint32_t hw_cmd_stop; ++}; ++ ++struct cam_jpeg_dma_int_status { ++ uint32_t framedone; ++ uint32_t resetdone; ++ uint32_t iserror; ++ uint32_t stopdone; ++}; ++ + struct cam_jpeg_dma_device_hw_info { +- uint32_t reserved; ++ struct cam_jpeg_dma_reg_offsets reg_offset; ++ struct cam_jpeg_dma_regval reg_val; ++ struct cam_jpeg_dma_int_status int_status; + }; + + enum cam_jpeg_dma_core_state { + CAM_JPEG_DMA_CORE_NOT_READY, + CAM_JPEG_DMA_CORE_READY, + CAM_JPEG_DMA_CORE_RESETTING, ++ CAM_JPEG_DMA_CORE_ABORTING, ++ CAM_JPEG_DMA_CORE_RESETTING_ON_DONE, + CAM_JPEG_DMA_CORE_STATE_MAX, + }; + +@@ -39,12 +69,19 @@ struct cam_jpeg_dma_device_core_info { + struct cam_jpeg_set_irq_cb irq_cb; + int32_t ref_count; + struct mutex core_mutex; ++ int32_t result_size; + }; + + int cam_jpeg_dma_init_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); + int cam_jpeg_dma_deinit_hw(void *device_priv, + void *init_hw_args, uint32_t arg_size); ++int cam_jpeg_dma_start_hw(void *device_priv, ++ void *start_hw_args, uint32_t arg_size); ++int cam_jpeg_dma_stop_hw(void *device_priv, ++ void *stop_hw_args, uint32_t arg_size); ++int cam_jpeg_dma_reset_hw(void *device_priv, ++ void *reset_hw_args, uint32_t arg_size); + int cam_jpeg_dma_process_cmd(void *device_priv, uint32_t cmd_type, + void *cmd_args, uint32_t arg_size); + irqreturn_t cam_jpeg_dma_irq(int irq_num, void *data); +diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c +index fd4fdab19fa7..f91eee863352 100644 +--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c ++++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2018,2020 The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -25,11 +25,7 @@ + #include "cam_jpeg_hw_mgr_intf.h" + #include "cam_cpas_api.h" + #include "cam_debug_util.h" +- +-static struct cam_jpeg_dma_device_hw_info cam_jpeg_dma_hw_info = { +- .reserved = 0, +-}; +-EXPORT_SYMBOL(cam_jpeg_dma_hw_info); ++#include "cam_jpeg_dma_hw_info_ver_4_2_0.h" + + static int cam_jpeg_dma_register_cpas(struct cam_hw_soc_info *soc_info, + struct cam_jpeg_dma_device_core_info *core_info, +@@ -142,6 +138,9 @@ static int cam_jpeg_dma_probe(struct platform_device *pdev) + jpeg_dma_dev_intf->hw_priv = jpeg_dma_dev; + jpeg_dma_dev_intf->hw_ops.init = cam_jpeg_dma_init_hw; + jpeg_dma_dev_intf->hw_ops.deinit = cam_jpeg_dma_deinit_hw; ++ jpeg_dma_dev_intf->hw_ops.start = cam_jpeg_dma_start_hw; ++ jpeg_dma_dev_intf->hw_ops.stop = cam_jpeg_dma_stop_hw; ++ jpeg_dma_dev_intf->hw_ops.reset = cam_jpeg_dma_reset_hw; + jpeg_dma_dev_intf->hw_ops.process_cmd = cam_jpeg_dma_process_cmd; + jpeg_dma_dev_intf->hw_type = CAM_JPEG_DEV_DMA; + +@@ -186,13 +185,11 @@ static int cam_jpeg_dma_probe(struct platform_device *pdev) + mutex_init(&jpeg_dma_dev->hw_mutex); + spin_lock_init(&jpeg_dma_dev->hw_lock); + init_completion(&jpeg_dma_dev->hw_complete); +- +- CAM_DBG(CAM_JPEG, " hwidx %d", jpeg_dma_dev_intf->hw_idx); +- ++ CAM_DBG(CAM_JPEG, "JPEG-DMA component bound successfully"); + return rc; + + error_reg_cpas: +- rc = cam_soc_util_release_platform_resource(&jpeg_dma_dev->soc_info); ++ cam_soc_util_release_platform_resource(&jpeg_dma_dev->soc_info); + error_init_soc: + mutex_destroy(&core_info->core_mutex); + error_match_dev: +diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c +index 7fcc1ada1a36..c2b8755306a8 100644 +--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c ++++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2018, 2020-2021 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -323,6 +323,7 @@ int cam_jpeg_enc_start_hw(void *data, + return -EINVAL; + } + ++ CAM_DBG(CAM_JPEG, "Starting JPEG ENC"); + cam_io_w_mb(hw_info->reg_val.hw_cmd_start, + mem_base + hw_info->reg_offset.hw_cmd); + +diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c +index d4daa6dde308..be703d6c3055 100644 +--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c ++++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2018,2020 The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -185,7 +185,7 @@ static int cam_jpeg_enc_probe(struct platform_device *pdev) + mutex_init(&jpeg_enc_dev->hw_mutex); + spin_lock_init(&jpeg_enc_dev->hw_lock); + init_completion(&jpeg_enc_dev->hw_complete); +- ++ CAM_DBG(CAM_JPEG, "JPEG-Encoder component bound successfully"); + return rc; + + error_reg_cpas: +diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c +index fb921600d952..f4f27ab2e125 100644 +--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c ++++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h +index 073796b601c7..4eef8a14c680 100644 +--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h ++++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c +index f98e23fe6544..6151308c12a9 100644 +--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c ++++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c +@@ -198,9 +198,9 @@ static int msm_csiphy_snps_2_lane_config( + + local_data_rate = csiphy_params->data_rate; + +- if (mode == TWO_LANE_PHY_A) ++ if (mode == PHY_A_MODE) + offset = 0x0; +- else if (mode == TWO_LANE_PHY_B) ++ else if (mode == PHY_B_MODE) + offset = SNPS_INTERPHY_OFFSET; + else + return -EINVAL; +@@ -222,7 +222,7 @@ static int msm_csiphy_snps_2_lane_config( + + csiphy_dev->snps_programmed_data_rate = csiphy_params->data_rate; + +- if (mode == TWO_LANE_PHY_A) { ++ if (mode == PHY_A_MODE) { + msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg. + mipi_csiphy_sys_ctrl.data, + csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg. 
+@@ -342,7 +342,7 @@ static int msm_csiphy_snps_lane_config( + */ + + lane_mask = csiphy_params->lane_mask & 0x3f; +- CDBG("%s:%d lane_maks: %d, cur_snps_state = %d\n", ++ CDBG("%s:%d lane_mask: 0x%x, cur_snps_state = %d\n", + __func__, __LINE__, lane_mask, csiphy_dev->snps_state); + + if (lane_mask == LANE_MASK_AGGR_MODE) { /* Aggregate mdoe */ +@@ -360,14 +360,17 @@ static int msm_csiphy_snps_lane_config( + clk_mux_reg &= ~0xff; + clk_mux_reg |= csiphy_params->csid_core << 4; + clk_mux_reg |= (uint32_t)csiphy_params->csid_core; +- } else if (lane_mask == LANE_MASK_PHY_A) { /* PHY A */ +- /* 2 lane config */ +- num_lanes = 2; +- mode = TWO_LANE_PHY_A; ++ } else if ((lane_mask | LANE_MASK_PHY_A) == LANE_MASK_PHY_A) { ++ /* phy A config */ ++ if (lane_mask == LANE_MASK_PHY_A) ++ num_lanes = 2; ++ else ++ num_lanes = 1; ++ mode = PHY_A_MODE; + if (csiphy_dev->snps_state == NOT_CONFIGURED) { +- csiphy_dev->snps_state = CONFIGURED_TWO_LANE_PHY_A; ++ csiphy_dev->snps_state = CONFIGURED_PHY_A_MODE; + } else if (csiphy_dev->snps_state == +- CONFIGURED_TWO_LANE_PHY_B) { ++ CONFIGURED_PHY_B_MODE) { + /* 2 lane + 2 lane config */ + csiphy_dev->snps_state = CONFIGURED_COMBO_MODE; + } else { +@@ -379,14 +382,17 @@ static int msm_csiphy_snps_lane_config( + } + clk_mux_reg &= ~0xf; + clk_mux_reg |= (uint32_t)csiphy_params->csid_core; +- } else if (lane_mask == LANE_MASK_PHY_B) { /* PHY B */ +- /* 2 lane config */ +- num_lanes = 2; +- mode = TWO_LANE_PHY_B; ++ } else if ((lane_mask | LANE_MASK_PHY_B) == LANE_MASK_PHY_B) { ++ /* phy B config */ ++ if (lane_mask == LANE_MASK_PHY_B) ++ num_lanes = 2; ++ else ++ num_lanes = 1; ++ mode = PHY_B_MODE; + if (csiphy_dev->snps_state == NOT_CONFIGURED) { +- csiphy_dev->snps_state = CONFIGURED_TWO_LANE_PHY_B; ++ csiphy_dev->snps_state = CONFIGURED_PHY_B_MODE; + } else if (csiphy_dev->snps_state == +- CONFIGURED_TWO_LANE_PHY_A) { ++ CONFIGURED_PHY_A_MODE) { + /* 2 lane + 2 lane config */ + csiphy_dev->snps_state = CONFIGURED_COMBO_MODE; + } else { +@@ -407,9 +413,9 @@ static int msm_csiphy_snps_lane_config( + /* ensure write is done */ + mb(); + +- if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_A) { ++ if (mode == AGGREGATE_MODE || mode == PHY_A_MODE) { + ret = msm_csiphy_snps_2_lane_config(csiphy_dev, +- csiphy_params, TWO_LANE_PHY_A, num_lanes); ++ csiphy_params, PHY_A_MODE, num_lanes); + if (ret < 0) { + pr_err("%s:%d: Error in setting lane configuration\n", + __func__, __LINE__); +@@ -417,9 +423,9 @@ static int msm_csiphy_snps_lane_config( + } + } + +- if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_B) { ++ if (mode == AGGREGATE_MODE || mode == PHY_B_MODE) { + ret = msm_csiphy_snps_2_lane_config(csiphy_dev, +- csiphy_params, TWO_LANE_PHY_B, num_lanes); ++ csiphy_params, PHY_B_MODE, num_lanes); + if (ret < 0) { + pr_err("%s:%d: Error in setting lane configuration\n", + __func__, __LINE__); +@@ -430,10 +436,10 @@ static int msm_csiphy_snps_lane_config( + snps_irq_config(csiphy_dev, csiphy_params); + + value = 0x0; +- if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_A) +- value |= mask_force_mode_A; +- if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_B) +- value |= mask_force_mode_B; ++ if (mode == AGGREGATE_MODE || mode == PHY_A_MODE) ++ value |= (mask_force_mode_A & lane_mask); ++ if (mode == AGGREGATE_MODE || mode == PHY_B_MODE) ++ value |= (mask_force_mode_B & lane_mask); + msm_camera_io_w(value, + csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg. 
+ mipi_csiphy_force_mode.addr); +@@ -559,33 +565,33 @@ static int msm_csiphy_snps_lane_config( + } + + value = 0x0; +- if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_A) +- value |= mask_phy_enable_A; +- if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_B) +- value |= mask_phy_enable_B; ++ if (mode == AGGREGATE_MODE || mode == PHY_A_MODE) ++ value |= (mask_phy_enable_A & lane_mask); ++ if (mode == AGGREGATE_MODE || mode == PHY_B_MODE) ++ value |= (mask_phy_enable_B & lane_mask); + msm_camera_io_w(value, + csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg. + mipi_csiphy_enable.addr); + + value = 0x0; +- if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_A) ++ if (mode == AGGREGATE_MODE || mode == PHY_A_MODE) + value |= mask_base_dir_A; +- if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_B) ++ if (mode == AGGREGATE_MODE || mode == PHY_B_MODE) + value |= mask_base_dir_B; + msm_camera_io_w(value, + csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg. + mipi_csiphy_basedir.addr); + + value = 0x0; +- if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_A) ++ if (mode == AGGREGATE_MODE || mode == PHY_A_MODE) + value |= mask_enable_clk_A; +- if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_B) ++ if (mode == AGGREGATE_MODE || mode == PHY_B_MODE) + value |= mask_enable_clk_B; + msm_camera_io_w(value, + csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg. + mipi_csiphy_enable_clk.addr); + +- if (mode == TWO_LANE_PHY_A) { ++ if (mode == PHY_A_MODE) { + msm_camera_io_w(mask_reset_A, + csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg. + mipi_csiphy_ctrl_1.addr); +@@ -612,7 +618,7 @@ static int msm_csiphy_snps_lane_config( + } + } + +- if (mode == TWO_LANE_PHY_B) { ++ if (mode == PHY_B_MODE) { + msm_camera_io_w(mask_reset_B, + csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg. + mipi_csiphy_ctrl_1.addr); +diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h +index dade2e377038..09571a2e604c 100644 +--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h ++++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h +@@ -197,16 +197,16 @@ enum msm_csiphy_state_t { + + enum snps_csiphy_mode { + AGGREGATE_MODE, +- TWO_LANE_PHY_A, +- TWO_LANE_PHY_B, ++ PHY_A_MODE, ++ PHY_B_MODE, + INVALID_MODE, + }; + + enum snps_csiphy_state { + NOT_CONFIGURED, + CONFIGURED_AGGREGATE_MODE, +- CONFIGURED_TWO_LANE_PHY_A, +- CONFIGURED_TWO_LANE_PHY_B, ++ CONFIGURED_PHY_A_MODE, ++ CONFIGURED_PHY_B_MODE, + CONFIGURED_COMBO_MODE, + }; + +diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_core.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_core.c +index a06a4c6c6339..2c946bb37836 100644 +--- a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_core.c ++++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_core.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -567,19 +567,42 @@ static int32_t cam_cci_set_clk_param(struct cci_device *cci_dev, + struct cam_cci_clk_params_t *clk_params = NULL; + enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master; + enum i2c_freq_mode i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode; +- struct cam_hw_soc_info *soc_info = +- &cci_dev->soc_info; +- void __iomem *base = soc_info->reg_map[0].mem_base; ++ void __iomem *base = cci_dev->soc_info.reg_map[0].mem_base; ++ struct cam_cci_master_info *cci_master = ++ &cci_dev->cci_master_info[master]; + + if ((i2c_freq_mode >= I2C_MAX_MODES) || (i2c_freq_mode < 0)) { + CAM_ERR(CAM_CCI, "invalid i2c_freq_mode = %d", i2c_freq_mode); + return -EINVAL; + } ++ /* ++ * If no change in i2c freq, then acquire semaphore only for the first ++ * i2c transaction to indicate I2C transaction is in progress, else ++ * always try to acquire semaphore, to make sure that no other I2C ++ * transaction is in progress. ++ */ ++ mutex_lock(&cci_master->mutex); ++ if (i2c_freq_mode == cci_dev->i2c_freq_mode[master]) { ++ CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d", master, ++ i2c_freq_mode); ++ spin_lock(&cci_master->freq_cnt_lock); ++ if (cci_master->freq_ref_cnt == 0) ++ down(&cci_master->master_sem); ++ cci_master->freq_ref_cnt++; ++ spin_unlock(&cci_master->freq_cnt_lock); ++ mutex_unlock(&cci_master->mutex); ++ return 0; ++ } ++ CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d", ++ master, cci_dev->i2c_freq_mode[master], i2c_freq_mode); ++ down(&cci_master->master_sem); ++ ++ spin_lock(&cci_master->freq_cnt_lock); ++ cci_master->freq_ref_cnt++; ++ spin_unlock(&cci_master->freq_cnt_lock); + + clk_params = &cci_dev->cci_clk_params[i2c_freq_mode]; + +- if (cci_dev->i2c_freq_mode[master] == i2c_freq_mode) +- return 0; + if (master == MASTER_0) { + cam_io_w_mb(clk_params->hw_thigh << 16 | + clk_params->hw_tlow, +@@ -613,6 +636,7 @@ static int32_t cam_cci_set_clk_param(struct cci_device *cci_dev, + } + cci_dev->i2c_freq_mode[master] = i2c_freq_mode; + ++ mutex_unlock(&cci_master->mutex); + return 0; + } + +@@ -890,41 +914,18 @@ static int32_t cam_cci_burst_read(struct v4l2_subdev *sd, + return -EINVAL; + } + +- soc_info = &cci_dev->soc_info; +- base = soc_info->reg_map[0].mem_base; +- +- mutex_lock(&cci_dev->cci_master_info[master].mutex); +- if (cci_dev->cci_master_info[master].is_first_req == true) { +- cci_dev->cci_master_info[master].is_first_req = false; +- CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d", +- master, cci_dev->i2c_freq_mode[master], +- c_ctrl->cci_info->i2c_freq_mode); +- down(&cci_dev->cci_master_info[master].master_sem); +- } else if (c_ctrl->cci_info->i2c_freq_mode +- != cci_dev->i2c_freq_mode[master]) { +- CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d", +- master, cci_dev->i2c_freq_mode[master], +- c_ctrl->cci_info->i2c_freq_mode); +- down(&cci_dev->cci_master_info[master].master_sem); +- } else { +- CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d", +- master, cci_dev->i2c_freq_mode[master], +- c_ctrl->cci_info->i2c_freq_mode); +- spin_lock(&cci_dev->cci_master_info[master].freq_cnt); +- cci_dev->cci_master_info[master].freq_ref_cnt++; +- spin_unlock(&cci_dev->cci_master_info[master].freq_cnt); +- } +- + /* Set the I2C Frequency */ + rc = cam_cci_set_clk_param(cci_dev, c_ctrl); + if (rc < 0) { + CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc); +- 
mutex_unlock(&cci_dev->cci_master_info[master].mutex); +- goto rel_master; ++ return rc; + } +- mutex_unlock(&cci_dev->cci_master_info[master].mutex); + + mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]); ++ ++ soc_info = &cci_dev->soc_info; ++ base = soc_info->reg_map[0].mem_base; ++ + /* + * Call validate queue to make sure queue is empty before starting. + * If this call fails, don't proceed with i2c_read call. This is to +@@ -1120,13 +1121,11 @@ static int32_t cam_cci_burst_read(struct v4l2_subdev *sd, + + rel_mutex_q: + mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]); +-rel_master: +- spin_lock(&cci_dev->cci_master_info[master].freq_cnt); +- if (cci_dev->cci_master_info[master].freq_ref_cnt == 0) ++ ++ spin_lock(&cci_dev->cci_master_info[master].freq_cnt_lock); ++ if (--cci_dev->cci_master_info[master].freq_ref_cnt == 0) + up(&cci_dev->cci_master_info[master].master_sem); +- else +- cci_dev->cci_master_info[master].freq_ref_cnt--; +- spin_unlock(&cci_dev->cci_master_info[master].freq_cnt); ++ spin_unlock(&cci_dev->cci_master_info[master].freq_cnt_lock); + return rc; + } + +@@ -1151,45 +1150,23 @@ static int32_t cam_cci_read(struct v4l2_subdev *sd, + + if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX + || c_ctrl->cci_info->cci_i2c_master < 0) { +- CAM_ERR(CAM_CCI, "Invalid I2C master addr"); ++ CAM_ERR(CAM_CCI, "Invalid I2C master addr:%d", ++ c_ctrl->cci_info->cci_i2c_master); + return -EINVAL; + } + +- soc_info = &cci_dev->soc_info; +- base = soc_info->reg_map[0].mem_base; +- +- mutex_lock(&cci_dev->cci_master_info[master].mutex); +- if (cci_dev->cci_master_info[master].is_first_req == true) { +- cci_dev->cci_master_info[master].is_first_req = false; +- CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d", +- master, cci_dev->i2c_freq_mode[master], +- c_ctrl->cci_info->i2c_freq_mode); +- down(&cci_dev->cci_master_info[master].master_sem); +- } else if (c_ctrl->cci_info->i2c_freq_mode +- != cci_dev->i2c_freq_mode[master]) { +- CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d", +- master, cci_dev->i2c_freq_mode[master], +- c_ctrl->cci_info->i2c_freq_mode); +- down(&cci_dev->cci_master_info[master].master_sem); +- } else { +- CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d", +- master, cci_dev->i2c_freq_mode[master], +- c_ctrl->cci_info->i2c_freq_mode); +- spin_lock(&cci_dev->cci_master_info[master].freq_cnt); +- cci_dev->cci_master_info[master].freq_ref_cnt++; +- spin_unlock(&cci_dev->cci_master_info[master].freq_cnt); +- } +- + /* Set the I2C Frequency */ + rc = cam_cci_set_clk_param(cci_dev, c_ctrl); + if (rc < 0) { +- mutex_unlock(&cci_dev->cci_master_info[master].mutex); + CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc); +- goto rel_master; ++ return rc; + } +- mutex_unlock(&cci_dev->cci_master_info[master].mutex); + + mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]); ++ ++ soc_info = &cci_dev->soc_info; ++ base = soc_info->reg_map[0].mem_base; ++ + /* + * Call validate queue to make sure queue is empty before starting. + * If this call fails, don't proceed with i2c_read call. 
This is to +@@ -1331,13 +1308,11 @@ static int32_t cam_cci_read(struct v4l2_subdev *sd, + } + rel_mutex_q: + mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]); +-rel_master: +- spin_lock(&cci_dev->cci_master_info[master].freq_cnt); +- if (cci_dev->cci_master_info[master].freq_ref_cnt == 0) ++ ++ spin_lock(&cci_dev->cci_master_info[master].freq_cnt_lock); ++ if (--cci_dev->cci_master_info[master].freq_ref_cnt == 0) + up(&cci_dev->cci_master_info[master].master_sem); +- else +- cci_dev->cci_master_info[master].freq_ref_cnt--; +- spin_unlock(&cci_dev->cci_master_info[master].freq_cnt); ++ spin_unlock(&cci_dev->cci_master_info[master].freq_cnt_lock); + return rc; + } + +@@ -1361,36 +1336,13 @@ static int32_t cam_cci_i2c_write(struct v4l2_subdev *sd, + c_ctrl->cci_info->sid, c_ctrl->cci_info->retries, + c_ctrl->cci_info->id_map); + +- mutex_lock(&cci_dev->cci_master_info[master].mutex); +- if (cci_dev->cci_master_info[master].is_first_req == true) { +- cci_dev->cci_master_info[master].is_first_req = false; +- CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d", +- master, cci_dev->i2c_freq_mode[master], +- c_ctrl->cci_info->i2c_freq_mode); +- down(&cci_dev->cci_master_info[master].master_sem); +- } else if (c_ctrl->cci_info->i2c_freq_mode +- != cci_dev->i2c_freq_mode[master]) { +- CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d", +- master, cci_dev->i2c_freq_mode[master], +- c_ctrl->cci_info->i2c_freq_mode); +- down(&cci_dev->cci_master_info[master].master_sem); +- } else { +- CAM_DBG(CAM_CCI, "Master: %d, curr_freq: %d, req_freq: %d", +- master, cci_dev->i2c_freq_mode[master], +- c_ctrl->cci_info->i2c_freq_mode); +- spin_lock(&cci_dev->cci_master_info[master].freq_cnt); +- cci_dev->cci_master_info[master].freq_ref_cnt++; +- spin_unlock(&cci_dev->cci_master_info[master].freq_cnt); +- } +- + /* Set the I2C Frequency */ + rc = cam_cci_set_clk_param(cci_dev, c_ctrl); + if (rc < 0) { + CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc); +- mutex_unlock(&cci_dev->cci_master_info[master].mutex); +- goto ERROR; ++ return rc; + } +- mutex_unlock(&cci_dev->cci_master_info[master].mutex); ++ + /* + * Call validate queue to make sure queue is empty before starting. + * If this call fails, don't proceed with i2c_write call. 
This is to +@@ -1415,12 +1367,10 @@ static int32_t cam_cci_i2c_write(struct v4l2_subdev *sd, + } + + ERROR: +- spin_lock(&cci_dev->cci_master_info[master].freq_cnt); +- if (cci_dev->cci_master_info[master].freq_ref_cnt == 0) ++ spin_lock(&cci_dev->cci_master_info[master].freq_cnt_lock); ++ if (--cci_dev->cci_master_info[master].freq_ref_cnt == 0) + up(&cci_dev->cci_master_info[master].master_sem); +- else +- cci_dev->cci_master_info[master].freq_ref_cnt--; +- spin_unlock(&cci_dev->cci_master_info[master].freq_cnt); ++ spin_unlock(&cci_dev->cci_master_info[master].freq_cnt_lock); + return rc; + } + +diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_dev.h b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_dev.h +index 3d0382ff5cf7..1fb9beca0634 100644 +--- a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_dev.h ++++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_dev.h +@@ -146,9 +146,8 @@ struct cam_cci_master_info { + struct completion report_q[NUM_QUEUES]; + atomic_t done_pending[NUM_QUEUES]; + spinlock_t lock_q[NUM_QUEUES]; +- spinlock_t freq_cnt; + struct semaphore master_sem; +- bool is_first_req; ++ spinlock_t freq_cnt_lock; + uint16_t freq_ref_cnt; + }; + +diff --git a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_soc.c b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_soc.c +index f66d86ce091e..3e9fd930e544 100644 +--- a/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_soc.c ++++ b/drivers/media/platform/msm/camera_v3/cam_sensor_module/cam_cci/cam_cci_soc.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -199,10 +199,10 @@ static void cam_cci_init_cci_params(struct cci_device *new_cci_dev) + + for (i = 0; i < NUM_MASTERS; i++) { + new_cci_dev->cci_master_info[i].status = 0; +- new_cci_dev->cci_master_info[i].is_first_req = true; ++ new_cci_dev->cci_master_info[i].freq_ref_cnt = 0; + mutex_init(&new_cci_dev->cci_master_info[i].mutex); + sema_init(&new_cci_dev->cci_master_info[i].master_sem, 1); +- spin_lock_init(&new_cci_dev->cci_master_info[i].freq_cnt); ++ spin_lock_init(&new_cci_dev->cci_master_info[i].freq_cnt_lock); + init_completion( + &new_cci_dev->cci_master_info[i].reset_complete); + init_completion( +diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c +index 7f0e3a2f9382..6e105ace353d 100644 +--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c ++++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -23,8 +23,8 @@ + #define MSM_VIDC_MIN_UBWC_COMPRESSION_RATIO (1 << 16) + #define MSM_VIDC_MAX_UBWC_COMPRESSION_RATIO (5 << 16) + +-#define MAX_WIDTH_VALUE 5760 +-#define MAX_HEIGHT_VALUE 2880 ++#define MAX_WIDTH_VALUE 7680 ++#define MAX_HEIGHT_VALUE 3840 + + static inline void msm_dcvs_print_dcvs_stats(struct clock_data *dcvs) + { +@@ -1294,19 +1294,16 @@ int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst) + hier_mode |= msm_comm_g_ctrl_for_id(inst, + V4L2_CID_MPEG_VIDC_VIDEO_HYBRID_HIERP_MODE); + +- /* For higher resolutions enable dual core in low power. */ + if (inst->session_type == MSM_VIDC_ENCODER && + ((inst->prop.width[CAPTURE_PORT] * + inst->prop.height[CAPTURE_PORT]) >= + (MAX_WIDTH_VALUE * MAX_HEIGHT_VALUE)) && +- inst->capability.max_video_cores.max >= VIDC_CORE_ID_3) { +- if (current_inst_lp_load / 2 + +- core0_lp_load <= max_freq && +- current_inst_lp_load / 2 + +- core1_lp_load <= max_freq) { ++ (inst->capability.max_video_cores.max >= VIDC_CORE_ID_3)) { ++ if (current_inst_load / 2 + core0_load <= max_freq && ++ current_inst_load / 2 + core1_load <= max_freq) { + if (inst->clk_data.work_mode == VIDC_WORK_MODE_2) { + inst->clk_data.core_id = VIDC_CORE_ID_3; +- msm_vidc_power_save_mode_enable(inst, true); ++ msm_vidc_power_save_mode_enable(inst, false); + goto decision_done; + } + } +diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc_3x/msm_vidc_res_parse.c +index 1f685eed4a3d..e9c1522950d5 100644 +--- a/drivers/media/platform/msm/vidc_3x/msm_vidc_res_parse.c ++++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_res_parse.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2012-2019, 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -1222,6 +1222,11 @@ int read_platform_resources_from_dt( + res->never_unload_fw = of_property_read_bool(pdev->dev.of_node, + "qcom,never-unload-fw"); + ++ res->is_qos_type_all_cores = of_property_read_bool(pdev->dev.of_node, ++ "qcom,qos-type-all-cores"); ++ dprintk(VIDC_DBG, "QOS type all cores = %s\n", ++ res->is_qos_type_all_cores ? "yes" : "no"); ++ + of_property_read_u32(pdev->dev.of_node, + "qcom,pm-qos-latency-us", &res->pm_qos_latency_us); + +diff --git a/drivers/media/platform/msm/vidc_3x/msm_vidc_resources.h b/drivers/media/platform/msm/vidc_3x/msm_vidc_resources.h +index eb4724253d33..67432f412e5e 100644 +--- a/drivers/media/platform/msm/vidc_3x/msm_vidc_resources.h ++++ b/drivers/media/platform/msm/vidc_3x/msm_vidc_resources.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2013-2018, 2021 The Linux Foundation. All rights reserved. ++/* Copyright (c) 2013-2018, 2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -183,6 +183,7 @@ struct msm_vidc_platform_resources { + bool sw_power_collapsible; + bool sys_idle_indicator; + bool slave_side_cp; ++ bool is_qos_type_all_cores; + struct list_head context_banks; + bool thermal_mitigable; + const char *fw_name; +diff --git a/drivers/media/platform/msm/vidc_3x/venus_hfi.c b/drivers/media/platform/msm/vidc_3x/venus_hfi.c +index 2cb7e315c448..91c046bb2b7d 100644 +--- a/drivers/media/platform/msm/vidc_3x/venus_hfi.c ++++ b/drivers/media/platform/msm/vidc_3x/venus_hfi.c +@@ -1318,6 +1318,9 @@ static int venus_hfi_suspend(void *dev) + return -ENOTSUPP; + } + ++ dprintk(VIDC_DBG, "Suspending Venus\n"); ++ flush_delayed_work(&venus_hfi_pm_work); ++ + mutex_lock(&device->lock); + + if (device->power_enabled) { +@@ -2250,8 +2253,12 @@ static int venus_hfi_core_init(void *device) + + if (dev->res->pm_qos_latency_us) { + #ifdef CONFIG_SMP +- dev->qos.type = PM_QOS_REQ_AFFINE_IRQ; +- dev->qos.irq = dev->hal_data->irq; ++ if (dev->res->is_qos_type_all_cores) { ++ dev->qos.type = PM_QOS_REQ_ALL_CORES; ++ } else { ++ dev->qos.type = PM_QOS_REQ_AFFINE_IRQ; ++ dev->qos.irq = dev->hal_data->irq; ++ } + #endif + pm_qos_add_request(&dev->qos, PM_QOS_CPU_DMA_LATENCY, + dev->res->pm_qos_latency_us); +@@ -4425,8 +4432,12 @@ static inline int __resume(struct venus_hfi_device *device) + + if (device->res->pm_qos_latency_us) { + #ifdef CONFIG_SMP +- device->qos.type = PM_QOS_REQ_AFFINE_IRQ; +- device->qos.irq = device->hal_data->irq; ++ if (device->res->is_qos_type_all_cores) { ++ device->qos.type = PM_QOS_REQ_ALL_CORES; ++ } else { ++ device->qos.type = PM_QOS_REQ_AFFINE_IRQ; ++ device->qos.irq = device->hal_data->irq; ++ } + #endif + pm_qos_add_request(&device->qos, PM_QOS_CPU_DMA_LATENCY, + device->res->pm_qos_latency_us); +diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c +index ce651d3ca1b8..a56863b09014 100644 +--- a/drivers/media/platform/omap3isp/isp.c ++++ b/drivers/media/platform/omap3isp/isp.c +@@ -2273,8 +2273,10 @@ static int isp_probe(struct platform_device *pdev) + mem = platform_get_resource(pdev, IORESOURCE_MEM, i); + isp->mmio_base[map_idx] = + devm_ioremap_resource(isp->dev, mem); +- if (IS_ERR(isp->mmio_base[map_idx])) +- return PTR_ERR(isp->mmio_base[map_idx]); ++ if (IS_ERR(isp->mmio_base[map_idx])) { ++ ret = PTR_ERR(isp->mmio_base[map_idx]); ++ goto error; ++ } + } + + ret = isp_get_clocks(isp); +diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c +index e981eb2330f1..ac005ae4d21b 100644 +--- a/drivers/media/platform/omap3isp/isppreview.c ++++ b/drivers/media/platform/omap3isp/isppreview.c +@@ -2290,7 +2290,7 @@ static int preview_init_entities(struct isp_prev_device *prev) + me->ops = &preview_media_ops; + ret = media_entity_pads_init(me, PREV_PADS_NUM, pads); + if (ret < 0) +- return ret; ++ goto error_handler_free; + + preview_init_formats(sd, NULL); + +@@ -2323,6 +2323,8 @@ static int preview_init_entities(struct isp_prev_device *prev) + omap3isp_video_cleanup(&prev->video_in); + error_video_in: + media_entity_cleanup(&prev->subdev.entity); ++error_handler_free: ++ v4l2_ctrl_handler_free(&prev->ctrls); + return ret; + } + +diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c +index 3fab9f776afa..425eda460e01 100644 +--- a/drivers/media/platform/pxa_camera.c ++++ 
b/drivers/media/platform/pxa_camera.c +@@ -1420,6 +1420,9 @@ static int pxac_vb2_prepare(struct vb2_buffer *vb) + struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue); + struct pxa_buffer *buf = vb2_to_pxa_buffer(vb); + int ret = 0; ++#ifdef DEBUG ++ int i; ++#endif + + switch (pcdev->channels) { + case 1: +diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c +index f3a3f31cdfa9..5b5722e65e9b 100644 +--- a/drivers/media/platform/rcar-fcp.c ++++ b/drivers/media/platform/rcar-fcp.c +@@ -12,6 +12,7 @@ + */ + + #include ++#include + #include + #include + #include +@@ -24,6 +25,7 @@ + struct rcar_fcp_device { + struct list_head list; + struct device *dev; ++ struct device_dma_parameters dma_parms; + }; + + static LIST_HEAD(fcp_devices); +@@ -105,8 +107,10 @@ int rcar_fcp_enable(struct rcar_fcp_device *fcp) + return 0; + + ret = pm_runtime_get_sync(fcp->dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put_noidle(fcp->dev); + return ret; ++ } + + return 0; + } +@@ -140,6 +144,9 @@ static int rcar_fcp_probe(struct platform_device *pdev) + + fcp->dev = &pdev->dev; + ++ fcp->dev->dma_parms = &fcp->dma_parms; ++ dma_set_max_seg_size(fcp->dev, DMA_BIT_MASK(32)); ++ + pm_runtime_enable(&pdev->dev); + + mutex_lock(&fcp_lock); +diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c +index ec4001970313..560e1ff23650 100644 +--- a/drivers/media/platform/s3c-camif/camif-core.c ++++ b/drivers/media/platform/s3c-camif/camif-core.c +@@ -476,7 +476,7 @@ static int s3c_camif_probe(struct platform_device *pdev) + + ret = camif_media_dev_init(camif); + if (ret < 0) +- goto err_alloc; ++ goto err_pm; + + ret = camif_register_sensor(camif); + if (ret < 0) +@@ -510,10 +510,9 @@ static int s3c_camif_probe(struct platform_device *pdev) + media_device_unregister(&camif->media_dev); + media_device_cleanup(&camif->media_dev); + camif_unregister_media_entities(camif); +-err_alloc: ++err_pm: + pm_runtime_put(dev); + pm_runtime_disable(dev); +-err_pm: + camif_clk_put(camif); + err_clk: + s3c_camif_unregister_subdev(camif); +diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c +index 5f6ccf492111..8f083f28dcf3 100644 +--- a/drivers/media/platform/s5p-g2d/g2d.c ++++ b/drivers/media/platform/s5p-g2d/g2d.c +@@ -283,6 +283,9 @@ static int g2d_release(struct file *file) + struct g2d_dev *dev = video_drvdata(file); + struct g2d_ctx *ctx = fh2ctx(file->private_data); + ++ mutex_lock(&dev->mutex); ++ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); ++ mutex_unlock(&dev->mutex); + v4l2_ctrl_handler_free(&ctx->ctrl_handler); + v4l2_fh_del(&ctx->fh); + v4l2_fh_exit(&ctx->fh); +diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c +index d88c9ba401b5..bec4278401b2 100644 +--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c ++++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c +@@ -1366,7 +1366,7 @@ static int bdisp_probe(struct platform_device *pdev) + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "failed to set PM\n"); +- goto err_dbg; ++ goto err_pm; + } + + /* Filters */ +@@ -1394,7 +1394,6 @@ static int bdisp_probe(struct platform_device *pdev) + bdisp_hw_free_filters(bdisp->dev); + err_pm: + pm_runtime_put(dev); +-err_dbg: + bdisp_debugfs_remove(bdisp); + err_v4l2: + v4l2_device_unregister(&bdisp->v4l2_dev); +diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c +index cf2a8d884536..1653892da9a5 100644 
+--- a/drivers/media/platform/sti/hva/hva-hw.c ++++ b/drivers/media/platform/sti/hva/hva-hw.c +@@ -127,8 +127,7 @@ static irqreturn_t hva_hw_its_irq_thread(int irq, void *arg) + ctx_id = (hva->sts_reg & 0xFF00) >> 8; + if (ctx_id >= HVA_MAX_INSTANCES) { + dev_err(dev, "%s %s: bad context identifier: %d\n", +- ctx->name, __func__, ctx_id); +- ctx->hw_err = true; ++ HVA_PREFIX, __func__, ctx_id); + goto out; + } + +@@ -389,7 +388,7 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva) + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "%s failed to set PM\n", HVA_PREFIX); +- goto err_clk; ++ goto err_pm; + } + + /* check IP hardware version */ +diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c +index 563b9636ab63..803e0794ca13 100644 +--- a/drivers/media/platform/ti-vpe/cal.c ++++ b/drivers/media/platform/ti-vpe/cal.c +@@ -690,12 +690,13 @@ static void pix_proc_config(struct cal_ctx *ctx) + } + + static void cal_wr_dma_config(struct cal_ctx *ctx, +- unsigned int width) ++ unsigned int width, unsigned int height) + { + u32 val; + + val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port)); + set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK); ++ set_field(&val, height, CAL_WR_DMA_CTRL_YSIZE_MASK); + set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT, + CAL_WR_DMA_CTRL_DTAG_MASK); + set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST, +@@ -1321,7 +1322,8 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count) + csi2_lane_config(ctx); + csi2_ctx_config(ctx); + pix_proc_config(ctx); +- cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline); ++ cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline, ++ ctx->v_fmt.fmt.pix.height); + cal_wr_dma_addr(ctx, addr); + csi2_ppi_enable(ctx); + +diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c +index dbb4829acc43..360a2ad14ce4 100644 +--- a/drivers/media/platform/ti-vpe/vpe.c ++++ b/drivers/media/platform/ti-vpe/vpe.c +@@ -2133,6 +2133,8 @@ static int vpe_runtime_get(struct platform_device *pdev) + + r = pm_runtime_get_sync(&pdev->dev); + WARN_ON(r < 0); ++ if (r) ++ pm_runtime_put_noidle(&pdev->dev); + return r < 0 ? r : 0; + } + +diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c +index 8fed2fbe91a9..f78caf72cb3e 100644 +--- a/drivers/media/platform/vivid/vivid-vid-out.c ++++ b/drivers/media/platform/vivid/vivid-vid-out.c +@@ -1003,7 +1003,7 @@ int vivid_vid_out_s_fbuf(struct file *file, void *fh, + return -EINVAL; + } + dev->fbuf_out_flags &= ~(chroma_flags | alpha_flags); +- dev->fbuf_out_flags = a->flags & (chroma_flags | alpha_flags); ++ dev->fbuf_out_flags |= a->flags & (chroma_flags | alpha_flags); + return 0; + } + +diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c +index 4ac1ff482a0b..8d50a9a9f73d 100644 +--- a/drivers/media/platform/vsp1/vsp1_drv.c ++++ b/drivers/media/platform/vsp1/vsp1_drv.c +@@ -487,7 +487,12 @@ int vsp1_device_get(struct vsp1_device *vsp1) + int ret; + + ret = pm_runtime_get_sync(vsp1->dev); +- return ret < 0 ? ret : 0; ++ if (ret < 0) { ++ pm_runtime_put_noidle(vsp1->dev); ++ return ret; ++ } ++ ++ return 0; + } + + /* +@@ -727,12 +732,12 @@ static int vsp1_probe(struct platform_device *pdev) + /* Configure device parameters based on the version register. 
*/ + pm_runtime_enable(&pdev->dev); + +- ret = pm_runtime_get_sync(&pdev->dev); ++ ret = vsp1_device_get(vsp1); + if (ret < 0) + goto done; + + vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION); +- pm_runtime_put_sync(&pdev->dev); ++ vsp1_device_put(vsp1); + + for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) { + if ((vsp1->version & VI6_IP_VERSION_MODEL_MASK) == +@@ -759,8 +764,10 @@ static int vsp1_probe(struct platform_device *pdev) + } + + done: +- if (ret) ++ if (ret) { + pm_runtime_disable(&pdev->dev); ++ rcar_fcp_put(vsp1->fcp); ++ } + + return ret; + } +diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c +index 9f5b59706741..7f98db4bc027 100644 +--- a/drivers/media/rc/ati_remote.c ++++ b/drivers/media/rc/ati_remote.c +@@ -850,6 +850,10 @@ static int ati_remote_probe(struct usb_interface *interface, + err("%s: endpoint_in message size==0? \n", __func__); + return -ENODEV; + } ++ if (!usb_endpoint_is_int_out(endpoint_out)) { ++ err("%s: Unexpected endpoint_out\n", __func__); ++ return -ENODEV; ++ } + + ati_remote = kzalloc(sizeof (struct ati_remote), GFP_KERNEL); + rc_dev = rc_allocate_device(); +diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c +index 63165d324fff..7d3e50d94d86 100644 +--- a/drivers/media/rc/ite-cir.c ++++ b/drivers/media/rc/ite-cir.c +@@ -292,8 +292,14 @@ static irqreturn_t ite_cir_isr(int irq, void *data) + /* read the interrupt flags */ + iflags = dev->params.get_irq_causes(dev); + ++ /* Check for RX overflow */ ++ if (iflags & ITE_IRQ_RX_FIFO_OVERRUN) { ++ dev_warn(&dev->rdev->dev, "receive overflow\n"); ++ ir_raw_event_reset(dev->rdev); ++ } ++ + /* check for the receive interrupt */ +- if (iflags & (ITE_IRQ_RX_FIFO | ITE_IRQ_RX_FIFO_OVERRUN)) { ++ if (iflags & ITE_IRQ_RX_FIFO) { + /* read the FIFO bytes */ + rx_bytes = + dev->params.get_rx_bytes(dev, rx_buf, +diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c +index eaadc081760a..3256bf46a376 100644 +--- a/drivers/media/rc/sunxi-cir.c ++++ b/drivers/media/rc/sunxi-cir.c +@@ -132,6 +132,8 @@ static irqreturn_t sunxi_ir_irq(int irqno, void *dev_id) + } else if (status & REG_RXINT_RPEI_EN) { + ir_raw_event_set_idle(ir->rc, true); + ir_raw_event_handle(ir->rc); ++ } else { ++ ir_raw_event_handle(ir->rc); + } + + spin_unlock(&ir->ir_lock); +diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c +index 9f3e0fd4cad9..d4443f9c9fa3 100644 +--- a/drivers/media/tuners/m88rs6000t.c ++++ b/drivers/media/tuners/m88rs6000t.c +@@ -534,7 +534,7 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength) + PGA2_cri = PGA2_GC >> 2; + PGA2_crf = PGA2_GC & 0x03; + +- for (i = 0; i <= RF_GC; i++) ++ for (i = 0; i <= RF_GC && i < ARRAY_SIZE(RFGS); i++) + RFG += RFGS[i]; + + if (RF_GC == 0) +@@ -546,12 +546,12 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength) + if (RF_GC == 3) + RFG += 100; + +- for (i = 0; i <= IF_GC; i++) ++ for (i = 0; i <= IF_GC && i < ARRAY_SIZE(IFGS); i++) + IFG += IFGS[i]; + + TIAG = TIA_GC * TIA_GS; + +- for (i = 0; i <= BB_GC; i++) ++ for (i = 0; i <= BB_GC && i < ARRAY_SIZE(BBGS); i++) + BBG += BBGS[i]; + + PGA2G = PGA2_cri * PGA2_cri_GS + PGA2_crf * PGA2_crf_GS; +diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c +index 9af2a155cfca..416d1eeb9c02 100644 +--- a/drivers/media/tuners/qm1d1c0042.c ++++ b/drivers/media/tuners/qm1d1c0042.c +@@ -352,8 +352,10 @@ static int qm1d1c0042_init(struct dvb_frontend *fe) + if (val == 
reg_initval[reg_index][0x00]) + break; + } +- if (reg_index >= QM1D1C0042_NUM_REG_ROWS) ++ if (reg_index >= QM1D1C0042_NUM_REG_ROWS) { ++ ret = -EINVAL; + goto failed; ++ } + memcpy(state->regs, reg_initval[reg_index], QM1D1C0042_NUM_REGS); + usleep_range(2000, 3000); + +diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c +index 57b250847cd3..72a47da0db2a 100644 +--- a/drivers/media/tuners/si2157.c ++++ b/drivers/media/tuners/si2157.c +@@ -84,24 +84,23 @@ static int si2157_init(struct dvb_frontend *fe) + struct si2157_cmd cmd; + const struct firmware *fw; + const char *fw_name; +- unsigned int uitmp, chip_id; ++ unsigned int chip_id, xtal_trim; + + dev_dbg(&client->dev, "\n"); + +- /* Returned IF frequency is garbage when firmware is not running */ +- memcpy(cmd.args, "\x15\x00\x06\x07", 4); ++ /* Try to get Xtal trim property, to verify tuner still running */ ++ memcpy(cmd.args, "\x15\x00\x04\x02", 4); + cmd.wlen = 4; + cmd.rlen = 4; + ret = si2157_cmd_execute(client, &cmd); +- if (ret) +- goto err; + +- uitmp = cmd.args[2] << 0 | cmd.args[3] << 8; +- dev_dbg(&client->dev, "if_frequency kHz=%u\n", uitmp); ++ xtal_trim = cmd.args[2] | (cmd.args[3] << 8); + +- if (uitmp == dev->if_frequency / 1000) ++ if (ret == 0 && xtal_trim < 16) + goto warm; + ++ dev->if_frequency = 0; /* we no longer know current tuner state */ ++ + /* power up */ + if (dev->chiptype == SI2157_CHIPTYPE_SI2146) { + memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9); +diff --git a/drivers/media/usb/cpia2/cpia2.h b/drivers/media/usb/cpia2/cpia2.h +index cdef677d57ec..80a7af6482ae 100644 +--- a/drivers/media/usb/cpia2/cpia2.h ++++ b/drivers/media/usb/cpia2/cpia2.h +@@ -442,6 +442,7 @@ int cpia2_send_command(struct camera_data *cam, struct cpia2_command *cmd); + int cpia2_do_command(struct camera_data *cam, + unsigned int command, + unsigned char direction, unsigned char param); ++void cpia2_deinit_camera_struct(struct camera_data *cam, struct usb_interface *intf); + struct camera_data *cpia2_init_camera_struct(struct usb_interface *intf); + int cpia2_init_camera(struct camera_data *cam); + int cpia2_allocate_buffers(struct camera_data *cam); +diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c +index 0310fd6ed103..828f9689f4a1 100644 +--- a/drivers/media/usb/cpia2/cpia2_core.c ++++ b/drivers/media/usb/cpia2/cpia2_core.c +@@ -2158,6 +2158,18 @@ static void reset_camera_struct(struct camera_data *cam) + cam->height = cam->params.roi.height; + } + ++/****************************************************************************** ++ * ++ * cpia2_init_camera_struct ++ * ++ * Deinitialize camera struct ++ *****************************************************************************/ ++void cpia2_deinit_camera_struct(struct camera_data *cam, struct usb_interface *intf) ++{ ++ v4l2_device_unregister(&cam->v4l2_dev); ++ kfree(cam); ++} ++ + /****************************************************************************** + * + * cpia2_init_camera_struct +diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c +index 30e27844e0e9..4f4a130f17af 100644 +--- a/drivers/media/usb/cpia2/cpia2_usb.c ++++ b/drivers/media/usb/cpia2/cpia2_usb.c +@@ -860,15 +860,13 @@ static int cpia2_usb_probe(struct usb_interface *intf, + ret = set_alternate(cam, USBIF_CMDONLY); + if (ret < 0) { + ERR("%s: usb_set_interface error (ret = %d)\n", __func__, ret); +- kfree(cam); +- return ret; ++ goto alt_err; + } + + + if((ret = cpia2_init_camera(cam)) < 0) { + 
ERR("%s: failed to initialize cpia2 camera (ret = %d)\n", __func__, ret); +- kfree(cam); +- return ret; ++ goto alt_err; + } + LOG(" CPiA Version: %d.%02d (%d.%d)\n", + cam->params.version.firmware_revision_hi, +@@ -888,11 +886,14 @@ static int cpia2_usb_probe(struct usb_interface *intf, + ret = cpia2_register_camera(cam); + if (ret < 0) { + ERR("%s: Failed to register cpia2 camera (ret = %d)\n", __func__, ret); +- kfree(cam); +- return ret; ++ goto alt_err; + } + + return 0; ++ ++alt_err: ++ cpia2_deinit_camera_struct(cam, intf); ++ return ret; + } + + /****************************************************************************** +diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c +index 5c4aa247d650..ca4ed2af5320 100644 +--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c ++++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c +@@ -446,7 +446,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap) + ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe); + + if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK) +- lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa), ++ lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa); + + lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + +diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +index c583c638e468..328a447ce972 100644 +--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c ++++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +@@ -50,7 +50,16 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req) + } else { + /* read */ + requesttype = (USB_TYPE_VENDOR | USB_DIR_IN); +- pipe = usb_rcvctrlpipe(d->udev, 0); ++ ++ /* ++ * Zero-length transfers must use usb_sndctrlpipe() and ++ * rtl28xxu_identify_state() uses a zero-length i2c read ++ * command to determine the chip type. 
++ */ ++ if (req->size) ++ pipe = usb_rcvctrlpipe(d->udev, 0); ++ else ++ pipe = usb_sndctrlpipe(d->udev, 0); + } + + ret = usb_control_msg(d->udev, pipe, 0, requesttype, req->value, +diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c +index 2b7a1b569db0..184a9b827d56 100644 +--- a/drivers/media/usb/dvb-usb/cxusb.c ++++ b/drivers/media/usb/dvb-usb/cxusb.c +@@ -1791,7 +1791,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = { + + .size_of_priv = sizeof(struct cxusb_state), + +- .num_adapters = 2, ++ .num_adapters = 1, + .adapter = { + { + .num_frontends = 1, +diff --git a/drivers/media/usb/dvb-usb/dibusb-mb.c b/drivers/media/usb/dvb-usb/dibusb-mb.c +index a0057641cc86..c55180912c3a 100644 +--- a/drivers/media/usb/dvb-usb/dibusb-mb.c ++++ b/drivers/media/usb/dvb-usb/dibusb-mb.c +@@ -84,7 +84,7 @@ static int dibusb_tuner_probe_and_attach(struct dvb_usb_adapter *adap) + + if (i2c_transfer(&adap->dev->i2c_adap, msg, 2) != 2) { + err("tuner i2c write failed."); +- ret = -EREMOTEIO; ++ return -EREMOTEIO; + } + + if (adap->fe_adap[0].fe->ops.i2c_gate_ctrl) +diff --git a/drivers/media/usb/dvb-usb/dtv5100.c b/drivers/media/usb/dvb-usb/dtv5100.c +index c60fb54f445f..66f34988fdf0 100644 +--- a/drivers/media/usb/dvb-usb/dtv5100.c ++++ b/drivers/media/usb/dvb-usb/dtv5100.c +@@ -39,6 +39,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr, + u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) + { + struct dtv5100_state *st = d->priv; ++ unsigned int pipe; + u8 request; + u8 type; + u16 value; +@@ -47,6 +48,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr, + switch (wlen) { + case 1: + /* write { reg }, read { value } */ ++ pipe = usb_rcvctrlpipe(d->udev, 0); + request = (addr == DTV5100_DEMOD_ADDR ? DTV5100_DEMOD_READ : + DTV5100_TUNER_READ); + type = USB_TYPE_VENDOR | USB_DIR_IN; +@@ -54,6 +56,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr, + break; + case 2: + /* write { reg, value } */ ++ pipe = usb_sndctrlpipe(d->udev, 0); + request = (addr == DTV5100_DEMOD_ADDR ? DTV5100_DEMOD_WRITE : + DTV5100_TUNER_WRITE); + type = USB_TYPE_VENDOR | USB_DIR_OUT; +@@ -67,7 +70,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr, + + memcpy(st->data, rbuf, rlen); + msleep(1); /* avoid I2C errors */ +- return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), request, ++ return usb_control_msg(d->udev, pipe, request, + type, value, index, st->data, rlen, + DTV5100_USB_TIMEOUT); + } +@@ -154,7 +157,7 @@ static int dtv5100_probe(struct usb_interface *intf, + + /* initialize non qt1010/zl10353 part? 
*/ + for (i = 0; dtv5100_init[i].request; i++) { +- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), ++ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + dtv5100_init[i].request, + USB_TYPE_VENDOR | USB_DIR_OUT, + dtv5100_init[i].value, +diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c +index b3413404f91a..690c1e06fbfa 100644 +--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c ++++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c +@@ -82,11 +82,17 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs) + } + } + +- if ((ret = dvb_usb_adapter_stream_init(adap)) || +- (ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs)) || +- (ret = dvb_usb_adapter_frontend_init(adap))) { ++ ret = dvb_usb_adapter_stream_init(adap); ++ if (ret) + return ret; +- } ++ ++ ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs); ++ if (ret) ++ goto dvb_init_err; ++ ++ ret = dvb_usb_adapter_frontend_init(adap); ++ if (ret) ++ goto frontend_init_err; + + /* use exclusive FE lock if there is multiple shared FEs */ + if (adap->fe_adap[1].fe) +@@ -106,6 +112,12 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs) + } + + return 0; ++ ++frontend_init_err: ++ dvb_usb_adapter_dvb_exit(adap); ++dvb_init_err: ++ dvb_usb_adapter_stream_exit(adap); ++ return ret; + } + + static int dvb_usb_adapter_exit(struct dvb_usb_device *d) +diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h +index 107255b08b2b..704d57e3ea1c 100644 +--- a/drivers/media/usb/dvb-usb/dvb-usb.h ++++ b/drivers/media/usb/dvb-usb/dvb-usb.h +@@ -471,7 +471,8 @@ extern int dvb_usb_generic_rw(struct dvb_usb_device *, u8 *, u16, u8 *, u16,int) + extern int dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16); + + /* commonly used remote control parsing */ +-extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[], u32 *, int *); ++int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *d, u8 keybuf[5], ++ u32 *event, int *state); + + /* commonly used firmware download types and function */ + struct hexline { +diff --git a/drivers/media/usb/dvb-usb/friio-fe.c b/drivers/media/usb/dvb-usb/friio-fe.c +index 979f05b4b87c..237f12f9a7f2 100644 +--- a/drivers/media/usb/dvb-usb/friio-fe.c ++++ b/drivers/media/usb/dvb-usb/friio-fe.c +@@ -261,28 +261,6 @@ static int jdvbt90502_read_signal_strength(struct dvb_frontend *fe, + return 0; + } + +- +-/* filter out un-supported properties to notify users */ +-static int jdvbt90502_set_property(struct dvb_frontend *fe, +- struct dtv_property *tvp) +-{ +- int r = 0; +- +- switch (tvp->cmd) { +- case DTV_DELIVERY_SYSTEM: +- if (tvp->u.data != SYS_ISDBT) +- r = -EINVAL; +- break; +- case DTV_CLEAR: +- case DTV_TUNE: +- case DTV_FREQUENCY: +- break; +- default: +- r = -EINVAL; +- } +- return r; +-} +- + static int jdvbt90502_set_frontend(struct dvb_frontend *fe) + { + struct dtv_frontend_properties *p = &fe->dtv_property_cache; +@@ -457,8 +435,6 @@ static struct dvb_frontend_ops jdvbt90502_ops = { + .init = jdvbt90502_init, + .write = _jdvbt90502_write, + +- .set_property = jdvbt90502_set_property, +- + .set_frontend = jdvbt90502_set_frontend, + + .read_status = jdvbt90502_read_status, +diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c +index 993bb7a72985..170a177653c9 100644 +--- a/drivers/media/usb/dvb-usb/gp8psk.c ++++ b/drivers/media/usb/dvb-usb/gp8psk.c +@@ -186,7 +186,7 @@ static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d) + + static int 
gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff) + { +- u8 status, buf; ++ u8 status = 0, buf; + int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct); + + if (onoff) { +diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c +index b0aea48907b7..7e259be47252 100644 +--- a/drivers/media/usb/em28xx/em28xx-dvb.c ++++ b/drivers/media/usb/em28xx/em28xx-dvb.c +@@ -1967,6 +1967,7 @@ static int em28xx_dvb_init(struct em28xx *dev) + return result; + + out_free: ++ em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE); + kfree(dvb); + dev->dvb = NULL; + goto ret; +diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c +index ed9bcaf08d5e..ddfaabd4c081 100644 +--- a/drivers/media/usb/go7007/go7007-usb.c ++++ b/drivers/media/usb/go7007/go7007-usb.c +@@ -1052,6 +1052,7 @@ static int go7007_usb_probe(struct usb_interface *intf, + struct go7007_usb *usb; + const struct go7007_usb_board *board; + struct usb_device *usbdev = interface_to_usbdev(intf); ++ struct usb_host_endpoint *ep; + unsigned num_i2c_devs; + char *name; + int video_pipe, i, v_urb_len; +@@ -1147,7 +1148,8 @@ static int go7007_usb_probe(struct usb_interface *intf, + if (usb->intr_urb->transfer_buffer == NULL) + goto allocfail; + +- if (go->board_id == GO7007_BOARDID_SENSORAY_2250) ++ ep = usb->usbdev->ep_in[4]; ++ if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK) + usb_fill_bulk_urb(usb->intr_urb, usb->usbdev, + usb_rcvbulkpipe(usb->usbdev, 4), + usb->intr_urb->transfer_buffer, 2*sizeof(u16), +diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c +index 070871fb1fc4..6e1a85ff3fff 100644 +--- a/drivers/media/usb/go7007/snd-go7007.c ++++ b/drivers/media/usb/go7007/snd-go7007.c +@@ -243,22 +243,18 @@ int go7007_snd_init(struct go7007 *go) + gosnd->capturing = 0; + ret = snd_card_new(go->dev, index[dev], id[dev], THIS_MODULE, 0, + &gosnd->card); +- if (ret < 0) { +- kfree(gosnd); +- return ret; +- } ++ if (ret < 0) ++ goto free_snd; ++ + ret = snd_device_new(gosnd->card, SNDRV_DEV_LOWLEVEL, go, + &go7007_snd_device_ops); +- if (ret < 0) { +- kfree(gosnd); +- return ret; +- } ++ if (ret < 0) ++ goto free_card; ++ + ret = snd_pcm_new(gosnd->card, "go7007", 0, 0, 1, &gosnd->pcm); +- if (ret < 0) { +- snd_card_free(gosnd->card); +- kfree(gosnd); +- return ret; +- } ++ if (ret < 0) ++ goto free_card; ++ + strlcpy(gosnd->card->driver, "go7007", sizeof(gosnd->card->driver)); + strlcpy(gosnd->card->shortname, go->name, sizeof(gosnd->card->driver)); + strlcpy(gosnd->card->longname, gosnd->card->shortname, +@@ -269,11 +265,8 @@ int go7007_snd_init(struct go7007 *go) + &go7007_snd_capture_ops); + + ret = snd_card_register(gosnd->card); +- if (ret < 0) { +- snd_card_free(gosnd->card); +- kfree(gosnd); +- return ret; +- } ++ if (ret < 0) ++ goto free_card; + + gosnd->substream = NULL; + go->snd_context = gosnd; +@@ -281,6 +274,12 @@ int go7007_snd_init(struct go7007 *go) + ++dev; + + return 0; ++ ++free_card: ++ snd_card_free(gosnd->card); ++free_snd: ++ kfree(gosnd); ++ return ret; + } + EXPORT_SYMBOL(go7007_snd_init); + +diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c +index 2cba2e1acdc6..79f1e8904e30 100644 +--- a/drivers/media/usb/gspca/gspca.c ++++ b/drivers/media/usb/gspca/gspca.c +@@ -2145,6 +2145,9 @@ int gspca_dev_probe2(struct usb_interface *intf, + input_unregister_device(gspca_dev->input_dev); + #endif + v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler); ++ 
v4l2_device_unregister(&gspca_dev->v4l2_dev); ++ if (sd_desc->probe_error) ++ sd_desc->probe_error(gspca_dev); + kfree(gspca_dev->usb_buf); + kfree(gspca_dev); + return ret; +diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h +index d39adf90303b..bec8fccc2c94 100644 +--- a/drivers/media/usb/gspca/gspca.h ++++ b/drivers/media/usb/gspca/gspca.h +@@ -101,6 +101,7 @@ struct sd_desc { + cam_cf_op config; /* called on probe */ + cam_op init; /* called on probe and resume */ + cam_op init_controls; /* called on probe */ ++ cam_v_op probe_error; /* called if probe failed, do cleanup here */ + cam_op start; /* called on stream on after URBs creation */ + cam_pkt_op pkt_scan; + /* optional operations */ +diff --git a/drivers/media/usb/gspca/m5602/m5602_po1030.c b/drivers/media/usb/gspca/m5602/m5602_po1030.c +index a0a90dd34ca8..a098aeb290c3 100644 +--- a/drivers/media/usb/gspca/m5602/m5602_po1030.c ++++ b/drivers/media/usb/gspca/m5602/m5602_po1030.c +@@ -159,6 +159,7 @@ static const struct v4l2_ctrl_config po1030_greenbal_cfg = { + int po1030_probe(struct sd *sd) + { + u8 dev_id_h = 0, i; ++ int err; + struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; + + if (force_sensor) { +@@ -177,10 +178,13 @@ int po1030_probe(struct sd *sd) + for (i = 0; i < ARRAY_SIZE(preinit_po1030); i++) { + u8 data = preinit_po1030[i][2]; + if (preinit_po1030[i][0] == SENSOR) +- m5602_write_sensor(sd, +- preinit_po1030[i][1], &data, 1); ++ err = m5602_write_sensor(sd, preinit_po1030[i][1], ++ &data, 1); + else +- m5602_write_bridge(sd, preinit_po1030[i][1], data); ++ err = m5602_write_bridge(sd, preinit_po1030[i][1], ++ data); ++ if (err < 0) ++ return err; + } + + if (m5602_read_sensor(sd, PO1030_DEVID_H, &dev_id_h, 1)) +diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c +index a7ae0ec9fa91..efb5e553b772 100644 +--- a/drivers/media/usb/gspca/sq905.c ++++ b/drivers/media/usb/gspca/sq905.c +@@ -130,7 +130,7 @@ static int sq905_command(struct gspca_dev *gspca_dev, u16 index) + } + + ret = usb_control_msg(gspca_dev->dev, +- usb_sndctrlpipe(gspca_dev->dev, 0), ++ usb_rcvctrlpipe(gspca_dev->dev, 0), + USB_REQ_SYNCH_FRAME, /* request */ + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + SQ905_PING, 0, gspca_dev->usb_buf, 1, +@@ -172,7 +172,7 @@ static int + sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock) + { + int ret; +- int act_len; ++ int act_len = 0; + + gspca_dev->usb_buf[0] = '\0'; + if (need_lock) +diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c +index 7d255529ed4c..40d4c99debb8 100644 +--- a/drivers/media/usb/gspca/stv06xx/stv06xx.c ++++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c +@@ -541,12 +541,21 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, + static int stv06xx_config(struct gspca_dev *gspca_dev, + const struct usb_device_id *id); + ++static void stv06xx_probe_error(struct gspca_dev *gspca_dev) ++{ ++ struct sd *sd = (struct sd *)gspca_dev; ++ ++ kfree(sd->sensor_priv); ++ sd->sensor_priv = NULL; ++} ++ + /* sub-driver description */ + static const struct sd_desc sd_desc = { + .name = MODULE_NAME, + .config = stv06xx_config, + .init = stv06xx_init, + .init_controls = stv06xx_init_controls, ++ .probe_error = stv06xx_probe_error, + .start = stv06xx_start, + .stopN = stv06xx_stopN, + .pkt_scan = stv06xx_pkt_scan, +diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c +index cc3e1478c5a0..949915734d57 100644 +--- 
a/drivers/media/usb/gspca/sunplus.c ++++ b/drivers/media/usb/gspca/sunplus.c +@@ -255,6 +255,10 @@ static void reg_r(struct gspca_dev *gspca_dev, + PERR("reg_r: buffer overflow\n"); + return; + } ++ if (len == 0) { ++ PERR("reg_r: zero-length read\n"); ++ return; ++ } + if (gspca_dev->usb_err < 0) + return; + ret = usb_control_msg(gspca_dev->dev, +@@ -263,7 +267,7 @@ static void reg_r(struct gspca_dev *gspca_dev, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0, /* value */ + index, +- len ? gspca_dev->usb_buf : NULL, len, ++ gspca_dev->usb_buf, len, + 500); + if (ret < 0) { + pr_err("reg_r err %d\n", ret); +@@ -739,7 +743,7 @@ static int sd_start(struct gspca_dev *gspca_dev) + case MegaImageVI: + reg_w_riv(gspca_dev, 0xf0, 0, 0); + spca504B_WaitCmdStatus(gspca_dev); +- reg_r(gspca_dev, 0xf0, 4, 0); ++ reg_w_riv(gspca_dev, 0xf0, 4, 0); + spca504B_WaitCmdStatus(gspca_dev); + break; + default: +diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c +index bb3d31e2a0b5..12bc8cfd9b0d 100644 +--- a/drivers/media/usb/msi2500/msi2500.c ++++ b/drivers/media/usb/msi2500/msi2500.c +@@ -1250,7 +1250,7 @@ static int msi2500_probe(struct usb_interface *intf, + } + + dev->master = master; +- master->bus_num = 0; ++ master->bus_num = -1; + master->num_chipselect = 1; + master->transfer_one_message = msi2500_transfer_one_message; + spi_master_set_devdata(master, dev); +diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c +index ff489645e070..0cb8dd585235 100644 +--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c ++++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c +@@ -2722,9 +2722,8 @@ void pvr2_hdw_destroy(struct pvr2_hdw *hdw) + pvr2_stream_destroy(hdw->vid_stream); + hdw->vid_stream = NULL; + } +- pvr2_i2c_core_done(hdw); + v4l2_device_unregister(&hdw->v4l2_dev); +- pvr2_hdw_remove_usb_stuff(hdw); ++ pvr2_hdw_disconnect(hdw); + mutex_lock(&pvr2_unit_mtx); + do { + if ((hdw->unit_number >= 0) && +@@ -2751,6 +2750,7 @@ void pvr2_hdw_disconnect(struct pvr2_hdw *hdw) + { + pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_disconnect(hdw=%p)",hdw); + LOCK_TAKE(hdw->big_lock); ++ pvr2_i2c_core_done(hdw); + LOCK_TAKE(hdw->ctl_lock); + pvr2_hdw_remove_usb_stuff(hdw); + LOCK_GIVE(hdw->ctl_lock); +diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c +index 185c8079d0f9..14f3e8388f35 100644 +--- a/drivers/media/usb/tm6000/tm6000-dvb.c ++++ b/drivers/media/usb/tm6000/tm6000-dvb.c +@@ -156,6 +156,10 @@ static int tm6000_start_stream(struct tm6000_core *dev) + if (ret < 0) { + printk(KERN_ERR "tm6000: error %i in %s during pipe reset\n", + ret, __func__); ++ ++ kfree(dvb->bulk_urb->transfer_buffer); ++ usb_free_urb(dvb->bulk_urb); ++ dvb->bulk_urb = NULL; + return ret; + } else + printk(KERN_ERR "tm6000: pipe resetted\n"); +diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c +index 9db31db7d9ac..df593d643d53 100644 +--- a/drivers/media/usb/usbtv/usbtv-audio.c ++++ b/drivers/media/usb/usbtv/usbtv-audio.c +@@ -398,7 +398,7 @@ void usbtv_audio_free(struct usbtv *usbtv) + cancel_work_sync(&usbtv->snd_trigger); + + if (usbtv->snd && usbtv->udev) { +- snd_card_free(usbtv->snd); ++ snd_card_free_when_closed(usbtv->snd); + usbtv->snd = NULL; + } + } +diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c +index d8ce7d75ff18..fcbabd2a4114 100644 +--- a/drivers/media/usb/usbtv/usbtv-core.c ++++ b/drivers/media/usb/usbtv/usbtv-core.c +@@ -110,7 +110,8 @@ static 
int usbtv_probe(struct usb_interface *intf, + + usbtv_audio_fail: + /* we must not free at this point */ +- usb_get_dev(usbtv->udev); ++ v4l2_device_get(&usbtv->v4l2_dev); ++ /* this will undo the v4l2_device_get() */ + usbtv_video_free(usbtv); + + usbtv_video_fail: +diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c +index 94acf7ca452d..dda7111251f8 100644 +--- a/drivers/media/usb/uvc/uvc_driver.c ++++ b/drivers/media/usb/uvc/uvc_driver.c +@@ -869,7 +869,10 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id, + unsigned int i; + + extra_size = roundup(extra_size, sizeof(*entity->pads)); +- num_inputs = (type & UVC_TERM_OUTPUT) ? num_pads : num_pads - 1; ++ if (num_pads) ++ num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1; ++ else ++ num_inputs = 0; + size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads + + num_inputs; + entity = kzalloc(size, GFP_KERNEL); +@@ -885,7 +888,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id, + + for (i = 0; i < num_inputs; ++i) + entity->pads[i].flags = MEDIA_PAD_FL_SINK; +- if (!UVC_ENTITY_IS_OTERM(entity)) ++ if (!UVC_ENTITY_IS_OTERM(entity) && num_pads) + entity->pads[num_pads-1].flags = MEDIA_PAD_FL_SOURCE; + + entity->bNrInPins = num_inputs; +diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c +index 05eed4be25df..6a19cf94705b 100644 +--- a/drivers/media/usb/uvc/uvc_v4l2.c ++++ b/drivers/media/usb/uvc/uvc_v4l2.c +@@ -257,11 +257,41 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream, + if (ret < 0) + goto done; + ++ /* After the probe, update fmt with the values returned from ++ * negotiation with the device. Some devices return invalid bFormatIndex ++ * and bFrameIndex values, in which case we can only assume they have ++ * accepted the requested format as-is. ++ */ ++ for (i = 0; i < stream->nformats; ++i) { ++ if (probe->bFormatIndex == stream->format[i].index) { ++ format = &stream->format[i]; ++ break; ++ } ++ } ++ ++ if (i == stream->nformats) ++ uvc_trace(UVC_TRACE_FORMAT, ++ "Unknown bFormatIndex %u, using default\n", ++ probe->bFormatIndex); ++ ++ for (i = 0; i < format->nframes; ++i) { ++ if (probe->bFrameIndex == format->frame[i].bFrameIndex) { ++ frame = &format->frame[i]; ++ break; ++ } ++ } ++ ++ if (i == format->nframes) ++ uvc_trace(UVC_TRACE_FORMAT, ++ "Unknown bFrameIndex %u, using default\n", ++ probe->bFrameIndex); ++ + fmt->fmt.pix.width = frame->wWidth; + fmt->fmt.pix.height = frame->wHeight; + fmt->fmt.pix.field = V4L2_FIELD_NONE; + fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(format, frame); + fmt->fmt.pix.sizeimage = probe->dwMaxVideoFrameSize; ++ fmt->fmt.pix.pixelformat = format->fcc; + fmt->fmt.pix.colorspace = format->colorspace; + fmt->fmt.pix.priv = 0; + +diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c +index dcca723f7155..1d724e86f378 100644 +--- a/drivers/media/usb/uvc/uvc_video.c ++++ b/drivers/media/usb/uvc/uvc_video.c +@@ -89,10 +89,37 @@ int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit, + static void uvc_fixup_video_ctrl(struct uvc_streaming *stream, + struct uvc_streaming_control *ctrl) + { ++ static const struct usb_device_id elgato_cam_link_4k = { ++ USB_DEVICE(0x0fd9, 0x0066) ++ }; + struct uvc_format *format = NULL; + struct uvc_frame *frame = NULL; + unsigned int i; + ++ /* ++ * The response of the Elgato Cam Link 4K is incorrect: The second byte ++ * contains bFormatIndex (instead of being the second byte of bmHint). 
++ * The first byte is always zero. The third byte is always 1. ++ * ++ * The UVC 1.5 class specification defines the first five bits in the ++ * bmHint bitfield. The remaining bits are reserved and should be zero. ++ * Therefore a valid bmHint will be less than 32. ++ * ++ * Latest Elgato Cam Link 4K firmware as of 2021-03-23 needs this fix. ++ * MCU: 20.02.19, FPGA: 67 ++ */ ++ if (usb_match_one_id(stream->dev->intf, &elgato_cam_link_4k) && ++ ctrl->bmHint > 255) { ++ u8 corrected_format_index = ctrl->bmHint >> 8; ++ ++ /* uvc_dbg(stream->dev, VIDEO, ++ "Correct USB video probe response from {bmHint: 0x%04x, bFormatIndex: %u} to {bmHint: 0x%04x, bFormatIndex: %u}\n", ++ ctrl->bmHint, ctrl->bFormatIndex, ++ 1, corrected_format_index); */ ++ ctrl->bmHint = 1; ++ ctrl->bFormatIndex = corrected_format_index; ++ } ++ + for (i = 0; i < stream->nformats; ++i) { + if (stream->format[i].index == ctrl->bFormatIndex) { + format = &stream->format[i]; +diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c +index c5513f55e64e..05b9012f9902 100644 +--- a/drivers/media/usb/zr364xx/zr364xx.c ++++ b/drivers/media/usb/zr364xx/zr364xx.c +@@ -1066,6 +1066,7 @@ static int zr364xx_start_readpipe(struct zr364xx_camera *cam) + DBG("submitting URB %p\n", pipe_info->stream_urb); + retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL); + if (retval) { ++ usb_free_urb(pipe_info->stream_urb); + printk(KERN_ERR KBUILD_MODNAME ": start read pipe failed\n"); + return retval; + } +diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c +index 0c5e69070586..d44b289205b4 100644 +--- a/drivers/media/v4l2-core/v4l2-fh.c ++++ b/drivers/media/v4l2-core/v4l2-fh.c +@@ -109,6 +109,7 @@ int v4l2_fh_release(struct file *filp) + v4l2_fh_del(fh); + v4l2_fh_exit(fh); + kfree(fh); ++ filp->private_data = NULL; + } + return 0; + } +diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c +index 9b98d9794c43..114a30411293 100644 +--- a/drivers/media/v4l2-core/v4l2-ioctl.c ++++ b/drivers/media/v4l2-core/v4l2-ioctl.c +@@ -2920,7 +2920,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, + v4l2_kioctl func) + { + char sbuf[128]; +- void *mbuf = NULL; ++ void *mbuf = NULL, *array_buf = NULL; + void *parg = (void *)arg; + long err = -EINVAL; + bool has_array_args; +@@ -2975,20 +2975,14 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, + has_array_args = err; + + if (has_array_args) { +- /* +- * When adding new types of array args, make sure that the +- * parent argument to ioctl (which contains the pointer to the +- * array) fits into sbuf (so that mbuf will still remain +- * unused up to here). 
+- */ +- mbuf = kmalloc(array_size, GFP_KERNEL); ++ array_buf = kmalloc(array_size, GFP_KERNEL); + err = -ENOMEM; +- if (NULL == mbuf) ++ if (array_buf == NULL) + goto out_array_args; + err = -EFAULT; +- if (copy_from_user(mbuf, user_ptr, array_size)) ++ if (copy_from_user(array_buf, user_ptr, array_size)) + goto out_array_args; +- *kernel_ptr = mbuf; ++ *kernel_ptr = array_buf; + } + + /* Handles IOCTL */ +@@ -3007,7 +3001,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, + + if (has_array_args) { + *kernel_ptr = (void __force *)user_ptr; +- if (copy_to_user(user_ptr, mbuf, array_size)) ++ if (copy_to_user(user_ptr, array_buf, array_size)) + err = -EFAULT; + goto out_array_args; + } +@@ -3027,6 +3021,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, + } + + out: ++ kfree(array_buf); + kfree(mbuf); + return err; + } +diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c +index b1a4d4e2341b..3ac9f7260e72 100644 +--- a/drivers/media/v4l2-core/videobuf2-core.c ++++ b/drivers/media/v4l2-core/videobuf2-core.c +@@ -1370,6 +1370,7 @@ static int vb2_start_streaming(struct vb2_queue *q) + int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb) + { + struct vb2_buffer *vb; ++ enum vb2_buffer_state orig_state; + int ret; + + if (q->error) { +@@ -1399,6 +1400,7 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb) + * Add to the queued buffers list, a buffer will stay on it until + * dequeued in dqbuf. + */ ++ orig_state = vb->state; + list_add_tail(&vb->queued_entry, &q->queued_list); + q->queued_count++; + q->waiting_for_buffers = false; +@@ -1429,8 +1431,17 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb) + if (q->streaming && !q->start_streaming_called && + q->queued_count >= q->min_buffers_needed) { + ret = vb2_start_streaming(q); +- if (ret) ++ if (ret) { ++ /* ++ * Since vb2_core_qbuf will return with an error, ++ * we should return it to state DEQUEUED since ++ * the error indicates that the buffer wasn't queued. 
++ */ ++ list_del(&vb->queued_entry); ++ q->queued_count--; ++ vb->state = orig_state; + return ret; ++ } + } + + dprintk(1, "qbuf of buffer %d succeeded\n", vb->index); +diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c +index 04644e7b42b1..88c32b8dc88a 100644 +--- a/drivers/memory/emif.c ++++ b/drivers/memory/emif.c +@@ -165,35 +165,12 @@ static const struct file_operations emif_mr4_fops = { + + static int __init_or_module emif_debugfs_init(struct emif_data *emif) + { +- struct dentry *dentry; +- int ret; +- +- dentry = debugfs_create_dir(dev_name(emif->dev), NULL); +- if (!dentry) { +- ret = -ENOMEM; +- goto err0; +- } +- emif->debugfs_root = dentry; +- +- dentry = debugfs_create_file("regcache_dump", S_IRUGO, +- emif->debugfs_root, emif, &emif_regdump_fops); +- if (!dentry) { +- ret = -ENOMEM; +- goto err1; +- } +- +- dentry = debugfs_create_file("mr4", S_IRUGO, +- emif->debugfs_root, emif, &emif_mr4_fops); +- if (!dentry) { +- ret = -ENOMEM; +- goto err1; +- } +- ++ emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL); ++ debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif, ++ &emif_regdump_fops); ++ debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif, ++ &emif_mr4_fops); + return 0; +-err1: +- debugfs_remove_recursive(emif->debugfs_root); +-err0: +- return ret; + } + + static void __exit emif_debugfs_exit(struct emif_data *emif) +diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c +index 662d050243be..2fbf8d09af36 100644 +--- a/drivers/memory/fsl-corenet-cf.c ++++ b/drivers/memory/fsl-corenet-cf.c +@@ -215,10 +215,8 @@ static int ccf_probe(struct platform_device *pdev) + dev_set_drvdata(&pdev->dev, ccf); + + irq = platform_get_irq(pdev, 0); +- if (!irq) { +- dev_err(&pdev->dev, "%s: no irq\n", __func__); +- return -ENXIO; +- } ++ if (irq < 0) ++ return irq; + + ret = devm_request_irq(&pdev->dev, irq, ccf_irq, 0, pdev->name, ccf); + if (ret) { +diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c +index 1b182b117f9c..38b945eb410f 100644 +--- a/drivers/memory/fsl_ifc.c ++++ b/drivers/memory/fsl_ifc.c +@@ -109,7 +109,6 @@ static int fsl_ifc_ctrl_remove(struct platform_device *dev) + iounmap(ctrl->gregs); + + dev_set_drvdata(&dev->dev, NULL); +- kfree(ctrl); + + return 0; + } +@@ -221,7 +220,8 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) + + dev_info(&dev->dev, "Freescale Integrated Flash Controller\n"); + +- fsl_ifc_ctrl_dev = kzalloc(sizeof(*fsl_ifc_ctrl_dev), GFP_KERNEL); ++ fsl_ifc_ctrl_dev = devm_kzalloc(&dev->dev, sizeof(*fsl_ifc_ctrl_dev), ++ GFP_KERNEL); + if (!fsl_ifc_ctrl_dev) + return -ENOMEM; + +@@ -231,8 +231,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) + fsl_ifc_ctrl_dev->gregs = of_iomap(dev->dev.of_node, 0); + if (!fsl_ifc_ctrl_dev->gregs) { + dev_err(&dev->dev, "failed to get memory region\n"); +- ret = -ENODEV; +- goto err; ++ return -ENODEV; + } + + if (of_property_read_bool(dev->dev.of_node, "little-endian")) { +@@ -308,6 +307,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) + free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev); + irq_dispose_mapping(fsl_ifc_ctrl_dev->irq); + err: ++ iounmap(fsl_ifc_ctrl_dev->gregs); + return ret; + } + +diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c +index bf0fe0137dfe..4af2f5b231dd 100644 +--- a/drivers/memory/omap-gpmc.c ++++ b/drivers/memory/omap-gpmc.c +@@ -951,7 +951,7 @@ static int gpmc_cs_remap(int cs, u32 base) + int ret; + u32 old_base, size; + +- if (cs > 
gpmc_cs_num) { ++ if (cs >= gpmc_cs_num) { + pr_err("%s: requested chip-select is disabled\n", __func__); + return -ENODEV; + } +@@ -986,7 +986,7 @@ int gpmc_cs_request(int cs, unsigned long size, unsigned long *base) + struct resource *res = &gpmc->mem; + int r = -1; + +- if (cs > gpmc_cs_num) { ++ if (cs >= gpmc_cs_num) { + pr_err("%s: requested chip-select is disabled\n", __func__); + return -ENODEV; + } +@@ -1028,8 +1028,8 @@ EXPORT_SYMBOL(gpmc_cs_request); + + void gpmc_cs_free(int cs) + { +- struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; +- struct resource *res = &gpmc->mem; ++ struct gpmc_cs_data *gpmc; ++ struct resource *res; + + spin_lock(&gpmc_mem_lock); + if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) { +@@ -1038,6 +1038,9 @@ void gpmc_cs_free(int cs) + spin_unlock(&gpmc_mem_lock); + return; + } ++ gpmc = &gpmc_cs[cs]; ++ res = &gpmc->mem; ++ + gpmc_cs_disable_mem(cs); + if (res->flags) + release_resource(res); +diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c +index 1041eb7a6167..2cae85a7ca6d 100644 +--- a/drivers/memstick/core/memstick.c ++++ b/drivers/memstick/core/memstick.c +@@ -469,7 +469,6 @@ static void memstick_check(struct work_struct *work) + host->card = card; + if (device_register(&card->dev)) { + put_device(&card->dev); +- kfree(host->card); + host->card = NULL; + } + } else +diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c +index d5cfb503b9d6..2539984c1db1 100644 +--- a/drivers/memstick/host/r592.c ++++ b/drivers/memstick/host/r592.c +@@ -762,8 +762,10 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id) + goto error3; + + dev->mmio = pci_ioremap_bar(pdev, 0); +- if (!dev->mmio) ++ if (!dev->mmio) { ++ error = -ENOMEM; + goto error4; ++ } + + dev->irq = pdev->irq; + spin_lock_init(&dev->irq_lock); +@@ -790,12 +792,14 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id) + &dev->dummy_dma_page_physical_address, GFP_KERNEL); + r592_stop_dma(dev , 0); + +- if (request_irq(dev->irq, &r592_irq, IRQF_SHARED, +- DRV_NAME, dev)) ++ error = request_irq(dev->irq, &r592_irq, IRQF_SHARED, ++ DRV_NAME, dev); ++ if (error) + goto error6; + + r592_update_card_detect(dev); +- if (memstick_add_host(host)) ++ error = memstick_add_host(host); ++ if (error) + goto error7; + + message("driver successfully loaded"); +diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c +index 6c9fc11efb87..32083759b93d 100644 +--- a/drivers/message/fusion/mptscsih.c ++++ b/drivers/message/fusion/mptscsih.c +@@ -118,8 +118,6 @@ int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state); + int mptscsih_resume(struct pci_dev *pdev); + #endif + +-#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE +- + + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + /* +@@ -1178,8 +1176,10 @@ mptscsih_remove(struct pci_dev *pdev) + + scsi_remove_host(host); + +- if((hd = shost_priv(host)) == NULL) +- return; ++ if (host == NULL) ++ hd = NULL; ++ else ++ hd = shost_priv(host); + + mptscsih_shutdown(pdev); + +@@ -1195,14 +1195,15 @@ mptscsih_remove(struct pci_dev *pdev) + "Free'd ScsiLookup (%d) memory\n", + ioc->name, sz1)); + +- kfree(hd->info_kbuf); ++ if (hd) ++ kfree(hd->info_kbuf); + + /* NULL the Scsi_Host pointer + */ + ioc->sh = NULL; + +- scsi_host_put(host); +- ++ if (host) ++ scsi_host_put(host); + mpt_detach(pdev); + + } +@@ -2427,7 +2428,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR + /* Copy the 
sense received into the scsi command block. */ + req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); + sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC)); +- memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc)); ++ memcpy(sc->sense_buffer, sense_data, MPT_SENSE_BUFFER_ALLOC); + + /* Log SMART data (asc = 0x5D, non-IM case only) if required. + */ +diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c +index 1f0c2b594654..3382845d4b67 100644 +--- a/drivers/mfd/arizona-core.c ++++ b/drivers/mfd/arizona-core.c +@@ -1537,6 +1537,15 @@ int arizona_dev_init(struct arizona *arizona) + arizona_irq_exit(arizona); + err_pm: + pm_runtime_disable(arizona->dev); ++ ++ switch (arizona->pdata.clk32k_src) { ++ case ARIZONA_32KZ_MCLK1: ++ case ARIZONA_32KZ_MCLK2: ++ arizona_clk32k_disable(arizona); ++ break; ++ default: ++ break; ++ } + err_reset: + arizona_enable_reset(arizona); + regulator_disable(arizona->dcvdd); +@@ -1558,6 +1567,15 @@ int arizona_dev_exit(struct arizona *arizona) + regulator_disable(arizona->dcvdd); + regulator_put(arizona->dcvdd); + ++ switch (arizona->pdata.clk32k_src) { ++ case ARIZONA_32KZ_MCLK1: ++ case ARIZONA_32KZ_MCLK2: ++ arizona_clk32k_disable(arizona); ++ break; ++ default: ++ break; ++ } ++ + mfd_remove_devices(arizona->dev); + arizona_free_irq(arizona, ARIZONA_IRQ_UNDERCLOCKED, arizona); + arizona_free_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, arizona); +diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c +index 578e881067a5..4094f97ec7dc 100644 +--- a/drivers/mfd/da9052-i2c.c ++++ b/drivers/mfd/da9052-i2c.c +@@ -118,6 +118,7 @@ static const struct i2c_device_id da9052_i2c_id[] = { + {"da9053-bc", DA9053_BC}, + {} + }; ++MODULE_DEVICE_TABLE(i2c, da9052_i2c_id); + + #ifdef CONFIG_OF + static const struct of_device_id dialog_dt_ids[] = { +diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c +index 672831d5ee32..97a69cd6f127 100644 +--- a/drivers/mfd/dln2.c ++++ b/drivers/mfd/dln2.c +@@ -294,7 +294,11 @@ static void dln2_rx(struct urb *urb) + len = urb->actual_length - sizeof(struct dln2_header); + + if (handle == DLN2_HANDLE_EVENT) { ++ unsigned long flags; ++ ++ spin_lock_irqsave(&dln2->event_cb_lock, flags); + dln2_run_event_callbacks(dln2, id, echo, data, len); ++ spin_unlock_irqrestore(&dln2->event_cb_lock, flags); + } else { + /* URB will be re-submitted in _dln2_transfer (free_rx_slot) */ + if (dln2_transfer_complete(dln2, urb, handle, echo)) +diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c +index 5c8ed2150c8b..fb687368ac98 100644 +--- a/drivers/mfd/mfd-core.c ++++ b/drivers/mfd/mfd-core.c +@@ -32,6 +32,11 @@ int mfd_cell_enable(struct platform_device *pdev) + const struct mfd_cell *cell = mfd_get_cell(pdev); + int err = 0; + ++ if (!cell->enable) { ++ dev_dbg(&pdev->dev, "No .enable() call-back registered\n"); ++ return 0; ++ } ++ + /* only call enable hook if the cell wasn't previously enabled */ + if (atomic_inc_return(cell->usage_count) == 1) + err = cell->enable(pdev); +@@ -49,6 +54,11 @@ int mfd_cell_disable(struct platform_device *pdev) + const struct mfd_cell *cell = mfd_get_cell(pdev); + int err = 0; + ++ if (!cell->disable) { ++ dev_dbg(&pdev->dev, "No .disable() call-back registered\n"); ++ return 0; ++ } ++ + /* only disable if no other clients are using it */ + if (atomic_dec_return(cell->usage_count) == 0) + err = cell->disable(pdev); +diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c +index 98029ee0959e..be61f8606a04 100644 +--- a/drivers/mfd/rtsx_pcr.c ++++ 
b/drivers/mfd/rtsx_pcr.c +@@ -1255,12 +1255,14 @@ static int rtsx_pci_probe(struct pci_dev *pcidev, + ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells, + ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL); + if (ret < 0) +- goto disable_irq; ++ goto free_slots; + + schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200)); + + return 0; + ++free_slots: ++ kfree(pcr->slots); + disable_irq: + free_irq(pcr->irq, (void *)pcr); + disable_msi: +diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c +index 3270b8dbc949..4ca245518a19 100644 +--- a/drivers/mfd/sm501.c ++++ b/drivers/mfd/sm501.c +@@ -1425,8 +1425,14 @@ static int sm501_plat_probe(struct platform_device *dev) + goto err_claim; + } + +- return sm501_init_dev(sm); ++ ret = sm501_init_dev(sm); ++ if (ret) ++ goto err_unmap; ++ ++ return 0; + ++ err_unmap: ++ iounmap(sm->regs); + err_claim: + release_resource(sm->regs_claim); + kfree(sm->regs_claim); +diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c +index 863c39a3353c..d284df25c76b 100644 +--- a/drivers/mfd/stmpe-i2c.c ++++ b/drivers/mfd/stmpe-i2c.c +@@ -109,7 +109,7 @@ static const struct i2c_device_id stmpe_i2c_id[] = { + { "stmpe2403", STMPE2403 }, + { } + }; +-MODULE_DEVICE_TABLE(i2c, stmpe_id); ++MODULE_DEVICE_TABLE(i2c, stmpe_i2c_id); + + static struct i2c_driver stmpe_i2c_driver = { + .driver = { +diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c +index fd789d2eb0f5..9f7ae1e1ebcd 100644 +--- a/drivers/mfd/wm831x-auxadc.c ++++ b/drivers/mfd/wm831x-auxadc.c +@@ -98,11 +98,10 @@ static int wm831x_auxadc_read_irq(struct wm831x *wm831x, + wait_for_completion_timeout(&req->done, msecs_to_jiffies(500)); + + mutex_lock(&wm831x->auxadc_lock); +- +- list_del(&req->list); + ret = req->val; + + out: ++ list_del(&req->list); + mutex_unlock(&wm831x->auxadc_lock); + + kfree(req); +diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c +index 8588dbad3301..925c1828ec28 100644 +--- a/drivers/mfd/wm8994-core.c ++++ b/drivers/mfd/wm8994-core.c +@@ -698,3 +698,4 @@ module_i2c_driver(wm8994_i2c_driver); + MODULE_DESCRIPTION("Core support for the WM8994 audio CODEC"); + MODULE_LICENSE("GPL"); + MODULE_AUTHOR("Mark Brown "); ++MODULE_SOFTDEP("pre: wm8994_regulator"); +diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c +index 3a6e4ec21c87..8c9a444d61d3 100644 +--- a/drivers/misc/atmel-ssc.c ++++ b/drivers/misc/atmel-ssc.c +@@ -13,7 +13,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + #include +@@ -21,7 +21,7 @@ + #include + + /* Serialize access to ssc_list and user count */ +-static DEFINE_SPINLOCK(user_lock); ++static DEFINE_MUTEX(user_lock); + static LIST_HEAD(ssc_list); + + struct ssc_device *ssc_request(unsigned int ssc_num) +@@ -29,7 +29,7 @@ struct ssc_device *ssc_request(unsigned int ssc_num) + int ssc_valid = 0; + struct ssc_device *ssc; + +- spin_lock(&user_lock); ++ mutex_lock(&user_lock); + list_for_each_entry(ssc, &ssc_list, list) { + if (ssc->pdev->dev.of_node) { + if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc") +@@ -45,18 +45,18 @@ struct ssc_device *ssc_request(unsigned int ssc_num) + } + + if (!ssc_valid) { +- spin_unlock(&user_lock); ++ mutex_unlock(&user_lock); + pr_err("ssc: ssc%d platform device is missing\n", ssc_num); + return ERR_PTR(-ENODEV); + } + + if (ssc->user) { +- spin_unlock(&user_lock); ++ mutex_unlock(&user_lock); + dev_dbg(&ssc->pdev->dev, "module busy\n"); + return ERR_PTR(-EBUSY); + } + ssc->user++; +- spin_unlock(&user_lock); ++ mutex_unlock(&user_lock); + + 
clk_prepare(ssc->clk); + +@@ -68,14 +68,14 @@ void ssc_free(struct ssc_device *ssc) + { + bool disable_clk = true; + +- spin_lock(&user_lock); ++ mutex_lock(&user_lock); + if (ssc->user) + ssc->user--; + else { + disable_clk = false; + dev_dbg(&ssc->pdev->dev, "device already free\n"); + } +- spin_unlock(&user_lock); ++ mutex_unlock(&user_lock); + + if (disable_clk) + clk_unprepare(ssc->clk); +@@ -195,9 +195,9 @@ static int ssc_probe(struct platform_device *pdev) + return -ENXIO; + } + +- spin_lock(&user_lock); ++ mutex_lock(&user_lock); + list_add_tail(&ssc->list, &ssc_list); +- spin_unlock(&user_lock); ++ mutex_unlock(&user_lock); + + platform_set_drvdata(pdev, ssc); + +@@ -211,9 +211,9 @@ static int ssc_remove(struct platform_device *pdev) + { + struct ssc_device *ssc = platform_get_drvdata(pdev); + +- spin_lock(&user_lock); ++ mutex_lock(&user_lock); + list_del(&ssc->list); +- spin_unlock(&user_lock); ++ mutex_unlock(&user_lock); + + return 0; + } +diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c +index a8b6d6a635e9..e97b3b26805d 100644 +--- a/drivers/misc/cxl/sysfs.c ++++ b/drivers/misc/cxl/sysfs.c +@@ -598,7 +598,7 @@ static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int c + rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type, + &afu->dev.kobj, "cr%i", cr->cr); + if (rc) +- goto err; ++ goto err1; + + rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr); + if (rc) +diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c +index 5afe4cd16569..cd7e7e36907b 100644 +--- a/drivers/misc/eeprom/at25.c ++++ b/drivers/misc/eeprom/at25.c +@@ -355,7 +355,7 @@ static int at25_probe(struct spi_device *spi) + at25->nvmem_config.reg_read = at25_ee_read; + at25->nvmem_config.reg_write = at25_ee_write; + at25->nvmem_config.priv = at25; +- at25->nvmem_config.stride = 4; ++ at25->nvmem_config.stride = 1; + at25->nvmem_config.word_size = 1; + at25->nvmem_config.size = chip.byte_len; + +diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c +index 94cc035aa841..30b490329f55 100644 +--- a/drivers/misc/eeprom/eeprom_93xx46.c ++++ b/drivers/misc/eeprom/eeprom_93xx46.c +@@ -38,6 +38,10 @@ static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = { + EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH, + }; + ++static const struct eeprom_93xx46_devtype_data microchip_93lc46b_data = { ++ .quirks = EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE, ++}; ++ + struct eeprom_93xx46_dev { + struct spi_device *spi; + struct eeprom_93xx46_platform_data *pdata; +@@ -58,6 +62,11 @@ static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev) + return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH; + } + ++static inline bool has_quirk_extra_read_cycle(struct eeprom_93xx46_dev *edev) ++{ ++ return edev->pdata->quirks & EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE; ++} ++ + static int eeprom_93xx46_read(void *priv, unsigned int off, + void *val, size_t count) + { +@@ -99,6 +108,11 @@ static int eeprom_93xx46_read(void *priv, unsigned int off, + dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n", + cmd_addr, edev->spi->max_speed_hz); + ++ if (has_quirk_extra_read_cycle(edev)) { ++ cmd_addr <<= 1; ++ bits += 1; ++ } ++ + spi_message_init(&m); + + t[0].tx_buf = (char *)&cmd_addr; +@@ -366,6 +380,7 @@ static void select_deassert(void *context) + static const struct of_device_id eeprom_93xx46_of_table[] = { + { .compatible = "eeprom-93xx46", }, + { .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, }, ++ { 
.compatible = "microchip,93lc46b", .data = µchip_93lc46b_data, }, + {} + }; + MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table); +@@ -533,3 +548,4 @@ MODULE_LICENSE("GPL"); + MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs"); + MODULE_AUTHOR("Anatolij Gustschin "); + MODULE_ALIAS("spi:93xx46"); ++MODULE_ALIAS("spi:eeprom-93xx46"); +diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c +index 6b3bf9ab051d..706decef68a0 100644 +--- a/drivers/misc/ibmasm/module.c ++++ b/drivers/misc/ibmasm/module.c +@@ -123,7 +123,7 @@ static int ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id) + result = ibmasm_init_remote_input_dev(sp); + if (result) { + dev_err(sp->dev, "Failed to initialize remote queue\n"); +- goto error_send_message; ++ goto error_init_remote; + } + + result = ibmasm_send_driver_vpd(sp); +@@ -143,8 +143,9 @@ static int ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id) + return 0; + + error_send_message: +- disable_sp_interrupts(sp->base_address); + ibmasm_free_remote_input_dev(sp); ++error_init_remote: ++ disable_sp_interrupts(sp->base_address); + free_irq(sp->irq, (void *)sp); + error_request_irq: + iounmap(sp->base_address); +diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c +index 9c5590eb151e..71becb5c4dec 100644 +--- a/drivers/misc/kgdbts.c ++++ b/drivers/misc/kgdbts.c +@@ -105,19 +105,20 @@ + #include + #include + +-#define v1printk(a...) do { \ +- if (verbose) \ +- printk(KERN_INFO a); \ +- } while (0) +-#define v2printk(a...) do { \ +- if (verbose > 1) \ +- printk(KERN_INFO a); \ +- touch_nmi_watchdog(); \ +- } while (0) +-#define eprintk(a...) do { \ +- printk(KERN_ERR a); \ +- WARN_ON(1); \ +- } while (0) ++#define v1printk(a...) do { \ ++ if (verbose) \ ++ printk(KERN_INFO a); \ ++} while (0) ++#define v2printk(a...) do { \ ++ if (verbose > 1) { \ ++ printk(KERN_INFO a); \ ++ } \ ++ touch_nmi_watchdog(); \ ++} while (0) ++#define eprintk(a...) 
do { \ ++ printk(KERN_ERR a); \ ++ WARN_ON(1); \ ++} while (0) + #define MAX_CONFIG_LEN 40 + + static struct kgdb_io kgdbts_io_ops; +diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c +index fb8705fc3aca..205dc5d40ce6 100644 +--- a/drivers/misc/lis3lv02d/lis3lv02d.c ++++ b/drivers/misc/lis3lv02d/lis3lv02d.c +@@ -220,7 +220,7 @@ static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000}; + static int lis3_3dlh_rates[4] = {50, 100, 400, 1000}; + + /* ODR is Output Data Rate */ +-static int lis3lv02d_get_odr(struct lis3lv02d *lis3) ++static int lis3lv02d_get_odr_index(struct lis3lv02d *lis3) + { + u8 ctrl; + int shift; +@@ -228,15 +228,23 @@ static int lis3lv02d_get_odr(struct lis3lv02d *lis3) + lis3->read(lis3, CTRL_REG1, &ctrl); + ctrl &= lis3->odr_mask; + shift = ffs(lis3->odr_mask) - 1; +- return lis3->odrs[(ctrl >> shift)]; ++ return (ctrl >> shift); + } + + static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3) + { +- int div = lis3lv02d_get_odr(lis3); ++ int odr_idx = lis3lv02d_get_odr_index(lis3); ++ int div = lis3->odrs[odr_idx]; + +- if (WARN_ONCE(div == 0, "device returned spurious data")) ++ if (div == 0) { ++ if (odr_idx == 0) { ++ /* Power-down mode, not sampling no need to sleep */ ++ return 0; ++ } ++ ++ dev_err(&lis3->pdev->dev, "Error unknown odrs-index: %d\n", odr_idx); + return -ENXIO; ++ } + + /* LIS3 power on delay is quite long */ + msleep(lis3->pwron_delay / div); +@@ -819,9 +827,12 @@ static ssize_t lis3lv02d_rate_show(struct device *dev, + struct device_attribute *attr, char *buf) + { + struct lis3lv02d *lis3 = dev_get_drvdata(dev); ++ int odr_idx; + + lis3lv02d_sysfs_poweron(lis3); +- return sprintf(buf, "%d\n", lis3lv02d_get_odr(lis3)); ++ ++ odr_idx = lis3lv02d_get_odr_index(lis3); ++ return sprintf(buf, "%d\n", lis3->odrs[odr_idx]); + } + + static ssize_t lis3lv02d_rate_set(struct device *dev, +diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h +index c439c827eea8..0ef759671b54 100644 +--- a/drivers/misc/lis3lv02d/lis3lv02d.h ++++ b/drivers/misc/lis3lv02d/lis3lv02d.h +@@ -284,6 +284,7 @@ struct lis3lv02d { + int regs_size; + u8 *reg_cache; + bool regs_stored; ++ bool init_required; + u8 odr_mask; /* ODR bit mask */ + u8 whoami; /* indicates measurement precision */ + s16 (*read_data) (struct lis3lv02d *lis3, int reg); +diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c +index 582b24d2c479..610e87bdb5dc 100644 +--- a/drivers/misc/mei/bus.c ++++ b/drivers/misc/mei/bus.c +@@ -639,9 +639,8 @@ static int mei_cl_device_remove(struct device *dev) + ret = cldrv->remove(cldev); + + module_put(THIS_MODULE); +- dev->driver = NULL; +- return ret; + ++ return ret; + } + + static ssize_t name_show(struct device *dev, struct device_attribute *a, +diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h +index d2bfabecd882..f9d3211ce599 100644 +--- a/drivers/misc/mei/client.h ++++ b/drivers/misc/mei/client.h +@@ -152,11 +152,11 @@ static inline u8 mei_cl_me_id(const struct mei_cl *cl) + * + * @cl: host client + * +- * Return: mtu ++ * Return: mtu or 0 if client is not connected + */ + static inline size_t mei_cl_mtu(const struct mei_cl *cl) + { +- return cl->me_cl->props.max_msg_length; ++ return cl->me_cl ? 
cl->me_cl->props.max_msg_length : 0; + } + + /** +diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c +index 5a4893ce9c24..857bf1f8f4c0 100644 +--- a/drivers/misc/mei/interrupt.c ++++ b/drivers/misc/mei/interrupt.c +@@ -226,6 +226,9 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, + return ret; + } + ++ pm_runtime_mark_last_busy(dev->dev); ++ pm_request_autosuspend(dev->dev); ++ + list_move_tail(&cb->list, &cl->rd_pending); + + return 0; +diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c +index 32ab0f43f506..4e2cfb6eea35 100644 +--- a/drivers/misc/mic/scif/scif_rma.c ++++ b/drivers/misc/mic/scif/scif_rma.c +@@ -1401,6 +1401,8 @@ int __scif_pin_pages(void *addr, size_t len, int *out_prot, + NULL); + up_write(&mm->mmap_sem); + if (nr_pages != pinned_pages->nr_pages) { ++ if (pinned_pages->nr_pages < 0) ++ pinned_pages->nr_pages = 0; + if (try_upgrade) { + if (ulimit) + __scif_dec_pinned_vm_lock(mm, +@@ -1421,7 +1423,6 @@ int __scif_pin_pages(void *addr, size_t len, int *out_prot, + + if (pinned_pages->nr_pages < nr_pages) { + err = -EFAULT; +- pinned_pages->nr_pages = nr_pages; + goto dec_pinned; + } + +@@ -1434,7 +1435,6 @@ int __scif_pin_pages(void *addr, size_t len, int *out_prot, + __scif_dec_pinned_vm_lock(mm, nr_pages, 0); + /* Something went wrong! Rollback */ + error_unmap: +- pinned_pages->nr_pages = nr_pages; + scif_destroy_pinned_pages(pinned_pages); + *pages = NULL; + dev_dbg(scif_info.mdev.this_device, +diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c +index 1a2b67f3183d..f9da3150f80a 100644 +--- a/drivers/misc/mic/vop/vop_main.c ++++ b/drivers/misc/mic/vop/vop_main.c +@@ -301,7 +301,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, + /* First assign the vring's allocated in host memory */ + vqconfig = _vop_vq_config(vdev->desc) + index; + memcpy_fromio(&config, vqconfig, sizeof(config)); +- _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN); ++ _vr_size = round_up(vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN), 4); + vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info)); + va = vpdev->hw_ops->ioremap(vpdev, le64_to_cpu(config.address), + vr_size); +diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c +index fed992e2c258..49e7a7240469 100644 +--- a/drivers/misc/mic/vop/vop_vringh.c ++++ b/drivers/misc/mic/vop/vop_vringh.c +@@ -308,7 +308,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev, + + num = le16_to_cpu(vqconfig[i].num); + mutex_init(&vvr->vr_mutex); +- vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) + ++ vr_size = PAGE_ALIGN(round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4) + + sizeof(struct _mic_vring_info)); + vr->va = (void *) + __get_free_pages(GFP_KERNEL | __GFP_ZERO, +@@ -320,7 +320,7 @@ static int vop_virtio_add_device(struct vop_vdev *vdev, + goto err; + } + vr->len = vr_size; +- vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN); ++ vr->info = vr->va + round_up(vring_size(num, MIC_VIRTIO_RING_ALIGN), 4); + vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i); + vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size, + DMA_BIDIRECTIONAL); +@@ -611,6 +611,7 @@ static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf, + size_t partlen; + bool dma = VOP_USE_DMA; + int err = 0; ++ size_t offset = 0; + + if (daddr & (dma_alignment - 1)) { + vdev->tx_dst_unaligned += len; +@@ -659,13 +660,20 @@ static int 
vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf, + * We are copying to IO below and should ideally use something + * like copy_from_user_toio(..) if it existed. + */ +- if (copy_from_user((void __force *)dbuf, ubuf, len)) { +- err = -EFAULT; +- dev_err(vop_dev(vdev), "%s %d err %d\n", +- __func__, __LINE__, err); +- goto err; ++ while (len) { ++ partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE); ++ ++ if (copy_from_user(vvr->buf, ubuf + offset, partlen)) { ++ err = -EFAULT; ++ dev_err(vop_dev(vdev), "%s %d err %d\n", ++ __func__, __LINE__, err); ++ goto err; ++ } ++ memcpy_toio(dbuf + offset, vvr->buf, partlen); ++ offset += partlen; ++ vdev->out_bytes += partlen; ++ len -= partlen; + } +- vdev->out_bytes += len; + err = 0; + err: + vpdev->hw_ops->iounmap(vpdev, dbuf); +diff --git a/drivers/misc/qcom-xr-smrtvwr-misc.c b/drivers/misc/qcom-xr-smrtvwr-misc.c +index fe3cdc855168..8d38b3c3ee7f 100644 +--- a/drivers/misc/qcom-xr-smrtvwr-misc.c ++++ b/drivers/misc/qcom-xr-smrtvwr-misc.c +@@ -29,7 +29,7 @@ struct qcom_xr_smrtvwr { + static int qcom_xr_smrtvwr_probe(struct platform_device *pdev) + { + int rc; +- struct regulator *reg1, *reg2, *reg3; ++ struct regulator *reg1, *reg2, *reg3, *reg4; + int dp3p3_en_gpio = 142; + int wcd_en_gpio = 93; + int rgb_tck_oe_en_gpio = 108; +@@ -64,6 +64,16 @@ static int qcom_xr_smrtvwr_probe(struct platform_device *pdev) + } + } + ++ reg4 = devm_regulator_get(&pdev->dev, "pm660_l15"); ++ if (!IS_ERR(reg4)) { ++ regulator_set_load(reg4, 600000); ++ rc = regulator_enable(reg4); ++ if (rc < 0) { ++ pr_err("%s pm660_l15 failed\n", __func__); ++ goto reg4_fail; ++ } ++ } ++ + rc = gpio_request(dp3p3_en_gpio, "ti-dp-3v3-en-gpio"); + if (rc) { + pr_err("%s dp3p3_en gpio request failed\n", __func__); +@@ -112,6 +122,8 @@ static int qcom_xr_smrtvwr_probe(struct platform_device *pdev) + gpio_free(wcd_en_gpio); + gpio3p3_fail: + gpio_free(dp3p3_en_gpio); ++reg4_fail: ++ devm_regulator_put(reg4); + reg3_fail: + devm_regulator_put(reg3); + reg2_fail: +diff --git a/drivers/misc/tusb1064.c b/drivers/misc/tusb1064.c +index b9f40e94e1fd..206c2e840b21 100644 +--- a/drivers/misc/tusb1064.c ++++ b/drivers/misc/tusb1064.c +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2020, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -81,21 +81,26 @@ static int tusb1064_write(struct i2c_client *client, u8 reg, u8 val) + return 0; + } + +-void tusb1064_usb_event(bool flip) ++bool tusb1064_usb_event(bool flip) + { + if (pdata) { + if (flip) { +- if (standalone_mode) ++ if (standalone_mode) { + tusb1064_write(pdata->i2c_client, 0x0A, 0x05); ++ return true; ++ } + else + tusb1064_write(pdata->i2c_client, 0x0A, 0x06); + } else { +- if (standalone_mode) ++ if (standalone_mode) { + tusb1064_write(pdata->i2c_client, 0x0A, 0x01); ++ return true; ++ } + else + tusb1064_write(pdata->i2c_client, 0x0A, 0x02); + } + } ++ return false; + } + EXPORT_SYMBOL(tusb1064_usb_event); + +diff --git a/drivers/misc/tusb1064.h b/drivers/misc/tusb1064.h +index effecdc23f82..2258ef427df3 100644 +--- a/drivers/misc/tusb1064.h ++++ b/drivers/misc/tusb1064.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2020 The Linux Foundation. All rights reserved. ++/* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -13,6 +13,6 @@ + #ifndef __TUSB1064_H_ + #define __TUSB1064_H_ + +-void tusb1064_usb_event(bool flip); ++bool tusb1064_usb_event(bool flip); + + #endif /* __TUSB1064_H_ */ +diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c +index b9da2c6cc981..0bdfa90ea6cd 100644 +--- a/drivers/misc/vmw_vmci/vmci_context.c ++++ b/drivers/misc/vmw_vmci/vmci_context.c +@@ -750,7 +750,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context, + return VMCI_ERROR_MORE_DATA; + } + +- dbells = kmalloc(data_size, GFP_ATOMIC); ++ dbells = kzalloc(data_size, GFP_ATOMIC); + if (!dbells) + return VMCI_ERROR_NO_MEM; + +diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c +index f005206d9033..4581210349d2 100644 +--- a/drivers/misc/vmw_vmci/vmci_doorbell.c ++++ b/drivers/misc/vmw_vmci/vmci_doorbell.c +@@ -334,7 +334,7 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle) + bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn) + { + int result; +- struct vmci_notify_bm_set_msg bitmap_set_msg; ++ struct vmci_notify_bm_set_msg bitmap_set_msg = { }; + + bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, + VMCI_SET_NOTIFY_BITMAP); +diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c +index 189b32519748..9ac3659a5537 100644 +--- a/drivers/misc/vmw_vmci/vmci_guest.c ++++ b/drivers/misc/vmw_vmci/vmci_guest.c +@@ -172,7 +172,7 @@ static int vmci_check_host_caps(struct pci_dev *pdev) + VMCI_UTIL_NUM_RESOURCES * sizeof(u32); + struct vmci_datagram *check_msg; + +- check_msg = kmalloc(msg_size, GFP_KERNEL); ++ check_msg = kzalloc(msg_size, GFP_KERNEL); + if (!check_msg) { + dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__); + return -ENOMEM; +diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c +index 5927db046a87..9c8887d3a4b9 100644 +--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c ++++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c +@@ -639,6 +639,9 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size) + + queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page); + ++ if (queue_size + queue_page_size > KMALLOC_MAX_SIZE) ++ return NULL; ++ + queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL); + if (queue) { + queue->q_header = NULL; +@@ -732,7 +735,7 @@ static void qp_release_pages(struct page **pages, + + for (i = 0; i < num_pages; i++) { + if (dirty) +- set_page_dirty(pages[i]); ++ set_page_dirty_lock(pages[i]); + + put_page(pages[i]); + pages[i] = NULL; +@@ -758,8 +761,9 @@ static int qp_host_get_user_memory(u64 produce_uva, + if (retval < (int)produce_q->kernel_if->num_pages) { + pr_debug("get_user_pages_fast(produce) failed (retval=%d)", + retval); +- qp_release_pages(produce_q->kernel_if->u.h.header_page, +- retval, false); ++ if (retval > 0) ++ qp_release_pages(produce_q->kernel_if->u.h.header_page, ++ retval, false); + err = VMCI_ERROR_NO_MEM; + goto out; + } +@@ -770,8 +774,9 @@ static int qp_host_get_user_memory(u64 produce_uva, + if (retval < (int)consume_q->kernel_if->num_pages) { + pr_debug("get_user_pages_fast(consume) failed (retval=%d)", + retval); +- qp_release_pages(consume_q->kernel_if->u.h.header_page, +- retval, false); ++ if (retval > 0) ++ qp_release_pages(consume_q->kernel_if->u.h.header_page, ++ retval, false); + 
qp_release_pages(produce_q->kernel_if->u.h.header_page, + produce_q->kernel_if->num_pages, false); + err = VMCI_ERROR_NO_MEM; +diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c +index b352749a2177..629ffd21e649 100644 +--- a/drivers/mmc/core/core.c ++++ b/drivers/mmc/core/core.c +@@ -552,7 +552,7 @@ static int mmc_devfreq_set_target(struct device *dev, + *freq, current->comm); + + spin_lock_bh(&clk_scaling->lock); +- if (clk_scaling->target_freq == *freq || ++ if (clk_scaling->curr_freq == *freq || + clk_scaling->skip_clk_scale_freq_update) { + spin_unlock_bh(&clk_scaling->lock); + goto out; +@@ -2630,11 +2630,14 @@ int mmc_execute_tuning(struct mmc_card *card) + err = host->ops->execute_tuning(host, opcode); + mmc_host_clk_release(host); + +- if (err) ++ if (err) { + pr_err("%s: tuning execution failed: %d\n", + mmc_hostname(host), err); +- else ++ } else { ++ host->retune_now = 0; ++ host->need_retune = 0; + mmc_retune_enable(host); ++ } + + return err; + } +@@ -3163,7 +3166,7 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr) + mmc_host_clk_hold(host); + err = mmc_wait_for_cmd(host, &cmd, 0); + if (err) +- goto err_command; ++ goto power_cycle; + + if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) { + err = -EIO; +diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c +index 8fa3b705322c..4391b9b2095c 100644 +--- a/drivers/mmc/core/host.c ++++ b/drivers/mmc/core/host.c +@@ -472,6 +472,16 @@ int mmc_retune(struct mmc_host *host) + + if (host->ops->prepare_hs400_tuning) + host->ops->prepare_hs400_tuning(host, &host->ios); ++ ++ /* ++ * Timing should be adjusted to the HS400 target ++ * operation frequency for tuning process. ++ * Similar handling is also done in mmc_hs200_tuning() ++ * This is handled properly in sdhci-msm.c from msm-5.4 onwards. ++ */ ++ if (host->card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 && ++ host->ios.bus_width == MMC_BUS_WIDTH_8) ++ mmc_set_timing(host, MMC_TIMING_MMC_HS400); + } + + err = mmc_execute_tuning(host->card); +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c +index 0831456c3c52..34aa069ba19a 100755 +--- a/drivers/mmc/core/mmc.c ++++ b/drivers/mmc/core/mmc.c +@@ -436,10 +436,6 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) + + /* EXT_CSD value is in units of 10ms, but we store in ms */ + card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME]; +- /* Some eMMC set the value too low so set a minimum */ +- if (card->ext_csd.part_time && +- card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME) +- card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME; + + /* Sleep / awake timeout in 100ns units */ + if (sa_shift > 0 && sa_shift <= 0x17) +@@ -691,6 +687,16 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) + card->ext_csd.barrier_support = 0; + card->ext_csd.cache_flush_policy = 0; + } ++ /* ++ * GENERIC_CMD6_TIME is to be used "unless a specific timeout is defined ++ * when accessing a specific field", so use it here if there is no ++ * PARTITION_SWITCH_TIME. 
++ */ ++ if (!card->ext_csd.part_time) ++ card->ext_csd.part_time = card->ext_csd.generic_cmd6_time; ++ /* Some eMMC set the value too low so set a minimum */ ++ if (card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME) ++ card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME; + + /* eMMC v5 or later */ + if (card->ext_csd.rev >= 7) { +diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c +index 2a6e08322593..00819077479e 100644 +--- a/drivers/mmc/core/sd.c ++++ b/drivers/mmc/core/sd.c +@@ -142,6 +142,9 @@ static int mmc_decode_csd(struct mmc_card *card) + csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1; + csd->erase_size <<= csd->write_blkbits - 9; + } ++ ++ if (UNSTUFF_BITS(resp, 13, 1)) ++ mmc_card_set_readonly(card); + break; + case 1: + /* +@@ -176,6 +179,9 @@ static int mmc_decode_csd(struct mmc_card *card) + csd->write_blkbits = 9; + csd->write_partial = 0; + csd->erase_size = 1; ++ ++ if (UNSTUFF_BITS(resp, 13, 1)) ++ mmc_card_set_readonly(card); + break; + default: + pr_err("%s: unrecognised CSD structure version %d\n", +diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c +index d6fbc08d3159..a2f8e558656b 100644 +--- a/drivers/mmc/core/sdio_cis.c ++++ b/drivers/mmc/core/sdio_cis.c +@@ -24,12 +24,17 @@ + #include "sdio_cis.h" + #include "sdio_ops.h" + ++#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */ ++ + static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, + const unsigned char *buf, unsigned size) + { + unsigned i, nr_strings; + char **buffer, *string; + ++ if (size < 2) ++ return 0; ++ + /* Find all null-terminated (including zero length) strings in + the TPLLV1_INFO field. Trailing garbage is ignored. */ + buf += 2; +@@ -266,6 +271,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) + + do { + unsigned char tpl_code, tpl_link; ++ unsigned long timeout = jiffies + ++ msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS); + + ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code); + if (ret) +@@ -326,6 +333,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) + prev = &this->next; + + if (ret == -ENOENT) { ++ if (time_after(jiffies, timeout)) ++ break; + /* warn about unknown tuples */ + pr_warn_ratelimited("%s: queuing unknown" + " CIS tuple 0x%02x (%u bytes)\n", +diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c +index d9c7fd0cabaf..c6b91efaa956 100644 +--- a/drivers/mmc/host/dw_mmc.c ++++ b/drivers/mmc/host/dw_mmc.c +@@ -380,7 +380,7 @@ static void dw_mci_start_command(struct dw_mci *host, + + static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) + { +- struct mmc_command *stop = data->stop ? 
data->stop : &host->stop_abort; ++ struct mmc_command *stop = &host->stop_abort; + + dw_mci_start_command(host, stop, host->stop_cmdr); + } +@@ -1280,10 +1280,7 @@ static void __dw_mci_start_request(struct dw_mci *host, + spin_unlock_irqrestore(&host->irq_lock, irqflags); + } + +- if (mrq->stop) +- host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); +- else +- host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd); ++ host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd); + } + + static void dw_mci_start_request(struct dw_mci *host, +@@ -1869,8 +1866,8 @@ static void dw_mci_tasklet_func(unsigned long priv) + continue; + } + +- dw_mci_stop_dma(host); + send_stop_abort(host, data); ++ dw_mci_stop_dma(host); + state = STATE_SENDING_STOP; + break; + } +@@ -1894,11 +1891,10 @@ static void dw_mci_tasklet_func(unsigned long priv) + */ + if (test_and_clear_bit(EVENT_DATA_ERROR, + &host->pending_events)) { +- dw_mci_stop_dma(host); +- if (data->stop || +- !(host->data_status & (SDMMC_INT_DRTO | ++ if (!(host->data_status & (SDMMC_INT_DRTO | + SDMMC_INT_EBE))) + send_stop_abort(host, data); ++ dw_mci_stop_dma(host); + state = STATE_DATA_ERROR; + break; + } +@@ -1931,11 +1927,10 @@ static void dw_mci_tasklet_func(unsigned long priv) + */ + if (test_and_clear_bit(EVENT_DATA_ERROR, + &host->pending_events)) { +- dw_mci_stop_dma(host); +- if (data->stop || +- !(host->data_status & (SDMMC_INT_DRTO | ++ if (!(host->data_status & (SDMMC_INT_DRTO | + SDMMC_INT_EBE))) + send_stop_abort(host, data); ++ dw_mci_stop_dma(host); + state = STATE_DATA_ERROR; + break; + } +@@ -2009,7 +2004,7 @@ static void dw_mci_tasklet_func(unsigned long priv) + host->cmd = NULL; + host->data = NULL; + +- if (mrq->stop) ++ if (!mrq->sbc && mrq->stop) + dw_mci_command_complete(host, mrq->stop); + else + host->cmd_status = 0; +diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c +index 7fc6ce381142..125c06a10455 100644 +--- a/drivers/mmc/host/mtk-sd.c ++++ b/drivers/mmc/host/mtk-sd.c +@@ -741,13 +741,13 @@ static void msdc_track_cmd_data(struct msdc_host *host, + static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq) + { + unsigned long flags; +- bool ret; + +- ret = cancel_delayed_work(&host->req_timeout); +- if (!ret) { +- /* delay work already running */ +- return; +- } ++ /* ++ * No need check the return value of cancel_delayed_work, as only ONE ++ * path will go here! 
++ */ ++ cancel_delayed_work(&host->req_timeout); ++ + spin_lock_irqsave(&host->lock, flags); + host->mrq = NULL; + spin_unlock_irqrestore(&host->lock, flags); +@@ -765,7 +765,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events, + bool done = false; + bool sbc_error; + unsigned long flags; +- u32 *rsp = cmd->resp; ++ u32 *rsp; + + if (mrq->sbc && cmd == mrq->cmd && + (events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR +@@ -786,6 +786,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events, + + if (done) + return true; ++ rsp = cmd->resp; + + sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask); + +@@ -968,7 +969,7 @@ static void msdc_data_xfer_next(struct msdc_host *host, + static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, + struct mmc_request *mrq, struct mmc_data *data) + { +- struct mmc_command *stop = data->stop; ++ struct mmc_command *stop; + unsigned long flags; + bool done; + unsigned int check_data = events & +@@ -984,6 +985,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, + + if (done) + return true; ++ stop = data->stop; + + if (check_data || (stop && stop->error)) { + dev_dbg(host->dev, "DMA status: 0x%8X\n", +diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c +index c8b8ac66ff7e..687fd68fbbcd 100644 +--- a/drivers/mmc/host/mxs-mmc.c ++++ b/drivers/mmc/host/mxs-mmc.c +@@ -651,7 +651,7 @@ static int mxs_mmc_probe(struct platform_device *pdev) + + ret = mmc_of_parse(mmc); + if (ret) +- goto out_clk_disable; ++ goto out_free_dma; + + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + +diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c +index 445fc47dc3e7..9e52886b5928 100644 +--- a/drivers/mmc/host/sdhci-esdhc-imx.c ++++ b/drivers/mmc/host/sdhci-esdhc-imx.c +@@ -79,7 +79,7 @@ + #define ESDHC_STD_TUNING_EN (1 << 24) + /* NOTE: the minimum valid tuning start tap for mx6sl is 1 */ + #define ESDHC_TUNING_START_TAP_DEFAULT 0x1 +-#define ESDHC_TUNING_START_TAP_MASK 0xff ++#define ESDHC_TUNING_START_TAP_MASK 0x7f + #define ESDHC_TUNING_STEP_MASK 0x00070000 + #define ESDHC_TUNING_STEP_SHIFT 16 + +@@ -1301,9 +1301,10 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev) + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); +- int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); ++ int dead; + + pm_runtime_get_sync(&pdev->dev); ++ dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c +index 1ef8bf9f4b1a..f150a502b20f 100644 +--- a/drivers/mmc/host/sdhci.c ++++ b/drivers/mmc/host/sdhci.c +@@ -1386,6 +1386,10 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host) + u16 preset = 0; + + switch (host->timing) { ++ case MMC_TIMING_MMC_HS: ++ case MMC_TIMING_SD_HS: ++ preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED); ++ break; + case MMC_TIMING_UHS_SDR12: + preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); + break; +diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h +index 2b67d0a616db..beea93b10a89 100644 +--- a/drivers/mmc/host/sdhci.h ++++ b/drivers/mmc/host/sdhci.h +@@ -249,6 +249,7 @@ + + /* 60-FB reserved */ + ++#define SDHCI_PRESET_FOR_HIGH_SPEED 0x64 + #define SDHCI_PRESET_FOR_SDR12 0x66 + #define SDHCI_PRESET_FOR_SDR25 0x68 + #define 
SDHCI_PRESET_FOR_SDR50 0x6A +diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c +index 1bd5f1a18d4e..ad0a467bb464 100644 +--- a/drivers/mmc/host/usdhi6rol0.c ++++ b/drivers/mmc/host/usdhi6rol0.c +@@ -1809,6 +1809,7 @@ static int usdhi6_probe(struct platform_device *pdev) + + version = usdhi6_read(host, USDHI6_VERSION); + if ((version & 0xfff) != 0xa0d) { ++ ret = -EPERM; + dev_err(dev, "Version not recognized %x\n", version); + goto e_clk_off; + } +@@ -1866,10 +1867,12 @@ static int usdhi6_probe(struct platform_device *pdev) + + ret = mmc_add_host(mmc); + if (ret < 0) +- goto e_clk_off; ++ goto e_release_dma; + + return 0; + ++e_release_dma: ++ usdhi6_dma_release(host); + e_clk_off: + clk_disable_unprepare(host->clk); + e_free_mmc: +diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c +index 63fac78b3d46..a3472127bea3 100644 +--- a/drivers/mmc/host/via-sdmmc.c ++++ b/drivers/mmc/host/via-sdmmc.c +@@ -859,6 +859,9 @@ static void via_sdc_data_isr(struct via_crdr_mmc_host *host, u16 intmask) + { + BUG_ON(intmask == 0); + ++ if (!host->data) ++ return; ++ + if (intmask & VIA_CRDR_SDSTS_DT) + host->data->error = -ETIMEDOUT; + else if (intmask & (VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)) +@@ -1269,11 +1272,14 @@ static void via_init_sdc_pm(struct via_crdr_mmc_host *host) + static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state) + { + struct via_crdr_mmc_host *host; ++ unsigned long flags; + + host = pci_get_drvdata(pcidev); + ++ spin_lock_irqsave(&host->lock, flags); + via_save_pcictrlreg(host); + via_save_sdcreg(host); ++ spin_unlock_irqrestore(&host->lock, flags); + + pci_save_state(pcidev); + pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0); +diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c +index bb3e0d1dd355..a3e4288e5391 100644 +--- a/drivers/mmc/host/vub300.c ++++ b/drivers/mmc/host/vub300.c +@@ -2292,7 +2292,7 @@ static int vub300_probe(struct usb_interface *interface, + if (retval < 0) + goto error5; + retval = +- usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), ++ usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), + SET_ROM_WAIT_STATES, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + firmware_rom_wait_states, 0x0000, NULL, 0, HZ); +diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c +index 00ba09fa6f16..3c4819a05bf0 100644 +--- a/drivers/mtd/chips/cfi_cmdset_0002.c ++++ b/drivers/mtd/chips/cfi_cmdset_0002.c +@@ -722,7 +722,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) + kfree(mtd->eraseregions); + kfree(mtd); + kfree(cfi->cmdset_priv); +- kfree(cfi->cfiq); + return NULL; + } + +diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c +index fbd5affc0acf..67b71c476c5b 100644 +--- a/drivers/mtd/cmdlinepart.c ++++ b/drivers/mtd/cmdlinepart.c +@@ -228,12 +228,41 @@ static int mtdpart_setup_real(char *s) + struct cmdline_mtd_partition *this_mtd; + struct mtd_partition *parts; + int mtd_id_len, num_parts; +- char *p, *mtd_id; ++ char *p, *mtd_id, *semicol, *open_parenth; ++ ++ /* ++ * Replace the first ';' by a NULL char so strrchr can work ++ * properly. ++ */ ++ semicol = strchr(s, ';'); ++ if (semicol) ++ *semicol = '\0'; ++ ++ /* ++ * make sure that part-names with ":" will not be handled as ++ * part of the mtd-id with an ":" ++ */ ++ open_parenth = strchr(s, '('); ++ if (open_parenth) ++ *open_parenth = '\0'; + + mtd_id = s; + +- /* fetch */ +- p = strchr(s, ':'); ++ /* ++ * fetch . 
We use strrchr to ignore all ':' that could ++ * be present in the MTD name, only the last one is interpreted ++ * as an / separator. ++ */ ++ p = strrchr(s, ':'); ++ ++ /* Restore the '(' now. */ ++ if (open_parenth) ++ *open_parenth = '('; ++ ++ /* Restore the ';' now. */ ++ if (semicol) ++ *semicol = ';'; ++ + if (!p) { + pr_err("no mtd-id\n"); + return -EINVAL; +diff --git a/drivers/mtd/devices/msm_qpic_nand.c b/drivers/mtd/devices/msm_qpic_nand.c +index eb39bf71c4cf..c5a51a34b292 100644 +--- a/drivers/mtd/devices/msm_qpic_nand.c ++++ b/drivers/mtd/devices/msm_qpic_nand.c +@@ -24,6 +24,7 @@ + * Usually, this is (2 * MAX_CW_PER_PAGE). + */ + #define MAX_DESC 16 ++#define ONE_CODEWORD_SIZE 516 + + static bool enable_euclean; + static bool enable_perfstats; +@@ -1173,10 +1174,16 @@ static int msm_nand_validate_mtd_params(struct mtd_info *mtd, bool read, + err = -EINVAL; + goto out; + } +- args->page_count = ops->len / (mtd->writesize + mtd->oobsize); ++ if (ops->len <= ONE_CODEWORD_SIZE) ++ args->page_count = 1; ++ else ++ args->page_count = ops->len / ++ (mtd->writesize + mtd->oobsize); + + } else if (ops->mode == MTD_OPS_AUTO_OOB) { +- if (ops->datbuf && (ops->len % mtd->writesize) != 0) { ++ if (ops->datbuf && (ops->len % ++ ((ops->len <= ONE_CODEWORD_SIZE) ? ++ ONE_CODEWORD_SIZE : mtd->writesize)) != 0) { + /* when ops->datbuf is NULL, ops->len can be ooblen */ + pr_err("unsupported data len %d for AUTO mode\n", + ops->len); +@@ -1189,7 +1196,10 @@ static int msm_nand_validate_mtd_params(struct mtd_info *mtd, bool read, + if ((args->page_count == 0) && (ops->ooblen)) + args->page_count = 1; + } else if (ops->datbuf) { +- args->page_count = ops->len / mtd->writesize; ++ if (ops->len <= ONE_CODEWORD_SIZE) ++ args->page_count = 1; ++ else ++ args->page_count = ops->len / mtd->writesize; + } + } + +@@ -1235,12 +1245,20 @@ static void msm_nand_update_rw_reg_data(struct msm_nand_chip *chip, + struct msm_nand_rw_params *args, + struct msm_nand_rw_reg_data *data) + { ++ /* ++ * While reading one codeword, CW_PER_PAGE bits of QPIC_NAND_DEV0_CFG0 ++ * should be set to 0, which implies 1 codeword per page. 'n' below, ++ * is used to configure cfg0 for reading one full page or one single ++ * codeword. ++ */ ++ int n = (ops->len <= ONE_CODEWORD_SIZE) ? args->cwperpage : 1; ++ + if (args->read) { + if (ops->mode != MTD_OPS_RAW) { + data->cmd = MSM_NAND_CMD_PAGE_READ_ECC; + data->cfg0 = + (chip->cfg0 & ~(7U << CW_PER_PAGE)) | +- (((args->cwperpage-1) - args->start_sector) ++ (((args->cwperpage-n) - args->start_sector) + << CW_PER_PAGE); + data->cfg1 = chip->cfg1; + data->ecc_bch_cfg = chip->ecc_bch_cfg; +@@ -1248,7 +1266,7 @@ static void msm_nand_update_rw_reg_data(struct msm_nand_chip *chip, + data->cmd = MSM_NAND_CMD_PAGE_READ_ALL; + data->cfg0 = + (chip->cfg0_raw & ~(7U << CW_PER_PAGE)) | +- (((args->cwperpage-1) - args->start_sector) ++ (((args->cwperpage-n) - args->start_sector) + << CW_PER_PAGE); + data->cfg1 = chip->cfg1_raw; + data->ecc_bch_cfg = chip->ecc_cfg_raw; +@@ -1292,6 +1310,11 @@ static void msm_nand_prep_rw_cmd_desc(struct mtd_oob_ops *ops, + uint32_t offset, size, last_read; + struct sps_command_element *curr_ce, *start_ce; + uint32_t *flags_ptr, *num_ce_ptr; ++ /* ++ * Variable to configure read_location register parameters ++ * while reading one codeword or one full page ++ */ ++ int n = (ops->len <= ONE_CODEWORD_SIZE) ? 
args->cwperpage : 1; + + if (curr_cw == args->start_sector) { + curr_ce = start_ce = &cmd_list->setup_desc.ce[0]; +@@ -1372,10 +1395,15 @@ static void msm_nand_prep_rw_cmd_desc(struct mtd_oob_ops *ops, + if (ops->mode == MTD_OPS_AUTO_OOB) { + if (ops->datbuf) { + offset = 0; +- size = (curr_cw < (args->cwperpage - 1)) ? 516 : +- (512 - ((args->cwperpage - 1) << 2)); +- last_read = (curr_cw < (args->cwperpage - 1)) ? 1 : +- (ops->oobbuf ? 0 : 1); ++ if (ops->len <= ONE_CODEWORD_SIZE) { ++ size = ONE_CODEWORD_SIZE; ++ last_read = 1; ++ } else { ++ size = (curr_cw < (args->cwperpage - 1)) ? 516 : ++ (512 - ((args->cwperpage - 1) << 2)); ++ last_read = (curr_cw < (args->cwperpage - 1)) ? ++ 1 : (ops->oobbuf ? 0 : 1); ++ } + rdata = (offset << 0) | (size << 16) | + (last_read << 31); + +@@ -1385,7 +1413,7 @@ static void msm_nand_prep_rw_cmd_desc(struct mtd_oob_ops *ops, + rdata); + curr_ce++; + } +- if (curr_cw == (args->cwperpage - 1) && ops->oobbuf) { ++ if (curr_cw == (args->cwperpage - n) && ops->oobbuf) { + offset = 512 - ((args->cwperpage - 1) << 2); + size = (args->cwperpage) << 2; + if (size > args->oob_len_cmd) +@@ -1429,6 +1457,11 @@ static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops, + uint32_t sectordatasize, sectoroobsize; + uint32_t sps_flags = 0; + int err = 0; ++ /* ++ * Variable to configure sectordatasize and sectoroobsize ++ * while reading one codeword or one full page. ++ */ ++ int n = (ops->len <= ONE_CODEWORD_SIZE) ? args->cwperpage : 1; + + if (args->read) + data_pipe_handle = info->sps.data_prod.handle; +@@ -1437,7 +1470,7 @@ static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops, + + if (ops->mode == MTD_OPS_RAW) { + if (ecc_parity_bytes && args->read) { +- if (curr_cw == (args->cwperpage - 1)) ++ if (curr_cw == (args->cwperpage - n)) + sps_flags |= SPS_IOVEC_FLAG_INT; + + /* read only ecc bytes */ +@@ -1452,7 +1485,7 @@ static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops, + sectordatasize = chip->cw_size; + if (!args->read) + sps_flags = SPS_IOVEC_FLAG_EOT; +- if (curr_cw == (args->cwperpage - 1)) ++ if (curr_cw == (args->cwperpage - n)) + sps_flags |= SPS_IOVEC_FLAG_INT; + + err = sps_transfer_one(data_pipe_handle, +@@ -1465,8 +1498,13 @@ static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops, + } + } else if (ops->mode == MTD_OPS_AUTO_OOB) { + if (ops->datbuf) { +- sectordatasize = (curr_cw < (args->cwperpage - 1)) +- ? 516 : (512 - ((args->cwperpage - 1) << 2)); ++ if (ops->len <= ONE_CODEWORD_SIZE) ++ sectordatasize = ONE_CODEWORD_SIZE; ++ else ++ sectordatasize = ++ (curr_cw < (args->cwperpage - 1)) ++ ? 
516 : ++ (512 - ((args->cwperpage - 1) << 2)); + + if (!args->read) { + sps_flags = SPS_IOVEC_FLAG_EOT; +@@ -1474,7 +1512,7 @@ static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops, + ops->oobbuf) + sps_flags = 0; + } +- if ((curr_cw == (args->cwperpage - 1)) && !ops->oobbuf) ++ if ((curr_cw == (args->cwperpage - n)) && !ops->oobbuf) + sps_flags |= SPS_IOVEC_FLAG_INT; + + err = sps_transfer_one(data_pipe_handle, +@@ -1486,7 +1524,7 @@ static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops, + args->data_dma_addr_curr += sectordatasize; + } + +- if (ops->oobbuf && (curr_cw == (args->cwperpage - 1))) { ++ if (ops->oobbuf && (curr_cw == (args->cwperpage - n))) { + sectoroobsize = args->cwperpage << 2; + if (sectoroobsize > args->oob_len_data) + sectoroobsize = args->oob_len_data; +@@ -1840,6 +1878,9 @@ static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from, + data.addr0 = (rw_params.page << 16) | rw_params.oob_col; + data.addr1 = (rw_params.page >> 16) & 0xff; + ++ if (ops->len <= ONE_CODEWORD_SIZE) ++ cwperpage = 1; ++ + for (n = rw_params.start_sector; n < cwperpage; n++) { + struct sps_command_element *curr_ce, *start_ce; + +@@ -1917,7 +1958,7 @@ static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from, + } else if (ops->mode == MTD_OPS_AUTO_OOB) { + if (ops->datbuf) + submitted_num_desc = cwperpage - +- rw_params.start_sector; ++ rw_params.start_sector; + if (ops->oobbuf) + submitted_num_desc++; + } +@@ -2110,7 +2151,10 @@ static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from, + } + validate_mtd_params_failed: + if (ops->mode != MTD_OPS_RAW) +- ops->retlen = mtd->writesize * pages_read; ++ if (ops->len <= ONE_CODEWORD_SIZE) ++ ops->retlen = ONE_CODEWORD_SIZE; ++ else ++ ops->retlen = mtd->writesize * pages_read; + else + ops->retlen = (mtd->writesize + mtd->oobsize) * pages_read; + ops->oobretlen = ops->ooblen - rw_params.oob_len_data; +@@ -2180,7 +2224,11 @@ static int msm_nand_read_partial_page(struct mtd_info *mtd, + no_copy = false; + + ops->datbuf = no_copy ? 
actual_buf : bounce_buf; ++ ++ if ((len <= ONE_CODEWORD_SIZE) && (offset == 0)) ++ ops->len = ONE_CODEWORD_SIZE; + err = msm_nand_read_oob(mtd, aligned_from, ops); ++ + if (err == -EUCLEAN) { + is_euclean = 1; + err = 0; +diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c +index 2342277c9bcb..5e36366d9b36 100644 +--- a/drivers/mtd/lpddr/lpddr2_nvm.c ++++ b/drivers/mtd/lpddr/lpddr2_nvm.c +@@ -408,6 +408,17 @@ static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add, + return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK); + } + ++static const struct mtd_info lpddr2_nvm_mtd_info = { ++ .type = MTD_RAM, ++ .writesize = 1, ++ .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK), ++ ._read = lpddr2_nvm_read, ++ ._write = lpddr2_nvm_write, ++ ._erase = lpddr2_nvm_erase, ++ ._unlock = lpddr2_nvm_unlock, ++ ._lock = lpddr2_nvm_lock, ++}; ++ + /* + * lpddr2_nvm driver probe method + */ +@@ -448,6 +459,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev) + .pfow_base = OW_BASE_ADDRESS, + .fldrv_priv = pcm_data, + }; ++ + if (IS_ERR(map->virt)) + return PTR_ERR(map->virt); + +@@ -459,22 +471,13 @@ static int lpddr2_nvm_probe(struct platform_device *pdev) + return PTR_ERR(pcm_data->ctl_regs); + + /* Populate mtd_info data structure */ +- *mtd = (struct mtd_info) { +- .dev = { .parent = &pdev->dev }, +- .name = pdev->dev.init_name, +- .type = MTD_RAM, +- .priv = map, +- .size = resource_size(add_range), +- .erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width, +- .writesize = 1, +- .writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width, +- .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK), +- ._read = lpddr2_nvm_read, +- ._write = lpddr2_nvm_write, +- ._erase = lpddr2_nvm_erase, +- ._unlock = lpddr2_nvm_unlock, +- ._lock = lpddr2_nvm_lock, +- }; ++ *mtd = lpddr2_nvm_mtd_info; ++ mtd->dev.parent = &pdev->dev; ++ mtd->name = pdev->dev.init_name; ++ mtd->priv = map; ++ mtd->size = resource_size(add_range); ++ mtd->erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width; ++ mtd->writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width; + + /* Verify the presence of the device looking for PFOW string */ + if (!lpddr2_nvm_pfow_present(map)) { +diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c +index 95b6a6640bca..b43b8edc18ec 100644 +--- a/drivers/mtd/mtdchar.c ++++ b/drivers/mtd/mtdchar.c +@@ -372,9 +372,6 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd, + uint32_t retlen; + int ret = 0; + +- if (!(file->f_mode & FMODE_WRITE)) +- return -EPERM; +- + if (length > 4096) + return -EINVAL; + +@@ -681,6 +678,48 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) + return -EFAULT; + } + ++ /* ++ * Check the file mode to require "dangerous" commands to have write ++ * permissions. 
++ */ ++ switch (cmd) { ++ /* "safe" commands */ ++ case MEMGETREGIONCOUNT: ++ case MEMGETREGIONINFO: ++ case MEMGETINFO: ++ case MEMREADOOB: ++ case MEMREADOOB64: ++ case MEMISLOCKED: ++ case MEMGETOOBSEL: ++ case MEMGETBADBLOCK: ++ case OTPSELECT: ++ case OTPGETREGIONCOUNT: ++ case OTPGETREGIONINFO: ++ case ECCGETLAYOUT: ++ case ECCGETSTATS: ++ case MTDFILEMODE: ++ case BLKPG: ++ case BLKRRPART: ++ break; ++ ++ /* "dangerous" commands */ ++ case MEMERASE: ++ case MEMERASE64: ++ case MEMLOCK: ++ case MEMUNLOCK: ++ case MEMSETBADBLOCK: ++ case MEMWRITEOOB: ++ case MEMWRITEOOB64: ++ case MEMWRITE: ++ case OTPLOCK: ++ if (!(file->f_mode & FMODE_WRITE)) ++ return -EPERM; ++ break; ++ ++ default: ++ return -ENOTTY; ++ } ++ + switch (cmd) { + case MEMGETREGIONCOUNT: + if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int))) +@@ -728,9 +767,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) + { + struct erase_info *erase; + +- if(!(file->f_mode & FMODE_WRITE)) +- return -EPERM; +- + erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL); + if (!erase) + ret = -ENOMEM; +@@ -1051,9 +1087,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) + ret = 0; + break; + } +- +- default: +- ret = -ENOTTY; + } + + return ret; +@@ -1097,6 +1130,11 @@ static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd, + struct mtd_oob_buf32 buf; + struct mtd_oob_buf32 __user *buf_user = argp; + ++ if (!(file->f_mode & FMODE_WRITE)) { ++ ret = -EPERM; ++ break; ++ } ++ + if (copy_from_user(&buf, argp, sizeof(buf))) + ret = -EFAULT; + else +diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c +index 97bb8f6304d4..09165eaac7a1 100644 +--- a/drivers/mtd/mtdoops.c ++++ b/drivers/mtd/mtdoops.c +@@ -313,12 +313,13 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper, + kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE, + record_size - MTDOOPS_HEADER_SIZE, NULL); + +- /* Panics must be written immediately */ +- if (reason != KMSG_DUMP_OOPS) ++ if (reason != KMSG_DUMP_OOPS) { ++ /* Panics must be written immediately */ + mtdoops_write(cxt, 1); +- +- /* For other cases, schedule work to write it "nicely" */ +- schedule_work(&cxt->work_write); ++ } else { ++ /* For other cases, schedule work to write it "nicely" */ ++ schedule_work(&cxt->work_write); ++ } + } + + static void mtdoops_notify_add(struct mtd_info *mtd) +diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c +index 78e12cc8bac2..02ec2d183607 100644 +--- a/drivers/mtd/nand/ams-delta.c ++++ b/drivers/mtd/nand/ams-delta.c +@@ -264,7 +264,7 @@ static int ams_delta_cleanup(struct platform_device *pdev) + void __iomem *io_base = platform_get_drvdata(pdev); + + /* Release resources, unregister device */ +- nand_release(ams_delta_mtd); ++ nand_release(mtd_to_nand(ams_delta_mtd)); + + gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio)); + gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB); +diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c +index 68b9160108c9..45495bc1a70e 100644 +--- a/drivers/mtd/nand/atmel_nand.c ++++ b/drivers/mtd/nand/atmel_nand.c +@@ -2336,7 +2336,7 @@ static int atmel_nand_remove(struct platform_device *pdev) + struct atmel_nand_host *host = platform_get_drvdata(pdev); + struct mtd_info *mtd = nand_to_mtd(&host->nand_chip); + +- nand_release(mtd); ++ nand_release(&host->nand_chip); + + atmel_nand_disable(host); + +diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c +index 9bf6d9915694..a0e7789131df 100644 +--- 
a/drivers/mtd/nand/au1550nd.c ++++ b/drivers/mtd/nand/au1550nd.c +@@ -496,7 +496,7 @@ static int au1550nd_remove(struct platform_device *pdev) + struct au1550nd_ctx *ctx = platform_get_drvdata(pdev); + struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + +- nand_release(nand_to_mtd(&ctx->chip)); ++ nand_release(&ctx->chip); + iounmap(ctx->base); + release_mem_region(r->start, 0x1000); + kfree(ctx); +diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c +index fb31429b70a9..d79694160845 100644 +--- a/drivers/mtd/nand/bcm47xxnflash/main.c ++++ b/drivers/mtd/nand/bcm47xxnflash/main.c +@@ -65,7 +65,7 @@ static int bcm47xxnflash_remove(struct platform_device *pdev) + { + struct bcm47xxnflash *nflash = platform_get_drvdata(pdev); + +- nand_release(nand_to_mtd(&nflash->nand_chip)); ++ nand_release(&nflash->nand_chip); + + return 0; + } +diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c +index 3962f55bd034..020bb350a2db 100644 +--- a/drivers/mtd/nand/bf5xx_nand.c ++++ b/drivers/mtd/nand/bf5xx_nand.c +@@ -688,7 +688,7 @@ static int bf5xx_nand_remove(struct platform_device *pdev) + * and their partitions, then go through freeing the + * resources used + */ +- nand_release(nand_to_mtd(&info->chip)); ++ nand_release(&info->chip); + + peripheral_free_list(bfin_nfc_pin_req); + bf5xx_nand_dma_remove(info); +diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c +index ef9a6b22c9fa..40fdc9d267b9 100644 +--- a/drivers/mtd/nand/brcmnand/brcmnand.c ++++ b/drivers/mtd/nand/brcmnand/brcmnand.c +@@ -491,8 +491,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) + } else { + ctrl->cs_offsets = brcmnand_cs_offsets; + +- /* v5.0 and earlier has a different CS0 offset layout */ +- if (ctrl->nand_version <= 0x0500) ++ /* v3.3-5.0 have a different CS0 offset layout */ ++ if (ctrl->nand_version >= 0x0303 && ++ ctrl->nand_version <= 0x0500) + ctrl->cs0_offsets = brcmnand_cs_offsets_cs0; + } + +@@ -911,11 +912,14 @@ static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section, + if (!section) { + /* + * Small-page NAND use byte 6 for BBI while large-page +- * NAND use byte 0. ++ * NAND use bytes 0 and 1. 
+ */ +- if (cfg->page_size > 512) +- oobregion->offset++; +- oobregion->length--; ++ if (cfg->page_size > 512) { ++ oobregion->offset += 2; ++ oobregion->length -= 2; ++ } else { ++ oobregion->length--; ++ } + } + } + +@@ -2592,7 +2596,7 @@ int brcmnand_remove(struct platform_device *pdev) + struct brcmnand_host *host; + + list_for_each_entry(host, &ctrl->host_list, node) +- nand_release(nand_to_mtd(&host->chip)); ++ nand_release(&host->chip); + + clk_disable_unprepare(ctrl->clk); + +diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c +index 0b0c93702abb..c16e740c01c3 100644 +--- a/drivers/mtd/nand/cafe_nand.c ++++ b/drivers/mtd/nand/cafe_nand.c +@@ -825,7 +825,7 @@ static void cafe_nand_remove(struct pci_dev *pdev) + /* Disable NAND IRQ in global IRQ mask register */ + cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK); + free_irq(pdev->irq, mtd); +- nand_release(mtd); ++ nand_release(chip); + free_rs(cafe->rs); + pci_iounmap(pdev, cafe->mmio); + dma_free_coherent(&cafe->pdev->dev, +diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c +index 49133783ca53..b9667204e711 100644 +--- a/drivers/mtd/nand/cmx270_nand.c ++++ b/drivers/mtd/nand/cmx270_nand.c +@@ -230,7 +230,7 @@ module_init(cmx270_init); + static void __exit cmx270_cleanup(void) + { + /* Release resources, unregister device */ +- nand_release(cmx270_nand_mtd); ++ nand_release(mtd_to_nand(cmx270_nand_mtd)); + + gpio_free(GPIO_NAND_RB); + gpio_free(GPIO_NAND_CS); +diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c +index a65e4e0f57a1..4779dfec3576 100644 +--- a/drivers/mtd/nand/cs553x_nand.c ++++ b/drivers/mtd/nand/cs553x_nand.c +@@ -339,7 +339,7 @@ static void __exit cs553x_cleanup(void) + mmio_base = this->IO_ADDR_R; + + /* Release resources, unregister device */ +- nand_release(mtd); ++ nand_release(this); + kfree(mtd->name); + cs553x_mtd[i] = NULL; + +diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c +index 27fa8b87cd5f..c7f535676e49 100644 +--- a/drivers/mtd/nand/davinci_nand.c ++++ b/drivers/mtd/nand/davinci_nand.c +@@ -840,7 +840,7 @@ static int nand_davinci_remove(struct platform_device *pdev) + ecc4_busy = false; + spin_unlock_irq(&davinci_nand_lock); + +- nand_release(nand_to_mtd(&info->chip)); ++ nand_release(&info->chip); + + clk_disable_unprepare(info->clk); + +diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c +index 0476ae8776d9..982cbc7f412f 100644 +--- a/drivers/mtd/nand/denali.c ++++ b/drivers/mtd/nand/denali.c +@@ -1655,7 +1655,7 @@ void denali_remove(struct denali_nand_info *denali) + */ + int bufsize = mtd->writesize + mtd->oobsize; + +- nand_release(mtd); ++ nand_release(&denali->nand); + denali_irq_cleanup(denali->irq, denali); + dma_unmap_single(denali->dev, denali->buf.dma_buf, bufsize, + DMA_BIDIRECTIONAL); +diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c +index a023ab9e9cbf..374b7a10ba51 100644 +--- a/drivers/mtd/nand/diskonchip.c ++++ b/drivers/mtd/nand/diskonchip.c +@@ -1605,13 +1605,10 @@ static int __init doc_probe(unsigned long physadr) + numchips = doc2001_init(mtd); + + if ((ret = nand_scan(mtd, numchips)) || (ret = doc->late_init(mtd))) { +- /* DBB note: i believe nand_release is necessary here, as ++ /* DBB note: i believe nand_cleanup is necessary here, as + buffers may have been allocated in nand_base. Check with + Thomas. FIX ME! */ +- /* nand_release will call mtd_device_unregister, but we +- haven't yet added it. 
This is handled without incident by +- mtd_device_unregister, as far as I can tell. */ +- nand_release(mtd); ++ nand_cleanup(nand); + kfree(nand); + goto fail; + } +@@ -1644,7 +1641,7 @@ static void release_nanddoc(void) + doc = nand_get_controller_data(nand); + + nextmtd = doc->nextdoc; +- nand_release(mtd); ++ nand_release(nand); + iounmap(doc->virtadr); + release_mem_region(doc->physadr, DOC_IOREMAP_LEN); + kfree(nand); +diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c +index 7af2a3cd949e..5798cd87f340 100644 +--- a/drivers/mtd/nand/docg4.c ++++ b/drivers/mtd/nand/docg4.c +@@ -1374,7 +1374,7 @@ static int __init probe_docg4(struct platform_device *pdev) + return 0; + + fail: +- nand_release(mtd); /* deletes partitions and mtd devices */ ++ nand_release(nand); /* deletes partitions and mtd devices */ + free_bch(doc->bch); + kfree(nand); + +@@ -1387,7 +1387,7 @@ static int __init probe_docg4(struct platform_device *pdev) + static int __exit cleanup_docg4(struct platform_device *pdev) + { + struct docg4_priv *doc = platform_get_drvdata(pdev); +- nand_release(doc->mtd); ++ nand_release(mtd_to_nand(doc->mtd)); + free_bch(doc->bch); + kfree(mtd_to_nand(doc->mtd)); + iounmap(doc->virtadr); +diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c +index 113f76e59937..2fc4f2ab89ff 100644 +--- a/drivers/mtd/nand/fsl_elbc_nand.c ++++ b/drivers/mtd/nand/fsl_elbc_nand.c +@@ -811,7 +811,7 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv) + struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand; + struct mtd_info *mtd = nand_to_mtd(&priv->chip); + +- nand_release(mtd); ++ nand_release(&priv->chip); + + kfree(mtd->name); + +diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c +index 4c3b986dd74d..cf0fccb5908e 100644 +--- a/drivers/mtd/nand/fsl_ifc_nand.c ++++ b/drivers/mtd/nand/fsl_ifc_nand.c +@@ -918,7 +918,7 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv) + { + struct mtd_info *mtd = nand_to_mtd(&priv->chip); + +- nand_release(mtd); ++ nand_release(&priv->chip); + + kfree(mtd->name); + +diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c +index d85fa2555b68..0b4d2489cc71 100644 +--- a/drivers/mtd/nand/fsl_upm.c ++++ b/drivers/mtd/nand/fsl_upm.c +@@ -326,7 +326,7 @@ static int fun_remove(struct platform_device *ofdev) + struct mtd_info *mtd = nand_to_mtd(&fun->chip); + int i; + +- nand_release(mtd); ++ nand_release(&fun->chip); + kfree(mtd->name); + + for (i = 0; i < fun->mchip_count; i++) { +diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c +index d4f454a4b35e..92737deb7845 100644 +--- a/drivers/mtd/nand/fsmc_nand.c ++++ b/drivers/mtd/nand/fsmc_nand.c +@@ -1038,7 +1038,7 @@ static int fsmc_nand_remove(struct platform_device *pdev) + struct fsmc_nand_data *host = platform_get_drvdata(pdev); + + if (host) { +- nand_release(nand_to_mtd(&host->nand)); ++ nand_release(&host->nand); + + if (host->mode == USE_DMA_ACCESS) { + dma_release_channel(host->write_dma_chan); +diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c +index 6317f6836022..c7461ca1c1a6 100644 +--- a/drivers/mtd/nand/gpio.c ++++ b/drivers/mtd/nand/gpio.c +@@ -197,7 +197,7 @@ static int gpio_nand_remove(struct platform_device *pdev) + { + struct gpiomtd *gpiomtd = platform_get_drvdata(pdev); + +- nand_release(nand_to_mtd(&gpiomtd->nand_chip)); ++ nand_release(&gpiomtd->nand_chip); + + if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) + gpio_set_value(gpiomtd->plat.gpio_nwp, 0); +diff --git 
a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +index d9dab4275859..c43bd945d93a 100644 +--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c ++++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +@@ -1930,7 +1930,7 @@ static int gpmi_set_geometry(struct gpmi_nand_data *this) + + static void gpmi_nand_exit(struct gpmi_nand_data *this) + { +- nand_release(nand_to_mtd(&this->nand)); ++ nand_release(&this->nand); + gpmi_free_dma_buffer(this); + } + +@@ -2020,7 +2020,7 @@ static int gpmi_nand_init(struct gpmi_nand_data *this) + this->bch_geometry.auxiliary_size = 128; + ret = gpmi_alloc_dma_buffer(this); + if (ret) +- goto err_out; ++ return ret; + + ret = nand_scan_ident(mtd, GPMI_IS_MX6(this) ? 2 : 1, NULL); + if (ret) +diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c +index 9432546f4cd4..6c96d9d29a31 100644 +--- a/drivers/mtd/nand/hisi504_nand.c ++++ b/drivers/mtd/nand/hisi504_nand.c +@@ -823,7 +823,7 @@ static int hisi_nfc_probe(struct platform_device *pdev) + return 0; + + err_mtd: +- nand_release(mtd); ++ nand_release(chip); + err_res: + return ret; + } +@@ -831,9 +831,8 @@ static int hisi_nfc_probe(struct platform_device *pdev) + static int hisi_nfc_remove(struct platform_device *pdev) + { + struct hinfc_host *host = platform_get_drvdata(pdev); +- struct mtd_info *mtd = nand_to_mtd(&host->chip); + +- nand_release(mtd); ++ nand_release(&host->chip); + + return 0; + } +diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c +index 5551c36adbdf..6f323858d51a 100644 +--- a/drivers/mtd/nand/jz4740_nand.c ++++ b/drivers/mtd/nand/jz4740_nand.c +@@ -499,7 +499,7 @@ static int jz_nand_probe(struct platform_device *pdev) + return 0; + + err_nand_release: +- nand_release(mtd); ++ nand_release(chip); + err_unclaim_banks: + while (chipnr--) { + unsigned char bank = nand->banks[chipnr]; +@@ -520,7 +520,7 @@ static int jz_nand_remove(struct platform_device *pdev) + struct jz_nand *nand = platform_get_drvdata(pdev); + size_t i; + +- nand_release(nand_to_mtd(&nand->chip)); ++ nand_release(&nand->chip); + + /* Deassert and disable all chips */ + writel(0, nand->base + JZ_REG_NAND_CTRL); +diff --git a/drivers/mtd/nand/jz4780_nand.c b/drivers/mtd/nand/jz4780_nand.c +index a39bb70175ee..e8aaf2543946 100644 +--- a/drivers/mtd/nand/jz4780_nand.c ++++ b/drivers/mtd/nand/jz4780_nand.c +@@ -293,7 +293,7 @@ static int jz4780_nand_init_chip(struct platform_device *pdev, + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { +- nand_release(mtd); ++ nand_release(chip); + return ret; + } + +@@ -308,7 +308,7 @@ static void jz4780_nand_cleanup_chips(struct jz4780_nand_controller *nfc) + + while (!list_empty(&nfc->chips)) { + chip = list_first_entry(&nfc->chips, struct jz4780_nand_chip, chip_list); +- nand_release(nand_to_mtd(&chip->chip)); ++ nand_release(&chip->chip); + list_del(&chip->chip_list); + } + } +diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c +index bc6e49af063a..839f8f4ace9e 100644 +--- a/drivers/mtd/nand/lpc32xx_mlc.c ++++ b/drivers/mtd/nand/lpc32xx_mlc.c +@@ -805,7 +805,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) + if (!res) + return res; + +- nand_release(mtd); ++ nand_release(nand_chip); + + err_exit4: + free_irq(host->irq, host); +@@ -828,9 +828,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) + static int lpc32xx_nand_remove(struct platform_device *pdev) + { + struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); +- struct mtd_info *mtd = 
nand_to_mtd(&host->nand_chip); + +- nand_release(mtd); ++ nand_release(&host->nand_chip); + free_irq(host->irq, host); + if (use_dma) + dma_release_channel(host->dma_chan); +diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c +index 8d3edc34958e..0b5fa254ea60 100644 +--- a/drivers/mtd/nand/lpc32xx_slc.c ++++ b/drivers/mtd/nand/lpc32xx_slc.c +@@ -940,7 +940,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) + if (!res) + return res; + +- nand_release(mtd); ++ nand_release(chip); + + err_exit3: + dma_release_channel(host->dma_chan); +@@ -959,9 +959,8 @@ static int lpc32xx_nand_remove(struct platform_device *pdev) + { + uint32_t tmp; + struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); +- struct mtd_info *mtd = nand_to_mtd(&host->nand_chip); + +- nand_release(mtd); ++ nand_release(&host->nand_chip); + dma_release_channel(host->dma_chan); + + /* Force CE high */ +diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c +index 7eacb2f545f5..9662f8fe4713 100644 +--- a/drivers/mtd/nand/mpc5121_nfc.c ++++ b/drivers/mtd/nand/mpc5121_nfc.c +@@ -827,7 +827,7 @@ static int mpc5121_nfc_remove(struct platform_device *op) + struct device *dev = &op->dev; + struct mtd_info *mtd = dev_get_drvdata(dev); + +- nand_release(mtd); ++ nand_release(mtd_to_nand(mtd)); + mpc5121_nfc_free(dev, mtd); + + return 0; +diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c +index ca95ae00215e..2375dce766ef 100644 +--- a/drivers/mtd/nand/mtk_nand.c ++++ b/drivers/mtd/nand/mtk_nand.c +@@ -1327,7 +1327,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, + ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0); + if (ret) { + dev_err(dev, "mtd parse partition error\n"); +- nand_release(mtd); ++ nand_cleanup(nand); + return ret; + } + +@@ -1450,7 +1450,7 @@ static int mtk_nfc_remove(struct platform_device *pdev) + while (!list_empty(&nfc->chips)) { + chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip, + node); +- nand_release(nand_to_mtd(&chip->nand)); ++ nand_release(&chip->nand); + list_del(&chip->node); + } + +diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c +index 5c44eb57885b..deb3cbadbc51 100644 +--- a/drivers/mtd/nand/mxc_nand.c ++++ b/drivers/mtd/nand/mxc_nand.c +@@ -1838,7 +1838,7 @@ static int mxcnd_remove(struct platform_device *pdev) + { + struct mxc_nand_host *host = platform_get_drvdata(pdev); + +- nand_release(nand_to_mtd(&host->nand)); ++ nand_release(&host->nand); + if (host->clk_act) + clk_disable_unprepare(host->clk); + +diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c +index 5fb45161789c..bdf40c090acd 100644 +--- a/drivers/mtd/nand/nand_base.c ++++ b/drivers/mtd/nand/nand_base.c +@@ -4941,12 +4941,12 @@ EXPORT_SYMBOL_GPL(nand_cleanup); + /** + * nand_release - [NAND Interface] Unregister the MTD device and free resources + * held by the NAND device +- * @mtd: MTD device structure ++ * @chip: NAND chip object + */ +-void nand_release(struct mtd_info *mtd) ++void nand_release(struct nand_chip *chip) + { +- mtd_device_unregister(mtd); +- nand_cleanup(mtd_to_nand(mtd)); ++ mtd_device_unregister(nand_to_mtd(chip)); ++ nand_cleanup(chip); + } + EXPORT_SYMBOL_GPL(nand_release); + +diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c +index 1eb934414eb5..fe593f2f1ec7 100644 +--- a/drivers/mtd/nand/nandsim.c ++++ b/drivers/mtd/nand/nandsim.c +@@ -2394,7 +2394,7 @@ static int __init ns_init_module(void) + + err_exit: + 
free_nandsim(nand); +- nand_release(nsmtd); ++ nand_release(chip); + for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i) + kfree(nand->partitions[i].name); + error: +@@ -2417,7 +2417,7 @@ static void __exit ns_cleanup_module(void) + + nandsim_debugfs_remove(ns); + free_nandsim(ns); /* Free nandsim private resources */ +- nand_release(nsmtd); /* Unregister driver */ ++ nand_release(chip); /* Unregister driver */ + for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i) + kfree(ns->partitions[i].name); + kfree(mtd_to_nand(nsmtd)); /* Free other structures */ +diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c +index 28e6118362f7..d03b47d2664b 100644 +--- a/drivers/mtd/nand/ndfc.c ++++ b/drivers/mtd/nand/ndfc.c +@@ -258,7 +258,7 @@ static int ndfc_remove(struct platform_device *ofdev) + struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev); + struct mtd_info *mtd = nand_to_mtd(&ndfc->chip); + +- nand_release(mtd); ++ nand_release(&ndfc->chip); + kfree(mtd->name); + + return 0; +diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c +index 8f64011d32ef..f7f54b46f246 100644 +--- a/drivers/mtd/nand/nuc900_nand.c ++++ b/drivers/mtd/nand/nuc900_nand.c +@@ -284,7 +284,7 @@ static int nuc900_nand_remove(struct platform_device *pdev) + { + struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev); + +- nand_release(nand_to_mtd(&nuc900_nand->chip)); ++ nand_release(&nuc900_nand->chip); + clk_disable(nuc900_nand->clk); + + return 0; +diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c +index f3a516b3f108..62c0ca437c91 100644 +--- a/drivers/mtd/nand/omap2.c ++++ b/drivers/mtd/nand/omap2.c +@@ -2307,7 +2307,7 @@ static int omap_nand_remove(struct platform_device *pdev) + } + if (info->dma) + dma_release_channel(info->dma); +- nand_release(mtd); ++ nand_release(nand_chip); + return 0; + } + +diff --git a/drivers/mtd/nand/omap_elm.c b/drivers/mtd/nand/omap_elm.c +index a3f32f939cc1..6736777a4156 100644 +--- a/drivers/mtd/nand/omap_elm.c ++++ b/drivers/mtd/nand/omap_elm.c +@@ -421,6 +421,7 @@ static int elm_probe(struct platform_device *pdev) + pm_runtime_enable(&pdev->dev); + if (pm_runtime_get_sync(&pdev->dev) < 0) { + ret = -EINVAL; ++ pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + dev_err(&pdev->dev, "can't enable clock\n"); + return ret; +diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c +index af2f09135fb0..0acfc0a7d8e0 100644 +--- a/drivers/mtd/nand/orion_nand.c ++++ b/drivers/mtd/nand/orion_nand.c +@@ -167,7 +167,7 @@ static int __init orion_nand_probe(struct platform_device *pdev) + mtd->name = "orion_nand"; + ret = mtd_device_register(mtd, board->parts, board->nr_parts); + if (ret) { +- nand_release(mtd); ++ nand_cleanup(nc); + goto no_dev; + } + +@@ -184,9 +184,8 @@ static int orion_nand_remove(struct platform_device *pdev) + { + struct orion_nand_info *info = platform_get_drvdata(pdev); + struct nand_chip *chip = &info->chip; +- struct mtd_info *mtd = nand_to_mtd(chip); + +- nand_release(mtd); ++ nand_release(chip); + + if (!IS_ERR(info->clk)) + clk_disable_unprepare(info->clk); +diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c +index 5de7591b0510..3300e43e2cb9 100644 +--- a/drivers/mtd/nand/pasemi_nand.c ++++ b/drivers/mtd/nand/pasemi_nand.c +@@ -164,7 +164,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev) + if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) { + dev_err(dev, "Unable to register MTD device\n"); + err = -ENODEV; +- goto out_lpc; ++ goto 
out_cleanup_nand; + } + + dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res, +@@ -172,6 +172,8 @@ static int pasemi_nand_probe(struct platform_device *ofdev) + + return 0; + ++ out_cleanup_nand: ++ nand_cleanup(chip); + out_lpc: + release_region(lpcctl, 4); + out_ior: +@@ -192,7 +194,7 @@ static int pasemi_nand_remove(struct platform_device *ofdev) + chip = mtd_to_nand(pasemi_nand_mtd); + + /* Release resources, unregister device */ +- nand_release(pasemi_nand_mtd); ++ nand_release(chip); + + release_region(lpcctl, 4); + +diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c +index 415a53a0deeb..ae2b3c0804ce 100644 +--- a/drivers/mtd/nand/plat_nand.c ++++ b/drivers/mtd/nand/plat_nand.c +@@ -100,7 +100,7 @@ static int plat_nand_probe(struct platform_device *pdev) + if (!err) + return err; + +- nand_release(mtd); ++ nand_cleanup(&data->chip); + out: + if (pdata->ctrl.remove) + pdata->ctrl.remove(pdev); +@@ -115,7 +115,7 @@ static int plat_nand_remove(struct platform_device *pdev) + struct plat_nand_data *data = platform_get_drvdata(pdev); + struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev); + +- nand_release(nand_to_mtd(&data->chip)); ++ nand_release(&data->chip); + if (pdata->ctrl.remove) + pdata->ctrl.remove(pdev); + +diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c +index 3b8911cd3a19..46f13f7e54da 100644 +--- a/drivers/mtd/nand/pxa3xx_nand.c ++++ b/drivers/mtd/nand/pxa3xx_nand.c +@@ -1907,7 +1907,7 @@ static int pxa3xx_nand_remove(struct platform_device *pdev) + clk_disable_unprepare(info->clk); + + for (cs = 0; cs < pdata->num_cs; cs++) +- nand_release(nand_to_mtd(&info->host[cs]->chip)); ++ nand_release(&info->host[cs]->chip); + return 0; + } + +diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c +index 9f6c9a34b9eb..1594770987fd 100644 +--- a/drivers/mtd/nand/qcom_nandc.c ++++ b/drivers/mtd/nand/qcom_nandc.c +@@ -2163,7 +2163,7 @@ static int qcom_nandc_probe(struct platform_device *pdev) + + err_cs_init: + list_for_each_entry(host, &nandc->host_list, node) +- nand_release(nand_to_mtd(&host->chip)); ++ nand_release(&host->chip); + err_setup: + clk_disable_unprepare(nandc->aon_clk); + err_aon_clk: +@@ -2180,7 +2180,7 @@ static int qcom_nandc_remove(struct platform_device *pdev) + struct qcom_nand_host *host; + + list_for_each_entry(host, &nandc->host_list, node) +- nand_release(nand_to_mtd(&host->chip)); ++ nand_release(&host->chip); + + qcom_nandc_unalloc(nandc); + +diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c +index fc9287af4614..2cfa54941395 100644 +--- a/drivers/mtd/nand/r852.c ++++ b/drivers/mtd/nand/r852.c +@@ -656,7 +656,7 @@ static int r852_register_nand_device(struct r852_device *dev) + dev->card_registred = 1; + return 0; + error3: +- nand_release(mtd); ++ nand_release(dev->chip); + error1: + /* Force card redetect */ + dev->card_detected = 0; +@@ -675,7 +675,7 @@ static void r852_unregister_nand_device(struct r852_device *dev) + return; + + device_remove_file(&mtd->dev, &dev_attr_media_type); +- nand_release(mtd); ++ nand_release(dev->chip); + r852_engine_disable(dev); + dev->card_registred = 0; + } +diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c +index d459c19d78de..be9c145b743c 100644 +--- a/drivers/mtd/nand/s3c2410.c ++++ b/drivers/mtd/nand/s3c2410.c +@@ -768,7 +768,7 @@ static int s3c24xx_nand_remove(struct platform_device *pdev) + + for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) { + pr_debug("releasing mtd %d (%p)\n", 
mtdno, ptr); +- nand_release(nand_to_mtd(&ptr->chip)); ++ nand_release(&ptr->chip); + } + } + +diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c +index d6c013f93b8c..31f98acdba07 100644 +--- a/drivers/mtd/nand/sh_flctl.c ++++ b/drivers/mtd/nand/sh_flctl.c +@@ -1229,7 +1229,7 @@ static int flctl_remove(struct platform_device *pdev) + struct sh_flctl *flctl = platform_get_drvdata(pdev); + + flctl_release_dma(flctl); +- nand_release(nand_to_mtd(&flctl->chip)); ++ nand_release(&flctl->chip); + pm_runtime_disable(&pdev->dev); + + return 0; +diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c +index 064ca1757589..661b4928e0fc 100644 +--- a/drivers/mtd/nand/sharpsl.c ++++ b/drivers/mtd/nand/sharpsl.c +@@ -192,7 +192,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev) + return 0; + + err_add: +- nand_release(mtd); ++ nand_cleanup(this); + + err_scan: + iounmap(sharpsl->io); +@@ -210,7 +210,7 @@ static int sharpsl_nand_remove(struct platform_device *pdev) + struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev); + + /* Release resources, unregister device */ +- nand_release(nand_to_mtd(&sharpsl->chip)); ++ nand_release(&sharpsl->chip); + + iounmap(sharpsl->io); + +diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c +index 888fd314c62a..957d9597c484 100644 +--- a/drivers/mtd/nand/socrates_nand.c ++++ b/drivers/mtd/nand/socrates_nand.c +@@ -203,7 +203,7 @@ static int socrates_nand_probe(struct platform_device *ofdev) + if (!res) + return res; + +- nand_release(mtd); ++ nand_cleanup(nand_chip); + + out: + iounmap(host->io_base); +@@ -216,9 +216,8 @@ static int socrates_nand_probe(struct platform_device *ofdev) + static int socrates_nand_remove(struct platform_device *ofdev) + { + struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev); +- struct mtd_info *mtd = nand_to_mtd(&host->nand_chip); + +- nand_release(mtd); ++ nand_release(&host->nand_chip); + + iounmap(host->io_base); + +diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c +index 886355bfa761..2cf19372efb9 100644 +--- a/drivers/mtd/nand/sunxi_nand.c ++++ b/drivers/mtd/nand/sunxi_nand.c +@@ -2108,7 +2108,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + dev_err(dev, "failed to register mtd device: %d\n", ret); +- nand_release(mtd); ++ nand_cleanup(nand); + return ret; + } + +@@ -2147,7 +2147,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc) + while (!list_empty(&nfc->chips)) { + chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip, + node); +- nand_release(nand_to_mtd(&chip->nand)); ++ nand_release(&chip->nand); + sunxi_nand_ecc_cleanup(&chip->nand.ecc); + list_del(&chip->node); + } +diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c +index 08b30549ec0a..d07c729f5b9b 100644 +--- a/drivers/mtd/nand/tmio_nand.c ++++ b/drivers/mtd/nand/tmio_nand.c +@@ -446,7 +446,7 @@ static int tmio_probe(struct platform_device *dev) + if (!retval) + return retval; + +- nand_release(mtd); ++ nand_cleanup(nand_chip); + + err_irq: + tmio_hw_stop(dev, tmio); +@@ -457,7 +457,7 @@ static int tmio_remove(struct platform_device *dev) + { + struct tmio_nand *tmio = platform_get_drvdata(dev); + +- nand_release(nand_to_mtd(&tmio->chip)); ++ nand_release(&tmio->chip); + tmio_hw_stop(dev, tmio); + return 0; + } +diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c +index 0a14fda2e41b..f2ba55b0a1e9 100644 +--- 
a/drivers/mtd/nand/txx9ndfmc.c ++++ b/drivers/mtd/nand/txx9ndfmc.c +@@ -390,7 +390,7 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev) + chip = mtd_to_nand(mtd); + txx9_priv = nand_get_controller_data(chip); + +- nand_release(mtd); ++ nand_release(chip); + kfree(txx9_priv->mtdname); + kfree(txx9_priv); + } +diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c +index ddc629e3f63a..ec004e0a94a3 100644 +--- a/drivers/mtd/nand/vf610_nfc.c ++++ b/drivers/mtd/nand/vf610_nfc.c +@@ -795,7 +795,7 @@ static int vf610_nfc_remove(struct platform_device *pdev) + struct mtd_info *mtd = platform_get_drvdata(pdev); + struct vf610_nfc *nfc = mtd_to_nfc(mtd); + +- nand_release(mtd); ++ nand_release(mtd_to_nand(mtd)); + clk_disable_unprepare(nfc->clk); + return 0; + } +diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c +index 895101a5e686..3d51b8fc5aaf 100644 +--- a/drivers/mtd/nand/xway_nand.c ++++ b/drivers/mtd/nand/xway_nand.c +@@ -211,7 +211,7 @@ static int xway_nand_probe(struct platform_device *pdev) + + err = mtd_device_register(mtd, NULL, 0); + if (err) +- nand_release(mtd); ++ nand_cleanup(&data->chip); + + return err; + } +@@ -223,7 +223,7 @@ static int xway_nand_remove(struct platform_device *pdev) + { + struct xway_nand_data *data = platform_get_drvdata(pdev); + +- nand_release(nand_to_mtd(&data->chip)); ++ nand_release(&data->chip); + + return 0; + } +diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c +index d489fbd07c12..92de2b408734 100644 +--- a/drivers/mtd/spi-nor/cadence-quadspi.c ++++ b/drivers/mtd/spi-nor/cadence-quadspi.c +@@ -461,7 +461,7 @@ static int cqspi_indirect_read_setup(struct spi_nor *nor, + /* Setup dummy clock cycles */ + dummy_clk = nor->read_dummy; + if (dummy_clk > CQSPI_DUMMY_CLKS_MAX) +- dummy_clk = CQSPI_DUMMY_CLKS_MAX; ++ return -EOPNOTSUPP; + + if (dummy_clk / 8) { + reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB); +diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c +index 20378b0d55e9..68be79ba571d 100644 +--- a/drivers/mtd/spi-nor/hisi-sfc.c ++++ b/drivers/mtd/spi-nor/hisi-sfc.c +@@ -393,8 +393,10 @@ static int hisi_spi_nor_register_all(struct hifmc_host *host) + + for_each_available_child_of_node(dev->of_node, np) { + ret = hisi_spi_nor_register(np, host); +- if (ret) ++ if (ret) { ++ of_node_put(np); + goto fail; ++ } + + if (host->num_chip == HIFMC_MAX_CHIP_NUM) { + dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n"); +diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c +index 5c0332e31138..52c108378831 100644 +--- a/drivers/mtd/ubi/wl.c ++++ b/drivers/mtd/ubi/wl.c +@@ -1732,6 +1732,19 @@ int ubi_thread(void *u) + !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) { + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock(&ubi->wl_lock); ++ ++ /* ++ * Check kthread_should_stop() after we set the task ++ * state to guarantee that we either see the stop bit ++ * and exit or the task state is reset to runnable such ++ * that it's not scheduled out indefinitely and detects ++ * the stop bit at kthread_should_stop(). 
++ */ ++ if (kthread_should_stop()) { ++ set_current_state(TASK_RUNNING); ++ break; ++ } ++ + schedule(); + continue; + } +diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c +index 1b2e9217ec78..d520ce32ddbf 100644 +--- a/drivers/net/appletalk/cops.c ++++ b/drivers/net/appletalk/cops.c +@@ -324,6 +324,8 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr) + break; + } + ++ dev->base_addr = ioaddr; ++ + /* Reserve any actual interrupt. */ + if (dev->irq) { + retval = request_irq(dev->irq, cops_interrupt, 0, dev->name, dev); +@@ -331,8 +333,6 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr) + goto err_out; + } + +- dev->base_addr = ioaddr; +- + lp = netdev_priv(dev); + spin_lock_init(&lp->lock); + +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index d52fd842ef1f..2b721ed392ad 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -1129,6 +1129,7 @@ static void bond_setup_by_slave(struct net_device *bond_dev, + + bond_dev->type = slave_dev->type; + bond_dev->hard_header_len = slave_dev->hard_header_len; ++ bond_dev->needed_headroom = slave_dev->needed_headroom; + bond_dev->addr_len = slave_dev->addr_len; + + memcpy(bond_dev->broadcast, slave_dev->broadcast, +@@ -1237,7 +1238,39 @@ static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave) + rtmsg_ifinfo(RTM_NEWLINK, slave->dev, IFF_SLAVE, GFP_KERNEL); + } + +-static struct slave *bond_alloc_slave(struct bonding *bond) ++static void slave_kobj_release(struct kobject *kobj) ++{ ++ struct slave *slave = to_slave(kobj); ++ struct bonding *bond = bond_get_bond_by_slave(slave); ++ ++ cancel_delayed_work_sync(&slave->notify_work); ++ if (BOND_MODE(bond) == BOND_MODE_8023AD) ++ kfree(SLAVE_AD_INFO(slave)); ++ ++ kfree(slave); ++} ++ ++static struct kobj_type slave_ktype = { ++ .release = slave_kobj_release, ++#ifdef CONFIG_SYSFS ++ .sysfs_ops = &slave_sysfs_ops, ++#endif ++}; ++ ++static int bond_kobj_init(struct slave *slave) ++{ ++ int err; ++ ++ err = kobject_init_and_add(&slave->kobj, &slave_ktype, ++ &(slave->dev->dev.kobj), "bonding_slave"); ++ if (err) ++ kobject_put(&slave->kobj); ++ ++ return err; ++} ++ ++static struct slave *bond_alloc_slave(struct bonding *bond, ++ struct net_device *slave_dev) + { + struct slave *slave = NULL; + +@@ -1245,30 +1278,25 @@ static struct slave *bond_alloc_slave(struct bonding *bond) + if (!slave) + return NULL; + ++ slave->bond = bond; ++ slave->dev = slave_dev; ++ INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work); ++ ++ if (bond_kobj_init(slave)) ++ return NULL; ++ + if (BOND_MODE(bond) == BOND_MODE_8023AD) { + SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info), + GFP_KERNEL); + if (!SLAVE_AD_INFO(slave)) { +- kfree(slave); ++ kobject_put(&slave->kobj); + return NULL; + } + } +- INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work); + + return slave; + } + +-static void bond_free_slave(struct slave *slave) +-{ +- struct bonding *bond = bond_get_bond_by_slave(slave); +- +- cancel_delayed_work_sync(&slave->notify_work); +- if (BOND_MODE(bond) == BOND_MODE_8023AD) +- kfree(SLAVE_AD_INFO(slave)); +- +- kfree(slave); +-} +- + static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info) + { + info->bond_mode = BOND_MODE(bond); +@@ -1448,14 +1476,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) + bond->dev->addr_assign_type == NET_ADDR_RANDOM) + bond_set_dev_addr(bond->dev, slave_dev); + +- 
new_slave = bond_alloc_slave(bond); ++ new_slave = bond_alloc_slave(bond, slave_dev); + if (!new_slave) { + res = -ENOMEM; + goto err_undo_flags; + } + +- new_slave->bond = bond; +- new_slave->dev = slave_dev; + /* Set the new_slave's queue_id to be zero. Queue ID mapping + * is set via sysfs or module option if desired. + */ +@@ -1780,7 +1806,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) + dev_set_mtu(slave_dev, new_slave->original_mtu); + + err_free: +- bond_free_slave(new_slave); ++ kobject_put(&new_slave->kobj); + + err_undo_flags: + /* Enslave of first slave has failed and we need to fix master's mac */ +@@ -1964,7 +1990,7 @@ static int __bond_release_one(struct net_device *bond_dev, + if (!netif_is_bond_master(slave_dev)) + slave_dev->priv_flags &= ~IFF_BONDING; + +- bond_free_slave(slave); ++ kobject_put(&slave->kobj); + + return 0; + } +@@ -1985,7 +2011,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev, + int ret; + + ret = bond_release(bond_dev, slave_dev); +- if (ret == 0 && !bond_has_slaves(bond)) { ++ if (ret == 0 && !bond_has_slaves(bond) && ++ bond_dev->reg_state != NETREG_UNREGISTERING) { + bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; + netdev_info(bond_dev, "Destroying bond %s\n", + bond_dev->name); +@@ -4131,13 +4158,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) + return ret; + } + ++static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed) ++{ ++ if (speed == 0 || speed == SPEED_UNKNOWN) ++ speed = slave->speed; ++ else ++ speed = min(speed, slave->speed); ++ ++ return speed; ++} ++ + static int bond_ethtool_get_settings(struct net_device *bond_dev, + struct ethtool_cmd *ecmd) + { + struct bonding *bond = netdev_priv(bond_dev); +- unsigned long speed = 0; + struct list_head *iter; + struct slave *slave; ++ u32 speed = 0; + + ecmd->duplex = DUPLEX_UNKNOWN; + ecmd->port = PORT_OTHER; +@@ -4149,8 +4186,13 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev, + */ + bond_for_each_slave(bond, slave, iter) { + if (bond_slave_can_tx(slave)) { +- if (slave->speed != SPEED_UNKNOWN) +- speed += slave->speed; ++ if (slave->speed != SPEED_UNKNOWN) { ++ if (BOND_MODE(bond) == BOND_MODE_BROADCAST) ++ speed = bond_mode_bcast_speed(slave, ++ speed); ++ else ++ speed += slave->speed; ++ } + if (ecmd->duplex == DUPLEX_UNKNOWN && + slave->duplex != DUPLEX_UNKNOWN) + ecmd->duplex = slave->duplex; +diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c +index 3f756fa2f603..68bbac4715c3 100644 +--- a/drivers/net/bonding/bond_sysfs_slave.c ++++ b/drivers/net/bonding/bond_sysfs_slave.c +@@ -125,7 +125,6 @@ static const struct slave_attribute *slave_attrs[] = { + }; + + #define to_slave_attr(_at) container_of(_at, struct slave_attribute, attr) +-#define to_slave(obj) container_of(obj, struct slave, kobj) + + static ssize_t slave_show(struct kobject *kobj, + struct attribute *attr, char *buf) +@@ -136,28 +135,15 @@ static ssize_t slave_show(struct kobject *kobj, + return slave_attr->show(slave, buf); + } + +-static const struct sysfs_ops slave_sysfs_ops = { ++const struct sysfs_ops slave_sysfs_ops = { + .show = slave_show, + }; + +-static struct kobj_type slave_ktype = { +-#ifdef CONFIG_SYSFS +- .sysfs_ops = &slave_sysfs_ops, +-#endif +-}; +- + int bond_sysfs_slave_add(struct slave *slave) + { + const struct slave_attribute **a; + int err; + +- err = kobject_init_and_add(&slave->kobj, &slave_ktype, +- &(slave->dev->dev.kobj), "bonding_slave"); +- 
if (err) { +- kobject_put(&slave->kobj); +- return err; +- } +- + for (a = slave_attrs; *a; ++a) { + err = sysfs_create_file(&slave->kobj, &((*a)->attr)); + if (err) { +@@ -175,6 +161,4 @@ void bond_sysfs_slave_del(struct slave *slave) + + for (a = slave_attrs; *a; ++a) + sysfs_remove_file(&slave->kobj, &((*a)->attr)); +- +- kobject_put(&slave->kobj); + } +diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c +index c2dea4916e5d..1243c2e5a86a 100644 +--- a/drivers/net/caif/caif_serial.c ++++ b/drivers/net/caif/caif_serial.c +@@ -281,7 +281,6 @@ static int caif_xmit(struct sk_buff *skb, struct net_device *dev) + { + struct ser_device *ser; + +- BUG_ON(dev == NULL); + ser = netdev_priv(dev); + + /* Send flow off once, on high water mark */ +@@ -363,6 +362,7 @@ static int ldisc_open(struct tty_struct *tty) + rtnl_lock(); + result = register_netdevice(dev); + if (result) { ++ tty_kref_put(tty); + rtnl_unlock(); + free_netdev(dev); + return -ENODEV; +diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c +index 4ead5a18b794..c41ab2cb272e 100644 +--- a/drivers/net/can/c_can/c_can.c ++++ b/drivers/net/can/c_can/c_can.c +@@ -212,18 +212,6 @@ static const struct can_bittiming_const c_can_bittiming_const = { + .brp_inc = 1, + }; + +-static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv) +-{ +- if (priv->device) +- pm_runtime_enable(priv->device); +-} +- +-static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv) +-{ +- if (priv->device) +- pm_runtime_disable(priv->device); +-} +- + static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv) + { + if (priv->device) +@@ -1318,7 +1306,6 @@ static const struct net_device_ops c_can_netdev_ops = { + + int register_c_can_dev(struct net_device *dev) + { +- struct c_can_priv *priv = netdev_priv(dev); + int err; + + /* Deactivate pins to prevent DRA7 DCAN IP from being +@@ -1328,28 +1315,19 @@ int register_c_can_dev(struct net_device *dev) + */ + pinctrl_pm_select_sleep_state(dev->dev.parent); + +- c_can_pm_runtime_enable(priv); +- + dev->flags |= IFF_ECHO; /* we support local echo */ + dev->netdev_ops = &c_can_netdev_ops; + + err = register_candev(dev); +- if (err) +- c_can_pm_runtime_disable(priv); +- else ++ if (!err) + devm_can_led_init(dev); +- + return err; + } + EXPORT_SYMBOL_GPL(register_c_can_dev); + + void unregister_c_can_dev(struct net_device *dev) + { +- struct c_can_priv *priv = netdev_priv(dev); +- + unregister_candev(dev); +- +- c_can_pm_runtime_disable(priv); + } + EXPORT_SYMBOL_GPL(unregister_c_can_dev); + +diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c +index d065c0e2d18e..f3e0b2124a37 100644 +--- a/drivers/net/can/c_can/c_can_pci.c ++++ b/drivers/net/can/c_can/c_can_pci.c +@@ -239,12 +239,13 @@ static void c_can_pci_remove(struct pci_dev *pdev) + { + struct net_device *dev = pci_get_drvdata(pdev); + struct c_can_priv *priv = netdev_priv(dev); ++ void __iomem *addr = priv->base; + + unregister_c_can_dev(dev); + + free_c_can_dev(dev); + +- pci_iounmap(pdev, priv->base); ++ pci_iounmap(pdev, addr); + pci_disable_msi(pdev); + pci_clear_master(pdev); + pci_release_regions(pdev); +diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c +index 717530eac70c..c6a03f565e3f 100644 +--- a/drivers/net/can/c_can/c_can_platform.c ++++ b/drivers/net/can/c_can/c_can_platform.c +@@ -29,6 +29,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -385,6 +386,7 @@ 
static int c_can_plat_probe(struct platform_device *pdev) + platform_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + ++ pm_runtime_enable(priv->device); + ret = register_c_can_dev(dev); + if (ret) { + dev_err(&pdev->dev, "registering %s failed (err=%d)\n", +@@ -397,6 +399,7 @@ static int c_can_plat_probe(struct platform_device *pdev) + return 0; + + exit_free_device: ++ pm_runtime_disable(priv->device); + free_c_can_dev(dev); + exit: + dev_err(&pdev->dev, "probe failed\n"); +@@ -407,9 +410,10 @@ static int c_can_plat_probe(struct platform_device *pdev) + static int c_can_plat_remove(struct platform_device *pdev) + { + struct net_device *dev = platform_get_drvdata(pdev); ++ struct c_can_priv *priv = netdev_priv(dev); + + unregister_c_can_dev(dev); +- ++ pm_runtime_disable(priv->device); + free_c_can_dev(dev); + + return 0; +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c +index ffc5467a1ec2..3d7bffd529fe 100644 +--- a/drivers/net/can/dev.c ++++ b/drivers/net/can/dev.c +@@ -469,9 +469,13 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 + */ + struct sk_buff *skb = priv->echo_skb[idx]; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; +- u8 len = cf->len; + +- *len_ptr = len; ++ /* get the real payload length for netdev statistics */ ++ if (cf->can_id & CAN_RTR_FLAG) ++ *len_ptr = 0; ++ else ++ *len_ptr = cf->len; ++ + priv->echo_skb[idx] = NULL; + + return skb; +@@ -496,7 +500,11 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) + if (!skb) + return 0; + +- netif_rx(skb); ++ skb_get(skb); ++ if (netif_rx(skb) == NET_RX_SUCCESS) ++ dev_consume_skb_any(skb); ++ else ++ dev_kfree_skb_any(skb); + + return len; + } +@@ -547,11 +555,11 @@ static void can_restart(struct net_device *dev) + } + cf->can_id |= CAN_ERR_RESTARTED; + +- netif_rx(skb); +- + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + ++ netif_rx_ni(skb); ++ + restart: + netdev_dbg(dev, "restarted\n"); + priv->can_stats.restarts++; +@@ -1009,7 +1017,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) + { + struct can_priv *priv = netdev_priv(dev); + struct can_ctrlmode cm = {.flags = priv->ctrlmode}; +- struct can_berr_counter bec; ++ struct can_berr_counter bec = { }; + enum can_state state = priv->state; + + if (priv->do_get_state) +@@ -1076,6 +1084,7 @@ static void can_dellink(struct net_device *dev, struct list_head *head) + + static struct rtnl_link_ops can_link_ops __read_mostly = { + .kind = "can", ++ .netns_refund = true, + .maxtype = IFLA_CAN_MAX, + .policy = can_policy, + .setup = can_setup, +diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c +index 6b866d0451b2..dcad5213eb34 100644 +--- a/drivers/net/can/flexcan.c ++++ b/drivers/net/can/flexcan.c +@@ -379,11 +379,17 @@ static int flexcan_chip_disable(struct flexcan_priv *priv) + static int flexcan_chip_freeze(struct flexcan_priv *priv) + { + struct flexcan_regs __iomem *regs = priv->regs; +- unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate; ++ unsigned int timeout; ++ u32 bitrate = priv->can.bittiming.bitrate; + u32 reg; + ++ if (bitrate) ++ timeout = 1000 * 1000 * 10 / bitrate; ++ else ++ timeout = FLEXCAN_TIMEOUT_US / 10; ++ + reg = flexcan_read(®s->mcr); +- reg |= FLEXCAN_MCR_HALT; ++ reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT; + flexcan_write(reg, ®s->mcr); + + while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) +@@ -1098,10 +1104,14 @@ static int register_flexcandev(struct net_device *dev) + 
if (err) + goto out_chip_disable; + +- /* set freeze, halt and activate FIFO, restrict register access */ ++ /* set freeze, halt */ ++ err = flexcan_chip_freeze(priv); ++ if (err) ++ goto out_chip_disable; ++ ++ /* activate FIFO, restrict register access */ + reg = flexcan_read(®s->mcr); +- reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | +- FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV; ++ reg |= FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV; + flexcan_write(reg, ®s->mcr); + + /* Currently we only support newer versions of this core +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c +index 195f15edb32e..197c27d8f584 100644 +--- a/drivers/net/can/m_can/m_can.c ++++ b/drivers/net/can/m_can/m_can.c +@@ -428,9 +428,6 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota) + } + + while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) { +- if (rxfs & RXFS_RFL) +- netdev_warn(dev, "Rx FIFO 0 Message Lost\n"); +- + m_can_read_fifo(dev, rxfs); + + quota--; +@@ -572,7 +569,7 @@ static int m_can_handle_state_change(struct net_device *dev, + unsigned int ecr; + + switch (new_state) { +- case CAN_STATE_ERROR_ACTIVE: ++ case CAN_STATE_ERROR_WARNING: + /* error warning state */ + priv->can.can_stats.error_warning++; + priv->can.state = CAN_STATE_ERROR_WARNING; +@@ -601,7 +598,7 @@ static int m_can_handle_state_change(struct net_device *dev, + __m_can_get_berr_counter(dev, &bec); + + switch (new_state) { +- case CAN_STATE_ERROR_ACTIVE: ++ case CAN_STATE_ERROR_WARNING: + /* error warning state */ + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (bec.txerr > bec.rxerr) ? +diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c +index 7621f91a8a20..fd48770ba792 100644 +--- a/drivers/net/can/softing/softing_main.c ++++ b/drivers/net/can/softing/softing_main.c +@@ -393,8 +393,13 @@ static int softing_netdev_open(struct net_device *ndev) + + /* check or determine and set bittime */ + ret = open_candev(ndev); +- if (!ret) +- ret = softing_startstop(ndev, 1); ++ if (ret) ++ return ret; ++ ++ ret = softing_startstop(ndev, 1); ++ if (ret < 0) ++ close_candev(ndev); ++ + return ret; + } + +diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c +index d0846ae9e0e4..d62d61d734ea 100644 +--- a/drivers/net/can/usb/ems_usb.c ++++ b/drivers/net/can/usb/ems_usb.c +@@ -267,6 +267,8 @@ struct ems_usb { + unsigned int free_slots; /* remember number of available slots */ + + struct ems_cpc_msg active_params; /* active controller parameters */ ++ void *rxbuf[MAX_RX_URBS]; ++ dma_addr_t rxbuf_dma[MAX_RX_URBS]; + }; + + static void ems_usb_read_interrupt_callback(struct urb *urb) +@@ -598,6 +600,7 @@ static int ems_usb_start(struct ems_usb *dev) + for (i = 0; i < MAX_RX_URBS; i++) { + struct urb *urb = NULL; + u8 *buf = NULL; ++ dma_addr_t buf_dma; + + /* create a URB, and a buffer for it */ + urb = usb_alloc_urb(0, GFP_KERNEL); +@@ -607,7 +610,7 @@ static int ems_usb_start(struct ems_usb *dev) + } + + buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL, +- &urb->transfer_dma); ++ &buf_dma); + if (!buf) { + netdev_err(netdev, "No memory left for USB buffer\n"); + usb_free_urb(urb); +@@ -615,6 +618,8 @@ static int ems_usb_start(struct ems_usb *dev) + break; + } + ++ urb->transfer_dma = buf_dma; ++ + usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2), + buf, RX_BUFFER_SIZE, + ems_usb_read_bulk_callback, dev); +@@ -630,6 +635,9 @@ static int ems_usb_start(struct ems_usb *dev) + break; + } + ++ dev->rxbuf[i] = buf; ++ dev->rxbuf_dma[i] = buf_dma; ++ + /* 
Drop reference, USB core will take care of freeing it */ + usb_free_urb(urb); + } +@@ -695,6 +703,10 @@ static void unlink_all_urbs(struct ems_usb *dev) + + usb_kill_anchored_urbs(&dev->rx_submitted); + ++ for (i = 0; i < MAX_RX_URBS; ++i) ++ usb_free_coherent(dev->udev, RX_BUFFER_SIZE, ++ dev->rxbuf[i], dev->rxbuf_dma[i]); ++ + usb_kill_anchored_urbs(&dev->tx_submitted); + atomic_set(&dev->active_tx_urbs, 0); + +@@ -1064,7 +1076,6 @@ static void ems_usb_disconnect(struct usb_interface *intf) + + if (dev) { + unregister_netdev(dev->netdev); +- free_candev(dev->netdev); + + unlink_all_urbs(dev); + +@@ -1072,6 +1083,8 @@ static void ems_usb_disconnect(struct usb_interface *intf) + + kfree(dev->intr_in_buffer); + kfree(dev->tx_msg_buffer); ++ ++ free_candev(dev->netdev); + } + } + +diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c +index c6dcf93675c0..fbe1173b2651 100644 +--- a/drivers/net/can/usb/esd_usb2.c ++++ b/drivers/net/can/usb/esd_usb2.c +@@ -207,6 +207,8 @@ struct esd_usb2 { + int net_count; + u32 version; + int rxinitdone; ++ void *rxbuf[MAX_RX_URBS]; ++ dma_addr_t rxbuf_dma[MAX_RX_URBS]; + }; + + struct esd_usb2_net_priv { +@@ -234,8 +236,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, + if (id == ESD_EV_CAN_ERROR_EXT) { + u8 state = msg->msg.rx.data[0]; + u8 ecc = msg->msg.rx.data[1]; +- u8 txerr = msg->msg.rx.data[2]; +- u8 rxerr = msg->msg.rx.data[3]; ++ u8 rxerr = msg->msg.rx.data[2]; ++ u8 txerr = msg->msg.rx.data[3]; + + skb = alloc_can_err_skb(priv->netdev, &cf); + if (skb == NULL) { +@@ -556,6 +558,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) + for (i = 0; i < MAX_RX_URBS; i++) { + struct urb *urb = NULL; + u8 *buf = NULL; ++ dma_addr_t buf_dma; + + /* create a URB, and a buffer for it */ + urb = usb_alloc_urb(0, GFP_KERNEL); +@@ -565,7 +568,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) + } + + buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL, +- &urb->transfer_dma); ++ &buf_dma); + if (!buf) { + dev_warn(dev->udev->dev.parent, + "No memory left for USB buffer\n"); +@@ -573,6 +576,8 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) + goto freeurb; + } + ++ urb->transfer_dma = buf_dma; ++ + usb_fill_bulk_urb(urb, dev->udev, + usb_rcvbulkpipe(dev->udev, 1), + buf, RX_BUFFER_SIZE, +@@ -585,8 +590,12 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) + usb_unanchor_urb(urb); + usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, + urb->transfer_dma); ++ goto freeurb; + } + ++ dev->rxbuf[i] = buf; ++ dev->rxbuf_dma[i] = buf_dma; ++ + freeurb: + /* Drop reference, USB core will take care of freeing it */ + usb_free_urb(urb); +@@ -674,6 +683,11 @@ static void unlink_all_urbs(struct esd_usb2 *dev) + int i, j; + + usb_kill_anchored_urbs(&dev->rx_submitted); ++ ++ for (i = 0; i < MAX_RX_URBS; ++i) ++ usb_free_coherent(dev->udev, RX_BUFFER_SIZE, ++ dev->rxbuf[i], dev->rxbuf_dma[i]); ++ + for (i = 0; i < dev->net_count; i++) { + priv = dev->nets[i]; + if (priv) { +diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c +index a65203e6ea5f..d21c68882e86 100644 +--- a/drivers/net/can/usb/gs_usb.c ++++ b/drivers/net/can/usb/gs_usb.c +@@ -71,21 +71,27 @@ enum gs_can_identify_mode { + }; + + /* data types passed between host and device */ ++ ++/* The firmware on the original USB2CAN by Geschwister Schneider ++ * Technologie Entwicklungs- und Vertriebs UG exchanges all data ++ * between the host and the device in host byte order. 
This is done ++ * with the struct gs_host_config::byte_order member, which is sent ++ * first to indicate the desired byte order. ++ * ++ * The widely used open source firmware candleLight doesn't support ++ * this feature and exchanges the data in little endian byte order. ++ */ + struct gs_host_config { +- u32 byte_order; ++ __le32 byte_order; + } __packed; +-/* All data exchanged between host and device is exchanged in host byte order, +- * thanks to the struct gs_host_config byte_order member, which is sent first +- * to indicate the desired byte order. +- */ + + struct gs_device_config { + u8 reserved1; + u8 reserved2; + u8 reserved3; + u8 icount; +- u32 sw_version; +- u32 hw_version; ++ __le32 sw_version; ++ __le32 hw_version; + } __packed; + + #define GS_CAN_MODE_NORMAL 0 +@@ -95,26 +101,26 @@ struct gs_device_config { + #define GS_CAN_MODE_ONE_SHOT BIT(3) + + struct gs_device_mode { +- u32 mode; +- u32 flags; ++ __le32 mode; ++ __le32 flags; + } __packed; + + struct gs_device_state { +- u32 state; +- u32 rxerr; +- u32 txerr; ++ __le32 state; ++ __le32 rxerr; ++ __le32 txerr; + } __packed; + + struct gs_device_bittiming { +- u32 prop_seg; +- u32 phase_seg1; +- u32 phase_seg2; +- u32 sjw; +- u32 brp; ++ __le32 prop_seg; ++ __le32 phase_seg1; ++ __le32 phase_seg2; ++ __le32 sjw; ++ __le32 brp; + } __packed; + + struct gs_identify_mode { +- u32 mode; ++ __le32 mode; + } __packed; + + #define GS_CAN_FEATURE_LISTEN_ONLY BIT(0) +@@ -125,23 +131,23 @@ struct gs_identify_mode { + #define GS_CAN_FEATURE_IDENTIFY BIT(5) + + struct gs_device_bt_const { +- u32 feature; +- u32 fclk_can; +- u32 tseg1_min; +- u32 tseg1_max; +- u32 tseg2_min; +- u32 tseg2_max; +- u32 sjw_max; +- u32 brp_min; +- u32 brp_max; +- u32 brp_inc; ++ __le32 feature; ++ __le32 fclk_can; ++ __le32 tseg1_min; ++ __le32 tseg1_max; ++ __le32 tseg2_min; ++ __le32 tseg2_max; ++ __le32 sjw_max; ++ __le32 brp_min; ++ __le32 brp_max; ++ __le32 brp_inc; + } __packed; + + #define GS_CAN_FLAG_OVERFLOW 1 + + struct gs_host_frame { + u32 echo_id; +- u32 can_id; ++ __le32 can_id; + + u8 can_dlc; + u8 channel; +@@ -337,13 +343,13 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) + if (!skb) + return; + +- cf->can_id = hf->can_id; ++ cf->can_id = le32_to_cpu(hf->can_id); + + cf->can_dlc = get_can_dlc(hf->can_dlc); + memcpy(cf->data, hf->data, 8); + + /* ERROR frames tell us information about the controller */ +- if (hf->can_id & CAN_ERR_FLAG) ++ if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG) + gs_update_state(dev, cf); + + netdev->stats.rx_packets++; +@@ -426,11 +432,11 @@ static int gs_usb_set_bittiming(struct net_device *netdev) + if (!dbt) + return -ENOMEM; + +- dbt->prop_seg = bt->prop_seg; +- dbt->phase_seg1 = bt->phase_seg1; +- dbt->phase_seg2 = bt->phase_seg2; +- dbt->sjw = bt->sjw; +- dbt->brp = bt->brp; ++ dbt->prop_seg = cpu_to_le32(bt->prop_seg); ++ dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1); ++ dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2); ++ dbt->sjw = cpu_to_le32(bt->sjw); ++ dbt->brp = cpu_to_le32(bt->brp); + + /* request bit timings */ + rc = usb_control_msg(interface_to_usbdev(intf), +@@ -511,7 +517,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, + + cf = (struct can_frame *)skb->data; + +- hf->can_id = cf->can_id; ++ hf->can_id = cpu_to_le32(cf->can_id); + hf->can_dlc = cf->can_dlc; + memcpy(hf->data, cf->data, cf->can_dlc); + +@@ -582,6 +588,7 @@ static int gs_can_open(struct net_device *netdev) + int rc, i; + struct gs_device_mode *dm; + u32 ctrlmode; ++ u32 flags = 0; + + rc = 
open_candev(netdev); + if (rc) +@@ -649,24 +656,24 @@ static int gs_can_open(struct net_device *netdev) + + /* flags */ + ctrlmode = dev->can.ctrlmode; +- dm->flags = 0; + + if (ctrlmode & CAN_CTRLMODE_LOOPBACK) +- dm->flags |= GS_CAN_MODE_LOOP_BACK; ++ flags |= GS_CAN_MODE_LOOP_BACK; + else if (ctrlmode & CAN_CTRLMODE_LISTENONLY) +- dm->flags |= GS_CAN_MODE_LISTEN_ONLY; ++ flags |= GS_CAN_MODE_LISTEN_ONLY; + + /* Controller is not allowed to retry TX + * this mode is unavailable on atmels uc3c hardware + */ + if (ctrlmode & CAN_CTRLMODE_ONE_SHOT) +- dm->flags |= GS_CAN_MODE_ONE_SHOT; ++ flags |= GS_CAN_MODE_ONE_SHOT; + + if (ctrlmode & CAN_CTRLMODE_3_SAMPLES) +- dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE; ++ flags |= GS_CAN_MODE_TRIPLE_SAMPLE; + + /* finally start device */ +- dm->mode = GS_CAN_MODE_START; ++ dm->mode = cpu_to_le32(GS_CAN_MODE_START); ++ dm->flags = cpu_to_le32(flags); + rc = usb_control_msg(interface_to_usbdev(dev->iface), + usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0), + GS_USB_BREQ_MODE, +@@ -746,9 +753,9 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify) + return -ENOMEM; + + if (do_identify) +- imode->mode = GS_CAN_IDENTIFY_ON; ++ imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_ON); + else +- imode->mode = GS_CAN_IDENTIFY_OFF; ++ imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF); + + rc = usb_control_msg(interface_to_usbdev(dev->iface), + usb_sndctrlpipe(interface_to_usbdev(dev->iface), +@@ -799,6 +806,7 @@ static struct gs_can *gs_make_candev(unsigned int channel, + struct net_device *netdev; + int rc; + struct gs_device_bt_const *bt_const; ++ u32 feature; + + bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL); + if (!bt_const) +@@ -839,14 +847,14 @@ static struct gs_can *gs_make_candev(unsigned int channel, + + /* dev settup */ + strcpy(dev->bt_const.name, "gs_usb"); +- dev->bt_const.tseg1_min = bt_const->tseg1_min; +- dev->bt_const.tseg1_max = bt_const->tseg1_max; +- dev->bt_const.tseg2_min = bt_const->tseg2_min; +- dev->bt_const.tseg2_max = bt_const->tseg2_max; +- dev->bt_const.sjw_max = bt_const->sjw_max; +- dev->bt_const.brp_min = bt_const->brp_min; +- dev->bt_const.brp_max = bt_const->brp_max; +- dev->bt_const.brp_inc = bt_const->brp_inc; ++ dev->bt_const.tseg1_min = le32_to_cpu(bt_const->tseg1_min); ++ dev->bt_const.tseg1_max = le32_to_cpu(bt_const->tseg1_max); ++ dev->bt_const.tseg2_min = le32_to_cpu(bt_const->tseg2_min); ++ dev->bt_const.tseg2_max = le32_to_cpu(bt_const->tseg2_max); ++ dev->bt_const.sjw_max = le32_to_cpu(bt_const->sjw_max); ++ dev->bt_const.brp_min = le32_to_cpu(bt_const->brp_min); ++ dev->bt_const.brp_max = le32_to_cpu(bt_const->brp_max); ++ dev->bt_const.brp_inc = le32_to_cpu(bt_const->brp_inc); + + dev->udev = interface_to_usbdev(intf); + dev->iface = intf; +@@ -863,28 +871,29 @@ static struct gs_can *gs_make_candev(unsigned int channel, + + /* can settup */ + dev->can.state = CAN_STATE_STOPPED; +- dev->can.clock.freq = bt_const->fclk_can; ++ dev->can.clock.freq = le32_to_cpu(bt_const->fclk_can); + dev->can.bittiming_const = &dev->bt_const; + dev->can.do_set_bittiming = gs_usb_set_bittiming; + + dev->can.ctrlmode_supported = 0; + +- if (bt_const->feature & GS_CAN_FEATURE_LISTEN_ONLY) ++ feature = le32_to_cpu(bt_const->feature); ++ if (feature & GS_CAN_FEATURE_LISTEN_ONLY) + dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; + +- if (bt_const->feature & GS_CAN_FEATURE_LOOP_BACK) ++ if (feature & GS_CAN_FEATURE_LOOP_BACK) + dev->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK; + +- if (bt_const->feature & 
GS_CAN_FEATURE_TRIPLE_SAMPLE) ++ if (feature & GS_CAN_FEATURE_TRIPLE_SAMPLE) + dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; + +- if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT) ++ if (feature & GS_CAN_FEATURE_ONE_SHOT) + dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; + + SET_NETDEV_DEV(netdev, &intf->dev); + +- if (dconf->sw_version > 1) +- if (bt_const->feature & GS_CAN_FEATURE_IDENTIFY) ++ if (le32_to_cpu(dconf->sw_version) > 1) ++ if (feature & GS_CAN_FEATURE_IDENTIFY) + netdev->ethtool_ops = &gs_usb_ethtool_ops; + + kfree(bt_const); +@@ -919,7 +928,7 @@ static int gs_usb_probe(struct usb_interface *intf, + if (!hconf) + return -ENOMEM; + +- hconf->byte_order = 0x0000beef; ++ hconf->byte_order = cpu_to_le32(0x0000beef); + + /* send host config */ + rc = usb_control_msg(interface_to_usbdev(intf), +diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c +index 3a75352f632b..792a1afabf5d 100644 +--- a/drivers/net/can/usb/kvaser_usb.c ++++ b/drivers/net/can/usb/kvaser_usb.c +@@ -791,7 +791,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, + if (!urb) + return -ENOMEM; + +- buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); ++ buf = kzalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); + if (!buf) { + usb_free_urb(urb); + return -ENOMEM; +@@ -1459,7 +1459,7 @@ static int kvaser_usb_set_opt_mode(const struct kvaser_usb_net_priv *priv) + struct kvaser_msg *msg; + int rc; + +- msg = kmalloc(sizeof(*msg), GFP_KERNEL); ++ msg = kzalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + +@@ -1592,7 +1592,7 @@ static int kvaser_usb_flush_queue(struct kvaser_usb_net_priv *priv) + struct kvaser_msg *msg; + int rc; + +- msg = kmalloc(sizeof(*msg), GFP_KERNEL); ++ msg = kzalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c +index 6cd4317fe94d..c8502bd8e3dc 100644 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c +@@ -152,14 +152,55 @@ void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts, + /* protect from getting timeval before setting now */ + if (time_ref->tv_host.tv_sec > 0) { + u64 delta_us; ++ s64 delta_ts = 0; ++ ++ /* General case: dev_ts_1 < dev_ts_2 < ts, with: ++ * ++ * - dev_ts_1 = previous sync timestamp ++ * - dev_ts_2 = last sync timestamp ++ * - ts = event timestamp ++ * - ts_period = known sync period (theoretical) ++ * ~ dev_ts2 - dev_ts1 ++ * *but*: ++ * ++ * - time counters wrap (see adapter->ts_used_bits) ++ * - sometimes, dev_ts_1 < ts < dev_ts2 ++ * ++ * "normal" case (sync time counters increase): ++ * must take into account case when ts wraps (tsw) ++ * ++ * < ts_period > < > ++ * | | | ++ * ---+--------+----+-------0-+--+--> ++ * ts_dev_1 | ts_dev_2 | ++ * ts tsw ++ */ ++ if (time_ref->ts_dev_1 < time_ref->ts_dev_2) { ++ /* case when event time (tsw) wraps */ ++ if (ts < time_ref->ts_dev_1) ++ delta_ts = BIT_ULL(time_ref->adapter->ts_used_bits); ++ ++ /* Otherwise, sync time counter (ts_dev_2) has wrapped: ++ * handle case when event time (tsn) hasn't. 
++ * ++ * < ts_period > < > ++ * | | | ++ * ---+--------+--0-+---------+--+--> ++ * ts_dev_1 | ts_dev_2 | ++ * tsn ts ++ */ ++ } else if (time_ref->ts_dev_1 < ts) { ++ delta_ts = -BIT_ULL(time_ref->adapter->ts_used_bits); ++ } + +- delta_us = ts - time_ref->ts_dev_2; +- if (ts < time_ref->ts_dev_2) +- delta_us &= (1 << time_ref->adapter->ts_used_bits) - 1; ++ /* add delay between last sync and event timestamps */ ++ delta_ts += (signed int)(ts - time_ref->ts_dev_2); + +- delta_us += time_ref->ts_total; ++ /* add time from beginning to last sync */ ++ delta_ts += time_ref->ts_total; + +- delta_us *= time_ref->adapter->us_per_ts_scale; ++ /* convert ticks number into microseconds */ ++ delta_us = delta_ts * time_ref->adapter->us_per_ts_scale; + delta_us >>= time_ref->adapter->us_per_ts_shift; + + *tv = time_ref->tv_host_0; +@@ -839,7 +880,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, + if (dev->adapter->dev_set_bus) { + err = dev->adapter->dev_set_bus(dev, 0); + if (err) +- goto lbl_unregister_candev; ++ goto adap_dev_free; + } + + /* get device number early */ +@@ -851,6 +892,10 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, + + return 0; + ++adap_dev_free: ++ if (dev->adapter->dev_free) ++ dev->adapter->dev_free(dev); ++ + lbl_unregister_candev: + unregister_candev(netdev); + +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +index 40647b837b31..d314e73f3d06 100644 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +@@ -475,12 +475,18 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if, + struct pucan_msg *rx_msg) + { + struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg; +- struct peak_usb_device *dev = usb_if->dev[pucan_msg_get_channel(rm)]; +- struct net_device *netdev = dev->netdev; ++ struct peak_usb_device *dev; ++ struct net_device *netdev; + struct canfd_frame *cfd; + struct sk_buff *skb; + const u16 rx_msg_flags = le16_to_cpu(rm->flags); + ++ if (pucan_msg_get_channel(rm) >= ARRAY_SIZE(usb_if->dev)) ++ return -ENOMEM; ++ ++ dev = usb_if->dev[pucan_msg_get_channel(rm)]; ++ netdev = dev->netdev; ++ + if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) { + /* CANFD frame case */ + skb = alloc_canfd_skb(netdev, &cfd); +@@ -527,15 +533,21 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if, + struct pucan_msg *rx_msg) + { + struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg; +- struct peak_usb_device *dev = usb_if->dev[pucan_stmsg_get_channel(sm)]; +- struct pcan_usb_fd_device *pdev = +- container_of(dev, struct pcan_usb_fd_device, dev); ++ struct pcan_usb_fd_device *pdev; + enum can_state new_state = CAN_STATE_ERROR_ACTIVE; + enum can_state rx_state, tx_state; +- struct net_device *netdev = dev->netdev; ++ struct peak_usb_device *dev; ++ struct net_device *netdev; + struct can_frame *cf; + struct sk_buff *skb; + ++ if (pucan_stmsg_get_channel(sm) >= ARRAY_SIZE(usb_if->dev)) ++ return -ENOMEM; ++ ++ dev = usb_if->dev[pucan_stmsg_get_channel(sm)]; ++ pdev = container_of(dev, struct pcan_usb_fd_device, dev); ++ netdev = dev->netdev; ++ + /* nothing should be sent while in BUS_OFF state */ + if (dev->can.state == CAN_STATE_BUS_OFF) + return 0; +@@ -588,9 +600,14 @@ static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if *usb_if, + struct pucan_msg *rx_msg) + { + struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg; +- struct peak_usb_device *dev = 
usb_if->dev[pucan_ermsg_get_channel(er)]; +- struct pcan_usb_fd_device *pdev = +- container_of(dev, struct pcan_usb_fd_device, dev); ++ struct pcan_usb_fd_device *pdev; ++ struct peak_usb_device *dev; ++ ++ if (pucan_ermsg_get_channel(er) >= ARRAY_SIZE(usb_if->dev)) ++ return -EINVAL; ++ ++ dev = usb_if->dev[pucan_ermsg_get_channel(er)]; ++ pdev = container_of(dev, struct pcan_usb_fd_device, dev); + + /* keep a trace of tx and rx error counters for later use */ + pdev->bec.txerr = er->tx_err_cnt; +@@ -604,11 +621,17 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if, + struct pucan_msg *rx_msg) + { + struct pcan_ufd_ovr_msg *ov = (struct pcan_ufd_ovr_msg *)rx_msg; +- struct peak_usb_device *dev = usb_if->dev[pufd_omsg_get_channel(ov)]; +- struct net_device *netdev = dev->netdev; ++ struct peak_usb_device *dev; ++ struct net_device *netdev; + struct can_frame *cf; + struct sk_buff *skb; + ++ if (pufd_omsg_get_channel(ov) >= ARRAY_SIZE(usb_if->dev)) ++ return -EINVAL; ++ ++ dev = usb_if->dev[pufd_omsg_get_channel(ov)]; ++ netdev = dev->netdev; ++ + /* allocate an skb to store the error frame */ + skb = alloc_can_err_skb(netdev, &cf); + if (!skb) +@@ -726,6 +749,9 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev, + u16 tx_msg_size, tx_msg_flags; + u8 can_dlc; + ++ if (cfd->len > CANFD_MAX_DLEN) ++ return -EINVAL; ++ + tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4); + tx_msg->size = cpu_to_le16(tx_msg_size); + tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX); +diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c +index 3e4416473607..df99354ec12a 100644 +--- a/drivers/net/can/usb/usb_8dev.c ++++ b/drivers/net/can/usb/usb_8dev.c +@@ -148,7 +148,8 @@ struct usb_8dev_priv { + u8 *cmd_msg_buffer; + + struct mutex usb_8dev_cmd_lock; +- ++ void *rxbuf[MAX_RX_URBS]; ++ dma_addr_t rxbuf_dma[MAX_RX_URBS]; + }; + + /* tx frame */ +@@ -744,6 +745,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) + for (i = 0; i < MAX_RX_URBS; i++) { + struct urb *urb = NULL; + u8 *buf; ++ dma_addr_t buf_dma; + + /* create a URB, and a buffer for it */ + urb = usb_alloc_urb(0, GFP_KERNEL); +@@ -753,7 +755,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) + } + + buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL, +- &urb->transfer_dma); ++ &buf_dma); + if (!buf) { + netdev_err(netdev, "No memory left for USB buffer\n"); + usb_free_urb(urb); +@@ -761,6 +763,8 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) + break; + } + ++ urb->transfer_dma = buf_dma; ++ + usb_fill_bulk_urb(urb, priv->udev, + usb_rcvbulkpipe(priv->udev, + USB_8DEV_ENDP_DATA_RX), +@@ -778,6 +782,9 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) + break; + } + ++ priv->rxbuf[i] = buf; ++ priv->rxbuf_dma[i] = buf_dma; ++ + /* Drop reference, USB core will take care of freeing it */ + usb_free_urb(urb); + } +@@ -847,6 +854,10 @@ static void unlink_all_urbs(struct usb_8dev_priv *priv) + + usb_kill_anchored_urbs(&priv->rx_submitted); + ++ for (i = 0; i < MAX_RX_URBS; ++i) ++ usb_free_coherent(priv->udev, RX_BUFFER_SIZE, ++ priv->rxbuf[i], priv->rxbuf_dma[i]); ++ + usb_kill_anchored_urbs(&priv->tx_submitted); + atomic_set(&priv->active_tx_urbs, 0); + +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c +index 060f9b176929..b6867a8915da 100644 +--- a/drivers/net/dsa/b53/b53_common.c ++++ b/drivers/net/dsa/b53/b53_common.c +@@ -502,6 +502,19 @@ static void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) + } + 
} + ++static void b53_port_set_learning(struct b53_device *dev, int port, ++ bool learning) ++{ ++ u16 reg; ++ ++ b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, ®); ++ if (learning) ++ reg &= ~BIT(port); ++ else ++ reg |= BIT(port); ++ b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg); ++} ++ + static int b53_enable_port(struct dsa_switch *ds, int port, + struct phy_device *phy) + { +@@ -509,6 +522,8 @@ static int b53_enable_port(struct dsa_switch *ds, int port, + unsigned int cpu_port = dev->cpu_port; + u16 pvlan; + ++ b53_port_set_learning(dev, port, false); ++ + /* Clear the Rx and Tx disable bits and set to no spanning tree */ + b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0); + +@@ -552,6 +567,8 @@ static void b53_enable_cpu_port(struct b53_device *dev) + PORT_CTRL_RX_MCST_EN | + PORT_CTRL_RX_UCST_EN; + b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(cpu_port), port_ctrl); ++ ++ b53_port_set_learning(dev, cpu_port, false); + } + + static void b53_enable_mib(struct b53_device *dev) +@@ -946,7 +963,7 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port, + if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0) + return -EOPNOTSUPP; + +- if (vlan->vid_end > dev->num_vlans) ++ if (vlan->vid_end >= dev->num_vlans) + return -ERANGE; + + b53_enable_vlan(dev, true); +@@ -1175,6 +1192,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, + return ret; + + switch (ret) { ++ case -ETIMEDOUT: ++ return ret; + case -ENOSPC: + dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n", + addr, vid); +@@ -1373,6 +1392,8 @@ static int b53_br_join(struct dsa_switch *ds, int port, + b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); + dev->ports[port].vlan_ctl_mask = pvlan; + ++ b53_port_set_learning(dev, port, true); ++ + return 0; + } + +@@ -1424,6 +1445,7 @@ static void b53_br_leave(struct dsa_switch *ds, int port) + vl->untag |= BIT(port) | BIT(dev->cpu_port); + b53_set_vlan_entry(dev, pvid, vl); + } ++ b53_port_set_learning(dev, port, false); + } + + static void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state) +diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h +index 3cf246c6bdcc..aed70e76006d 100644 +--- a/drivers/net/dsa/b53/b53_regs.h ++++ b/drivers/net/dsa/b53/b53_regs.h +@@ -112,6 +112,7 @@ + #define B53_UC_FLOOD_MASK 0x32 + #define B53_MC_FLOOD_MASK 0x34 + #define B53_IPMC_FLOOD_MASK 0x36 ++#define B53_DIS_LEARNING 0x3c + + /* + * Override Ports 0-7 State on devices with xMII interfaces (8 bit) +diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c +index 796571fccba7..40b3adf7ad99 100644 +--- a/drivers/net/dsa/bcm_sf2.c ++++ b/drivers/net/dsa/bcm_sf2.c +@@ -224,6 +224,11 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, + reg &= ~P_TXQ_PSM_VDD(port); + core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); + ++ /* Disable learning */ ++ reg = core_readl(priv, CORE_DIS_LEARN); ++ reg |= BIT(port); ++ core_writel(priv, reg, CORE_DIS_LEARN); ++ + /* Clear the Rx and Tx disable bits and set to no spanning tree */ + core_writel(priv, 0, CORE_G_PCTL_PORT(port)); + +@@ -515,15 +520,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) + /* Find our integrated MDIO bus node */ + dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio"); + priv->master_mii_bus = of_mdio_find_bus(dn); +- if (!priv->master_mii_bus) ++ if (!priv->master_mii_bus) { ++ of_node_put(dn); + return -EPROBE_DEFER; ++ } + + get_device(&priv->master_mii_bus->dev); + priv->master_mii_dn = dn; + + 
priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev); +- if (!priv->slave_mii_bus) ++ if (!priv->slave_mii_bus) { ++ of_node_put(dn); + return -ENOMEM; ++ } + + priv->slave_mii_bus->priv = priv; + priv->slave_mii_bus->name = "sf2 slave mii"; +@@ -579,8 +588,10 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) + * in bits 15:8 and the patch level in bits 7:0 which is exactly what + * the REG_PHY_REVISION register layout is. + */ +- +- return priv->hw_params.gphy_rev; ++ if (priv->int_phy_mask & BIT(port)) ++ return priv->hw_params.gphy_rev; ++ else ++ return 0; + } + + static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, +@@ -1039,6 +1050,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) + spin_lock_init(&priv->indir_lock); + mutex_init(&priv->stats_mutex); + ++ /* Balance of_node_put() done by of_find_node_by_name() */ ++ of_node_get(dn); + ports = of_find_node_by_name(dn, "ports"); + if (ports) { + bcm_sf2_identify_ports(priv, ports); +diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h +index 838fe373cd6f..ca1d1f2e1161 100644 +--- a/drivers/net/dsa/bcm_sf2_regs.h ++++ b/drivers/net/dsa/bcm_sf2_regs.h +@@ -138,6 +138,8 @@ + #define CORE_SWITCH_CTRL 0x00088 + #define MII_DUMB_FWDG_EN (1 << 6) + ++#define CORE_DIS_LEARN 0x000f0 ++ + #define CORE_SFT_LRN_CTRL 0x000f8 + #define SW_LEARN_CNTL(x) (1 << (x)) + +diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c +index f8df8248035e..31e02ca56572 100644 +--- a/drivers/net/ethernet/aeroflex/greth.c ++++ b/drivers/net/ethernet/aeroflex/greth.c +@@ -1554,10 +1554,11 @@ static int greth_of_remove(struct platform_device *of_dev) + mdiobus_unregister(greth->mdio); + + unregister_netdev(ndev); +- free_netdev(ndev); + + of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0])); + ++ free_netdev(ndev); ++ + return 0; + } + +diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c +index 6ffdff68bfc4..3dc101f7d6bd 100644 +--- a/drivers/net/ethernet/allwinner/sun4i-emac.c ++++ b/drivers/net/ethernet/allwinner/sun4i-emac.c +@@ -412,7 +412,7 @@ static void emac_timeout(struct net_device *dev) + /* Hardware start transmission. + * Send a packet to media from the upper layer. + */ +-static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) ++static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *dev) + { + struct emac_board_info *db = netdev_priv(dev); + unsigned long channel; +@@ -420,7 +420,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) + + channel = db->tx_fifo_stat & 3; + if (channel == 3) +- return 1; ++ return NETDEV_TX_BUSY; + + channel = (channel == 1 ? 
1 : 0); + +@@ -827,13 +827,13 @@ static int emac_probe(struct platform_device *pdev) + db->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(db->clk)) { + ret = PTR_ERR(db->clk); +- goto out_iounmap; ++ goto out_dispose_mapping; + } + + ret = clk_prepare_enable(db->clk); + if (ret) { + dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret); +- goto out_iounmap; ++ goto out_dispose_mapping; + } + + ret = sunxi_sram_claim(&pdev->dev); +@@ -890,6 +890,8 @@ static int emac_probe(struct platform_device *pdev) + sunxi_sram_release(&pdev->dev); + out_clk_disable_unprepare: + clk_disable_unprepare(db->clk); ++out_dispose_mapping: ++ irq_dispose_mapping(ndev->irq); + out_iounmap: + iounmap(db->membase); + out: +@@ -908,6 +910,7 @@ static int emac_remove(struct platform_device *pdev) + unregister_netdev(ndev); + sunxi_sram_release(&pdev->dev); + clk_disable_unprepare(db->clk); ++ irq_dispose_mapping(ndev->irq); + iounmap(db->membase); + free_netdev(ndev); + +diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c +index 905911f78693..e95f19e573a7 100644 +--- a/drivers/net/ethernet/amazon/ena/ena_com.c ++++ b/drivers/net/ethernet/amazon/ena/ena_com.c +@@ -2096,6 +2096,9 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, + rss->hash_key; + int rc; + ++ if (unlikely(!func)) ++ return -EINVAL; ++ + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_FUNCTION, + rss->hash_key_dma_addr, +@@ -2108,8 +2111,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, + if (rss->hash_func) + rss->hash_func--; + +- if (func) +- *func = rss->hash_func; ++ *func = rss->hash_func; + + if (key) + memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2); +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c +index da21886609e3..de3d6f8b5431 100644 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c +@@ -2345,16 +2345,9 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, + goto err_mmio_read_less; + } + +- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width)); ++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width)); + if (rc) { +- dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc); +- goto err_mmio_read_less; +- } +- +- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width)); +- if (rc) { +- dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n", +- rc); ++ dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc); + goto err_mmio_read_less; + } + +@@ -2894,6 +2887,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + return rc; + } + ++ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS)); ++ if (rc) { ++ dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc); ++ goto err_disable_device; ++ } ++ + pci_set_master(pdev); + + ena_dev = vzalloc(sizeof(*ena_dev)); +diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c +index c22bf52d3320..c3887ac3d04d 100644 +--- a/drivers/net/ethernet/amd/pcnet32.c ++++ b/drivers/net/ethernet/amd/pcnet32.c +@@ -1493,8 +1493,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) + } + pci_set_master(pdev); + +- ioaddr = pci_resource_start(pdev, 0); +- if (!ioaddr) { ++ if (!pci_resource_len(pdev, 0)) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("card has no PCI IO resources, aborting\n"); + return -ENODEV; +@@ -1506,6 
+1505,8 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) + pr_err("architecture does not support 32bit PCI busmaster DMA\n"); + return err; + } ++ ++ ioaddr = pci_resource_start(pdev, 0); + if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("io address range already allocated\n"); +diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c +index a22403c688c9..337cfce78aef 100644 +--- a/drivers/net/ethernet/arc/emac_mdio.c ++++ b/drivers/net/ethernet/arc/emac_mdio.c +@@ -152,6 +152,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) + if (IS_ERR(data->reset_gpio)) { + error = PTR_ERR(data->reset_gpio); + dev_err(priv->dev, "Failed to request gpio: %d\n", error); ++ mdiobus_free(bus); + return error; + } + +diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c +index c0f84b73574d..59af298f99e0 100644 +--- a/drivers/net/ethernet/atheros/alx/main.c ++++ b/drivers/net/ethernet/atheros/alx/main.c +@@ -1056,8 +1056,12 @@ static int __alx_open(struct alx_priv *alx, bool resume) + + static void __alx_stop(struct alx_priv *alx) + { +- alx_halt(alx); + alx_free_irq(alx); ++ ++ cancel_work_sync(&alx->link_check_wk); ++ cancel_work_sync(&alx->reset_wk); ++ ++ alx_halt(alx); + alx_free_rings(alx); + } + +@@ -1649,6 +1653,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + free_netdev(netdev); + out_pci_release: + pci_release_mem_regions(pdev); ++ pci_disable_pcie_error_reporting(pdev); + out_pci_disable: + pci_disable_device(pdev); + return err; +@@ -1659,9 +1664,6 @@ static void alx_remove(struct pci_dev *pdev) + struct alx_priv *alx = pci_get_drvdata(pdev); + struct alx_hw *hw = &alx->hw; + +- cancel_work_sync(&alx->link_check_wk); +- cancel_work_sync(&alx->reset_wk); +- + /* restore permanent mac address */ + alx_set_macaddr(hw, hw->perm_addr); + +@@ -1693,13 +1695,19 @@ static int alx_resume(struct device *dev) + struct pci_dev *pdev = to_pci_dev(dev); + struct alx_priv *alx = pci_get_drvdata(pdev); + struct alx_hw *hw = &alx->hw; ++ int err; + + alx_reset_phy(hw); + + if (!netif_running(alx->dev)) + return 0; ++ ++ err = __alx_open(alx, true); ++ if (err) ++ return err; ++ + netif_device_attach(alx->dev); +- return __alx_open(alx, true); ++ return 0; + } + + static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); +diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c +index d95dec595786..9a9f6c12f89a 100644 +--- a/drivers/net/ethernet/broadcom/b44.c ++++ b/drivers/net/ethernet/broadcom/b44.c +@@ -2390,7 +2390,8 @@ static int b44_init_one(struct ssb_device *sdev, + goto err_out_free_dev; + } + +- if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) { ++ err = dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30)); ++ if (err) { + dev_err(sdev->dev, + "Required 30BIT DMA mask unsupported by the system\n"); + goto err_out_powerdown; +diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c +index 1f7034d739b0..e15e487c14dd 100644 +--- a/drivers/net/ethernet/broadcom/bnx2.c ++++ b/drivers/net/ethernet/broadcom/bnx2.c +@@ -8256,9 +8256,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) + BNX2_WR(bp, PCI_COMMAND, reg); + } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) && + !(bp->flags & BNX2_FLAG_PCIX)) { +- + dev_err(&pdev->dev, + "5706 A1 can only be used in a PCIX bus, aborting\n"); ++ rc = -EPERM; 
+ goto err_out_unmap; + } + +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +index 46a7dcf2ff4a..9d7f491931ce 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +@@ -2672,7 +2672,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) + } + + /* Allocated memory for FW statistics */ +- if (bnx2x_alloc_fw_stats_mem(bp)) ++ rc = bnx2x_alloc_fw_stats_mem(bp); ++ if (rc) + LOAD_ERROR_EXIT(bp, load_error0); + + /* request pf to initialize status blocks */ +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +index e8a09d0afe1c..545b59ff5d7e 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +@@ -1240,8 +1240,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, + goto failed; + + /* SR-IOV capability was enabled but there are no VFs*/ +- if (iov->total == 0) ++ if (iov->total == 0) { ++ err = -EINVAL; + goto failed; ++ } + + iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param); + +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index 421cbba9a3bc..77dadbe1a446 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -4262,7 +4262,6 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) + + pf->fw_fid = le16_to_cpu(resp->fid); + pf->port_id = le16_to_cpu(resp->port_id); +- bp->dev->dev_port = pf->port_id; + memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); + memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN); + pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); +@@ -5095,9 +5094,10 @@ static void bnxt_tx_disable(struct bnxt *bp) + txr->dev_state = BNXT_DEV_STATE_CLOSING; + } + } ++ /* Drop carrier first to prevent TX timeout */ ++ netif_carrier_off(bp->dev); + /* Stop all TX queues */ + netif_tx_disable(bp->dev); +- netif_carrier_off(bp->dev); + } + + static void bnxt_tx_enable(struct bnxt *bp) +@@ -5589,14 +5589,14 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) + } + } + +- bnxt_enable_napi(bp); +- + rc = bnxt_init_nic(bp, irq_re_init); + if (rc) { + netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); +- goto open_err; ++ goto open_err_irq; + } + ++ bnxt_enable_napi(bp); ++ + if (link_re_init) { + mutex_lock(&bp->link_lock); + rc = bnxt_update_phy_setting(bp); +@@ -5618,9 +5618,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) + + return 0; + +-open_err: +- bnxt_disable_napi(bp); +- + open_err_irq: + bnxt_del_napi(bp); + +@@ -6322,7 +6319,8 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { + dev_err(&pdev->dev, "System does not support DMA, aborting\n"); +- goto init_err_disable; ++ rc = -EIO; ++ goto init_err_release; + } + + pci_set_master(pdev); +@@ -7085,6 +7083,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) + + bnxt_parse_log_pcie_link(bp); + ++ pci_save_state(pdev); + return 0; + + init_err: +@@ -7158,6 +7157,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) + "Cannot re-enable PCI device after reset.\n"); + } else { + pci_set_master(pdev); ++ pci_restore_state(pdev); ++ pci_save_state(pdev); + + if (netif_running(netdev)) + err 
= bnxt_open(netdev); +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +index 3a352f76e633..9e5251c427a3 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +@@ -1000,9 +1000,12 @@ static int bnxt_set_pauseparam(struct net_device *dev, + if (!BNXT_SINGLE_PF(bp)) + return -EOPNOTSUPP; + ++ mutex_lock(&bp->link_lock); + if (epause->autoneg) { +- if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) +- return -EINVAL; ++ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { ++ rc = -EINVAL; ++ goto pause_exit; ++ } + + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; + if (bp->hwrm_spec_code >= 0x10201) +@@ -1025,6 +1028,9 @@ static int bnxt_set_pauseparam(struct net_device *dev, + + if (netif_running(dev)) + rc = bnxt_hwrm_set_pause(bp); ++ ++pause_exit: ++ mutex_unlock(&bp->link_lock); + return rc; + } + +@@ -1454,6 +1460,9 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) + if (rc != 0) + return rc; + ++ if (!dir_entries || !entry_length) ++ return -EIO; ++ + /* Insert 2 bytes of directory info (count and size of entries) */ + if (len < 2) + return -EINVAL; +@@ -1665,8 +1674,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) + struct bnxt *bp = netdev_priv(dev); + struct ethtool_eee *eee = &bp->eee; + struct bnxt_link_info *link_info = &bp->link_info; +- u32 advertising = +- _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); ++ u32 advertising; + int rc = 0; + + if (!BNXT_SINGLE_PF(bp)) +@@ -1675,19 +1683,23 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) + if (!(bp->flags & BNXT_FLAG_EEE_CAP)) + return -EOPNOTSUPP; + ++ mutex_lock(&bp->link_lock); ++ advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); + if (!edata->eee_enabled) + goto eee_ok; + + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { + netdev_warn(dev, "EEE requires autoneg\n"); +- return -EINVAL; ++ rc = -EINVAL; ++ goto eee_exit; + } + if (edata->tx_lpi_enabled) { + if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi || + edata->tx_lpi_timer < bp->lpi_tmr_lo)) { + netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n", + bp->lpi_tmr_lo, bp->lpi_tmr_hi); +- return -EINVAL; ++ rc = -EINVAL; ++ goto eee_exit; + } else if (!bp->lpi_tmr_hi) { + edata->tx_lpi_timer = eee->tx_lpi_timer; + } +@@ -1697,7 +1709,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) + } else if (edata->advertised & ~advertising) { + netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n", + edata->advertised, advertising); +- return -EINVAL; ++ rc = -EINVAL; ++ goto eee_exit; + } + + eee->advertised = edata->advertised; +@@ -1709,6 +1722,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) + if (netif_running(dev)) + rc = bnxt_hwrm_set_link_setting(bp, false, true); + ++eee_exit: ++ mutex_unlock(&bp->link_lock); + return rc; + } + +@@ -1847,7 +1862,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev, + /* Read A2 portion of the EEPROM */ + if (length) { + start -= ETH_MODULE_SFF_8436_LEN; +- rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, ++ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, + start, length, data); + } + return rc; +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +index 393cce3bf2fc..1d6cb5f0ffeb 100644 
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +@@ -342,6 +342,7 @@ static void bnxt_free_vf_resources(struct bnxt *bp) + } + } + ++ bp->pf.active_vfs = 0; + kfree(bp->pf.vf); + bp->pf.vf = NULL; + } +@@ -590,7 +591,6 @@ void bnxt_sriov_disable(struct bnxt *bp) + + bnxt_free_vf_resources(bp); + +- bp->pf.active_vfs = 0; + /* Reclaim all resources for the PF. */ + bnxt_hwrm_func_qcaps(bp); + } +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +index 5d4189c94718..fae551777083 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c +@@ -1094,7 +1094,7 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, + switch (mode) { + case GENET_POWER_PASSIVE: + reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY | +- EXT_PWR_DOWN_BIAS); ++ EXT_PWR_DOWN_BIAS | EXT_ENERGY_DET_MASK); + /* fallthrough */ + case GENET_POWER_CABLE_SENSE: + /* enable APD */ +@@ -2698,15 +2698,21 @@ static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, + /* Returns a reusable dma control register value */ + static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) + { ++ unsigned int i; + u32 reg; + u32 dma_ctrl; + + /* disable DMA */ + dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; ++ for (i = 0; i < priv->hw_params->tx_queues; i++) ++ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + ++ dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; ++ for (i = 0; i < priv->hw_params->rx_queues; i++) ++ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); +@@ -2815,12 +2821,6 @@ static int bcmgenet_open(struct net_device *dev) + + bcmgenet_set_hw_addr(priv, dev->dev_addr); + +- if (priv->internal_phy) { +- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); +- reg |= EXT_ENERGY_DET_MASK; +- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); +- } +- + /* Disable RX/TX DMA and flush TX queues */ + dma_ctrl = bcmgenet_dma_disable(priv); + +@@ -3433,8 +3433,10 @@ static int bcmgenet_probe(struct platform_device *pdev) + clk_disable_unprepare(priv->clk); + + err = register_netdev(dev); +- if (err) ++ if (err) { ++ bcmgenet_mii_exit(dev); + goto err; ++ } + + return err; + +@@ -3508,7 +3510,6 @@ static int bcmgenet_resume(struct device *d) + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned long dma_ctrl; + int ret; +- u32 reg; + + if (!netif_running(dev)) + return 0; +@@ -3543,12 +3544,6 @@ static int bcmgenet_resume(struct device *d) + + bcmgenet_set_hw_addr(priv, dev->dev_addr); + +- if (priv->internal_phy) { +- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); +- reg |= EXT_ENERGY_DET_MASK; +- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); +- } +- + if (priv->wolopts) + bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); + +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +index b97122926d3a..df107ed67220 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +@@ -167,12 +167,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, + reg |= CMD_RX_EN; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + +- if (priv->hw_params->flags & GENET_HAS_EXT) { +- reg = 
bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); +- reg &= ~EXT_ENERGY_DET_MASK; +- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); +- } +- + /* Enable the MPD interrupt */ + cpu_mask_clear = UMAC_IRQ_MPD_R; + +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c +index c069a04a6e7e..2db6102ed584 100644 +--- a/drivers/net/ethernet/broadcom/tg3.c ++++ b/drivers/net/ethernet/broadcom/tg3.c +@@ -7201,8 +7201,8 @@ static inline void tg3_reset_task_schedule(struct tg3 *tp) + + static inline void tg3_reset_task_cancel(struct tg3 *tp) + { +- cancel_work_sync(&tp->reset_task); +- tg3_flag_clear(tp, RESET_TASK_PENDING); ++ if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) ++ cancel_work_sync(&tp->reset_task); + tg3_flag_clear(tp, TX_RECOVERY_PENDING); + } + +@@ -11174,18 +11174,27 @@ static void tg3_reset_task(struct work_struct *work) + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); + err = tg3_init_hw(tp, true); +- if (err) ++ if (err) { ++ tg3_full_unlock(tp); ++ tp->irq_sync = 0; ++ tg3_napi_enable(tp); ++ /* Clear this flag so that tg3_reset_task_cancel() will not ++ * call cancel_work_sync() and wait forever. ++ */ ++ tg3_flag_clear(tp, RESET_TASK_PENDING); ++ dev_close(tp->dev); + goto out; ++ } + + tg3_netif_start(tp); + +-out: + tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); + + tg3_flag_clear(tp, RESET_TASK_PENDING); ++out: + rtnl_unlock(); + } + +@@ -18174,8 +18183,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, + + rtnl_lock(); + +- /* We probably don't have netdev yet */ +- if (!netdev || !netif_running(netdev)) ++ /* Could be second call or maybe we don't have netdev yet */ ++ if (!netdev || tp->pcierr_recovery || !netif_running(netdev)) + goto done; + + /* We needn't recover from permanent error */ +diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c +index 30e93041bf83..69fa47351a32 100644 +--- a/drivers/net/ethernet/cadence/macb.c ++++ b/drivers/net/ethernet/cadence/macb.c +@@ -2031,6 +2031,9 @@ static struct net_device_stats *gem_get_stats(struct macb *bp) + struct gem_stats *hwstat = &bp->hw_stats.gem; + struct net_device_stats *nstat = &bp->stats; + ++ if (!netif_running(bp->dev)) ++ return nstat; ++ + gem_update_stats(bp); + + nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + +@@ -3024,7 +3027,7 @@ static int macb_probe(struct platform_device *pdev) + bp->wol = 0; + if (of_get_property(np, "magic-packet", NULL)) + bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; +- device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); ++ device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); + + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32) +diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h +index 03d79d95ab75..d7e0d2ce15c1 100644 +--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h ++++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h +@@ -526,7 +526,7 @@ + #define CN23XX_BAR1_INDEX_OFFSET 3 + + #define CN23XX_PEM_BAR1_INDEX_REG(port, idx) \ +- (CN23XX_PEM_BAR1_INDEX_START + ((port) << CN23XX_PEM_OFFSET) + \ ++ (CN23XX_PEM_BAR1_INDEX_START + (((u64)port) << CN23XX_PEM_OFFSET) + \ + ((idx) << CN23XX_BAR1_INDEX_OFFSET)) + + /*############################ DPI #########################*/ +diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h +index 
5e3aff242ad3..3ab84d18ad3a 100644 +--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h ++++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h +@@ -417,7 +417,7 @@ + | CN6XXX_INTR_M0UNWI_ERR \ + | CN6XXX_INTR_M1UPB0_ERR \ + | CN6XXX_INTR_M1UPWI_ERR \ +- | CN6XXX_INTR_M1UPB0_ERR \ ++ | CN6XXX_INTR_M1UNB0_ERR \ + | CN6XXX_INTR_M1UNWI_ERR \ + | CN6XXX_INTR_INSTR_DB_OF_ERR \ + | CN6XXX_INTR_SLIST_DB_OF_ERR \ +diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +index 747ef0882976..8f3d544bec0c 100644 +--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c ++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +@@ -537,7 +537,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, + mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; + mbx.rq.qs_num = qs->vnic_id; + mbx.rq.rq_num = qidx; +- mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) | ++ mbx.rq.cfg = ((u64)rq->caching << 26) | (rq->cq_qs << 19) | + (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) | + (rq->cont_qs_rbdr_idx << 8) | + (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx); +diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c +index e4b5b057f417..f012649891da 100644 +--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c ++++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c +@@ -3111,6 +3111,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, + GFP_KERNEL | __GFP_COMP); + if (!avail) { + CH_ALERT(adapter, "free list queue 0 initialization failed\n"); ++ ret = -ENOMEM; + goto err; + } + if (avail < q->fl[0].size) +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +index c15052164717..becfb54421cc 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +@@ -2446,7 +2446,7 @@ do { \ + seq_printf(seq, "%-12s", s); \ + for (i = 0; i < n; ++i) \ + seq_printf(seq, " %16" fmt_spec, v); \ +- seq_putc(seq, '\n'); \ ++ seq_putc(seq, '\n'); \ + } while (0) + #define S(s, v) S3("s", s, v) + #define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v) +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +index 49d2debb334e..6afad4d3385a 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +@@ -47,7 +47,7 @@ static int fill_match_fields(struct adapter *adap, + bool next_header) + { + unsigned int i, j; +- u32 val, mask; ++ __be32 val, mask; + int off, err; + bool found; + +@@ -217,7 +217,7 @@ int cxgb4_config_knode(struct net_device *dev, __be16 protocol, + const struct cxgb4_next_header *next; + bool found = false; + unsigned int i, j; +- u32 val, mask; ++ __be32 val, mask; + int off; + + if (t->table[link_uhtid - 1].link_handle) { +@@ -231,10 +231,10 @@ int cxgb4_config_knode(struct net_device *dev, __be16 protocol, + + /* Try to find matches that allow jumps to next header. */ + for (i = 0; next[i].jump; i++) { +- if (next[i].offoff != cls->knode.sel->offoff || +- next[i].shift != cls->knode.sel->offshift || +- next[i].mask != cls->knode.sel->offmask || +- next[i].offset != cls->knode.sel->off) ++ if (next[i].sel.offoff != cls->knode.sel->offoff || ++ next[i].sel.offshift != cls->knode.sel->offshift || ++ next[i].sel.offmask != cls->knode.sel->offmask || ++ next[i].sel.off != cls->knode.sel->off) + continue; + + /* Found a possible candidate. 
Find a key that +@@ -246,9 +246,9 @@ int cxgb4_config_knode(struct net_device *dev, __be16 protocol, + val = cls->knode.sel->keys[j].val; + mask = cls->knode.sel->keys[j].mask; + +- if (next[i].match_off == off && +- next[i].match_val == val && +- next[i].match_mask == mask) { ++ if (next[i].key.off == off && ++ next[i].key.val == val && ++ next[i].key.mask == mask) { + found = true; + break; + } +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h +index a4b99edcc339..141085e159e5 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h +@@ -38,12 +38,12 @@ + struct cxgb4_match_field { + int off; /* Offset from the beginning of the header to match */ + /* Fill the value/mask pair in the spec if matched */ +- int (*val)(struct ch_filter_specification *f, u32 val, u32 mask); ++ int (*val)(struct ch_filter_specification *f, __be32 val, __be32 mask); + }; + + /* IPv4 match fields */ + static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f, +- u32 val, u32 mask) ++ __be32 val, __be32 mask) + { + f->val.tos = (ntohl(val) >> 16) & 0x000000FF; + f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF; +@@ -52,7 +52,7 @@ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f, + } + + static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f, +- u32 val, u32 mask) ++ __be32 val, __be32 mask) + { + u32 mask_val; + u8 frag_val; +@@ -74,7 +74,7 @@ static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f, + } + + static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f, +- u32 val, u32 mask) ++ __be32 val, __be32 mask) + { + f->val.proto = (ntohl(val) >> 16) & 0x000000FF; + f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF; +@@ -83,7 +83,7 @@ static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f, + } + + static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f, +- u32 val, u32 mask) ++ __be32 val, __be32 mask) + { + memcpy(&f->val.fip[0], &val, sizeof(u32)); + memcpy(&f->mask.fip[0], &mask, sizeof(u32)); +@@ -92,7 +92,7 @@ static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f, + } + + static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f, +- u32 val, u32 mask) ++ __be32 val, __be32 mask) + { + memcpy(&f->val.lip[0], &val, sizeof(u32)); + memcpy(&f->mask.lip[0], &mask, sizeof(u32)); +@@ -111,7 +111,7 @@ static const struct cxgb4_match_field cxgb4_ipv4_fields[] = { + + /* IPv6 match fields */ + static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f, +- u32 val, u32 mask) ++ __be32 val, __be32 mask) + { + f->val.tos = (ntohl(val) >> 20) & 0x000000FF; + f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF; +@@ -120,7 +120,7 @@ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f, + } + + static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f, +- u32 val, u32 mask) ++ __be32 val, __be32 mask) + { + f->val.proto = (ntohl(val) >> 8) & 0x000000FF; + f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF; +@@ -129,7 +129,7 @@ static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f, + } + + static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f, +- u32 val, u32 mask) ++ __be32 val, __be32 mask) + { + memcpy(&f->val.fip[0], &val, sizeof(u32)); + memcpy(&f->mask.fip[0], &mask, sizeof(u32)); +@@ -138,7 +138,7 @@ static inline int 
cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f, + } + + static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f, +- u32 val, u32 mask) ++ __be32 val, __be32 mask) + { + memcpy(&f->val.fip[4], &val, sizeof(u32)); + memcpy(&f->mask.fip[4], &mask, sizeof(u32)); +@@ -147,7 +147,7 @@ static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f, + } + + static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f, +- u32 val, u32 mask) ++ __be32 val, __be32 mask) + { + memcpy(&f->val.fip[8], &val, sizeof(u32)); + memcpy(&f->mask.fip[8], &mask, sizeof(u32)); +@@ -156,7 +156,7 @@ static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f, + } + + static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f, +- u32 val, u32 mask) ++ __be32 val, __be32 mask) + { + memcpy(&f->val.fip[12], &val, sizeof(u32)); + memcpy(&f->mask.fip[12], &mask, sizeof(u32)); +@@ -165,7 +165,7 @@ static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f, + } + + static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f, +- u32 val, u32 mask) ++ __be32 val, __be32 mask) + { + memcpy(&f->val.lip[0], &val, sizeof(u32)); + memcpy(&f->mask.lip[0], &mask, sizeof(u32)); +@@ -174,7 +174,7 @@ static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f, + } + + static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f, +- u32 val, u32 mask) ++ __be32 val, __be32 mask) + { + memcpy(&f->val.lip[4], &val, sizeof(u32)); + memcpy(&f->mask.lip[4], &mask, sizeof(u32)); +@@ -183,7 +183,7 @@ static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f, + } + + static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f, +- u32 val, u32 mask) ++ __be32 val, __be32 mask) + { + memcpy(&f->val.lip[8], &val, sizeof(u32)); + memcpy(&f->mask.lip[8], &mask, sizeof(u32)); +@@ -192,7 +192,7 @@ static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f, + } + + static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f, +- u32 val, u32 mask) ++ __be32 val, __be32 mask) + { + memcpy(&f->val.lip[12], &val, sizeof(u32)); + memcpy(&f->mask.lip[12], &mask, sizeof(u32)); +@@ -216,7 +216,7 @@ static const struct cxgb4_match_field cxgb4_ipv6_fields[] = { + + /* TCP/UDP match */ + static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f, +- u32 val, u32 mask) ++ __be32 val, __be32 mask) + { + f->val.fport = ntohl(val) >> 16; + f->mask.fport = ntohl(mask) >> 16; +@@ -237,19 +237,13 @@ static const struct cxgb4_match_field cxgb4_udp_fields[] = { + }; + + struct cxgb4_next_header { +- unsigned int offset; /* Offset to next header */ +- /* offset, shift, and mask added to offset above ++ /* Offset, shift, and mask added to beginning of the header + * to get to next header. Useful when using a header + * field's value to jump to next header such as IHL field + * in IPv4 header. + */ +- unsigned int offoff; +- u32 shift; +- u32 mask; +- /* match criteria to make this jump */ +- unsigned int match_off; +- u32 match_val; +- u32 match_mask; ++ struct tc_u32_sel sel; ++ struct tc_u32_key key; + /* location of jump to make */ + const struct cxgb4_match_field *jump; + }; +@@ -258,26 +252,74 @@ struct cxgb4_next_header { + * IPv4 header. 
+ */ + static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = { +- { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF, +- .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00, +- .jump = cxgb4_tcp_fields }, +- { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF, +- .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00, +- .jump = cxgb4_udp_fields }, +- { .jump = NULL } ++ { ++ /* TCP Jump */ ++ .sel = { ++ .off = 0, ++ .offoff = 0, ++ .offshift = 6, ++ .offmask = cpu_to_be16(0x0f00), ++ }, ++ .key = { ++ .off = 8, ++ .val = cpu_to_be32(0x00060000), ++ .mask = cpu_to_be32(0x00ff0000), ++ }, ++ .jump = cxgb4_tcp_fields, ++ }, ++ { ++ /* UDP Jump */ ++ .sel = { ++ .off = 0, ++ .offoff = 0, ++ .offshift = 6, ++ .offmask = cpu_to_be16(0x0f00), ++ }, ++ .key = { ++ .off = 8, ++ .val = cpu_to_be32(0x00110000), ++ .mask = cpu_to_be32(0x00ff0000), ++ }, ++ .jump = cxgb4_udp_fields, ++ }, ++ { .jump = NULL }, + }; + + /* Accept a rule with a jump directly past the 40 Bytes of IPv6 fixed header + * to get to transport layer header. + */ + static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = { +- { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0, +- .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000, +- .jump = cxgb4_tcp_fields }, +- { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0, +- .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000, +- .jump = cxgb4_udp_fields }, +- { .jump = NULL } ++ { ++ /* TCP Jump */ ++ .sel = { ++ .off = 40, ++ .offoff = 0, ++ .offshift = 0, ++ .offmask = 0, ++ }, ++ .key = { ++ .off = 4, ++ .val = cpu_to_be32(0x00000600), ++ .mask = cpu_to_be32(0x0000ff00), ++ }, ++ .jump = cxgb4_tcp_fields, ++ }, ++ { ++ /* UDP Jump */ ++ .sel = { ++ .off = 40, ++ .offoff = 0, ++ .offshift = 0, ++ .offmask = 0, ++ }, ++ .key = { ++ .off = 4, ++ .val = cpu_to_be32(0x00001100), ++ .mask = cpu_to_be32(0x0000ff00), ++ }, ++ .jump = cxgb4_udp_fields, ++ }, ++ { .jump = NULL }, + }; + + struct cxgb4_link { +diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +index 62bc2af9cde7..9a2edc4d4fe8 100644 +--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c ++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +@@ -3152,7 +3152,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, + drv_fw = &fw_info->fw_hdr; + + /* Read the header of the firmware on the card */ +- ret = -t4_read_flash(adap, FLASH_FW_START, ++ ret = t4_read_flash(adap, FLASH_FW_START, + sizeof(*card_fw) / sizeof(uint32_t), + (uint32_t *)card_fw, 1); + if (ret == 0) { +@@ -3181,8 +3181,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, + should_install_fs_fw(adap, card_fw_usable, + be32_to_cpu(fs_fw->fw_ver), + be32_to_cpu(card_fw->fw_ver))) { +- ret = -t4_fw_upgrade(adap, adap->mbox, fw_data, +- fw_size, 0); ++ ret = t4_fw_upgrade(adap, adap->mbox, fw_data, ++ fw_size, 0); + if (ret != 0) { + dev_err(adap->pdev_dev, + "failed to install firmware: %d\n", ret); +@@ -3213,7 +3213,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, + FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), + FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), + FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); +- ret = EINVAL; ++ ret = -EINVAL; + goto bye; + } + +diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h +index 130f910e4785..b6ebcee40a0d 100644 +--- a/drivers/net/ethernet/cisco/enic/enic.h ++++ b/drivers/net/ethernet/cisco/enic/enic.h +@@ -163,6 +163,7 @@ struct enic { + u16 
num_vfs; + #endif + spinlock_t enic_api_lock; ++ bool enic_api_busy; + struct enic_port_profile *pp; + + /* work queue cache line section */ +diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c +index b161f24522b8..b028ea2dec2b 100644 +--- a/drivers/net/ethernet/cisco/enic/enic_api.c ++++ b/drivers/net/ethernet/cisco/enic/enic_api.c +@@ -34,6 +34,12 @@ int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf, + struct vnic_dev *vdev = enic->vdev; + + spin_lock(&enic->enic_api_lock); ++ while (enic->enic_api_busy) { ++ spin_unlock(&enic->enic_api_lock); ++ cpu_relax(); ++ spin_lock(&enic->enic_api_lock); ++ } ++ + spin_lock_bh(&enic->devcmd_lock); + + vnic_dev_cmd_proxy_by_index_start(vdev, vf); +diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c +index 96290b83dfde..3a3f3a7d7a75 100644 +--- a/drivers/net/ethernet/cisco/enic/enic_main.c ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c +@@ -1938,8 +1938,6 @@ static int enic_dev_wait(struct vnic_dev *vdev, + int done; + int err; + +- BUG_ON(in_interrupt()); +- + err = start(vdev, arg); + if (err) + return err; +@@ -2116,6 +2114,13 @@ static int enic_set_rss_nic_cfg(struct enic *enic) + rss_hash_bits, rss_base_cpu, rss_enable); + } + ++static void enic_set_api_busy(struct enic *enic, bool busy) ++{ ++ spin_lock(&enic->enic_api_lock); ++ enic->enic_api_busy = busy; ++ spin_unlock(&enic->enic_api_lock); ++} ++ + static void enic_reset(struct work_struct *work) + { + struct enic *enic = container_of(work, struct enic, reset); +@@ -2125,7 +2130,9 @@ static void enic_reset(struct work_struct *work) + + rtnl_lock(); + +- spin_lock(&enic->enic_api_lock); ++ /* Stop any activity from infiniband */ ++ enic_set_api_busy(enic, true); ++ + enic_stop(enic->netdev); + enic_dev_soft_reset(enic); + enic_reset_addr_lists(enic); +@@ -2133,7 +2140,10 @@ static void enic_reset(struct work_struct *work) + enic_set_rss_nic_cfg(enic); + enic_dev_set_ig_vlan_rewrite_mode(enic); + enic_open(enic->netdev); +- spin_unlock(&enic->enic_api_lock); ++ ++ /* Allow infiniband to fiddle with the device again */ ++ enic_set_api_busy(enic, false); ++ + call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); + + rtnl_unlock(); +@@ -2145,7 +2155,9 @@ static void enic_tx_hang_reset(struct work_struct *work) + + rtnl_lock(); + +- spin_lock(&enic->enic_api_lock); ++ /* Stop any activity from infiniband */ ++ enic_set_api_busy(enic, true); ++ + enic_dev_hang_notify(enic); + enic_stop(enic->netdev); + enic_dev_hang_reset(enic); +@@ -2154,7 +2166,10 @@ static void enic_tx_hang_reset(struct work_struct *work) + enic_set_rss_nic_cfg(enic); + enic_dev_set_ig_vlan_rewrite_mode(enic); + enic_open(enic->netdev); +- spin_unlock(&enic->enic_api_lock); ++ ++ /* Allow infiniband to fiddle with the device again */ ++ enic_set_api_busy(enic, false); ++ + call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); + + rtnl_unlock(); +diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c +index f45385f5c6e5..0fe4d8999823 100644 +--- a/drivers/net/ethernet/davicom/dm9000.c ++++ b/drivers/net/ethernet/davicom/dm9000.c +@@ -143,6 +143,8 @@ struct board_info { + u32 wake_state; + + int ip_summed; ++ ++ struct regulator *power_supply; + }; + + /* debug code */ +@@ -1459,7 +1461,7 @@ dm9000_probe(struct platform_device *pdev) + if (ret) { + dev_err(dev, "failed to request reset gpio %d: %d\n", + reset_gpios, ret); +- return -ENODEV; ++ goto out_regulator_disable; 
+ } + + /* According to manual PWRST# Low Period Min 1ms */ +@@ -1471,14 +1473,18 @@ dm9000_probe(struct platform_device *pdev) + + if (!pdata) { + pdata = dm9000_parse_dt(&pdev->dev); +- if (IS_ERR(pdata)) +- return PTR_ERR(pdata); ++ if (IS_ERR(pdata)) { ++ ret = PTR_ERR(pdata); ++ goto out_regulator_disable; ++ } + } + + /* Init network device */ + ndev = alloc_etherdev(sizeof(struct board_info)); +- if (!ndev) +- return -ENOMEM; ++ if (!ndev) { ++ ret = -ENOMEM; ++ goto out_regulator_disable; ++ } + + SET_NETDEV_DEV(ndev, &pdev->dev); + +@@ -1489,6 +1495,8 @@ dm9000_probe(struct platform_device *pdev) + + db->dev = &pdev->dev; + db->ndev = ndev; ++ if (!IS_ERR(power)) ++ db->power_supply = power; + + spin_lock_init(&db->lock); + mutex_init(&db->addr_lock); +@@ -1715,6 +1723,10 @@ dm9000_probe(struct platform_device *pdev) + dm9000_release_board(pdev, db); + free_netdev(ndev); + ++out_regulator_disable: ++ if (!IS_ERR(power)) ++ regulator_disable(power); ++ + return ret; + } + +@@ -1774,10 +1786,13 @@ static int + dm9000_drv_remove(struct platform_device *pdev) + { + struct net_device *ndev = platform_get_drvdata(pdev); ++ struct board_info *dm = to_dm9000_board(ndev); + + unregister_netdev(ndev); +- dm9000_release_board(pdev, netdev_priv(ndev)); ++ dm9000_release_board(pdev, dm); + free_netdev(ndev); /* free device structure */ ++ if (dm->power_supply) ++ regulator_disable(dm->power_supply); + + dev_dbg(&pdev->dev, "released and freed device\n"); + return 0; +diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c +index cadcee645f74..11ce50a05799 100644 +--- a/drivers/net/ethernet/dec/tulip/de2104x.c ++++ b/drivers/net/ethernet/dec/tulip/de2104x.c +@@ -91,7 +91,7 @@ MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copi + #define DSL CONFIG_DE2104X_DSL + #endif + +-#define DE_RX_RING_SIZE 64 ++#define DE_RX_RING_SIZE 128 + #define DE_TX_RING_SIZE 64 + #define DE_RING_BYTES \ + ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \ +diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c +index 1f62b9423851..31dfb695eded 100644 +--- a/drivers/net/ethernet/dec/tulip/winbond-840.c ++++ b/drivers/net/ethernet/dec/tulip/winbond-840.c +@@ -368,7 +368,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) + int i, option = find_cnt < MAX_UNITS ? 
options[find_cnt] : 0; + void __iomem *ioaddr; + +- i = pci_enable_device(pdev); ++ i = pcim_enable_device(pdev); + if (i) return i; + + pci_set_master(pdev); +@@ -390,7 +390,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) + + ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size); + if (!ioaddr) +- goto err_out_free_res; ++ goto err_out_netdev; + + for (i = 0; i < 3; i++) + ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i)); +@@ -469,8 +469,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) + + err_out_cleardev: + pci_iounmap(pdev, ioaddr); +-err_out_free_res: +- pci_release_regions(pdev); + err_out_netdev: + free_netdev (dev); + return -ENODEV; +@@ -1537,7 +1535,6 @@ static void w840_remove1(struct pci_dev *pdev) + if (dev) { + struct netdev_private *np = netdev_priv(dev); + unregister_netdev(dev); +- pci_release_regions(pdev); + pci_iounmap(pdev, np->base_addr); + free_netdev(dev); + } +diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c +index f7b42483921c..0ade0c6d81ee 100644 +--- a/drivers/net/ethernet/ec_bhf.c ++++ b/drivers/net/ethernet/ec_bhf.c +@@ -589,10 +589,12 @@ static void ec_bhf_remove(struct pci_dev *dev) + struct ec_bhf_priv *priv = netdev_priv(net_dev); + + unregister_netdev(net_dev); +- free_netdev(net_dev); + + pci_iounmap(dev, priv->dma_io); + pci_iounmap(dev, priv->io); ++ ++ free_netdev(net_dev); ++ + pci_release_regions(dev); + pci_clear_master(dev); + pci_disable_device(dev); +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c +index 289560b0f643..b0b9f77c3740 100644 +--- a/drivers/net/ethernet/emulex/benet/be_main.c ++++ b/drivers/net/ethernet/emulex/benet/be_main.c +@@ -5998,6 +5998,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) + unmap_bars: + be_unmap_pci_bars(adapter); + free_netdev: ++ pci_disable_pcie_error_reporting(pdev); + free_netdev(netdev); + rel_reg: + pci_release_regions(pdev); +diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c +index e31199f3048c..0d3b159f4c56 100644 +--- a/drivers/net/ethernet/ethoc.c ++++ b/drivers/net/ethernet/ethoc.c +@@ -1190,7 +1190,7 @@ static int ethoc_probe(struct platform_device *pdev) + ret = mdiobus_register(priv->mdio); + if (ret) { + dev_err(&netdev->dev, "failed to register MDIO bus\n"); +- goto free2; ++ goto free3; + } + + ret = ethoc_mdio_probe(netdev); +@@ -1222,6 +1222,7 @@ static int ethoc_probe(struct platform_device *pdev) + netif_napi_del(&priv->napi); + error: + mdiobus_unregister(priv->mdio); ++free3: + mdiobus_free(priv->mdio); + free2: + if (priv->clk) +diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c +index 223f35cc034c..6de29f2a0a09 100644 +--- a/drivers/net/ethernet/ezchip/nps_enet.c ++++ b/drivers/net/ethernet/ezchip/nps_enet.c +@@ -624,7 +624,7 @@ static s32 nps_enet_probe(struct platform_device *pdev) + + /* Get IRQ number */ + priv->irq = platform_get_irq(pdev, 0); +- if (!priv->irq) { ++ if (priv->irq < 0) { + dev_err(dev, "failed to retrieve value from device tree\n"); + err = -ENODEV; + goto out_netdev; +@@ -659,8 +659,8 @@ static s32 nps_enet_remove(struct platform_device *pdev) + struct nps_enet_priv *priv = netdev_priv(ndev); + + unregister_netdev(ndev); +- free_netdev(ndev); + netif_napi_del(&priv->napi); ++ free_netdev(ndev); + + return 0; + } +diff --git a/drivers/net/ethernet/faraday/ftgmac100.c 
b/drivers/net/ethernet/faraday/ftgmac100.c +index 0437149f5939..65859ecf5c32 100644 +--- a/drivers/net/ethernet/faraday/ftgmac100.c ++++ b/drivers/net/ethernet/faraday/ftgmac100.c +@@ -1444,6 +1444,8 @@ static int ftgmac100_probe(struct platform_device *pdev) + return 0; + + err_ncsi_dev: ++ if (priv->ndev) ++ ncsi_unregister_dev(priv->ndev); + err_register_netdev: + ftgmac100_destroy_mdio(netdev); + err_setup_mdio: +@@ -1465,6 +1467,8 @@ static int __exit ftgmac100_remove(struct platform_device *pdev) + netdev = platform_get_drvdata(pdev); + priv = netdev_priv(netdev); + ++ if (priv->ndev) ++ ncsi_unregister_dev(priv->ndev); + unregister_netdev(netdev); + ftgmac100_destroy_mdio(netdev); + +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c +index 8df32398d343..5fc40f025d21 100644 +--- a/drivers/net/ethernet/freescale/fec_main.c ++++ b/drivers/net/ethernet/freescale/fec_main.c +@@ -3505,11 +3505,11 @@ fec_probe(struct platform_device *pdev) + failed_irq: + failed_init: + fec_ptp_stop(pdev); +- if (fep->reg_phy) +- regulator_disable(fep->reg_phy); + failed_reset: + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); ++ if (fep->reg_phy) ++ regulator_disable(fep->reg_phy); + failed_regulator: + failed_clk_ipg: + fec_enet_clk_enable(ndev, false); +@@ -3546,13 +3546,13 @@ fec_drv_remove(struct platform_device *pdev) + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + of_node_put(fep->phy_node); +- free_netdev(ndev); + + clk_disable_unprepare(fep->clk_ahb); + clk_disable_unprepare(fep->clk_ipg); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); + ++ free_netdev(ndev); + return 0; + } + +diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c +index f9e74461bdc0..031d4b3a544c 100644 +--- a/drivers/net/ethernet/freescale/fec_ptp.c ++++ b/drivers/net/ethernet/freescale/fec_ptp.c +@@ -396,9 +396,16 @@ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) + u64 ns; + unsigned long flags; + ++ mutex_lock(&adapter->ptp_clk_mutex); ++ /* Check the ptp clock */ ++ if (!adapter->ptp_clk_on) { ++ mutex_unlock(&adapter->ptp_clk_mutex); ++ return -EINVAL; ++ } + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->tc); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); ++ mutex_unlock(&adapter->ptp_clk_mutex); + + *ts = ns_to_timespec64(ns); + +@@ -579,6 +586,10 @@ void fec_ptp_init(struct platform_device *pdev) + fep->ptp_caps.enable = fec_ptp_enable; + + fep->cycle_speed = clk_get_rate(fep->clk_ptp); ++ if (!fep->cycle_speed) { ++ fep->cycle_speed = NSEC_PER_SEC; ++ dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n"); ++ } + fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed; + + spin_lock_init(&fep->tmreg_lock); +diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c +index 380c4a2f6516..6a11f9916116 100644 +--- a/drivers/net/ethernet/freescale/fman/fman.c ++++ b/drivers/net/ethernet/freescale/fman/fman.c +@@ -1446,8 +1446,7 @@ static void enable_time_stamp(struct fman *fman) + { + struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; + u16 fm_clk_freq = fman->state->fm_clk_freq; +- u32 tmp, intgr, ts_freq; +- u64 frac; ++ u32 tmp, intgr, ts_freq, frac; + + ts_freq = (u32)(1 << fman->state->count1_micro_bit); + /* configure timestamp so that bit 8 will count 1 microsecond +diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c 
b/drivers/net/ethernet/freescale/fman/fman_dtsec.c +index 641b916f122b..332b60f03d22 100644 +--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c ++++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c +@@ -1095,7 +1095,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) + list_for_each(pos, + &dtsec->multicast_addr_hash->lsts[bucket]) { + hash_entry = ETH_HASH_ENTRY_OBJ(pos); +- if (hash_entry->addr == addr) { ++ if (hash_entry && hash_entry->addr == addr) { + list_del_init(&hash_entry->node); + kfree(hash_entry); + break; +@@ -1108,7 +1108,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) + list_for_each(pos, + &dtsec->unicast_addr_hash->lsts[bucket]) { + hash_entry = ETH_HASH_ENTRY_OBJ(pos); +- if (hash_entry->addr == addr) { ++ if (hash_entry && hash_entry->addr == addr) { + list_del_init(&hash_entry->node); + kfree(hash_entry); + break; +diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h +index dd6d0526f6c1..19f327efdaff 100644 +--- a/drivers/net/ethernet/freescale/fman/fman_mac.h ++++ b/drivers/net/ethernet/freescale/fman/fman_mac.h +@@ -252,7 +252,7 @@ static inline struct eth_hash_t *alloc_hash_table(u16 size) + struct eth_hash_t *hash; + + /* Allocate address hash table */ +- hash = kmalloc_array(size, sizeof(struct eth_hash_t *), GFP_KERNEL); ++ hash = kmalloc(sizeof(*hash), GFP_KERNEL); + if (!hash) + return NULL; + +diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c +index c30994a09a7c..4b0be0cebd19 100644 +--- a/drivers/net/ethernet/freescale/fman/fman_memac.c ++++ b/drivers/net/ethernet/freescale/fman/fman_memac.c +@@ -851,7 +851,6 @@ int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority, + + tmp = ioread32be(®s->command_config); + tmp &= ~CMD_CFG_PFC_MODE; +- priority = 0; + + iowrite32be(tmp, ®s->command_config); + +@@ -953,7 +952,7 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) + + list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) { + hash_entry = ETH_HASH_ENTRY_OBJ(pos); +- if (hash_entry->addr == addr) { ++ if (hash_entry && hash_entry->addr == addr) { + list_del_init(&hash_entry->node); + kfree(hash_entry); + break; +diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c +index 9f3bb50a2365..4986f6ba278a 100644 +--- a/drivers/net/ethernet/freescale/fman/fman_port.c ++++ b/drivers/net/ethernet/freescale/fman/fman_port.c +@@ -1623,6 +1623,7 @@ static int fman_port_probe(struct platform_device *of_dev) + struct fman_port *port; + struct fman *fman; + struct device_node *fm_node, *port_node; ++ struct platform_device *fm_pdev; + struct resource res; + struct resource *dev_res; + u32 val; +@@ -1647,8 +1648,14 @@ static int fman_port_probe(struct platform_device *of_dev) + goto return_err; + } + +- fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev); ++ fm_pdev = of_find_device_by_node(fm_node); + of_node_put(fm_node); ++ if (!fm_pdev) { ++ err = -EINVAL; ++ goto return_err; ++ } ++ ++ fman = dev_get_drvdata(&fm_pdev->dev); + if (!fman) { + err = -EINVAL; + goto return_err; +diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c +index e575259d20f4..c8ad9b8a75f8 100644 +--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c ++++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c +@@ -585,7 +585,7 @@ int 
tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) + + list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) { + hash_entry = ETH_HASH_ENTRY_OBJ(pos); +- if (hash_entry->addr == addr) { ++ if (hash_entry && hash_entry->addr == addr) { + list_del_init(&hash_entry->node); + kfree(hash_entry); + break; +diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +index 1f015edcca22..3c6fc61597f7 100644 +--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c ++++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +@@ -223,3 +223,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = { + }; + + module_platform_driver(fs_enet_bb_mdio_driver); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +index a89267b94352..fb2b0586469b 100644 +--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c ++++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +@@ -224,3 +224,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = { + }; + + module_platform_driver(fs_enet_fec_mdio_driver); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c +index b665d27f8e29..9fd68cfdd973 100644 +--- a/drivers/net/ethernet/freescale/gianfar.c ++++ b/drivers/net/ethernet/freescale/gianfar.c +@@ -485,7 +485,11 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev) + + static int gfar_set_mac_addr(struct net_device *dev, void *p) + { +- eth_mac_addr(dev, p); ++ int ret; ++ ++ ret = eth_mac_addr(dev, p); ++ if (ret) ++ return ret; + + gfar_set_mac_for_addr(dev, 0, dev->dev_addr); + +@@ -844,8 +848,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) + continue; + + err = gfar_parse_group(child, priv, model); +- if (err) ++ if (err) { ++ of_node_put(child); + goto err_grp_init; ++ } + } + } else { /* SQ_SG_MODE */ + err = gfar_parse_group(np, priv, model); +@@ -1383,7 +1389,7 @@ static int gfar_probe(struct platform_device *ofdev) + + if (dev->features & NETIF_F_IP_CSUM || + priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) +- dev->needed_headroom = GMAC_FCB_LEN; ++ dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN; + + /* Initializing some of the rx/tx queue level parameters */ + for (i = 0; i < priv->num_tx_queues; i++) { +@@ -2365,20 +2371,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) + fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN; + + /* make space for additional header when fcb is needed */ +- if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) { +- struct sk_buff *skb_new; +- +- skb_new = skb_realloc_headroom(skb, fcb_len); +- if (!skb_new) { ++ if (fcb_len) { ++ if (unlikely(skb_cow_head(skb, fcb_len))) { + dev->stats.tx_errors++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } +- +- if (skb->sk) +- skb_set_owner_w(skb_new, skb->sk); +- dev_consume_skb_any(skb); +- skb = skb_new; + } + + /* total number of fragments in the SKB */ +diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c +index af922bac19ae..cdef919d5b75 100644 +--- a/drivers/net/ethernet/freescale/ucc_geth.c ++++ b/drivers/net/ethernet/freescale/ucc_geth.c +@@ -3939,12 +3939,12 @@ static int ucc_geth_remove(struct platform_device* ofdev) + struct device_node *np = ofdev->dev.of_node; + + unregister_netdev(dev); +- free_netdev(dev); + ucc_geth_memclean(ugeth); + if 
(of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + of_node_put(ugeth->ug_info->tbi_node); + of_node_put(ugeth->ug_info->phy_node); ++ free_netdev(dev); + + return 0; + } +diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h +index 5da19b440a6a..bf25e49d4fe3 100644 +--- a/drivers/net/ethernet/freescale/ucc_geth.h ++++ b/drivers/net/ethernet/freescale/ucc_geth.h +@@ -580,7 +580,14 @@ struct ucc_geth_tx_global_pram { + u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */ + u32 tqptr; /* a base pointer to the Tx Queues Memory + Region */ +- u8 res2[0x80 - 0x74]; ++ u8 res2[0x78 - 0x74]; ++ u64 snums_en; ++ u32 l2l3baseptr; /* top byte consists of a few other bit fields */ ++ ++ u16 mtu[8]; ++ u8 res3[0xa8 - 0x94]; ++ u32 wrrtablebase; /* top byte is reserved */ ++ u8 res4[0xc0 - 0xac]; + } __packed; + + /* structure representing Extended Filtering Global Parameters in PRAM */ +diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c +index 399cfd217288..cfda55bfa811 100644 +--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c ++++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c +@@ -548,6 +548,11 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id) + return -1; + + base = ioremap(link->resource[2]->start, resource_size(link->resource[2])); ++ if (!base) { ++ pcmcia_release_window(link, link->resource[2]); ++ return -1; ++ } ++ + pcmcia_map_mem_page(link, link->resource[2], 0); + + /* +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c +index 24a815997ec5..796f81106b43 100644 +--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c ++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c +@@ -1990,8 +1990,10 @@ static int hns_nic_dev_probe(struct platform_device *pdev) + priv->enet_ver = AE_VERSION_1; + else if (acpi_dev_found(hns_enet_acpi_match[1].id)) + priv->enet_ver = AE_VERSION_2; +- else +- return -ENXIO; ++ else { ++ ret = -ENXIO; ++ goto out_read_prop_fail; ++ } + + /* try to find port-idx-in-ae first */ + ret = acpi_node_get_property_reference(dev->fwnode, +@@ -2003,7 +2005,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev) + priv->fwnode = acpi_fwnode_handle(args.adev); + } else { + dev_err(dev, "cannot read cfg data from OF or acpi\n"); +- return -ENXIO; ++ ret = -ENXIO; ++ goto out_read_prop_fail; + } + + ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id); +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +index f38848c4f69d..62143ccfd5fb 100644 +--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c ++++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +@@ -447,6 +447,10 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data, + /* for mutl buffer*/ + new_skb = skb_copy(skb, GFP_ATOMIC); + dev_kfree_skb_any(skb); ++ if (!new_skb) { ++ netdev_err(ndev, "skb alloc failed\n"); ++ return; ++ } + skb = new_skb; + + check_ok = 0; +diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c +index 3692adb8902d..6c70cba92df0 100644 +--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c ++++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c +@@ -2656,10 +2656,8 @@ static int ehea_restart_qps(struct net_device *dev) + u16 dummy16 = 0; + + cb0 = (void *)get_zeroed_page(GFP_KERNEL); +- if (!cb0) { +- ret = -ENOMEM; +- goto out; +- } ++ if (!cb0) ++ return -ENOMEM; + + for (i = 0; i < (port->num_def_qps); 
i++) { + struct ehea_port_res *pr = &port->port_res[i]; +@@ -2679,6 +2677,7 @@ static int ehea_restart_qps(struct net_device *dev) + cb0); + if (hret != H_SUCCESS) { + netdev_err(dev, "query_ehea_qp failed (1)\n"); ++ ret = -EFAULT; + goto out; + } + +@@ -2691,6 +2690,7 @@ static int ehea_restart_qps(struct net_device *dev) + &dummy64, &dummy16, &dummy16); + if (hret != H_SUCCESS) { + netdev_err(dev, "modify_ehea_qp failed (1)\n"); ++ ret = -EFAULT; + goto out; + } + +@@ -2699,6 +2699,7 @@ static int ehea_restart_qps(struct net_device *dev) + cb0); + if (hret != H_SUCCESS) { + netdev_err(dev, "query_ehea_qp failed (2)\n"); ++ ret = -EFAULT; + goto out; + } + +diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c +index de9897c8e933..f5fd20dc8ab1 100644 +--- a/drivers/net/ethernet/ibm/ibmveth.c ++++ b/drivers/net/ethernet/ibm/ibmveth.c +@@ -1256,6 +1256,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) + int offset = ibmveth_rxq_frame_offset(adapter); + int csum_good = ibmveth_rxq_csum_good(adapter); + int lrg_pkt = ibmveth_rxq_large_packet(adapter); ++ __sum16 iph_check = 0; + + skb = ibmveth_rxq_get_buffer(adapter); + +@@ -1307,7 +1308,17 @@ static int ibmveth_poll(struct napi_struct *napi, int budget) + } + } + +- if (length > netdev->mtu + ETH_HLEN) { ++ /* PHYP without PLSO support places a -1 in the ip ++ * checksum for large send frames. ++ */ ++ if (skb->protocol == cpu_to_be16(ETH_P_IP)) { ++ struct iphdr *iph = (struct iphdr *)skb->data; ++ ++ iph_check = iph->check; ++ } ++ ++ if ((length > netdev->mtu + ETH_HLEN) || ++ lrg_pkt || iph_check == 0xffff) { + ibmveth_rx_mss_helper(skb, mss, lrg_pkt); + adapter->rx_large_packets++; + } +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index 897a87ae8655..f23559a2b2bd 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -985,6 +985,12 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget) + + if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num])) + break; ++ /* The queue entry at the current index is peeked at above ++ * to determine that there is a valid descriptor awaiting ++ * processing. We want to be sure that the current slot ++ * holds a valid descriptor before reading its contents. ++ */ ++ dma_rmb(); + next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]); + rx_buff = + (struct ibmvnic_rx_buff *)be64_to_cpu(next-> +@@ -1373,13 +1379,18 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, + while (pending_scrq(adapter, scrq)) { + unsigned int pool = scrq->pool_index; + ++ /* The queue entry at the current index is peeked at above ++ * to determine that there is a valid descriptor awaiting ++ * processing. We want to be sure that the current slot ++ * holds a valid descriptor before reading its contents. 
++ */ ++ dma_rmb(); ++ + next = ibmvnic_next_scrq(adapter, scrq); + for (i = 0; i < next->tx_comp.num_comps; i++) { +- if (next->tx_comp.rcs[i]) { ++ if (next->tx_comp.rcs[i]) + dev_err(dev, "tx error %x\n", + next->tx_comp.rcs[i]); +- continue; +- } + index = be32_to_cpu(next->tx_comp.correlators[i]); + txbuff = &adapter->tx_pool[pool].tx_buff[index]; + +@@ -1515,7 +1526,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) + req_tx_irq_failed: + for (j = 0; j < i; j++) { + free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); +- irq_dispose_mapping(adapter->rx_scrq[j]->irq); ++ irq_dispose_mapping(adapter->tx_scrq[j]->irq); + } + release_sub_crqs_no_irqs(adapter); + return rc; +@@ -1707,6 +1718,11 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter, + } + spin_unlock_irqrestore(&scrq->lock, flags); + ++ /* Ensure that the entire buffer descriptor has been ++ * loaded before reading its contents ++ */ ++ dma_rmb(); ++ + return entry; + } + +@@ -3362,12 +3378,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, + dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); + break; + } +- dev_info(dev, "Partner protocol version is %d\n", +- crq->version_exchange_rsp.version); +- if (be16_to_cpu(crq->version_exchange_rsp.version) < +- ibmvnic_version) +- ibmvnic_version = ++ ibmvnic_version = + be16_to_cpu(crq->version_exchange_rsp.version); ++ dev_info(dev, "Partner protocol version is %d\n", ++ ibmvnic_version); + send_cap_queries(adapter); + break; + case QUERY_CAPABILITY_RSP: +@@ -3482,6 +3496,12 @@ static irqreturn_t ibmvnic_interrupt(int irq, void *instance) + while (!done) { + /* Pull all the valid messages off the CRQ */ + while ((crq = ibmvnic_next_crq(adapter)) != NULL) { ++ /* This barrier makes sure ibmvnic_next_crq()'s ++ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded ++ * before ibmvnic_handle_crq()'s ++ * switch(gen_crq->first) and switch(gen_crq->cmd). ++ */ ++ dma_rmb(); + ibmvnic_handle_crq(crq, adapter); + crq->generic.first = 0; + } +@@ -3527,6 +3547,9 @@ static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter) + } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); + + /* Clean out the queue */ ++ if (!crq->msgs) ++ return -EINVAL; ++ + memset(crq->msgs, 0, PAGE_SIZE); + crq->cur = 0; + +diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c +index 93c29094ceff..9035cb5fc70d 100644 +--- a/drivers/net/ethernet/intel/e100.c ++++ b/drivers/net/ethernet/intel/e100.c +@@ -1423,7 +1423,7 @@ static int e100_phy_check_without_mii(struct nic *nic) + u8 phy_type; + int without_mii; + +- phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f; ++ phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f; + + switch (phy_type) { + case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ +@@ -1543,7 +1543,7 @@ static int e100_phy_init(struct nic *nic) + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); + } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && + (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && +- (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) { ++ (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) { + /* enable/disable MDI/MDI-X auto-switching. */ + mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, + nic->mii.force_media ? 
0 : NCONFIG_AUTO_SWITCH); +@@ -2298,9 +2298,9 @@ static int e100_asf(struct nic *nic) + { + /* ASF can be enabled from eeprom */ + return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && +- (nic->eeprom[eeprom_config_asf] & eeprom_asf) && +- !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) && +- ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE); ++ (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) && ++ !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) && ++ ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE); + } + + static int e100_up(struct nic *nic) +@@ -2952,7 +2952,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + + /* Wol magic packet can be enabled from eeprom */ + if ((nic->mac >= mac_82558_D101_A4) && +- (nic->eeprom[eeprom_id] & eeprom_id_wol)) { ++ (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) { + nic->flags |= wol_magic; + device_set_wakeup_enable(&pdev->dev, true); + } +diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c +index 39a09e18c1b7..c30792b761ee 100644 +--- a/drivers/net/ethernet/intel/e1000/e1000_main.c ++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c +@@ -568,8 +568,13 @@ void e1000_reinit_locked(struct e1000_adapter *adapter) + WARN_ON(in_interrupt()); + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); +- e1000_down(adapter); +- e1000_up(adapter); ++ ++ /* only run the task if not already down */ ++ if (!test_bit(__E1000_DOWN, &adapter->flags)) { ++ e1000_down(adapter); ++ e1000_up(adapter); ++ } ++ + clear_bit(__E1000_RESETTING, &adapter->flags); + } + +@@ -1456,10 +1461,15 @@ int e1000_close(struct net_device *netdev) + struct e1000_hw *hw = &adapter->hw; + int count = E1000_CHECK_RESET_COUNT; + +- while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) ++ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + +- WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); ++ WARN_ON(count < 0); ++ ++ /* signal that we're down so that the reset task will no longer run */ ++ set_bit(__E1000_DOWN, &adapter->flags); ++ clear_bit(__E1000_RESETTING, &adapter->flags); ++ + e1000_down(adapter); + e1000_power_down_phy(adapter); + e1000_free_irq(adapter); +@@ -3167,8 +3177,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb->data_len && hdr_len == len) { + switch (hw->mac_type) { ++ case e1000_82544: { + unsigned int pull_size; +- case e1000_82544: ++ + /* Make sure we have room to chop off 4 bytes, + * and that the end alignment will work out to + * this hardware's requirements +@@ -3189,6 +3200,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + } + len = skb_headlen(skb); + break; ++ } + default: + /* do nothing */ + break; +diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c +index 6b03c8553e59..65deaf8f3004 100644 +--- a/drivers/net/ethernet/intel/e1000e/82571.c ++++ b/drivers/net/ethernet/intel/e1000e/82571.c +@@ -917,6 +917,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); ++ if (ret_val) ++ return ret_val; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. 
During driver activity we should enable +diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h +index 879cca47b021..62675938cb59 100644 +--- a/drivers/net/ethernet/intel/e1000e/e1000.h ++++ b/drivers/net/ethernet/intel/e1000e/e1000.h +@@ -589,7 +589,6 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) + + #define er32(reg) __er32(hw, E1000_##reg) + +-s32 __ew32_prepare(struct e1000_hw *hw); + void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val); + + #define ew32(reg, val) __ew32(hw, E1000_##reg, (val)) +diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c +index 625008e8cb0d..500016209ae0 100644 +--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c ++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c +@@ -1010,6 +1010,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) + { + u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | + link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; ++ u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */ ++ u16 lat_enc_d = 0; /* latency decoded */ + u16 lat_enc = 0; /* latency encoded */ + + if (link) { +@@ -1063,7 +1065,17 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) + E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop); + max_ltr_enc = max_t(u16, max_snoop, max_nosnoop); + +- if (lat_enc > max_ltr_enc) ++ lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) * ++ (1U << (E1000_LTRV_SCALE_FACTOR * ++ ((lat_enc & E1000_LTRV_SCALE_MASK) ++ >> E1000_LTRV_SCALE_SHIFT))); ++ ++ max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) * ++ (1U << (E1000_LTRV_SCALE_FACTOR * ++ ((max_ltr_enc & E1000_LTRV_SCALE_MASK) ++ >> E1000_LTRV_SCALE_SHIFT))); ++ ++ if (lat_enc_d > max_ltr_enc_d) + lat_enc = max_ltr_enc; + } + +diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h +index 6374c8fc76a8..9957a4ffdc6d 100644 +--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h ++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h +@@ -291,8 +291,11 @@ + + /* Latency Tolerance Reporting */ + #define E1000_LTRV 0x000F8 ++#define E1000_LTRV_VALUE_MASK 0x000003FF + #define E1000_LTRV_SCALE_MAX 5 + #define E1000_LTRV_SCALE_FACTOR 5 ++#define E1000_LTRV_SCALE_SHIFT 10 ++#define E1000_LTRV_SCALE_MASK 0x00001C00 + #define E1000_LTRV_REQ_SHIFT 15 + #define E1000_LTRV_NOSNOOP_SHIFT 16 + #define E1000_LTRV_SEND (1 << 30) +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c +index a0f97c5ab6ef..5d7967c03554 100644 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c +@@ -136,14 +136,12 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { + * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set + * and try again a number of times. 
+ **/ +-s32 __ew32_prepare(struct e1000_hw *hw) ++static void __ew32_prepare(struct e1000_hw *hw) + { + s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; + + while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) + udelay(50); +- +- return i; + } + + void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) +@@ -624,11 +622,11 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) + { + struct e1000_adapter *adapter = rx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; +- s32 ret_val = __ew32_prepare(hw); + ++ __ew32_prepare(hw); + writel(i, rx_ring->tail); + +- if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { ++ if (unlikely(i != readl(rx_ring->tail))) { + u32 rctl = er32(RCTL); + + ew32(RCTL, rctl & ~E1000_RCTL_EN); +@@ -641,11 +639,11 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) + { + struct e1000_adapter *adapter = tx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; +- s32 ret_val = __ew32_prepare(hw); + ++ __ew32_prepare(hw); + writel(i, tx_ring->tail); + +- if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { ++ if (unlikely(i != readl(tx_ring->tail))) { + u32 tctl = er32(TCTL); + + ew32(TCTL, tctl & ~E1000_TCTL_EN); +@@ -5922,15 +5920,19 @@ static void e1000_reset_task(struct work_struct *work) + struct e1000_adapter *adapter; + adapter = container_of(work, struct e1000_adapter, reset_task); + ++ rtnl_lock(); + /* don't run the task if already down */ +- if (test_bit(__E1000_DOWN, &adapter->state)) ++ if (test_bit(__E1000_DOWN, &adapter->state)) { ++ rtnl_unlock(); + return; ++ } + + if (!(adapter->flags & FLAG_RESTART_NOW)) { + e1000e_dump(adapter); + e_err("Reset adapter unexpectedly\n"); + } + e1000e_reinit_locked(adapter); ++ rtnl_unlock(); + } + + /** +@@ -6317,11 +6319,17 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; +- u32 ctrl, ctrl_ext, rctl, status; +- /* Runtime suspend should only enable wakeup for link changes */ +- u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; ++ u32 ctrl, ctrl_ext, rctl, status, wufc; + int retval = 0; + ++ /* Runtime suspend should only enable wakeup for link changes */ ++ if (runtime) ++ wufc = E1000_WUFC_LNKC; ++ else if (device_may_wakeup(&pdev->dev)) ++ wufc = adapter->wol; ++ else ++ wufc = 0; ++ + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; +@@ -6379,7 +6387,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) + e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); + } else if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { +- if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) ++ if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) + /* ULP does not support wake from unicast, multicast + * or broadcast. 
+ */ +@@ -7367,6 +7375,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + err_ioremap: + free_netdev(netdev); + err_alloc_etherdev: ++ pci_disable_pcie_error_reporting(pdev); + pci_release_mem_regions(pdev); + err_pci_reg: + err_dma: +diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +index e372a5823480..8e6ad74f29d1 100644 +--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c ++++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +@@ -2083,6 +2083,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + err_ioremap: + free_netdev(netdev); + err_alloc_netdev: ++ pci_disable_pcie_error_reporting(pdev); + pci_release_mem_regions(pdev); + err_pci_reg: + err_dma: +diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +index 67e396b2b347..0e75c3a34fe7 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h ++++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +@@ -1107,7 +1107,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { + #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 + #define I40E_AQC_SET_VSI_DEFAULT 0x08 + #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 +-#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 ++#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000 + __le16 seid; + #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF + __le16 vlan_tag; +diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c +index 2154a34c1dd8..09b47088dcc2 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_common.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c +@@ -1922,6 +1922,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, + return status; + } + ++/** ++ * i40e_is_aq_api_ver_ge ++ * @aq: pointer to AdminQ info containing HW API version to compare ++ * @maj: API major value ++ * @min: API minor value ++ * ++ * Assert whether current HW API version is greater/equal than provided. 
++ **/ ++static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj, ++ u16 min) ++{ ++ return (aq->api_maj_ver > maj || ++ (aq->api_maj_ver == maj && aq->api_min_ver >= min)); ++} ++ + /** + * i40e_aq_add_vsi + * @hw: pointer to the hw struct +@@ -2047,18 +2062,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, + + if (set) { + flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; +- if (rx_only_promisc && +- (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || +- (hw->aq.api_maj_ver > 1))) +- flags |= I40E_AQC_SET_VSI_PROMISC_TX; ++ if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) ++ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; + } + + cmd->promiscuous_flags = cpu_to_le16(flags); + + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); +- if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || +- (hw->aq.api_maj_ver > 1)) +- cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX); ++ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) ++ cmd->valid_flags |= ++ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); + + cmd->seid = cpu_to_le16(seid); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); +@@ -2155,11 +2168,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + +- if (enable) ++ if (enable) { + flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; ++ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) ++ flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; ++ } + + cmd->promiscuous_flags = cpu_to_le16(flags); + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); ++ if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) ++ cmd->valid_flags |= ++ cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); + cmd->seid = cpu_to_le16(seid); + cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); + +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c +index 043b69b5843b..832fffed4a1f 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c +@@ -5478,6 +5478,8 @@ int i40e_vsi_open(struct i40e_vsi *vsi) + dev_driver_string(&pf->pdev->dev), + dev_name(&pf->pdev->dev)); + err = i40e_vsi_request_irq(vsi, int_name); ++ if (err) ++ goto err_setup_rx; + + } else { + err = -EINVAL; +@@ -8504,6 +8506,7 @@ static int i40e_sw_init(struct i40e_pf *pf) + { + int err = 0; + int size; ++ u16 pow; + + pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, + (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); +@@ -8531,6 +8534,11 @@ static int i40e_sw_init(struct i40e_pf *pf) + pf->rss_table_size = pf->hw.func_caps.rss_table_size; + pf->rss_size_max = min_t(int, pf->rss_size_max, + pf->hw.func_caps.num_tx_qp); ++ ++ /* find the next higher power-of-2 of num cpus */ ++ pow = roundup_pow_of_two(num_online_cpus()); ++ pf->rss_size_max = min_t(int, pf->rss_size_max, pow); ++ + if (pf->hw.func_caps.rss) { + pf->flags |= I40E_FLAG_RSS_ENABLED; + pf->alloc_rss_size = min_t(int, pf->rss_size_max, +@@ -9053,10 +9061,6 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + 0, 0, nlflags, filter_mask, NULL); + } + +-/* Hardware supports L4 tunnel length of 128B (=2^7) which includes +- * inner mac plus all inner ethertypes. 
+- */ +-#define I40E_MAX_TUNNEL_HDR_LEN 128 + /** + * i40e_features_check - Validate encapsulated packet conforms to limits + * @skb: skb buff +@@ -9067,12 +9071,52 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) + { +- if (skb->encapsulation && +- ((skb_inner_network_header(skb) - skb_transport_header(skb)) > +- I40E_MAX_TUNNEL_HDR_LEN)) +- return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); ++ size_t len; ++ ++ /* No point in doing any of this if neither checksum nor GSO are ++ * being requested for this frame. We can rule out both by just ++ * checking for CHECKSUM_PARTIAL ++ */ ++ if (skb->ip_summed != CHECKSUM_PARTIAL) ++ return features; ++ ++ /* We cannot support GSO if the MSS is going to be less than ++ * 64 bytes. If it is then we need to drop support for GSO. ++ */ ++ if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) ++ features &= ~NETIF_F_GSO_MASK; ++ ++ /* MACLEN can support at most 63 words */ ++ len = skb_network_header(skb) - skb->data; ++ if (len & ~(63 * 2)) ++ goto out_err; ++ ++ /* IPLEN and EIPLEN can support at most 127 dwords */ ++ len = skb_transport_header(skb) - skb_network_header(skb); ++ if (len & ~(127 * 4)) ++ goto out_err; ++ ++ if (skb->encapsulation) { ++ /* L4TUNLEN can support 127 words */ ++ len = skb_inner_network_header(skb) - skb_transport_header(skb); ++ if (len & ~(127 * 2)) ++ goto out_err; ++ ++ /* IPLEN can support at most 127 dwords */ ++ len = skb_inner_transport_header(skb) - ++ skb_inner_network_header(skb); ++ if (len & ~(127 * 4)) ++ goto out_err; ++ } ++ ++ /* No need to validate L4LEN as TCP is the only protocol with a ++ * a flexible value and we support all possible values supported ++ * by TCP, which is at most 15 dwords ++ */ + + return features; ++out_err: ++ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + } + + static const struct net_device_ops i40e_netdev_ops = { +diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c +index 2e12ccf73dba..877b49cc9d3c 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c +@@ -2452,13 +2452,16 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, + + l4_proto = ip.v4->protocol; + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { ++ int ret; ++ + tunnel |= I40E_TX_CTX_EXT_IP_IPV6; + + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; +- if (l4.hdr != exthdr) +- ipv6_skip_exthdr(skb, exthdr - skb->data, +- &l4_proto, &frag_off); ++ ret = ipv6_skip_exthdr(skb, exthdr - skb->data, ++ &l4_proto, &frag_off); ++ if (ret < 0) ++ return -1; + } + + /* define outer transport */ +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +index 7484ad3c955d..e3e02ec8f149 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +@@ -188,7 +188,7 @@ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id) + * check for the valid queue id + **/ + static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id, +- u8 qid) ++ u16 qid) + { + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); +@@ -203,7 +203,7 @@ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id, + * + * check for the valid vector id + **/ +-static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 
vector_id) ++static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id) + { + struct i40e_pf *pf = vf->pf; + +@@ -417,11 +417,28 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, + u32 v_idx, i, reg_idx, reg; + u32 next_q_idx, next_q_type; + u32 msix_vf, size; ++ int ret = 0; ++ ++ msix_vf = pf->hw.func_caps.num_msix_vectors_vf; ++ ++ if (qvlist_info->num_vectors > msix_vf) { ++ dev_warn(&pf->pdev->dev, ++ "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n", ++ qvlist_info->num_vectors, ++ msix_vf); ++ ret = -EINVAL; ++ goto err_out; ++ } + + size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) + + (sizeof(struct i40e_virtchnl_iwarp_qv_info) * + (qvlist_info->num_vectors - 1)); ++ kfree(vf->qvlist_info); + vf->qvlist_info = kzalloc(size, GFP_KERNEL); ++ if (!vf->qvlist_info) { ++ ret = -ENOMEM; ++ goto err_out; ++ } + vf->qvlist_info->num_vectors = qvlist_info->num_vectors; + + msix_vf = pf->hw.func_caps.num_msix_vectors_vf; +@@ -432,8 +449,10 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, + v_idx = qv_info->v_idx; + + /* Validate vector id belongs to this vf */ +- if (!i40e_vc_isvalid_vector_id(vf, v_idx)) +- goto err; ++ if (!i40e_vc_isvalid_vector_id(vf, v_idx)) { ++ ret = -EINVAL; ++ goto err_free; ++ } + + vf->qvlist_info->qv_info[i] = *qv_info; + +@@ -475,10 +494,11 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, + } + + return 0; +-err: ++err_free: + kfree(vf->qvlist_info); + vf->qvlist_info = NULL; +- return -EINVAL; ++err_out: ++ return ret; + } + + /** +diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c +index 14372810fc27..537776a3e5de 100644 +--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c ++++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c +@@ -2641,6 +2641,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + err_ioremap: + free_netdev(netdev); + err_alloc_etherdev: ++ pci_disable_pcie_error_reporting(pdev); + pci_release_regions(pdev); + err_pci_reg: + err_dma: +diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c +index 737b664d004c..b02e262ed76a 100644 +--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c ++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c +@@ -153,7 +153,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) + u32 status; + u32 speed; + +- status = rd32(E1000_STATUS); ++ status = pm_runtime_suspended(&adapter->pdev->dev) ? 
++ 0 : rd32(E1000_STATUS); + if (hw->phy.media_type == e1000_media_type_copper) { + + ecmd->supported = (SUPPORTED_10baseT_Half | +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c +index 90eab0521be1..6bede6774486 100644 +--- a/drivers/net/ethernet/intel/igb/igb_main.c ++++ b/drivers/net/ethernet/intel/igb/igb_main.c +@@ -948,6 +948,7 @@ static void igb_configure_msix(struct igb_adapter *adapter) + **/ + static int igb_request_msix(struct igb_adapter *adapter) + { ++ unsigned int num_q_vectors = adapter->num_q_vectors; + struct net_device *netdev = adapter->netdev; + int i, err = 0, vector = 0, free_vector = 0; + +@@ -956,7 +957,13 @@ static int igb_request_msix(struct igb_adapter *adapter) + if (err) + goto err_out; + +- for (i = 0; i < adapter->num_q_vectors; i++) { ++ if (num_q_vectors > MAX_Q_VECTORS) { ++ num_q_vectors = MAX_Q_VECTORS; ++ dev_warn(&adapter->pdev->dev, ++ "The number of queue vectors (%d) is higher than max allowed (%d)\n", ++ adapter->num_q_vectors, MAX_Q_VECTORS); ++ } ++ for (i = 0; i < num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + + vector++; +@@ -2767,6 +2774,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + err_ioremap: + free_netdev(netdev); + err_alloc_etherdev: ++ pci_disable_pcie_error_reporting(pdev); + pci_release_mem_regions(pdev); + err_pci_reg: + err_dma: +@@ -5381,9 +5389,18 @@ static void igb_reset_task(struct work_struct *work) + struct igb_adapter *adapter; + adapter = container_of(work, struct igb_adapter, reset_task); + ++ rtnl_lock(); ++ /* If we're already down or resetting, just bail */ ++ if (test_bit(__IGB_DOWN, &adapter->state) || ++ test_bit(__IGB_RESETTING, &adapter->state)) { ++ rtnl_unlock(); ++ return; ++ } ++ + igb_dump(adapter); + netdev_err(adapter->netdev, "Reset adapter\n"); + igb_reinit_locked(adapter); ++ rtnl_unlock(); + } + + /** +@@ -5656,8 +5673,6 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter) + event.type = PTP_CLOCK_PPS; + if (adapter->ptp_caps.pps) + ptp_clock_event(adapter->ptp_clock, &event); +- else +- dev_err(&adapter->pdev->dev, "unexpected SYS WRAP"); + ack |= TSINTR_SYS_WRAP; + } + +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h +index b06e32d0d22a..293ff8cc6994 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h +@@ -991,6 +991,7 @@ void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter); + void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); + void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); + void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); ++void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter); + void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *); + void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb); + static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +index 0d2baec546e1..c17135b7fca7 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +@@ -2219,7 +2219,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) + } + + /* Configure pause time (2 TCs per register) */ +- reg = hw->fc.pause_time * 0x00010001; ++ reg = hw->fc.pause_time * 0x00010001U; + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 
+ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +index 4c729faeb713..2266552532c4 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +@@ -7257,7 +7257,9 @@ static void ixgbe_service_task(struct work_struct *work) + + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { + ixgbe_ptp_overflow_check(adapter); +- ixgbe_ptp_rx_hang(adapter); ++ if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER) ++ ixgbe_ptp_rx_hang(adapter); ++ ixgbe_ptp_tx_hang(adapter); + } + + ixgbe_service_event_complete(adapter); +@@ -8675,8 +8677,10 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, + ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask); + err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter, + input->sw_idx, queue); +- if (!err) +- ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); ++ if (err) ++ goto err_out_w_lock; ++ ++ ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + spin_unlock(&adapter->fdir_perfect_lock); + + if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) +@@ -9836,6 +9840,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); + free_netdev(netdev); + err_alloc_etherdev: ++ pci_disable_pcie_error_reporting(pdev); + pci_release_mem_regions(pdev); + err_pci_reg: + err_dma: +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +index a92277683a64..a93a1b3bb8e4 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +@@ -662,6 +662,33 @@ static void ixgbe_ptp_clear_tx_timestamp(struct ixgbe_adapter *adapter) + clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); + } + ++/** ++ * ixgbe_ptp_tx_hang - detect error case where Tx timestamp never finishes ++ * @adapter: private network adapter structure ++ */ ++void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter) ++{ ++ bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + ++ IXGBE_PTP_TX_TIMEOUT); ++ ++ if (!adapter->ptp_tx_skb) ++ return; ++ ++ if (!test_bit(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state)) ++ return; ++ ++ /* If we haven't received a timestamp within the timeout, it is ++ * reasonable to assume that it will never occur, so we can unlock the ++ * timestamp bit when this occurs. 
++ */ ++ if (timeout) { ++ cancel_work_sync(&adapter->ptp_tx_work); ++ ixgbe_ptp_clear_tx_timestamp(adapter); ++ adapter->tx_hwtstamp_timeouts++; ++ e_warn(drv, "clearing Tx timestamp hang\n"); ++ } ++} ++ + /** + * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: the private adapter struct +diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c +index c051987aab83..4cf1fc89df3c 100644 +--- a/drivers/net/ethernet/korina.c ++++ b/drivers/net/ethernet/korina.c +@@ -216,7 +216,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev) + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&lp->lock, flags); + +- return NETDEV_TX_BUSY; ++ return NETDEV_TX_OK; + } + } + +@@ -1188,7 +1188,7 @@ static int korina_probe(struct platform_device *pdev) + return rc; + + probe_err_register: +- kfree(lp->td_ring); ++ kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring)); + probe_err_td_ring: + iounmap(lp->tx_dma_regs); + probe_err_dma_tx: +@@ -1208,6 +1208,7 @@ static int korina_remove(struct platform_device *pdev) + iounmap(lp->eth_regs); + iounmap(lp->rx_dma_regs); + iounmap(lp->tx_dma_regs); ++ kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring)); + + unregister_netdev(bif->dev); + free_netdev(bif->dev); +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c +index bb6bc84995a2..ccacdcfb5932 100644 +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -100,7 +100,7 @@ + #define MVNETA_DESC_SWAP BIT(6) + #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22) + #define MVNETA_PORT_STATUS 0x2444 +-#define MVNETA_TX_IN_PRGRS BIT(1) ++#define MVNETA_TX_IN_PRGRS BIT(0) + #define MVNETA_TX_FIFO_EMPTY BIT(8) + #define MVNETA_RX_MIN_FRAME_SIZE 0x247c + #define MVNETA_SERDES_CFG 0x24A0 +diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c +index 5d5000c8edf1..09cb0ac701e1 100644 +--- a/drivers/net/ethernet/marvell/pxa168_eth.c ++++ b/drivers/net/ethernet/marvell/pxa168_eth.c +@@ -1571,8 +1571,8 @@ static int pxa168_eth_remove(struct platform_device *pdev) + + mdiobus_unregister(pep->smi_bus); + mdiobus_free(pep->smi_bus); +- unregister_netdev(dev); + cancel_work_sync(&pep->tx_timeout_task); ++ unregister_netdev(dev); + free_netdev(dev); + return 0; + } +diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c +index 49f692907a30..c4197d0ec4d2 100644 +--- a/drivers/net/ethernet/marvell/sky2.c ++++ b/drivers/net/ethernet/marvell/sky2.c +@@ -215,7 +215,7 @@ static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val) + + static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg) + { +- u16 v; ++ u16 v = 0; + __gm_phy_read(hw, port, reg, &v); + return v; + } +diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +index d10c8a8156bc..5b072bf80783 100644 +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c +@@ -964,7 +964,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, + skb->protocol = eth_type_trans(skb, netdev); + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX && +- RX_DMA_VID(trxd.rxd3)) ++ (trxd.rxd2 & RX_DMA_VTAG)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + RX_DMA_VID(trxd.rxd3)); + napi_gro_receive(napi, skb); +diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +index 
99b1c8e9f16f..2e7ccd8261c3 100644 +--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h ++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h +@@ -272,6 +272,7 @@ + #define RX_DMA_DONE BIT(31) + #define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16) + #define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff) ++#define RX_DMA_VTAG BIT(15) + + /* QDMA descriptor rxd3 */ + #define RX_DMA_VID(_x) ((_x) & 0xfff) +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +index 6068e7c4fc7e..410a36c98241 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +@@ -47,7 +47,7 @@ + #define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff) + #define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff) + +-static int mlx4_en_moderation_update(struct mlx4_en_priv *priv) ++int mlx4_en_moderation_update(struct mlx4_en_priv *priv) + { + int i; + int err = 0; +@@ -1931,8 +1931,6 @@ static int mlx4_en_set_tunable(struct net_device *dev, + return ret; + } + +-#define MLX4_EEPROM_PAGE_LEN 256 +- + static int mlx4_en_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) + { +@@ -1967,7 +1965,7 @@ static int mlx4_en_get_module_info(struct net_device *dev, + break; + case MLX4_MODULE_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; +- modinfo->eeprom_len = MLX4_EEPROM_PAGE_LEN; ++ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + default: + return -ENOSYS; +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +index 0160c93de6d3..543f30dec4a0 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c ++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +@@ -1311,8 +1311,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev) + } + + priv->port_stats.tx_timeout++; +- en_dbg(DRV, priv, "Scheduling watchdog\n"); +- queue_work(mdev->workqueue, &priv->watchdog_task); ++ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) { ++ en_dbg(DRV, priv, "Scheduling port restart\n"); ++ queue_work(mdev->workqueue, &priv->restart_task); ++ } + } + + +@@ -1746,6 +1748,7 @@ int mlx4_en_start_port(struct net_device *dev) + local_bh_enable(); + } + ++ clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state); + netif_tx_start_all_queues(dev); + netif_device_attach(dev); + +@@ -1900,7 +1903,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) + static void mlx4_en_restart(struct work_struct *work) + { + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, +- watchdog_task); ++ restart_task); + struct mlx4_en_dev *mdev = priv->mdev; + struct net_device *dev = priv->dev; + +@@ -2220,7 +2223,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) + if (netif_running(dev)) { + mutex_lock(&mdev->state_lock); + if (!mdev->device_up) { +- /* NIC is probably restarting - let watchdog task reset ++ /* NIC is probably restarting - let restart task reset + * the port */ + en_dbg(DRV, priv, "Change MTU called with card down!?\n"); + } else { +@@ -2229,7 +2232,9 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) + if (err) { + en_err(priv, "Failed restarting port:%d\n", + priv->port); +- queue_work(mdev->workqueue, &priv->watchdog_task); ++ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, ++ &priv->state)) ++ queue_work(mdev->workqueue, &priv->restart_task); + } + } + mutex_unlock(&mdev->state_lock); +@@ -2701,7 +2706,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) + if 
(err) { + en_err(priv, "Failed starting port %d for XDP change\n", + priv->port); +- queue_work(mdev->workqueue, &priv->watchdog_task); ++ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) ++ queue_work(mdev->workqueue, &priv->restart_task); + } + } + +@@ -3080,7 +3086,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, + priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); + spin_lock_init(&priv->stats_lock); + INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); +- INIT_WORK(&priv->watchdog_task, mlx4_en_restart); ++ INIT_WORK(&priv->restart_task, mlx4_en_restart); + INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); + INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); + INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); +@@ -3453,6 +3459,8 @@ int mlx4_en_reset_config(struct net_device *dev, + en_err(priv, "Failed starting port\n"); + } + ++ if (!err) ++ err = mlx4_en_moderation_update(priv); + out: + mutex_unlock(&mdev->state_lock); + kfree(tmp); +diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c +index fe9dc1b3078c..7c3e505cf255 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/fw.c ++++ b/drivers/net/ethernet/mellanox/mlx4/fw.c +@@ -1843,14 +1843,14 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) + #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77) + #define INIT_HCA_MCAST_OFFSET 0x0c0 + #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) +-#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) +-#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) ++#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x13) ++#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x17) + #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) + #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) + #define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6 + #define INIT_HCA_FS_PARAM_OFFSET 0x1d0 + #define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00) +-#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12) ++#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x13) + #define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18) + #define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b) + #define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21) +diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h +index 5343a0599253..b30a5a9b9777 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/fw.h ++++ b/drivers/net/ethernet/mellanox/mlx4/fw.h +@@ -184,8 +184,8 @@ struct mlx4_init_hca_param { + u64 cmpt_base; + u64 mtt_base; + u64 global_caps; +- u16 log_mc_entry_sz; +- u16 log_mc_hash_sz; ++ u8 log_mc_entry_sz; ++ u8 log_mc_hash_sz; + u16 hca_core_clock; /* Internal Clock Frequency (in MHz) */ + u8 log_num_qps; + u8 log_num_srqs; +diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c +index 751aac54f2d5..78b04f271344 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/main.c ++++ b/drivers/net/ethernet/mellanox/mlx4/main.c +@@ -3445,6 +3445,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, + + if (!SRIOV_VALID_STATE(dev->flags)) { + mlx4_err(dev, "Invalid SRIOV state\n"); ++ err = -EINVAL; + goto err_close; + } + } +@@ -4176,12 +4177,14 @@ static void mlx4_pci_resume(struct pci_dev *pdev) + static void 
mlx4_shutdown(struct pci_dev *pdev) + { + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); ++ struct mlx4_dev *dev = persist->dev; + + mlx4_info(persist->dev, "mlx4_shutdown was called\n"); + mutex_lock(&persist->interface_state_mutex); + if (persist->interface_state & MLX4_INTERFACE_STATE_UP) + mlx4_unload_one(pdev); + mutex_unlock(&persist->interface_state_mutex); ++ mlx4_pci_disable_device(dev); + } + + static const struct pci_error_handlers mlx4_err_handler = { +diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +index 247d340be743..2e8c138beaf7 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h ++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +@@ -522,6 +522,10 @@ struct mlx4_en_stats_bitmap { + struct mutex mutex; /* for mutual access to stats bitmap */ + }; + ++enum { ++ MLX4_EN_STATE_FLAG_RESTARTING, ++}; ++ + struct mlx4_en_priv { + struct mlx4_en_dev *mdev; + struct mlx4_en_port_profile *prof; +@@ -586,7 +590,7 @@ struct mlx4_en_priv { + struct mlx4_en_cq *rx_cq[MAX_RX_RINGS]; + struct mlx4_qp drop_qp; + struct work_struct rx_mode_task; +- struct work_struct watchdog_task; ++ struct work_struct restart_task; + struct work_struct linkstate_task; + struct delayed_work stats_task; + struct delayed_work service_task; +@@ -632,6 +636,7 @@ struct mlx4_en_priv { + u32 pflags; + u8 rss_key[MLX4_EN_RSS_KEY_SIZE]; + u8 rss_hash_fn; ++ unsigned long state; + }; + + enum mlx4_en_wol { +@@ -768,6 +773,7 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev); + #define DEV_FEATURE_CHANGED(dev, new_features, feature) \ + ((dev->features & feature) ^ (new_features & feature)) + ++int mlx4_en_moderation_update(struct mlx4_en_priv *priv); + int mlx4_en_reset_config(struct net_device *dev, + struct hwtstamp_config ts_config, + netdev_features_t new_features); +diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c +index 3637474cab8a..50683693d9fc 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/mr.c ++++ b/drivers/net/ethernet/mellanox/mlx4/mr.c +@@ -114,7 +114,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) + goto err_out; + + for (i = 0; i <= buddy->max_order; ++i) { +- s = BITS_TO_LONGS(1 << (buddy->max_order - i)); ++ s = BITS_TO_LONGS(1UL << (buddy->max_order - i)); + buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN); + if (!buddy->bits[i]) { + buddy->bits[i] = vzalloc(s * sizeof(long)); +diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c +index 3173875a715f..231f09712850 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/port.c ++++ b/drivers/net/ethernet/mellanox/mlx4/port.c +@@ -1856,6 +1856,7 @@ EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave); + #define I2C_ADDR_LOW 0x50 + #define I2C_ADDR_HIGH 0x51 + #define I2C_PAGE_SIZE 256 ++#define I2C_HIGH_PAGE_SIZE 128 + + /* Module Info Data */ + struct mlx4_cable_info { +@@ -1909,6 +1910,88 @@ static inline const char *cable_info_mad_err_str(u16 mad_status) + return "Unknown Error"; + } + ++static int mlx4_get_module_id(struct mlx4_dev *dev, u8 port, u8 *module_id) ++{ ++ struct mlx4_cmd_mailbox *inbox, *outbox; ++ struct mlx4_mad_ifc *inmad, *outmad; ++ struct mlx4_cable_info *cable_info; ++ int ret; ++ ++ inbox = mlx4_alloc_cmd_mailbox(dev); ++ if (IS_ERR(inbox)) ++ return PTR_ERR(inbox); ++ ++ outbox = mlx4_alloc_cmd_mailbox(dev); ++ if (IS_ERR(outbox)) { ++ mlx4_free_cmd_mailbox(dev, inbox); ++ return PTR_ERR(outbox); ++ } ++ ++ inmad = 
(struct mlx4_mad_ifc *)(inbox->buf); ++ outmad = (struct mlx4_mad_ifc *)(outbox->buf); ++ ++ inmad->method = 0x1; /* Get */ ++ inmad->class_version = 0x1; ++ inmad->mgmt_class = 0x1; ++ inmad->base_version = 0x1; ++ inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */ ++ ++ cable_info = (struct mlx4_cable_info *)inmad->data; ++ cable_info->dev_mem_address = 0; ++ cable_info->page_num = 0; ++ cable_info->i2c_addr = I2C_ADDR_LOW; ++ cable_info->size = cpu_to_be16(1); ++ ++ ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3, ++ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, ++ MLX4_CMD_NATIVE); ++ if (ret) ++ goto out; ++ ++ if (be16_to_cpu(outmad->status)) { ++ /* Mad returned with bad status */ ++ ret = be16_to_cpu(outmad->status); ++ mlx4_warn(dev, ++ "MLX4_CMD_MAD_IFC Get Module ID attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n", ++ 0xFF60, port, I2C_ADDR_LOW, 0, 1, ret, ++ cable_info_mad_err_str(ret)); ++ ret = -ret; ++ goto out; ++ } ++ cable_info = (struct mlx4_cable_info *)outmad->data; ++ *module_id = cable_info->data[0]; ++out: ++ mlx4_free_cmd_mailbox(dev, inbox); ++ mlx4_free_cmd_mailbox(dev, outbox); ++ return ret; ++} ++ ++static void mlx4_sfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset) ++{ ++ *i2c_addr = I2C_ADDR_LOW; ++ *page_num = 0; ++ ++ if (*offset < I2C_PAGE_SIZE) ++ return; ++ ++ *i2c_addr = I2C_ADDR_HIGH; ++ *offset -= I2C_PAGE_SIZE; ++} ++ ++static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset) ++{ ++ /* Offsets 0-255 belong to page 0. ++ * Offsets 256-639 belong to pages 01, 02, 03. ++ * For example, offset 400 is page 02: 1 + (400 - 256) / 128 = 2 ++ */ ++ if (*offset < I2C_PAGE_SIZE) ++ *page_num = 0; ++ else ++ *page_num = 1 + (*offset - I2C_PAGE_SIZE) / I2C_HIGH_PAGE_SIZE; ++ *i2c_addr = I2C_ADDR_LOW; ++ *offset -= *page_num * I2C_HIGH_PAGE_SIZE; ++} ++ + /** + * mlx4_get_module_info - Read cable module eeprom data + * @dev: mlx4_dev. 
+@@ -1928,12 +2011,30 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, + struct mlx4_cmd_mailbox *inbox, *outbox; + struct mlx4_mad_ifc *inmad, *outmad; + struct mlx4_cable_info *cable_info; +- u16 i2c_addr; ++ u8 module_id, i2c_addr, page_num; + int ret; + + if (size > MODULE_INFO_MAX_READ) + size = MODULE_INFO_MAX_READ; + ++ ret = mlx4_get_module_id(dev, port, &module_id); ++ if (ret) ++ return ret; ++ ++ switch (module_id) { ++ case MLX4_MODULE_ID_SFP: ++ mlx4_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset); ++ break; ++ case MLX4_MODULE_ID_QSFP: ++ case MLX4_MODULE_ID_QSFP_PLUS: ++ case MLX4_MODULE_ID_QSFP28: ++ mlx4_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset); ++ break; ++ default: ++ mlx4_err(dev, "Module ID not recognized: %#x\n", module_id); ++ return -EINVAL; ++ } ++ + inbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(inbox)) + return PTR_ERR(inbox); +@@ -1959,11 +2060,9 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, + */ + size -= offset + size - I2C_PAGE_SIZE; + +- i2c_addr = I2C_ADDR_LOW; +- + cable_info = (struct mlx4_cable_info *)inmad->data; + cable_info->dev_mem_address = cpu_to_be16(offset); +- cable_info->page_num = 0; ++ cable_info->page_num = page_num; + cable_info->i2c_addr = i2c_addr; + cable_info->size = cpu_to_be16(size); + +diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +index 7d1e8ab956e6..ab046bffed15 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c ++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +@@ -4948,6 +4948,7 @@ static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule + + if (!fs_rule->mirr_mbox) { + mlx4_err(dev, "rule mirroring mailbox is null\n"); ++ mlx4_free_cmd_mailbox(dev, mailbox); + return -EINVAL; + } + memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +index 8cd7227fbdfc..3dd0bc8804c1 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +@@ -930,6 +930,7 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table) + ft->g[ft->num_groups] = NULL; + mlx5e_destroy_groups(ft); + kvfree(in); ++ kfree(ft->g); + + return err; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +index b210c171a380..f3d3ad25f05e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +@@ -139,7 +139,7 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) + struct mlx5_eswitch_rep *rep = priv->ppriv; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + +- if (esw->mode == SRIOV_NONE) ++ if (esw->mode != SRIOV_OFFLOADS) + return -EOPNOTSUPP; + + switch (attr->id) { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 13dfc197bdd8..6c092dc41c82 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -701,17 +701,19 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev, + static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, + struct fs_prio *prio) + { +- struct mlx5_flow_table *next_ft; ++ struct mlx5_flow_table *next_ft, *first_ft; + int err = 0; + + /* Connect_prev_fts and 
update_root_ft_create are mutually exclusive */ + +- if (list_empty(&prio->node.children)) { ++ first_ft = list_first_entry_or_null(&prio->node.children, ++ struct mlx5_flow_table, node.list); ++ if (!first_ft || first_ft->level > ft->level) { + err = connect_prev_fts(dev, ft, prio); + if (err) + return err; + +- next_ft = find_next_chained_ft(prio); ++ next_ft = first_ft ? first_ft : find_next_chained_ft(prio); + err = connect_fwd_rules(dev, ft, next_ft); + if (err) + return err; +@@ -1357,7 +1359,7 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft) + node.list) == ft)) + return 0; + +- next_ft = find_next_chained_ft(prio); ++ next_ft = find_next_ft(ft); + err = connect_fwd_rules(dev, next_ft, ft); + if (err) + return err; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +index a57d5a81eb05..858c99864047 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +@@ -331,6 +331,24 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, + return err; + } + ++static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index, ++ u32 npages) ++{ ++ u32 pages_set = 0; ++ unsigned int n; ++ ++ for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) { ++ MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set, ++ fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE)); ++ pages_set++; ++ ++ if (!--npages) ++ break; ++ } ++ ++ return pages_set; ++} ++ + static int reclaim_pages_cmd(struct mlx5_core_dev *dev, + u32 *in, int in_size, u32 *out, int out_size) + { +@@ -354,8 +372,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, + if (fwp->func_id != func_id) + continue; + +- MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr); +- i++; ++ i += fwp_fill_manage_pages_out(fwp, out, i, npages - i); + } + + MLX5_SET(manage_pages_out, out, output_num_entries, i); +diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c +index aa33d58b9f81..901835d70f73 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/core.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c +@@ -436,7 +436,7 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans) + { + unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS); + +- mlxsw_core_schedule_dw(&trans->timeout_dw, timeout); ++ mlxsw_core_schedule_dw(&trans->timeout_dw, timeout << trans->retries); + } + + static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, +@@ -485,6 +485,9 @@ static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core, + err = mlxsw_emad_transmit(trans->core, trans); + if (err == 0) + return; ++ ++ if (!atomic_dec_and_test(&trans->active)) ++ return; + } else { + err = -EIO; + } +@@ -1370,7 +1373,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core, + err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans, + bulk_list, cb, cb_priv, tid); + if (err) { +- kfree(trans); ++ kfree_rcu(trans, rcu); + return err; + } + return 0; +@@ -1584,9 +1587,10 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, + break; + } + } +- rcu_read_unlock(); +- if (!found) ++ if (!found) { ++ rcu_read_unlock(); + goto drop; ++ } + + pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats); + u64_stats_update_begin(&pcpu_stats->syncp); +@@ -1597,6 +1601,7 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, + 
u64_stats_update_end(&pcpu_stats->syncp); + + rxl->func(skb, local_port, rxl_item->priv); ++ rcu_read_unlock(); + return; + + drop: +diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c +index cb0102dd7f70..d691c33dffc6 100644 +--- a/drivers/net/ethernet/micrel/ks8842.c ++++ b/drivers/net/ethernet/micrel/ks8842.c +@@ -1150,6 +1150,10 @@ static int ks8842_probe(struct platform_device *pdev) + unsigned i; + + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!iomem) { ++ dev_err(&pdev->dev, "Invalid resource\n"); ++ return -EINVAL; ++ } + if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME)) + goto err_mem_region; + +diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c +index 6fe61d9343cb..7f782aec3c48 100644 +--- a/drivers/net/ethernet/moxa/moxart_ether.c ++++ b/drivers/net/ethernet/moxa/moxart_ether.c +@@ -548,10 +548,8 @@ static int moxart_mac_probe(struct platform_device *pdev) + SET_NETDEV_DEV(ndev, &pdev->dev); + + ret = register_netdev(ndev); +- if (ret) { +- free_netdev(ndev); ++ if (ret) + goto init_fail; +- } + + netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n", + __func__, ndev->irq, ndev->dev_addr); +diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +index 02ec326cb129..5eeba263b5f8 100644 +--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c ++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +@@ -4050,6 +4050,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + dev_err(&pdev->dev, + "invalid sram_size %dB or board span %ldB\n", + mgp->sram_size, mgp->board_span); ++ status = -EINVAL; + goto abort_with_ioremap; + } + memcpy_fromio(mgp->eeprom_strings, +diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c +index ed89029ff75b..c0e128e17321 100644 +--- a/drivers/net/ethernet/natsemi/natsemi.c ++++ b/drivers/net/ethernet/natsemi/natsemi.c +@@ -817,7 +817,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) + printk(version); + #endif + +- i = pci_enable_device(pdev); ++ i = pcim_enable_device(pdev); + if (i) return i; + + /* natsemi has a non-standard PM control register +@@ -850,7 +850,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) + ioaddr = ioremap(iostart, iosize); + if (!ioaddr) { + i = -ENOMEM; +- goto err_ioremap; ++ goto err_pci_request_regions; + } + + /* Work around the dropped serial bit. 
*/ +@@ -968,9 +968,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) + err_register_netdev: + iounmap(ioaddr); + +- err_ioremap: +- pci_release_regions(pdev); +- + err_pci_request_regions: + free_netdev(dev); + return i; +@@ -3228,7 +3225,6 @@ static void natsemi_remove1(struct pci_dev *pdev) + + NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround); + unregister_netdev (dev); +- pci_release_regions (pdev); + iounmap(ioaddr); + free_netdev (dev); + } +diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c +index e0993eba5df3..c6950e580883 100644 +--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c ++++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c +@@ -3539,13 +3539,13 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev) + + kfree(vdev->vpaths); + +- /* we are safe to free it now */ +- free_netdev(dev); +- + vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered", + buf); + vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf, + __func__, __LINE__); ++ ++ /* we are safe to free it now */ ++ free_netdev(dev); + } + + /* +diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c +index 9fcaf1910633..9b98ec3dcb82 100644 +--- a/drivers/net/ethernet/nxp/lpc_eth.c ++++ b/drivers/net/ethernet/nxp/lpc_eth.c +@@ -845,7 +845,8 @@ static int lpc_mii_init(struct netdata_local *pldat) + if (mdiobus_register(pldat->mii_bus)) + goto err_out_unregister_bus; + +- if (lpc_mii_probe(pldat->ndev) != 0) ++ err = lpc_mii_probe(pldat->ndev); ++ if (err) + goto err_out_unregister_bus; + + return 0; +diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +index 3cd87a41ac92..cd59577a0c92 100644 +--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c ++++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +@@ -124,7 +124,7 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid) + { + u8 *data = skb->data; + unsigned int offset; +- u16 *hi, *id; ++ u16 hi, id; + u32 lo; + + if (ptp_classify_raw(skb) == PTP_CLASS_NONE) +@@ -135,14 +135,11 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid) + if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid)) + return 0; + +- hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID); +- id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID); ++ hi = get_unaligned_be16(data + offset + OFF_PTP_SOURCE_UUID + 0); ++ lo = get_unaligned_be32(data + offset + OFF_PTP_SOURCE_UUID + 2); ++ id = get_unaligned_be16(data + offset + OFF_PTP_SEQUENCE_ID); + +- memcpy(&lo, &hi[1], sizeof(lo)); +- +- return (uid_hi == *hi && +- uid_lo == lo && +- seqid == *id); ++ return (uid_hi == hi && uid_lo == lo && seqid == id); + } + + static void +@@ -152,7 +149,6 @@ pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb) + struct pci_dev *pdev; + u64 ns; + u32 hi, lo, val; +- u16 uid, seq; + + if (!adapter->hwts_rx_en) + return; +@@ -168,10 +164,7 @@ pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb) + lo = pch_src_uuid_lo_read(pdev); + hi = pch_src_uuid_hi_read(pdev); + +- uid = hi & 0xffff; +- seq = (hi >> 16) & 0xffff; +- +- if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq))) ++ if (!pch_ptp_match(skb, hi, lo, hi >> 16)) + goto out; + + ns = pch_rx_snap_read(pdev); +@@ -2617,9 +2610,13 @@ static int pch_gbe_probe(struct pci_dev *pdev, + adapter->pdev = pdev; + adapter->hw.back = 
adapter; + adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR]; ++ + adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data; +- if (adapter->pdata && adapter->pdata->platform_init) +- adapter->pdata->platform_init(pdev); ++ if (adapter->pdata && adapter->pdata->platform_init) { ++ ret = adapter->pdata->platform_init(pdev); ++ if (ret) ++ goto err_free_netdev; ++ } + + adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number, + PCI_DEVFN(12, 4)); +@@ -2709,7 +2706,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, + */ + static int pch_gbe_minnow_platform_init(struct pci_dev *pdev) + { +- unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT; ++ unsigned long flags = GPIOF_OUT_INIT_HIGH; + unsigned gpio = MINNOW_PHY_RESET_GPIO; + int ret; + +diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c +index dcd56ac68748..585225fbe293 100644 +--- a/drivers/net/ethernet/pasemi/pasemi_mac.c ++++ b/drivers/net/ethernet/pasemi/pasemi_mac.c +@@ -1089,16 +1089,20 @@ static int pasemi_mac_open(struct net_device *dev) + + mac->tx = pasemi_mac_setup_tx_resources(dev); + +- if (!mac->tx) ++ if (!mac->tx) { ++ ret = -ENOMEM; + goto out_tx_ring; ++ } + + /* We might already have allocated rings in case mtu was changed + * before interface was brought up. + */ + if (dev->mtu > 1500 && !mac->num_cs) { + pasemi_mac_setup_csrings(mac); +- if (!mac->num_cs) ++ if (!mac->num_cs) { ++ ret = -ENOMEM; + goto out_tx_ring; ++ } + } + + /* Zero out rmon counters */ +diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +index 7a0281a36c28..75e25a3fe4a7 100644 +--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c ++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +@@ -586,11 +586,6 @@ static const struct net_device_ops netxen_netdev_ops = { + #endif + }; + +-static inline bool netxen_function_zero(struct pci_dev *pdev) +-{ +- return (PCI_FUNC(pdev->devfn) == 0) ? 
true : false; +-} +- + static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter, + u32 mode) + { +@@ -686,7 +681,7 @@ static int netxen_setup_intr(struct netxen_adapter *adapter) + netxen_initialize_interrupt_registers(adapter); + netxen_set_msix_bit(pdev, 0); + +- if (netxen_function_zero(pdev)) { ++ if (adapter->portnum == 0) { + if (!netxen_setup_msi_interrupts(adapter, num_msix)) + netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE); + else +@@ -1622,6 +1617,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + free_netdev(netdev); + + err_out_free_res: ++ if (NX_IS_REVISION_P3(pdev->revision)) ++ pci_disable_pcie_error_reporting(pdev); + pci_release_regions(pdev); + + err_out_disable_pdev: +diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c +index f1956c4d02a0..d026da36e47e 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c +@@ -339,7 +339,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn, + vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; + } + +- iids->vf_cids += vf_cids * p_mngr->vf_count; ++ iids->vf_cids = vf_cids; + iids->tids += vf_tids * p_mngr->vf_count; + + DP_VERBOSE(p_hwfn, QED_MSG_ILT, +diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +index 7b6824e560d2..59e59878a3a7 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +@@ -1205,9 +1205,11 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn, + p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE; + + p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled; ++ BUILD_BUG_ON(sizeof(dcbx_info->operational.params) != ++ sizeof(p_hwfn->p_dcbx_info->set.config.params)); + memcpy(&p_hwfn->p_dcbx_info->set.config.params, + &dcbx_info->operational.params, +- sizeof(struct qed_dcbx_admin_params)); ++ sizeof(p_hwfn->p_dcbx_info->set.config.params)); + p_hwfn->p_dcbx_info->set.config.valid = true; + + memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set)); +diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c +index fd19372db2f8..6e1d38041d0a 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_int.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_int.c +@@ -2158,7 +2158,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn) + index, attn_bits, attn_acks, asserted_bits, + deasserted_bits, p_sb_attn_sw->known_attn); + } else if (asserted_bits == 0x100) { +- DP_INFO(p_hwfn, "MFW indication via attention\n"); ++ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, ++ "MFW indication via attention\n"); + } else { + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "MFW indication [deassertion]\n"); +diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c +index cf34908ec8e1..170243d3276b 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c +@@ -57,12 +57,17 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status) + mutex_unlock(&(p_hwfn->vf_iov_info->mutex)); + } + ++#define QED_VF_CHANNEL_USLEEP_ITERATIONS 90 ++#define QED_VF_CHANNEL_USLEEP_DELAY 100 ++#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10 ++#define QED_VF_CHANNEL_MSLEEP_DELAY 25 ++ + static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) + { + union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request; + struct ustorm_trigger_vf_zone trigger; + 
struct ustorm_vf_zone *zone_data; +- int rc = 0, time = 100; ++ int iter, rc = 0; + + zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B; + +@@ -102,11 +107,19 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) + REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger)); + + /* When PF would be done with the response, it would write back to the +- * `done' address. Poll until then. ++ * `done' address from a coherent DMA zone. Poll until then. + */ +- while ((!*done) && time) { +- msleep(25); +- time--; ++ ++ iter = QED_VF_CHANNEL_USLEEP_ITERATIONS; ++ while (!*done && iter--) { ++ udelay(QED_VF_CHANNEL_USLEEP_DELAY); ++ dma_rmb(); ++ } ++ ++ iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS; ++ while (!*done && iter--) { ++ msleep(QED_VF_CHANNEL_MSLEEP_DELAY); ++ dma_rmb(); + } + + if (!*done) { +diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c +index f2cb77c3b199..cb9d43c871c4 100644 +--- a/drivers/net/ethernet/qlogic/qla3xxx.c ++++ b/drivers/net/ethernet/qlogic/qla3xxx.c +@@ -115,7 +115,7 @@ static int ql_sem_spinlock(struct ql3_adapter *qdev, + value = readl(&port_regs->CommonRegs.semaphoreReg); + if ((value & (sem_mask >> 16)) == sem_bits) + return 0; +- ssleep(1); ++ mdelay(1000); + } while (--seconds); + return -1; + } +@@ -155,7 +155,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) + "driver lock acquired\n"); + return 1; + } +- ssleep(1); ++ mdelay(1000); + } while (++i < 10); + + netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); +@@ -3287,7 +3287,7 @@ static int ql_adapter_reset(struct ql3_adapter *qdev) + if ((value & ISP_CONTROL_SR) == 0) + break; + +- ssleep(1); ++ mdelay(1000); + } while ((--max_wait_time)); + + /* +@@ -3323,7 +3323,7 @@ static int ql_adapter_reset(struct ql3_adapter *qdev) + ispControlStatus); + if ((value & ISP_CONTROL_FSR) == 0) + break; +- ssleep(1); ++ mdelay(1000); + } while ((--max_wait_time)); + } + if (max_wait_time == 0) +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +index 5d2de48b77a0..dce36e9e1879 100644 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +@@ -3157,8 +3157,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr, + + indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr); + ret = QLCRD32(adapter, indirect_addr, &err); +- if (err == -EIO) ++ if (err == -EIO) { ++ qlcnic_83xx_unlock_flash(adapter); + return err; ++ } + + word = ret; + *(u32 *)p_data = word; +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +index cda5b0a9e948..10286215092f 100644 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +@@ -2251,7 +2251,8 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter) + + /* Boot either flash image or firmware image from host file system */ + if (qlcnic_load_fw_file == 1) { +- if (qlcnic_83xx_load_fw_image_from_host(adapter)) ++ err = qlcnic_83xx_load_fw_image_from_host(adapter); ++ if (err) + return err; + } else { + QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +index 63ebc491057b..99fc0121da93 100644 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c ++++ 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +@@ -1039,7 +1039,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode) + for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) { + skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE); + if (!skb) +- break; ++ goto error; + qlcnic_create_loopback_buff(skb->data, adapter->mac_addr); + skb_put(skb, QLCNIC_ILB_PKT_SIZE); + adapter->ahw->diag_cnt = 0; +@@ -1063,6 +1063,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode) + cnt++; + } + if (cnt != i) { ++error: + dev_err(&adapter->pdev->dev, + "LB Test: failed, TX[%d], RX[%d]\n", i, cnt); + if (mode != QLCNIC_ILB_MODE) +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +index ebf5ead16939..19dca845042e 100644 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +@@ -2507,6 +2507,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + qlcnic_sriov_vf_register_map(ahw); + break; + default: ++ err = -EINVAL; + goto err_out_free_hw_res; + } + +@@ -2706,6 +2707,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + kfree(ahw); + + err_out_free_res: ++ pci_disable_pcie_error_reporting(pdev); + pci_release_regions(pdev); + + err_out_disable_pdev: +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +index 5174e0bd75d1..625336264a44 100644 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +@@ -1426,6 +1426,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) + + if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) { + vfree(fw_dump->tmpl_hdr); ++ fw_dump->tmpl_hdr = NULL; + + if (qlcnic_83xx_md_check_extended_dump_capability(adapter)) + extended = !qlcnic_83xx_extend_md_capab(adapter); +@@ -1444,6 +1445,8 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) + struct qlcnic_83xx_dump_template_hdr *hdr; + + hdr = fw_dump->tmpl_hdr; ++ if (!hdr) ++ return; + hdr->drv_cap_mask = 0x1f; + fw_dump->cap_mask = 0x1f; + dev_info(&pdev->dev, +diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c +index 9d223ff65071..882f2b7ec6d1 100644 +--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c ++++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c +@@ -1504,6 +1504,7 @@ int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q, + { + struct emac_tpd tpd; + u32 prod_idx; ++ int len; + + memset(&tpd, 0, sizeof(tpd)); + +@@ -1523,9 +1524,10 @@ int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q, + if (skb_network_offset(skb) != ETH_HLEN) + TPD_TYP_SET(&tpd, 1); + ++ len = skb->len; + emac_tx_fill_tpd(adpt, tx_q, skb, &tpd); + +- netdev_sent_queue(adpt->netdev, skb->len); ++ netdev_sent_queue(adpt->netdev, len); + + /* Make sure the are enough free descriptors to hold one + * maximum-sized SKB. 
We need one desc for each fragment, +diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c +index 57b35aeac51a..971aea538acd 100644 +--- a/drivers/net/ethernet/qualcomm/emac/emac.c ++++ b/drivers/net/ethernet/qualcomm/emac/emac.c +@@ -477,13 +477,24 @@ static int emac_clks_phase1_init(struct platform_device *pdev, + + ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]); + if (ret) +- return ret; ++ goto disable_clk_axi; + + ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000); + if (ret) +- return ret; ++ goto disable_clk_cfg_ahb; ++ ++ ret = clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]); ++ if (ret) ++ goto disable_clk_cfg_ahb; ++ ++ return 0; + +- return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]); ++disable_clk_cfg_ahb: ++ clk_disable_unprepare(adpt->clk[EMAC_CLK_CFG_AHB]); ++disable_clk_axi: ++ clk_disable_unprepare(adpt->clk[EMAC_CLK_AXI]); ++ ++ return ret; + } + + /* Enable clocks; needs emac_clks_phase1_init to be called before */ +@@ -735,12 +746,13 @@ static int emac_remove(struct platform_device *pdev) + if (!has_acpi_companion(&pdev->dev)) + put_device(&adpt->phydev->mdio.dev); + mdiobus_unregister(adpt->mii_bus); +- free_netdev(netdev); + + if (adpt->phy.digital) + iounmap(adpt->phy.digital); + iounmap(adpt->phy.base); + ++ free_netdev(netdev); ++ + return 0; + } + +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c +index 7a4393ffe98e..f321b115719a 100644 +--- a/drivers/net/ethernet/realtek/r8169.c ++++ b/drivers/net/ethernet/realtek/r8169.c +@@ -2338,7 +2338,7 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) + { + switch(stringset) { + case ETH_SS_STATS: +- memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings)); ++ memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings)); + break; + } + } +@@ -4476,6 +4476,58 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) + rtl_unlock_work(tp); + } + ++static void rtl_init_rxcfg(struct rtl8169_private *tp) ++{ ++ void __iomem *ioaddr = tp->mmio_addr; ++ ++ switch (tp->mac_version) { ++ case RTL_GIGA_MAC_VER_01: ++ case RTL_GIGA_MAC_VER_02: ++ case RTL_GIGA_MAC_VER_03: ++ case RTL_GIGA_MAC_VER_04: ++ case RTL_GIGA_MAC_VER_05: ++ case RTL_GIGA_MAC_VER_06: ++ case RTL_GIGA_MAC_VER_10: ++ case RTL_GIGA_MAC_VER_11: ++ case RTL_GIGA_MAC_VER_12: ++ case RTL_GIGA_MAC_VER_13: ++ case RTL_GIGA_MAC_VER_14: ++ case RTL_GIGA_MAC_VER_15: ++ case RTL_GIGA_MAC_VER_16: ++ case RTL_GIGA_MAC_VER_17: ++ RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); ++ break; ++ case RTL_GIGA_MAC_VER_18: ++ case RTL_GIGA_MAC_VER_19: ++ case RTL_GIGA_MAC_VER_20: ++ case RTL_GIGA_MAC_VER_21: ++ case RTL_GIGA_MAC_VER_22: ++ case RTL_GIGA_MAC_VER_23: ++ case RTL_GIGA_MAC_VER_24: ++ case RTL_GIGA_MAC_VER_34: ++ case RTL_GIGA_MAC_VER_35: ++ RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); ++ break; ++ case RTL_GIGA_MAC_VER_40: ++ case RTL_GIGA_MAC_VER_41: ++ case RTL_GIGA_MAC_VER_42: ++ case RTL_GIGA_MAC_VER_43: ++ case RTL_GIGA_MAC_VER_44: ++ case RTL_GIGA_MAC_VER_45: ++ case RTL_GIGA_MAC_VER_46: ++ case RTL_GIGA_MAC_VER_47: ++ case RTL_GIGA_MAC_VER_48: ++ case RTL_GIGA_MAC_VER_49: ++ case RTL_GIGA_MAC_VER_50: ++ case RTL_GIGA_MAC_VER_51: ++ RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); ++ break; ++ default: ++ RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); ++ break; ++ } ++} ++ + static int rtl_set_mac_address(struct net_device *dev, void *p) + { + struct rtl8169_private *tp = 
netdev_priv(dev); +@@ -4494,6 +4546,10 @@ static int rtl_set_mac_address(struct net_device *dev, void *p) + + pm_runtime_put_noidle(d); + ++ /* Reportedly at least Asus X453MA truncates packets otherwise */ ++ if (tp->mac_version == RTL_GIGA_MAC_VER_37) ++ rtl_init_rxcfg(tp); ++ + return 0; + } + +@@ -4931,58 +4987,6 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp) + } + } + +-static void rtl_init_rxcfg(struct rtl8169_private *tp) +-{ +- void __iomem *ioaddr = tp->mmio_addr; +- +- switch (tp->mac_version) { +- case RTL_GIGA_MAC_VER_01: +- case RTL_GIGA_MAC_VER_02: +- case RTL_GIGA_MAC_VER_03: +- case RTL_GIGA_MAC_VER_04: +- case RTL_GIGA_MAC_VER_05: +- case RTL_GIGA_MAC_VER_06: +- case RTL_GIGA_MAC_VER_10: +- case RTL_GIGA_MAC_VER_11: +- case RTL_GIGA_MAC_VER_12: +- case RTL_GIGA_MAC_VER_13: +- case RTL_GIGA_MAC_VER_14: +- case RTL_GIGA_MAC_VER_15: +- case RTL_GIGA_MAC_VER_16: +- case RTL_GIGA_MAC_VER_17: +- RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); +- break; +- case RTL_GIGA_MAC_VER_18: +- case RTL_GIGA_MAC_VER_19: +- case RTL_GIGA_MAC_VER_20: +- case RTL_GIGA_MAC_VER_21: +- case RTL_GIGA_MAC_VER_22: +- case RTL_GIGA_MAC_VER_23: +- case RTL_GIGA_MAC_VER_24: +- case RTL_GIGA_MAC_VER_34: +- case RTL_GIGA_MAC_VER_35: +- RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); +- break; +- case RTL_GIGA_MAC_VER_40: +- case RTL_GIGA_MAC_VER_41: +- case RTL_GIGA_MAC_VER_42: +- case RTL_GIGA_MAC_VER_43: +- case RTL_GIGA_MAC_VER_44: +- case RTL_GIGA_MAC_VER_45: +- case RTL_GIGA_MAC_VER_46: +- case RTL_GIGA_MAC_VER_47: +- case RTL_GIGA_MAC_VER_48: +- case RTL_GIGA_MAC_VER_49: +- case RTL_GIGA_MAC_VER_50: +- case RTL_GIGA_MAC_VER_51: +- RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); +- break; +- default: +- RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); +- break; +- } +-} +- + static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) + { + tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c +index 545cb6262cff..5452fe4bf12a 100644 +--- a/drivers/net/ethernet/renesas/ravb_main.c ++++ b/drivers/net/ethernet/renesas/ravb_main.c +@@ -1444,6 +1444,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) + struct ravb_private *priv = container_of(work, struct ravb_private, + work); + struct net_device *ndev = priv->ndev; ++ int error; + + netif_tx_stop_all_queues(ndev); + +@@ -1452,15 +1453,36 @@ static void ravb_tx_timeout_work(struct work_struct *work) + ravb_ptp_stop(ndev); + + /* Wait for DMA stopping */ +- ravb_stop_dma(ndev); ++ if (ravb_stop_dma(ndev)) { ++ /* If ravb_stop_dma() fails, the hardware is still operating ++ * for TX and/or RX. So, this should not call the following ++ * functions because ravb_dmac_init() is possible to fail too. ++ * Also, this should not retry ravb_stop_dma() again and again ++ * here because it's possible to wait forever. So, this just ++ * re-enables the TX and RX and skip the following ++ * re-initialization procedure. ++ */ ++ ravb_rcv_snd_enable(ndev); ++ goto out; ++ } + + ravb_ring_free(ndev, RAVB_BE); + ravb_ring_free(ndev, RAVB_NC); + + /* Device init */ +- ravb_dmac_init(ndev); ++ error = ravb_dmac_init(ndev); ++ if (error) { ++ /* If ravb_dmac_init() fails, descriptors are freed. So, this ++ * should return here to avoid re-enabling the TX and RX in ++ * ravb_emac_init(). 
++ */ ++ netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n", ++ __func__, error); ++ return; ++ } + ravb_emac_init(ndev); + ++out: + /* Initialise PTP Clock driver */ + if (priv->chip_id == RCAR_GEN2) + ravb_ptp_init(ndev, priv->pdev); +@@ -1707,12 +1729,16 @@ static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req) + config.flags = 0; + config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : + HWTSTAMP_TX_OFF; +- if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT) ++ switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) { ++ case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT: + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; +- else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL) ++ break; ++ case RAVB_RXTSTAMP_TYPE_ALL: + config.rx_filter = HWTSTAMP_FILTER_ALL; +- else ++ break; ++ default: + config.rx_filter = HWTSTAMP_FILTER_NONE; ++ } + + return copy_to_user(req->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c +index 6f8d4810ce97..468f02beccee 100644 +--- a/drivers/net/ethernet/renesas/sh_eth.c ++++ b/drivers/net/ethernet/renesas/sh_eth.c +@@ -526,6 +526,8 @@ static struct sh_eth_cpu_data r7s72100_data = { + EESR_TDE | EESR_ECI, + .fdr_value = 0x0000070f, + ++ .trscer_err_mask = DESC_I_RINT8 | DESC_I_RINT5, ++ + .no_psr = 1, + .apr = 1, + .mpr = 1, +@@ -2115,7 +2117,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data) + { + switch (stringset) { + case ETH_SS_STATS: +- memcpy(data, *sh_eth_gstrings_stats, ++ memcpy(data, sh_eth_gstrings_stats, + sizeof(sh_eth_gstrings_stats)); + break; + } +@@ -2411,10 +2413,10 @@ static int sh_eth_close(struct net_device *ndev) + /* Free all the skbuffs in the Rx queue and the DMA buffer. */ + sh_eth_ring_free(ndev); + +- pm_runtime_put_sync(&mdp->pdev->dev); +- + mdp->is_opened = 0; + ++ pm_runtime_put(&mdp->pdev->dev); ++ + return 0; + } + +diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c +index 24b746406bc7..4640e6c4aecf 100644 +--- a/drivers/net/ethernet/rocker/rocker_main.c ++++ b/drivers/net/ethernet/rocker/rocker_main.c +@@ -648,10 +648,10 @@ static int rocker_dma_rings_init(struct rocker *rocker) + err_dma_event_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker->event_ring); + err_dma_event_ring_create: ++ rocker_dma_cmd_ring_waits_free(rocker); ++err_dma_cmd_ring_waits_alloc: + rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL); +-err_dma_cmd_ring_waits_alloc: +- rocker_dma_cmd_ring_waits_free(rocker); + err_dma_cmd_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); + return err; +diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c +index a949b9d27329..bef23e19cbbd 100644 +--- a/drivers/net/ethernet/sfc/ef10_sriov.c ++++ b/drivers/net/ethernet/sfc/ef10_sriov.c +@@ -405,12 +405,17 @@ static int efx_ef10_pci_sriov_enable(struct efx_nic *efx, int num_vfs) + return rc; + } + ++/* Disable SRIOV and remove VFs ++ * If some VFs are attached to a guest (using Xen, only) nothing is ++ * done if force=false, and vports are freed if force=true (for the non ++ * attachedc ones, only) but SRIOV is not disabled and VFs are not ++ * removed in either case. 
++ */ + static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force) + { + struct pci_dev *dev = efx->pci_dev; +- unsigned int vfs_assigned = 0; +- +- vfs_assigned = pci_vfs_assigned(dev); ++ unsigned int vfs_assigned = pci_vfs_assigned(dev); ++ int rc = 0; + + if (vfs_assigned && !force) { + netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; " +@@ -420,10 +425,12 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force) + + if (!vfs_assigned) + pci_disable_sriov(dev); ++ else ++ rc = -EBUSY; + + efx_ef10_sriov_free_vf_vswitching(efx); + efx->vf_count = 0; +- return 0; ++ return rc; + } + + int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs) +@@ -442,7 +449,6 @@ int efx_ef10_sriov_init(struct efx_nic *efx) + void efx_ef10_sriov_fini(struct efx_nic *efx) + { + struct efx_ef10_nic_data *nic_data = efx->nic_data; +- unsigned int i; + int rc; + + if (!nic_data->vf) { +@@ -452,14 +458,7 @@ void efx_ef10_sriov_fini(struct efx_nic *efx) + return; + } + +- /* Remove any VFs in the host */ +- for (i = 0; i < efx->vf_count; ++i) { +- struct efx_nic *vf_efx = nic_data->vf[i].efx; +- +- if (vf_efx) +- vf_efx->pci_dev->driver->remove(vf_efx->pci_dev); +- } +- ++ /* Disable SRIOV and remove any VFs in the host */ + rc = efx_ef10_pci_sriov_disable(efx, true); + if (rc) + netif_dbg(efx, drv, efx->net_dev, +diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c +index ae9b983e8e5c..4a0e69a2d4f5 100644 +--- a/drivers/net/ethernet/sis/sis900.c ++++ b/drivers/net/ethernet/sis/sis900.c +@@ -442,7 +442,7 @@ static int sis900_probe(struct pci_dev *pci_dev, + #endif + + /* setup various bits in PCI command register */ +- ret = pci_enable_device(pci_dev); ++ ret = pcim_enable_device(pci_dev); + if(ret) return ret; + + i = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); +@@ -468,7 +468,7 @@ static int sis900_probe(struct pci_dev *pci_dev, + ioaddr = pci_iomap(pci_dev, 0, 0); + if (!ioaddr) { + ret = -ENOMEM; +- goto err_out_cleardev; ++ goto err_out; + } + + sis_priv = netdev_priv(net_dev); +@@ -576,8 +576,6 @@ static int sis900_probe(struct pci_dev *pci_dev, + sis_priv->tx_ring_dma); + err_out_unmap: + pci_iounmap(pci_dev, ioaddr); +-err_out_cleardev: +- pci_release_regions(pci_dev); + err_out: + free_netdev(net_dev); + return ret; +@@ -2425,7 +2423,6 @@ static void sis900_remove(struct pci_dev *pci_dev) + sis_priv->tx_ring_dma); + pci_iounmap(pci_dev, sis_priv->ioaddr); + free_netdev(net_dev); +- pci_release_regions(pci_dev); + } + + #ifdef CONFIG_PM +diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c +index b0c72167bade..3c221ca2cc12 100644 +--- a/drivers/net/ethernet/smsc/smc91x.c ++++ b/drivers/net/ethernet/smsc/smc91x.c +@@ -2294,7 +2294,7 @@ static int smc_drv_probe(struct platform_device *pdev) + ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio, + "power", 0, 0, 100); + if (ret) +- return ret; ++ goto out_free_netdev; + + /* + * Optional reset GPIO configured? 
Minimum 100 ns reset needed +@@ -2303,7 +2303,7 @@ static int smc_drv_probe(struct platform_device *pdev) + ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio, + "reset", 0, 0, 100); + if (ret) +- return ret; ++ goto out_free_netdev; + + /* + * Need to wait for optional EEPROM to load, max 750 us according +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +index bcc5d1e16ce2..f4ff43a1b5ba 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +@@ -362,6 +362,9 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) + plat_dat->has_gmac = true; + plat_dat->bsp_priv = gmac; + plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed; ++ plat_dat->multicast_filter_bins = 0; ++ plat_dat->tx_fifo_size = 8192; ++ plat_dat->rx_fifo_size = 8192; + + err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (err) +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +index 6704d3e0392d..afc68cbca4e2 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +@@ -30,7 +30,6 @@ + #define PRG_ETH0_RGMII_MODE BIT(0) + + /* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */ +-#define PRG_ETH0_CLK_M250_SEL_SHIFT 4 + #define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4) + + #define PRG_ETH0_TXDLY_SHIFT 5 +@@ -123,8 +122,9 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac) + init.num_parents = MUX_CLK_NUM_PARENTS; + + dwmac->m250_mux.reg = dwmac->regs + PRG_ETH0; +- dwmac->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT; +- dwmac->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK; ++ dwmac->m250_mux.shift = __ffs(PRG_ETH0_CLK_M250_SEL_MASK); ++ dwmac->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK >> ++ dwmac->m250_mux.shift; + dwmac->m250_mux.flags = 0; + dwmac->m250_mux.table = NULL; + dwmac->m250_mux.hw.init = &init; +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +index fc1fa0f9f338..fe4128405bbb 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +@@ -39,7 +39,7 @@ struct sunxi_priv_data { + static int sun7i_gmac_init(struct platform_device *pdev, void *priv) + { + struct sunxi_priv_data *gmac = priv; +- int ret; ++ int ret = 0; + + if (gmac->regulator) { + ret = regulator_enable(gmac->regulator); +@@ -60,11 +60,11 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv) + } else { + clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE); + ret = clk_prepare(gmac->tx_clk); +- if (ret) +- return ret; ++ if (ret && gmac->regulator) ++ regulator_disable(gmac->regulator); + } + +- return 0; ++ return ret; + } + + static void sun7i_gmac_exit(struct platform_device *pdev, void *priv) +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +index ff3e5ab39bd0..24fb7a2bba62 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +@@ -91,10 +91,10 @@ enum power_event { + #define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */ + + /* GMAC HW ADDR regs */ +-#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \ +- (reg * 8)) +-#define GMAC_ADDR_LOW(reg) (((reg > 15) ? 
0x00000804 : 0x00000044) + \ +- (reg * 8)) ++#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \ ++ 0x00000040 + (reg * 8)) ++#define GMAC_ADDR_LOW(reg) ((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \ ++ 0x00000044 + (reg * 8)) + #define GMAC_MAX_PERFECT_ADDRESSES 1 + + #define GMAC_PCS_BASE 0x000000c0 /* PCS register base */ +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +index 3a2edf9f51e2..bd265eb36e70 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +@@ -172,6 +172,9 @@ static void dwmac1000_set_filter(struct mac_device_info *hw, + value = GMAC_FRAME_FILTER_PR; + } else if (dev->flags & IFF_ALLMULTI) { + value = GMAC_FRAME_FILTER_PM; /* pass all multi */ ++ } else if (!netdev_mc_empty(dev) && (mcbitslog2 == 0)) { ++ /* Fall back to all multicast if we've no filter */ ++ value = GMAC_FRAME_FILTER_PM; + } else if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +index 3519a8a589dd..c8673e231a88 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +@@ -678,23 +678,16 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, + struct stmmac_priv *priv = netdev_priv(dev); + int ret; + +- if (!edata->eee_enabled) { ++ if (!priv->dma_cap.eee) ++ return -EOPNOTSUPP; ++ ++ if (!edata->eee_enabled) + stmmac_disable_eee_mode(priv); +- } else { +- /* We are asking for enabling the EEE but it is safe +- * to verify all by invoking the eee_init function. +- * In case of failure it will return an error. 
+- */ +- edata->eee_enabled = stmmac_eee_init(priv); +- if (!edata->eee_enabled) +- return -EOPNOTSUPP; +- } + + ret = phy_ethtool_set_eee(dev->phydev, edata); + if (ret) + return ret; + +- priv->eee_enabled = edata->eee_enabled; + priv->tx_lpi_timer = edata->tx_lpi_timer; + return 0; + } +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 8c1a5361f661..dbd56fefa2f3 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -1901,9 +1901,6 @@ static int stmmac_release(struct net_device *dev) + { + struct stmmac_priv *priv = netdev_priv(dev); + +- if (priv->eee_enabled) +- del_timer_sync(&priv->eee_ctrl_timer); +- + /* Stop and disconnect the PHY */ + if (priv->phydev) { + phy_stop(priv->phydev); +@@ -1924,6 +1921,11 @@ static int stmmac_release(struct net_device *dev) + if (priv->lpi_irq > 0) + free_irq(priv->lpi_irq, dev); + ++ if (priv->eee_enabled) { ++ priv->tx_path_in_lpi_mode = false; ++ del_timer_sync(&priv->eee_ctrl_timer); ++ } ++ + /* Stop TX/RX DMA and clear the descriptors */ + priv->hw->dma->stop_tx(priv->ioaddr); + priv->hw->dma->stop_rx(priv->ioaddr); +@@ -3503,6 +3505,11 @@ int stmmac_suspend(struct device *dev) + + napi_disable(&priv->napi); + ++ if (priv->eee_enabled) { ++ priv->tx_path_in_lpi_mode = false; ++ del_timer_sync(&priv->eee_ctrl_timer); ++ } ++ + /* Stop TX/RX DMA */ + priv->hw->dma->stop_tx(priv->ioaddr); + priv->hw->dma->stop_rx(priv->ioaddr); +diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c +index fe5b0ac8c631..f3aed26656a3 100644 +--- a/drivers/net/ethernet/sun/niu.c ++++ b/drivers/net/ethernet/sun/niu.c +@@ -3948,8 +3948,6 @@ static void niu_xmac_interrupt(struct niu *np) + mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; + if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) + mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; +- if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) +- mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; + if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP) + mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; + if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP) +@@ -8168,10 +8166,10 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end) + "VPD_SCAN: Reading in property [%s] len[%d]\n", + namebuf, prop_len); + for (i = 0; i < prop_len; i++) { +- err = niu_pci_eeprom_read(np, off + i); +- if (err >= 0) +- *prop_buf = err; +- ++prop_buf; ++ err = niu_pci_eeprom_read(np, off + i); ++ if (err < 0) ++ return err; ++ *prop_buf++ = err; + } + } + +@@ -8182,14 +8180,14 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end) + } + + /* ESPC_PIO_EN_ENABLE must be set */ +-static void niu_pci_vpd_fetch(struct niu *np, u32 start) ++static int niu_pci_vpd_fetch(struct niu *np, u32 start) + { + u32 offset; + int err; + + err = niu_pci_eeprom_read16_swp(np, start + 1); + if (err < 0) +- return; ++ return err; + + offset = err + 3; + +@@ -8198,12 +8196,14 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start) + u32 end; + + err = niu_pci_eeprom_read(np, here); ++ if (err < 0) ++ return err; + if (err != 0x90) +- return; ++ return -EINVAL; + + err = niu_pci_eeprom_read16_swp(np, here + 1); + if (err < 0) +- return; ++ return err; + + here = start + offset + 3; + end = start + offset + err; +@@ -8211,9 +8211,13 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start) + offset += err; + + err = niu_pci_vpd_scan_props(np, here, end); +- if (err < 0 || err == 1) +- return; ++ if (err < 0) ++ return err; ++ /* ret == 1 
is not an error */ ++ if (err == 1) ++ return 0; + } ++ return 0; + } + + /* ESPC_PIO_EN_ENABLE must be set */ +@@ -9306,8 +9310,11 @@ static int niu_get_invariants(struct niu *np) + offset = niu_pci_vpd_offset(np); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "%s() VPD offset [%08x]\n", __func__, offset); +- if (offset) +- niu_pci_vpd_fetch(np, offset); ++ if (offset) { ++ err = niu_pci_vpd_fetch(np, offset); ++ if (err < 0) ++ return err; ++ } + nw64(ESPC_PIO_EN, 0); + + if (np->flags & NIU_FLAGS_VPD_VALID) { +diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c +index 904a5a12a85d..8408f9551276 100644 +--- a/drivers/net/ethernet/sun/sunvnet_common.c ++++ b/drivers/net/ethernet/sun/sunvnet_common.c +@@ -1263,28 +1263,12 @@ int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, + if (vio_version_after_eq(&port->vio, 1, 3)) + localmtu -= VLAN_HLEN; + +- if (skb->protocol == htons(ETH_P_IP)) { +- struct flowi4 fl4; +- struct rtable *rt = NULL; +- +- memset(&fl4, 0, sizeof(fl4)); +- fl4.flowi4_oif = dev->ifindex; +- fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); +- fl4.daddr = ip_hdr(skb)->daddr; +- fl4.saddr = ip_hdr(skb)->saddr; +- +- rt = ip_route_output_key(dev_net(dev), &fl4); +- rcu_read_unlock(); +- if (!IS_ERR(rt)) { +- skb_dst_set(skb, &rt->dst); +- icmp_send(skb, ICMP_DEST_UNREACH, +- ICMP_FRAG_NEEDED, +- htonl(localmtu)); +- } +- } ++ if (skb->protocol == htons(ETH_P_IP)) ++ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, ++ htonl(localmtu)); + #if IS_ENABLED(CONFIG_IPV6) + else if (skb->protocol == htons(ETH_P_IPV6)) +- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu); ++ icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu); + #endif + goto out_dropped; + } +diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c +index 7108c68f16d3..6ee7f8d2f2d1 100644 +--- a/drivers/net/ethernet/tehuti/tehuti.c ++++ b/drivers/net/ethernet/tehuti/tehuti.c +@@ -2062,6 +2062,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + /*bdx_hw_reset(priv); */ + if (bdx_read_mac(priv)) { + pr_err("load MAC address failed\n"); ++ err = -EFAULT; + goto err_out_iomap; + } + SET_NETDEV_DEV(ndev, &pdev->dev); +diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c +index 413cf14dbacd..8b7596fef42a 100644 +--- a/drivers/net/ethernet/ti/davinci_emac.c ++++ b/drivers/net/ethernet/ti/davinci_emac.c +@@ -183,11 +183,11 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; + /* EMAC mac_status register */ + #define EMAC_MACSTATUS_TXERRCODE_MASK (0xF00000) + #define EMAC_MACSTATUS_TXERRCODE_SHIFT (20) +-#define EMAC_MACSTATUS_TXERRCH_MASK (0x7) ++#define EMAC_MACSTATUS_TXERRCH_MASK (0x70000) + #define EMAC_MACSTATUS_TXERRCH_SHIFT (16) + #define EMAC_MACSTATUS_RXERRCODE_MASK (0xF000) + #define EMAC_MACSTATUS_RXERRCODE_SHIFT (12) +-#define EMAC_MACSTATUS_RXERRCH_MASK (0x7) ++#define EMAC_MACSTATUS_RXERRCH_MASK (0x700) + #define EMAC_MACSTATUS_RXERRCH_SHIFT (8) + + /* EMAC RX register masks */ +diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c +index 32516661f180..c17967b23d3c 100644 +--- a/drivers/net/ethernet/ti/netcp_core.c ++++ b/drivers/net/ethernet/ti/netcp_core.c +@@ -1325,9 +1325,9 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe) + tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id, + KNAV_QUEUE_SHARED); + if (IS_ERR(tx_pipe->dma_queue)) { ++ ret = 
PTR_ERR(tx_pipe->dma_queue); + dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n", + name, ret); +- ret = PTR_ERR(tx_pipe->dma_queue); + goto err; + } + +diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c +index 6c7ec1ddd475..ba6f28cf3985 100644 +--- a/drivers/net/ethernet/ti/tlan.c ++++ b/drivers/net/ethernet/ti/tlan.c +@@ -313,9 +313,8 @@ static void tlan_remove_one(struct pci_dev *pdev) + pci_release_regions(pdev); + #endif + +- free_netdev(dev); +- + cancel_work_sync(&priv->tlan_tqueue); ++ free_netdev(dev); + } + + static void tlan_start(struct net_device *dev) +diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c +index 108598794621..9507ca2e02ac 100644 +--- a/drivers/net/ethernet/toshiba/spider_net.c ++++ b/drivers/net/ethernet/toshiba/spider_net.c +@@ -296,8 +296,8 @@ spider_net_free_chain(struct spider_net_card *card, + descr = descr->next; + } while (descr != chain->ring); + +- dma_free_coherent(&card->pdev->dev, chain->num_desc, +- chain->hwring, chain->dma_addr); ++ dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr), ++ chain->hwring, chain->dma_addr); + } + + /** +diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c +index 545f60877bb7..9ba36c930ce3 100644 +--- a/drivers/net/ethernet/xilinx/ll_temac_main.c ++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c +@@ -735,6 +735,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) + /* Kick off the transfer */ + lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */ + ++ if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) { ++ netdev_info(ndev, "%s -> netif_stop_queue\n", __func__); ++ netif_stop_queue(ndev); ++ } ++ + return NETDEV_TX_OK; + } + +diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c +index 034b36442ee7..df3b3384984c 100644 +--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c ++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c +@@ -1179,9 +1179,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev) + } + + dev_info(dev, +- "Xilinx EmacLite at 0x%08X mapped to 0x%08X, irq=%d\n", +- (unsigned int __force)ndev->mem_start, +- (unsigned int __force)lp->base_addr, ndev->irq); ++ "Xilinx EmacLite at 0x%08X mapped to 0x%p, irq=%d\n", ++ (unsigned int __force)ndev->mem_start, lp->base_addr, ndev->irq); + return 0; + + error: +diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig +index 3a424c864f4d..ecebeeb9b2a0 100644 +--- a/drivers/net/fddi/Kconfig ++++ b/drivers/net/fddi/Kconfig +@@ -28,17 +28,20 @@ config DEFXX + + config DEFXX_MMIO + bool +- prompt "Use MMIO instead of PIO" if PCI || EISA ++ prompt "Use MMIO instead of IOP" if PCI || EISA + depends on DEFXX +- default n if PCI || EISA ++ default n if EISA + default y + ---help--- + This instructs the driver to use EISA or PCI memory-mapped I/O +- (MMIO) as appropriate instead of programmed I/O ports (PIO). ++ (MMIO) as appropriate instead of programmed I/O ports (IOP). + Enabling this gives an improvement in processing time in parts +- of the driver, but it may cause problems with EISA (DEFEA) +- adapters. TURBOchannel does not have the concept of I/O ports, +- so MMIO is always used for these (DEFTA) adapters. ++ of the driver, but it requires a memory window to be configured ++ for EISA (DEFEA) adapters that may not always be available. 
++ Conversely some PCIe host bridges do not support IOP, so MMIO ++ may be required to access PCI (DEFPA) adapters on downstream PCI ++ buses with some systems. TURBOchannel does not have the concept ++ of I/O ports, so MMIO is always used for these (DEFTA) adapters. + + If unsure, say N. + +diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c +index b0de8ecd7fe8..bdcf4aa34566 100644 +--- a/drivers/net/fddi/defxx.c ++++ b/drivers/net/fddi/defxx.c +@@ -495,6 +495,25 @@ static const struct net_device_ops dfx_netdev_ops = { + .ndo_set_mac_address = dfx_ctl_set_mac_address, + }; + ++static void dfx_register_res_alloc_err(const char *print_name, bool mmio, ++ bool eisa) ++{ ++ pr_err("%s: Cannot use %s, no address set, aborting\n", ++ print_name, mmio ? "MMIO" : "I/O"); ++ pr_err("%s: Recompile driver with \"CONFIG_DEFXX_MMIO=%c\"\n", ++ print_name, mmio ? 'n' : 'y'); ++ if (eisa && mmio) ++ pr_err("%s: Or run ECU and set adapter's MMIO location\n", ++ print_name); ++} ++ ++static void dfx_register_res_err(const char *print_name, bool mmio, ++ unsigned long start, unsigned long len) ++{ ++ pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n", ++ print_name, mmio ? "MMIO" : "I/O", len, start); ++} ++ + /* + * ================ + * = dfx_register = +@@ -568,15 +587,12 @@ static int dfx_register(struct device *bdev) + dev_set_drvdata(bdev, dev); + + dfx_get_bars(bdev, bar_start, bar_len); +- if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) { +- pr_err("%s: Cannot use MMIO, no address set, aborting\n", +- print_name); +- pr_err("%s: Run ECU and set adapter's MMIO location\n", +- print_name); +- pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\"" +- "\n", print_name); ++ if (bar_len[0] == 0 || ++ (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0)) { ++ dfx_register_res_alloc_err(print_name, dfx_use_mmio, ++ dfx_bus_eisa); + err = -ENXIO; +- goto err_out; ++ goto err_out_disable; + } + + if (dfx_use_mmio) +@@ -585,18 +601,16 @@ static int dfx_register(struct device *bdev) + else + region = request_region(bar_start[0], bar_len[0], print_name); + if (!region) { +- pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, " +- "aborting\n", dfx_use_mmio ? 
"MMIO" : "I/O", print_name, +- (long)bar_len[0], (long)bar_start[0]); ++ dfx_register_res_err(print_name, dfx_use_mmio, ++ bar_start[0], bar_len[0]); + err = -EBUSY; + goto err_out_disable; + } + if (bar_start[1] != 0) { + region = request_region(bar_start[1], bar_len[1], print_name); + if (!region) { +- pr_err("%s: Cannot reserve I/O resource " +- "0x%lx @ 0x%lx, aborting\n", print_name, +- (long)bar_len[1], (long)bar_start[1]); ++ dfx_register_res_err(print_name, 0, ++ bar_start[1], bar_len[1]); + err = -EBUSY; + goto err_out_csr_region; + } +@@ -604,9 +618,8 @@ static int dfx_register(struct device *bdev) + if (bar_start[2] != 0) { + region = request_region(bar_start[2], bar_len[2], print_name); + if (!region) { +- pr_err("%s: Cannot reserve I/O resource " +- "0x%lx @ 0x%lx, aborting\n", print_name, +- (long)bar_len[2], (long)bar_start[2]); ++ dfx_register_res_err(print_name, 0, ++ bar_start[2], bar_len[2]); + err = -EBUSY; + goto err_out_bh_region; + } +diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c +index 3511d40ba3f1..440047a239f5 100644 +--- a/drivers/net/fjes/fjes_main.c ++++ b/drivers/net/fjes/fjes_main.c +@@ -1212,6 +1212,10 @@ static int fjes_probe(struct platform_device *plat_dev) + adapter->interrupt_watch_enable = false; + + res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0); ++ if (!res) { ++ err = -EINVAL; ++ goto err_free_control_wq; ++ } + hw->hw_res.start = res->start; + hw->hw_res.size = resource_size(res); + hw->hw_res.irq = platform_get_irq(plat_dev, 0); +diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c +index 35d8c636de12..d89995f4bd43 100644 +--- a/drivers/net/geneve.c ++++ b/drivers/net/geneve.c +@@ -732,7 +732,8 @@ static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb, + static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, + struct net_device *dev, + struct flowi4 *fl4, +- struct ip_tunnel_info *info) ++ struct ip_tunnel_info *info, ++ __be16 dport, __be16 sport) + { + bool use_cache = ip_tunnel_dst_cache_usable(skb, info); + struct geneve_dev *geneve = netdev_priv(dev); +@@ -746,6 +747,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, + memset(fl4, 0, sizeof(*fl4)); + fl4->flowi4_mark = skb->mark; + fl4->flowi4_proto = IPPROTO_UDP; ++ fl4->fl4_dport = dport; ++ fl4->fl4_sport = sport; + + if (info) { + fl4->daddr = info->key.u.ipv4.dst; +@@ -791,7 +794,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, + static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, + struct net_device *dev, + struct flowi6 *fl6, +- struct ip_tunnel_info *info) ++ struct ip_tunnel_info *info, ++ __be16 dport, __be16 sport) + { + bool use_cache = ip_tunnel_dst_cache_usable(skb, info); + struct geneve_dev *geneve = netdev_priv(dev); +@@ -807,6 +811,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, + memset(fl6, 0, sizeof(*fl6)); + fl6->flowi6_mark = skb->mark; + fl6->flowi6_proto = IPPROTO_UDP; ++ fl6->fl6_dport = dport; ++ fl6->fl6_sport = sport; + + if (info) { + fl6->daddr = info->key.u.ipv6.dst; +@@ -894,13 +900,14 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, + goto tx_error; + } + +- rt = geneve_get_v4_rt(skb, dev, &fl4, info); ++ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); ++ rt = geneve_get_v4_rt(skb, dev, &fl4, info, ++ geneve->dst_port, sport); + if (IS_ERR(rt)) { + err = PTR_ERR(rt); + goto tx_error; + } + +- sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); + skb_reset_mac_header(skb); 
+ + if (info) { +@@ -983,13 +990,14 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, + } + } + +- dst = geneve_get_v6_dst(skb, dev, &fl6, info); ++ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); ++ dst = geneve_get_v6_dst(skb, dev, &fl6, info, ++ geneve->dst_port, sport); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + goto tx_error; + } + +- sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); + skb_reset_mac_header(skb); + + if (info) { +@@ -1114,9 +1122,14 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) + struct dst_entry *dst; + struct flowi6 fl6; + #endif ++ __be16 sport; + + if (ip_tunnel_info_af(info) == AF_INET) { +- rt = geneve_get_v4_rt(skb, dev, &fl4, info); ++ sport = udp_flow_src_port(geneve->net, skb, ++ 1, USHRT_MAX, true); ++ ++ rt = geneve_get_v4_rt(skb, dev, &fl4, info, ++ geneve->dst_port, sport); + if (IS_ERR(rt)) + return PTR_ERR(rt); + +@@ -1124,7 +1137,11 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) + info->key.u.ipv4.src = fl4.saddr; + #if IS_ENABLED(CONFIG_IPV6) + } else if (ip_tunnel_info_af(info) == AF_INET6) { +- dst = geneve_get_v6_dst(skb, dev, &fl6, info); ++ sport = udp_flow_src_port(geneve->net, skb, ++ 1, USHRT_MAX, true); ++ ++ dst = geneve_get_v6_dst(skb, dev, &fl6, info, ++ geneve->dst_port, sport); + if (IS_ERR(dst)) + return PTR_ERR(dst); + +@@ -1135,8 +1152,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) + return -EINVAL; + } + +- info->key.tp_src = udp_flow_src_port(geneve->net, skb, +- 1, USHRT_MAX, true); ++ info->key.tp_src = sport; + info->key.tp_dst = geneve->dst_port; + return 0; + } +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c +index fe844888e0ed..e513736ae0d5 100644 +--- a/drivers/net/gtp.c ++++ b/drivers/net/gtp.c +@@ -559,9 +559,8 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, + if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) && + mtu < ntohs(iph->tot_len)) { + netdev_dbg(dev, "packet too big, fragmentation needed\n"); +- memset(IPCB(skb), 0, sizeof(*IPCB(skb))); +- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, +- htonl(mtu)); ++ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, ++ htonl(mtu)); + goto err_rt; + } + +diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c +index 03c96a6cbafd..e510dbda77e5 100644 +--- a/drivers/net/hamradio/6pack.c ++++ b/drivers/net/hamradio/6pack.c +@@ -870,6 +870,12 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte) + return; + } + ++ if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) { ++ pr_err("6pack: cooked buffer overrun, data loss\n"); ++ sp->rx_count = 0; ++ return; ++ } ++ + buf = sp->raw_buf; + sp->cooked_buf[sp->rx_count_cooked++] = + buf[0] | ((buf[1] << 2) & 0xc0); +diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c +index 088fe5d34f50..76340bc3cf44 100644 +--- a/drivers/net/hamradio/mkiss.c ++++ b/drivers/net/hamradio/mkiss.c +@@ -810,6 +810,7 @@ static void mkiss_close(struct tty_struct *tty) + ax->tty = NULL; + + unregister_netdev(ax->dev); ++ free_netdev(ax->dev); + } + + /* Perform I/O control on an active ax25 channel. 
*/ +diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c +index aaff07c10058..a453b82d1077 100644 +--- a/drivers/net/hamradio/yam.c ++++ b/drivers/net/hamradio/yam.c +@@ -1160,6 +1160,7 @@ static int __init yam_init_driver(void) + err = register_netdev(dev); + if (err) { + printk(KERN_WARNING "yam: cannot register net device %s\n", dev->name); ++ free_netdev(dev); + goto error; + } + yam_devs[i] = dev; +diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c +index 313e006f74fe..6f3519123eb6 100644 +--- a/drivers/net/hippi/rrunner.c ++++ b/drivers/net/hippi/rrunner.c +@@ -1250,7 +1250,7 @@ static int rr_open(struct net_device *dev) + rrpriv->info = NULL; + } + if (rrpriv->rx_ctrl) { +- pci_free_consistent(pdev, sizeof(struct ring_ctrl), ++ pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl), + rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma); + rrpriv->rx_ctrl = NULL; + } +diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c +index 1b980f12663a..a605dfb15bb7 100644 +--- a/drivers/net/ieee802154/adf7242.c ++++ b/drivers/net/ieee802154/adf7242.c +@@ -834,7 +834,9 @@ static int adf7242_rx(struct adf7242_local *lp) + int ret; + u8 lqi, len_u8, *data; + +- adf7242_read_reg(lp, 0, &len_u8); ++ ret = adf7242_read_reg(lp, 0, &len_u8); ++ if (ret) ++ return ret; + + len = len_u8; + +diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c +index 12df6cfb423a..0ee54fba0a23 100644 +--- a/drivers/net/ieee802154/atusb.c ++++ b/drivers/net/ieee802154/atusb.c +@@ -341,6 +341,7 @@ static int atusb_alloc_urbs(struct atusb *atusb, int n) + return -ENOMEM; + } + usb_anchor_urb(urb, &atusb->idle_urbs); ++ usb_free_urb(urb); + n--; + } + return 0; +diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c +index 72f37e546ed2..ac757819cadb 100644 +--- a/drivers/net/ipvlan/ipvlan_main.c ++++ b/drivers/net/ipvlan/ipvlan_main.c +@@ -168,12 +168,21 @@ static void ipvlan_port_destroy(struct net_device *dev) + kfree_rcu(port, rcu); + } + ++#define IPVLAN_ALWAYS_ON_OFLOADS \ ++ (NETIF_F_SG | NETIF_F_HW_CSUM | \ ++ NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL) ++ ++#define IPVLAN_ALWAYS_ON \ ++ (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED) ++ + #define IPVLAN_FEATURES \ + (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ + NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ + NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) + ++ /* NETIF_F_GSO_ENCAP_ALL NETIF_F_GSO_SOFTWARE Newly added */ ++ + #define IPVLAN_STATE_MASK \ + ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) + +@@ -186,7 +195,9 @@ static int ipvlan_init(struct net_device *dev) + dev->state = (dev->state & ~IPVLAN_STATE_MASK) | + (phy_dev->state & IPVLAN_STATE_MASK); + dev->features = phy_dev->features & IPVLAN_FEATURES; +- dev->features |= NETIF_F_LLTX; ++ dev->features |= IPVLAN_ALWAYS_ON; ++ dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES; ++ dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS; + dev->gso_max_size = phy_dev->gso_max_size; + dev->gso_max_segs = phy_dev->gso_max_segs; + dev->hard_header_len = phy_dev->hard_header_len; +@@ -274,7 +285,14 @@ static netdev_features_t ipvlan_fix_features(struct net_device *dev, + { + struct ipvl_dev *ipvlan = netdev_priv(dev); + +- return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES); ++ features |= NETIF_F_ALL_FOR_ALL; ++ features &= 
(ipvlan->sfeatures | ~IPVLAN_FEATURES); ++ features = netdev_increment_features(ipvlan->phy_dev->features, ++ features, features); ++ features |= IPVLAN_ALWAYS_ON; ++ features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON); ++ ++ return features; + } + + static void ipvlan_change_rx_flags(struct net_device *dev, int change) +@@ -675,10 +693,9 @@ static int ipvlan_device_event(struct notifier_block *unused, + + case NETDEV_FEAT_CHANGE: + list_for_each_entry(ipvlan, &port->ipvlans, pnode) { +- ipvlan->dev->features = dev->features & IPVLAN_FEATURES; + ipvlan->dev->gso_max_size = dev->gso_max_size; + ipvlan->dev->gso_max_segs = dev->gso_max_segs; +- netdev_features_change(ipvlan->dev); ++ netdev_update_features(ipvlan->dev); + } + break; + +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c +index 5defa29069ca..774b9db0c811 100644 +--- a/drivers/net/macsec.c ++++ b/drivers/net/macsec.c +@@ -1087,6 +1087,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) + struct macsec_rx_sa *rx_sa; + struct macsec_rxh_data *rxd; + struct macsec_dev *macsec; ++ unsigned int len; + sci_t sci; + u32 pn; + bool cbit; +@@ -1242,9 +1243,10 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) + macsec_rxsc_put(rx_sc); + + skb_orphan(skb); ++ len = skb->len; + ret = gro_cells_receive(&macsec->gro_cells, skb); + if (ret == NET_RX_SUCCESS) +- count_rx(dev, skb->len); ++ count_rx(dev, len); + else + macsec->secy.netdev->stats.rx_dropped++; + +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c +index 4f582ce929f2..9dda2dc6b5e7 100644 +--- a/drivers/net/macvlan.c ++++ b/drivers/net/macvlan.c +@@ -421,6 +421,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) + int ret; + rx_handler_result_t handle_res; + ++ /* Packets from dev_loopback_xmit() do not have L2 header, bail out */ ++ if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) ++ return RX_HANDLER_PASS; ++ + port = macvlan_port_get_rcu(skb->dev); + if (is_multicast_ether_addr(eth->h_dest)) { + unsigned int hash; +diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig +index 2651c8d8de2f..032017bd0ced 100644 +--- a/drivers/net/phy/Kconfig ++++ b/drivers/net/phy/Kconfig +@@ -135,6 +135,7 @@ config MDIO_THUNDER + depends on 64BIT + depends on PCI + select MDIO_CAVIUM ++ select MDIO_DEVRES + help + This driver supports the MDIO interfaces found on Cavium + ThunderX SoCs when the MDIO bus device appears as a PCI +diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c +index 7e94526de51c..5649cc075ccb 100644 +--- a/drivers/net/phy/dp83640.c ++++ b/drivers/net/phy/dp83640.c +@@ -1337,6 +1337,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) + dp83640->hwts_rx_en = 1; + dp83640->layer = PTP_CLASS_L4; + dp83640->version = PTP_CLASS_V1; ++ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: +@@ -1344,6 +1345,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) + dp83640->hwts_rx_en = 1; + dp83640->layer = PTP_CLASS_L4; + dp83640->version = PTP_CLASS_V2; ++ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: +@@ -1351,6 +1353,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) + dp83640->hwts_rx_en = 1; + dp83640->layer = PTP_CLASS_L2; + dp83640->version = PTP_CLASS_V2; ++ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + break; + case 
HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: +@@ -1358,6 +1361,7 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) + dp83640->hwts_rx_en = 1; + dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2; + dp83640->version = PTP_CLASS_V2; ++ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + default: + return -ERANGE; +diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c +index 8c73b2e771dd..e6ff731d753d 100644 +--- a/drivers/net/phy/mdio-bcm-unimac.c ++++ b/drivers/net/phy/mdio-bcm-unimac.c +@@ -177,6 +177,8 @@ static int unimac_mdio_probe(struct platform_device *pdev) + return -ENOMEM; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!r) ++ return -EINVAL; + + /* Just ioremap, as this MDIO block is usually integrated into an + * Ethernet MAC controller register range +diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c +index ab6914f8bd50..1da104150f44 100644 +--- a/drivers/net/phy/mdio-octeon.c ++++ b/drivers/net/phy/mdio-octeon.c +@@ -75,7 +75,6 @@ static int octeon_mdiobus_probe(struct platform_device *pdev) + + return 0; + fail_register: +- mdiobus_free(bus->mii_bus); + smi_en.u64 = 0; + oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN); + return err; +@@ -89,7 +88,6 @@ static int octeon_mdiobus_remove(struct platform_device *pdev) + bus = platform_get_drvdata(pdev); + + mdiobus_unregister(bus->mii_bus); +- mdiobus_free(bus->mii_bus); + smi_en.u64 = 0; + oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN); + return 0; +diff --git a/drivers/net/phy/mdio-thunder.c b/drivers/net/phy/mdio-thunder.c +index 564616968cad..c0c922eff760 100644 +--- a/drivers/net/phy/mdio-thunder.c ++++ b/drivers/net/phy/mdio-thunder.c +@@ -129,7 +129,6 @@ static void thunder_mdiobus_pci_remove(struct pci_dev *pdev) + continue; + + mdiobus_unregister(bus->mii_bus); +- mdiobus_free(bus->mii_bus); + oct_mdio_writeq(0, bus->register_base + SMI_EN); + } + pci_set_drvdata(pdev, NULL); +diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c +index a9bbdcec0bad..8cc7563ab103 100644 +--- a/drivers/net/phy/mdio_bus.c ++++ b/drivers/net/phy/mdio_bus.c +@@ -362,7 +362,8 @@ void mdiobus_unregister(struct mii_bus *bus) + struct mdio_device *mdiodev; + int i; + +- BUG_ON(bus->state != MDIOBUS_REGISTERED); ++ if (WARN_ON_ONCE(bus->state != MDIOBUS_REGISTERED)) ++ return; + bus->state = MDIOBUS_UNREGISTERED; + + for (i = 0; i < PHY_MAX_ADDR; i++) { +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c +index 2f5587306022..a3ba95e96695 100644 +--- a/drivers/net/phy/phy_device.c ++++ b/drivers/net/phy/phy_device.c +@@ -1013,7 +1013,8 @@ void phy_detach(struct phy_device *phydev) + phydev->attached_dev = NULL; + phy_suspend(phydev); + +- module_put(phydev->mdio.dev.driver->owner); ++ if (phydev->mdio.dev.driver) ++ module_put(phydev->mdio.dev.driver->owner); + + /* If the device had no specific driver before (i.e. 
- it + * was using the generic driver), we unbind the device +diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c +index d209d5fe9c8e..28c778dbbd32 100644 +--- a/drivers/net/ppp/ppp_generic.c ++++ b/drivers/net/ppp/ppp_generic.c +@@ -285,7 +285,7 @@ static struct channel *ppp_find_channel(struct ppp_net *pn, int unit); + static int ppp_connect_channel(struct channel *pch, int unit); + static int ppp_disconnect_channel(struct channel *pch); + static void ppp_destroy_channel(struct channel *pch); +-static int unit_get(struct idr *p, void *ptr); ++static int unit_get(struct idr *p, void *ptr, int min); + static int unit_set(struct idr *p, void *ptr, int n); + static void unit_put(struct idr *p, int n); + static void *unit_find(struct idr *p, int n); +@@ -976,9 +976,20 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set) + mutex_lock(&pn->all_ppp_mutex); + + if (unit < 0) { +- ret = unit_get(&pn->units_idr, ppp); ++ ret = unit_get(&pn->units_idr, ppp, 0); + if (ret < 0) + goto err; ++ if (!ifname_is_set) { ++ while (1) { ++ snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ret); ++ if (!__dev_get_by_name(ppp->ppp_net, ppp->dev->name)) ++ break; ++ unit_put(&pn->units_idr, ret); ++ ret = unit_get(&pn->units_idr, ppp, ret + 1); ++ if (ret < 0) ++ goto err; ++ } ++ } + } else { + /* Caller asked for a specific unit number. Fail with -EEXIST + * if unavailable. For backward compatibility, return -EEXIST +@@ -1125,7 +1136,7 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev, + * the PPP unit identifer as suffix (i.e. ppp). This allows + * userspace to infer the device name using to the PPPIOCGUNIT ioctl. + */ +- if (!tb[IFLA_IFNAME]) ++ if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME])) + conf.ifname_is_set = false; + + err = ppp_dev_configure(src_net, dev, &conf); +@@ -3313,9 +3324,9 @@ static int unit_set(struct idr *p, void *ptr, int n) + } + + /* get new free unit number and associate pointer with it */ +-static int unit_get(struct idr *p, void *ptr) ++static int unit_get(struct idr *p, void *ptr, int min) + { +- return idr_alloc(p, ptr, 0, 0, GFP_KERNEL); ++ return idr_alloc(p, ptr, min, 0, GFP_KERNEL); + } + + /* put unit number back to a pool */ +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c +index cea6d2eabe7e..001dea7aaba3 100644 +--- a/drivers/net/team/team.c ++++ b/drivers/net/team/team.c +@@ -299,7 +299,7 @@ static int __team_options_register(struct team *team, + for (i--; i >= 0; i--) + __team_option_inst_del_option(team, dst_opts[i]); + +- i = option_count - 1; ++ i = option_count; + alloc_rollback: + for (i--; i >= 0; i--) + kfree(dst_opts[i]); +@@ -2085,6 +2085,7 @@ static void team_setup_by_port(struct net_device *dev, + dev->header_ops = port_dev->header_ops; + dev->type = port_dev->type; + dev->hard_header_len = port_dev->hard_header_len; ++ dev->needed_headroom = port_dev->needed_headroom; + dev->addr_len = port_dev->addr_len; + dev->mtu = port_dev->mtu; + memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len); +diff --git a/drivers/net/tun.c b/drivers/net/tun.c +index fbd8ffe34e87..ac60322f7a5d 100644 +--- a/drivers/net/tun.c ++++ b/drivers/net/tun.c +@@ -72,6 +72,14 @@ + #include + #include + #include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include + + #include + +@@ -2025,6 +2033,45 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr) + return ret; + } + ++/* Return correct value for 
tun->dev->addr_len based on tun->dev->type. */ ++static unsigned char tun_get_addr_len(unsigned short type) ++{ ++ switch (type) { ++ case ARPHRD_IP6GRE: ++ case ARPHRD_TUNNEL6: ++ return sizeof(struct in6_addr); ++ case ARPHRD_IPGRE: ++ case ARPHRD_TUNNEL: ++ case ARPHRD_SIT: ++ return 4; ++ case ARPHRD_ETHER: ++ return ETH_ALEN; ++ case ARPHRD_IEEE802154: ++ case ARPHRD_IEEE802154_MONITOR: ++ return IEEE802154_EXTENDED_ADDR_LEN; ++ case ARPHRD_PHONET_PIPE: ++ case ARPHRD_PPP: ++ case ARPHRD_NONE: ++ return 0; ++ case ARPHRD_6LOWPAN: ++ return EUI64_ADDR_LEN; ++ case ARPHRD_FDDI: ++ return FDDI_K_ALEN; ++ case ARPHRD_HIPPI: ++ return HIPPI_ALEN; ++ case ARPHRD_IEEE802: ++ return FC_ALEN; ++ case ARPHRD_ROSE: ++ return ROSE_ADDR_LEN; ++ case ARPHRD_NETROM: ++ return AX25_ADDR_LEN; ++ case ARPHRD_LOCALTLK: ++ return LTALK_ALEN; ++ default: ++ return 0; ++ } ++} ++ + static long __tun_chr_ioctl(struct file *file, unsigned int cmd, + unsigned long arg, int ifreq_len) + { +@@ -2169,6 +2216,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, + ret = -EBUSY; + } else { + tun->dev->type = (int) arg; ++ tun->dev->addr_len = tun_get_addr_len(tun->dev->type); + tun_debug(KERN_INFO, tun, "linktype set to %d\n", + tun->dev->type); + ret = 0; +diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c +index 3dbb0646b024..541c06c884e5 100644 +--- a/drivers/net/usb/asix_common.c ++++ b/drivers/net/usb/asix_common.c +@@ -277,7 +277,7 @@ int asix_read_phy_addr(struct usbnet *dev, int internal) + + netdev_dbg(dev->net, "asix_get_phy_addr()\n"); + +- if (ret < 0) { ++ if (ret < 2) { + netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret); + goto out; + } +diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c +index 2c50497cc4ed..7ec8992401fb 100644 +--- a/drivers/net/usb/ax88172a.c ++++ b/drivers/net/usb/ax88172a.c +@@ -217,6 +217,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf) + ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0); + if (ret < ETH_ALEN) { + netdev_err(dev->net, "Failed to read MAC address: %d\n", ret); ++ ret = -EIO; + goto free; + } + memcpy(dev->net->dev_addr, buf, ETH_ALEN); +diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c +index cb46d9680c43..011fcd73a536 100644 +--- a/drivers/net/usb/ax88179_178a.c ++++ b/drivers/net/usb/ax88179_178a.c +@@ -307,12 +307,12 @@ static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, + int ret; + + if (2 == size) { +- u16 buf; ++ u16 buf = 0; + ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0); + le16_to_cpus(&buf); + *((u16 *)data) = buf; + } else if (4 == size) { +- u32 buf; ++ u32 buf = 0; + ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0); + le32_to_cpus(&buf); + *((u32 *)data) = buf; +@@ -1396,10 +1396,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) + } + + if (pkt_cnt == 0) { +- /* Skip IP alignment psudo header */ +- skb_pull(skb, 2); + skb->len = pkt_len; +- skb_set_tail_pointer(skb, pkt_len); ++ /* Skip IP alignment pseudo header */ ++ skb_pull(skb, 2); ++ skb_set_tail_pointer(skb, skb->len); + skb->truesize = pkt_len + sizeof(struct sk_buff); + ax88179_rx_checksum(skb, pkt_hdr); + return 1; +@@ -1408,8 +1408,9 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) + ax_skb = skb_clone(skb, GFP_ATOMIC); + if (ax_skb) { + ax_skb->len = pkt_len; +- ax_skb->data = skb->data + 2; +- skb_set_tail_pointer(ax_skb, pkt_len); 
++ /* Skip IP alignment pseudo header */ ++ skb_pull(ax_skb, 2); ++ skb_set_tail_pointer(ax_skb, ax_skb->len); + ax_skb->truesize = pkt_len + sizeof(struct sk_buff); + ax88179_rx_checksum(ax_skb, pkt_hdr); + usbnet_skb_return(dev, ax_skb); +diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c +index ff2270ead2e6..84e0e7f78029 100644 +--- a/drivers/net/usb/cdc-phonet.c ++++ b/drivers/net/usb/cdc-phonet.c +@@ -406,6 +406,8 @@ static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *i + + err = register_netdev(dev); + if (err) { ++ /* Set disconnected flag so that disconnect() returns early. */ ++ pnd->disconnected = 1; + usb_driver_release_interface(&usbpn_driver, data_intf); + goto out; + } +diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c +index f7180f8db39e..9c15e1a1261b 100644 +--- a/drivers/net/usb/cdc_eem.c ++++ b/drivers/net/usb/cdc_eem.c +@@ -138,10 +138,10 @@ static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb, + } + + skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags); ++ dev_kfree_skb_any(skb); + if (!skb2) + return NULL; + +- dev_kfree_skb_any(skb); + skb = skb2; + + done: +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c +index be4e56826daf..5a5db2f09f78 100644 +--- a/drivers/net/usb/cdc_ncm.c ++++ b/drivers/net/usb/cdc_ncm.c +@@ -1128,7 +1128,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) + * accordingly. Otherwise, we should check here. + */ + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) +- delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus); ++ delayed_ndp_size = ctx->max_ndp_size + ++ max_t(u32, ++ ctx->tx_ndp_modulus, ++ ctx->tx_modulus + ctx->tx_remainder) - 1; + else + delayed_ndp_size = 0; + +@@ -1281,7 +1284,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) + if (!(dev->driver_info->flags & FLAG_SEND_ZLP) && + skb_out->len > ctx->min_tx_pkt) { + padding_count = ctx->tx_max - skb_out->len; +- memset(skb_put(skb_out, padding_count), 0, padding_count); ++ if (!WARN_ON(padding_count > ctx->tx_max)) ++ memset(skb_put(skb_out, padding_count), 0, padding_count); + } else if (skb_out->len < ctx->tx_max && + (skb_out->len % dev->maxpacket) == 0) { + *skb_put(skb_out, 1) = 0; /* force short packet */ +@@ -1559,6 +1563,15 @@ cdc_ncm_speed_change(struct usbnet *dev, + uint32_t rx_speed = le32_to_cpu(data->DLBitRRate); + uint32_t tx_speed = le32_to_cpu(data->ULBitRate); + ++ /* if the speed hasn't changed, don't report it. ++ * RTL8156 shipped before 2021 sends notification about every 32ms. ++ */ ++ if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed) ++ return; ++ ++ dev->rx_speed = rx_speed; ++ dev->tx_speed = tx_speed; ++ + /* + * Currently the USB-NET API does not support reporting the actual + * device speed. Do print it instead. +@@ -1602,10 +1615,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) + * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be + * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE. + */ +- netif_info(dev, link, dev->net, +- "network connection: %sconnected\n", +- !!event->wValue ? 
"" : "dis"); +- usbnet_link_change(dev, !!event->wValue, 0); ++ if (netif_carrier_ok(dev->net) != !!event->wValue) ++ usbnet_link_change(dev, !!event->wValue, 0); + break; + + case USB_CDC_NOTIFY_SPEED_CHANGE: +@@ -1628,7 +1639,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) + static const struct driver_info cdc_ncm_info = { + .description = "CDC NCM", + .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET +- | FLAG_LINK_INTR, ++ | FLAG_LINK_INTR | FLAG_ETHER, + .bind = cdc_ncm_bind, + .unbind = cdc_ncm_unbind, + .manage_power = usbnet_manage_power, +diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c +index 0b4bdd39106b..fb18801d0fe7 100644 +--- a/drivers/net/usb/dm9601.c ++++ b/drivers/net/usb/dm9601.c +@@ -624,6 +624,10 @@ static const struct usb_device_id products[] = { + USB_DEVICE(0x0a46, 0x1269), /* DM9621A USB to Fast Ethernet Adapter */ + .driver_info = (unsigned long)&dm9601_info, + }, ++ { ++ USB_DEVICE(0x0586, 0x3427), /* ZyXEL Keenetic Plus DSL xDSL modem */ ++ .driver_info = (unsigned long)&dm9601_info, ++ }, + {}, // END + }; + +diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c +index 27fc699d8be5..5066b7bc67da 100644 +--- a/drivers/net/usb/hso.c ++++ b/drivers/net/usb/hso.c +@@ -626,7 +626,7 @@ static struct hso_serial *get_serial_by_index(unsigned index) + return serial; + } + +-static int get_free_serial_index(void) ++static int obtain_minor(struct hso_serial *serial) + { + int index; + unsigned long flags; +@@ -634,8 +634,10 @@ static int get_free_serial_index(void) + spin_lock_irqsave(&serial_table_lock, flags); + for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) { + if (serial_table[index] == NULL) { ++ serial_table[index] = serial->parent; ++ serial->minor = index; + spin_unlock_irqrestore(&serial_table_lock, flags); +- return index; ++ return 0; + } + } + spin_unlock_irqrestore(&serial_table_lock, flags); +@@ -644,15 +646,12 @@ static int get_free_serial_index(void) + return -1; + } + +-static void set_serial_by_index(unsigned index, struct hso_serial *serial) ++static void release_minor(struct hso_serial *serial) + { + unsigned long flags; + + spin_lock_irqsave(&serial_table_lock, flags); +- if (serial) +- serial_table[index] = serial->parent; +- else +- serial_table[index] = NULL; ++ serial_table[serial->minor] = NULL; + spin_unlock_irqrestore(&serial_table_lock, flags); + } + +@@ -1404,8 +1403,9 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old) + unsigned long flags; + + if (old) +- hso_dbg(0x16, "Termios called with: cflags new[%d] - old[%d]\n", +- tty->termios.c_cflag, old->c_cflag); ++ hso_dbg(0x16, "Termios called with: cflags new[%u] - old[%u]\n", ++ (unsigned int)tty->termios.c_cflag, ++ (unsigned int)old->c_cflag); + + /* the actual setup */ + spin_lock_irqsave(&serial->serial_lock, flags); +@@ -1703,7 +1703,7 @@ static int hso_serial_tiocmset(struct tty_struct *tty, + spin_unlock_irqrestore(&serial->serial_lock, flags); + + return usb_control_msg(serial->parent->usb, +- usb_rcvctrlpipe(serial->parent->usb, 0), 0x22, ++ usb_sndctrlpipe(serial->parent->usb, 0), 0x22, + 0x21, val, if_num, NULL, 0, + USB_CTRL_SET_TIMEOUT); + } +@@ -2242,6 +2242,7 @@ static int hso_stop_serial_device(struct hso_device *hso_dev) + static void hso_serial_tty_unregister(struct hso_serial *serial) + { + tty_unregister_device(tty_drv, serial->minor); ++ release_minor(serial); + } + + static void hso_serial_common_free(struct hso_serial *serial) +@@ -2266,23 +2267,23 @@ static int 
hso_serial_common_create(struct hso_serial *serial, int num_urbs, + int rx_size, int tx_size) + { + struct device *dev; +- int minor; + int i; + + tty_port_init(&serial->port); + +- minor = get_free_serial_index(); +- if (minor < 0) +- goto exit; ++ if (obtain_minor(serial)) ++ goto exit2; + + /* register our minor number */ + serial->parent->dev = tty_port_register_device_attr(&serial->port, +- tty_drv, minor, &serial->parent->interface->dev, ++ tty_drv, serial->minor, &serial->parent->interface->dev, + serial->parent, hso_serial_dev_groups); ++ if (IS_ERR(serial->parent->dev)) { ++ release_minor(serial); ++ goto exit2; ++ } + dev = serial->parent->dev; + +- /* fill in specific data for later use */ +- serial->minor = minor; + serial->magic = HSO_SERIAL_MAGIC; + spin_lock_init(&serial->serial_lock); + serial->num_rx_urbs = num_urbs; +@@ -2324,6 +2325,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, + return 0; + exit: + hso_serial_tty_unregister(serial); ++exit2: + hso_serial_common_free(serial); + return -1; + } +@@ -2449,7 +2451,7 @@ static int hso_rfkill_set_block(void *data, bool blocked) + if (hso_dev->usb_gone) + rv = 0; + else +- rv = usb_control_msg(hso_dev->usb, usb_rcvctrlpipe(hso_dev->usb, 0), ++ rv = usb_control_msg(hso_dev->usb, usb_sndctrlpipe(hso_dev->usb, 0), + enabled ? 0x82 : 0x81, 0x40, 0, 0, NULL, 0, + USB_CTRL_SET_TIMEOUT); + mutex_unlock(&hso_dev->mutex); +@@ -2674,9 +2676,6 @@ static struct hso_device *hso_create_bulk_serial_device( + + serial->write_data = hso_std_serial_write_data; + +- /* and record this serial */ +- set_serial_by_index(serial->minor, serial); +- + /* setup the proc dirs and files if needed */ + hso_log_port(hso_dev); + +@@ -2733,9 +2732,6 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface, + serial->shared_int->ref_count++; + mutex_unlock(&serial->shared_int->shared_int_lock); + +- /* and record this serial */ +- set_serial_by_index(serial->minor, serial); +- + /* setup the proc dirs and files if needed */ + hso_log_port(hso_dev); + +@@ -3119,8 +3115,7 @@ static void hso_free_interface(struct usb_interface *interface) + cancel_work_sync(&serial_table[i]->async_put_intf); + cancel_work_sync(&serial_table[i]->async_get_intf); + hso_serial_tty_unregister(serial); +- kref_put(&serial_table[i]->ref, hso_serial_ref_free); +- set_serial_by_index(i, NULL); ++ kref_put(&serial->parent->ref, hso_serial_ref_free); + } + } + +diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c +index 2b16a5fed9de..0cf5324d493e 100644 +--- a/drivers/net/usb/ipheth.c ++++ b/drivers/net/usb/ipheth.c +@@ -70,7 +70,7 @@ + #define IPHETH_USBINTF_SUBCLASS 253 + #define IPHETH_USBINTF_PROTO 1 + +-#define IPHETH_BUF_SIZE 1516 ++#define IPHETH_BUF_SIZE 1514 + #define IPHETH_IP_ALIGN 2 /* padding at front of URB */ + #define IPHETH_TX_TIMEOUT (5 * HZ) + +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c +index 075d7e50a52e..ab508c472ed1 100644 +--- a/drivers/net/usb/lan78xx.c ++++ b/drivers/net/usb/lan78xx.c +@@ -315,10 +315,6 @@ struct lan78xx_net { + struct tasklet_struct bh; + struct delayed_work wq; + +- struct usb_host_endpoint *ep_blkin; +- struct usb_host_endpoint *ep_blkout; +- struct usb_host_endpoint *ep_intr; +- + int msg_enable; + + struct urb *urb_intr; +@@ -2560,78 +2556,12 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net) + return NETDEV_TX_OK; + } + +-static int +-lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf) +-{ +- int tmp; +- 
struct usb_host_interface *alt = NULL; +- struct usb_host_endpoint *in = NULL, *out = NULL; +- struct usb_host_endpoint *status = NULL; +- +- for (tmp = 0; tmp < intf->num_altsetting; tmp++) { +- unsigned ep; +- +- in = NULL; +- out = NULL; +- status = NULL; +- alt = intf->altsetting + tmp; +- +- for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) { +- struct usb_host_endpoint *e; +- int intr = 0; +- +- e = alt->endpoint + ep; +- switch (e->desc.bmAttributes) { +- case USB_ENDPOINT_XFER_INT: +- if (!usb_endpoint_dir_in(&e->desc)) +- continue; +- intr = 1; +- /* FALLTHROUGH */ +- case USB_ENDPOINT_XFER_BULK: +- break; +- default: +- continue; +- } +- if (usb_endpoint_dir_in(&e->desc)) { +- if (!intr && !in) +- in = e; +- else if (intr && !status) +- status = e; +- } else { +- if (!out) +- out = e; +- } +- } +- if (in && out) +- break; +- } +- if (!alt || !in || !out) +- return -EINVAL; +- +- dev->pipe_in = usb_rcvbulkpipe(dev->udev, +- in->desc.bEndpointAddress & +- USB_ENDPOINT_NUMBER_MASK); +- dev->pipe_out = usb_sndbulkpipe(dev->udev, +- out->desc.bEndpointAddress & +- USB_ENDPOINT_NUMBER_MASK); +- dev->ep_intr = status; +- +- return 0; +-} +- + static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) + { + struct lan78xx_priv *pdata = NULL; + int ret; + int i; + +- ret = lan78xx_get_endpoints(dev, intf); +- if (ret) { +- netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n", +- ret); +- return ret; +- } +- + dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL); + + pdata = (struct lan78xx_priv *)(dev->data[0]); +@@ -3339,6 +3269,7 @@ static void lan78xx_stat_monitor(unsigned long param) + static int lan78xx_probe(struct usb_interface *intf, + const struct usb_device_id *id) + { ++ struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr; + struct lan78xx_net *dev; + struct net_device *netdev; + struct usb_device *udev; +@@ -3389,6 +3320,34 @@ static int lan78xx_probe(struct usb_interface *intf, + + mutex_init(&dev->stats.access_lock); + ++ if (intf->cur_altsetting->desc.bNumEndpoints < 3) { ++ ret = -ENODEV; ++ goto out2; ++ } ++ ++ dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); ++ ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in); ++ if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) { ++ ret = -ENODEV; ++ goto out2; ++ } ++ ++ dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); ++ ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out); ++ if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) { ++ ret = -ENODEV; ++ goto out2; ++ } ++ ++ ep_intr = &intf->cur_altsetting->endpoint[2]; ++ if (!usb_endpoint_is_int_in(&ep_intr->desc)) { ++ ret = -ENODEV; ++ goto out2; ++ } ++ ++ dev->pipe_intr = usb_rcvintpipe(dev->udev, ++ usb_endpoint_num(&ep_intr->desc)); ++ + ret = lan78xx_bind(dev, intf); + if (ret < 0) + goto out2; +@@ -3398,18 +3357,7 @@ static int lan78xx_probe(struct usb_interface *intf, + netdev->mtu = dev->hard_mtu - netdev->hard_header_len; + netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER); + +- dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0; +- dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1; +- dev->ep_intr = (intf->cur_altsetting)->endpoint + 2; +- +- dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); +- dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); +- +- dev->pipe_intr = usb_rcvintpipe(dev->udev, +- dev->ep_intr->desc.bEndpointAddress & +- USB_ENDPOINT_NUMBER_MASK); +- period = dev->ep_intr->desc.bInterval; +- ++ period = ep_intr->desc.bInterval; + maxp = usb_maxpacket(dev->udev, 
dev->pipe_intr, 0); + buf = kmalloc(maxp, GFP_KERNEL); + if (buf) { +@@ -3422,6 +3370,7 @@ static int lan78xx_probe(struct usb_interface *intf, + usb_fill_int_urb(dev->urb_intr, dev->udev, + dev->pipe_intr, buf, maxp, + intr_complete, dev, period); ++ dev->urb_intr->transfer_flags |= URB_FREE_BUFFER; + } + } + +diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c +index 5fe9f1273fe2..6cfc6faf9747 100644 +--- a/drivers/net/usb/pegasus.c ++++ b/drivers/net/usb/pegasus.c +@@ -755,12 +755,16 @@ static inline void disable_net_traffic(pegasus_t *pegasus) + set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp); + } + +-static inline void get_interrupt_interval(pegasus_t *pegasus) ++static inline int get_interrupt_interval(pegasus_t *pegasus) + { + u16 data; + u8 interval; ++ int ret; ++ ++ ret = read_eprom_word(pegasus, 4, &data); ++ if (ret < 0) ++ return ret; + +- read_eprom_word(pegasus, 4, &data); + interval = data >> 8; + if (pegasus->usb->speed != USB_SPEED_HIGH) { + if (interval < 0x80) { +@@ -775,6 +779,8 @@ static inline void get_interrupt_interval(pegasus_t *pegasus) + } + } + pegasus->intr_interval = interval; ++ ++ return 0; + } + + static void set_carrier(struct net_device *net) +@@ -1191,7 +1197,9 @@ static int pegasus_probe(struct usb_interface *intf, + | NETIF_MSG_PROBE | NETIF_MSG_LINK); + + pegasus->features = usb_dev_id[dev_index].private; +- get_interrupt_interval(pegasus); ++ res = get_interrupt_interval(pegasus); ++ if (res) ++ goto out2; + if (reset_mac(pegasus)) { + dev_err(&intf->dev, "can't reset MAC\n"); + res = -EIO; +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index 9a873616dd27..a8c960152a35 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -705,7 +705,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x05c6, 0x9011, 4)}, + {QMI_FIXED_INTF(0x05c6, 0x9021, 1)}, + {QMI_FIXED_INTF(0x05c6, 0x9022, 2)}, +- {QMI_FIXED_INTF(0x05c6, 0x9025, 4)}, /* Alcatel-sbell ASB TL131 TDD LTE (China Mobile) */ ++ {QMI_QUIRK_SET_DTR(0x05c6, 0x9025, 4)}, /* Alcatel-sbell ASB TL131 TDD LTE (China Mobile) */ + {QMI_FIXED_INTF(0x05c6, 0x9026, 3)}, + {QMI_FIXED_INTF(0x05c6, 0x902e, 5)}, + {QMI_FIXED_INTF(0x05c6, 0x9031, 5)}, +@@ -881,6 +881,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x19d2, 0x1255, 4)}, + {QMI_FIXED_INTF(0x19d2, 0x1256, 4)}, + {QMI_FIXED_INTF(0x19d2, 0x1270, 5)}, /* ZTE MF667 */ ++ {QMI_FIXED_INTF(0x19d2, 0x1275, 3)}, /* ZTE P685M */ + {QMI_FIXED_INTF(0x19d2, 0x1401, 2)}, + {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */ + {QMI_FIXED_INTF(0x19d2, 0x1424, 2)}, +@@ -890,6 +891,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ + {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ + {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ ++ {QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */ + {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ + {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ + {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */ +@@ -910,6 +912,8 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ + {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, + {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ ++ {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */ ++ {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */ + {QMI_FIXED_INTF(0x1199, 
0x9071, 8)}, /* Sierra Wireless MC74xx */ + {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ + {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ +@@ -923,10 +927,13 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ ++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ + {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ + {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ + {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ +- {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ ++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ ++ {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ ++ {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */ + {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ + {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ + {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ +@@ -936,12 +943,14 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */ + {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ + {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ ++ {QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */ + {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ + {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */ + {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */ + {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */ + {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/ + {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */ ++ {QMI_FIXED_INTF(0x1e2d, 0x00b7, 0)}, /* Cinterion MV31 RmNet */ + {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ +@@ -962,6 +971,7 @@ static const struct usb_device_id products[] = { + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ ++ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */ + {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */ + +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c +index 6e74965d26a0..96f6edcb0062 100644 +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -3347,9 +3347,10 @@ static int rtl8152_close(struct net_device *netdev) + tp->rtl_ops.down(tp); + + mutex_unlock(&tp->control); ++ } + ++ if (!res) + usb_autopm_put_interface(tp->intf); +- } + + free_all_mem(tp); + +@@ -3938,7 +3939,7 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data) + { + switch (stringset) { + case ETH_SS_STATS: +- memcpy(data, *rtl8152_gstrings, sizeof(rtl8152_gstrings)); ++ memcpy(data, rtl8152_gstrings, sizeof(rtl8152_gstrings)); + break; + } + } +diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c +index 
4f4f71b2966b..dc5b8e69f8e8 100644 +--- a/drivers/net/usb/rndis_host.c ++++ b/drivers/net/usb/rndis_host.c +@@ -213,7 +213,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen) + dev_dbg(&info->control->dev, + "rndis response error, code %d\n", retval); + } +- msleep(20); ++ msleep(40); + } + dev_dbg(&info->control->dev, "rndis response timeout\n"); + return -ETIMEDOUT; +@@ -398,7 +398,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) + reply_len = sizeof *phym; + retval = rndis_query(dev, intf, u.buf, + RNDIS_OID_GEN_PHYSICAL_MEDIUM, +- 0, (void **) &phym, &reply_len); ++ reply_len, (void **)&phym, &reply_len); + if (retval != 0 || !phym) { + /* OID is optional so don't fail here. */ + phym_unspec = cpu_to_le32(RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED); +diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c +index 9504800217ed..445549372368 100644 +--- a/drivers/net/usb/rtl8150.c ++++ b/drivers/net/usb/rtl8150.c +@@ -277,12 +277,20 @@ static int write_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 reg) + return 1; + } + +-static inline void set_ethernet_addr(rtl8150_t * dev) ++static void set_ethernet_addr(rtl8150_t *dev) + { +- u8 node_id[6]; ++ u8 node_id[ETH_ALEN]; ++ int ret; ++ ++ ret = get_registers(dev, IDR, sizeof(node_id), node_id); + +- get_registers(dev, IDR, sizeof(node_id), node_id); +- memcpy(dev->netdev->dev_addr, node_id, sizeof(node_id)); ++ if (ret == sizeof(node_id)) { ++ ether_addr_copy(dev->netdev->dev_addr, node_id); ++ } else { ++ eth_hw_addr_random(dev->netdev); ++ netdev_notice(dev->netdev, "Assigned a random MAC address: %pM\n", ++ dev->netdev->dev_addr); ++ } + } + + static int rtl8150_set_mac_address(struct net_device *netdev, void *p) +diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c +index 977d9c772554..841d97491592 100644 +--- a/drivers/net/usb/smsc75xx.c ++++ b/drivers/net/usb/smsc75xx.c +@@ -1497,7 +1497,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) + ret = smsc75xx_wait_ready(dev, 0); + if (ret < 0) { + netdev_warn(dev->net, "device not ready in smsc75xx_bind\n"); +- return ret; ++ goto free_pdata; + } + + smsc75xx_init_mac_address(dev); +@@ -1506,7 +1506,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) + ret = smsc75xx_reset(dev); + if (ret < 0) { + netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret); +- return ret; ++ goto cancel_work; + } + + dev->net->netdev_ops = &smsc75xx_netdev_ops; +@@ -1515,6 +1515,13 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) + dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD; + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; + return 0; ++ ++cancel_work: ++ cancel_work_sync(&pdata->set_multicast); ++free_pdata: ++ kfree(pdata); ++ dev->data[0] = 0; ++ return ret; + } + + static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf) +@@ -1524,7 +1531,6 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf) + cancel_work_sync(&pdata->set_multicast); + netif_dbg(dev, ifdown, dev->net, "free pdata\n"); + kfree(pdata); +- pdata = NULL; + dev->data[0] = 0; + } + } +diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c +index e719ecd69d01..3e6bf6bd0a68 100644 +--- a/drivers/net/usb/smsc95xx.c ++++ b/drivers/net/usb/smsc95xx.c +@@ -1292,11 +1292,14 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) + + /* Init all registers */ + ret = smsc95xx_reset(dev); ++ if 
(ret) ++ goto free_pdata; + + /* detect device revision as different features may be available */ + ret = smsc95xx_read_reg(dev, ID_REV, &val); + if (ret < 0) +- return ret; ++ goto free_pdata; ++ + val >>= 16; + pdata->chip_id = val; + pdata->mdix_ctrl = get_mdix_status(dev->net); +@@ -1320,6 +1323,10 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) + schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); + + return 0; ++ ++free_pdata: ++ kfree(pdata); ++ return ret; + } + + static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) +@@ -1327,7 +1334,7 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) + struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); + + if (pdata) { +- cancel_delayed_work(&pdata->carrier_check); ++ cancel_delayed_work_sync(&pdata->carrier_check); + netif_dbg(dev, ifdown, dev->net, "free pdata\n"); + kfree(pdata); + pdata = NULL; +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c +index 7118b8263760..a3447f8eaf66 100644 +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -1357,14 +1357,16 @@ static int virtnet_set_channels(struct net_device *dev, + + get_online_cpus(); + err = virtnet_set_queues(vi, queue_pairs); +- if (!err) { +- netif_set_real_num_tx_queues(dev, queue_pairs); +- netif_set_real_num_rx_queues(dev, queue_pairs); +- +- virtnet_set_affinity(vi); ++ if (err) { ++ put_online_cpus(); ++ goto err; + } ++ virtnet_set_affinity(vi); + put_online_cpus(); + ++ netif_set_real_num_tx_queues(dev, queue_pairs); ++ netif_set_real_num_rx_queues(dev, queue_pairs); ++err: + return err; + } + +diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c +index aabc6ef366b4..d63b83605748 100644 +--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c ++++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c +@@ -691,6 +691,8 @@ vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!p) + return 0; ++ if (n > UPT1_RSS_MAX_IND_TABLE_SIZE) ++ return 0; + while (n--) + p[n] = rssConf->indTable[n]; + return 0; +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c +index 58ddb6c90418..0bfadec8b79c 100644 +--- a/drivers/net/vxlan.c ++++ b/drivers/net/vxlan.c +@@ -889,6 +889,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, + for (h = 0; h < FDB_HASH_SIZE; ++h) { + struct vxlan_fdb *f; + ++ rcu_read_lock(); + hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { + struct vxlan_rdst *rd; + +@@ -901,12 +902,15 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, + cb->nlh->nlmsg_seq, + RTM_NEWNEIGH, + NLM_F_MULTI, rd); +- if (err < 0) ++ if (err < 0) { ++ rcu_read_unlock(); + goto out; ++ } + skip: + *idx += 1; + } + } ++ rcu_read_unlock(); + } + out: + return err; +@@ -1521,6 +1525,10 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request, + daddr = eth_hdr(request)->h_source; + ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns); + for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { ++ if (!ns->opt[i + 1]) { ++ kfree_skb(reply); ++ return NULL; ++ } + if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { + daddr = ns->opt + i + sizeof(struct nd_opt_hdr); + break; +@@ -1586,6 +1594,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) + struct neighbour *n; + struct inet6_dev *in6_dev; + ++ rcu_read_lock(); + in6_dev = __in6_dev_get(dev); + if (!in6_dev) + goto out; +@@ -1642,6 +1651,7 @@ static 
int neigh_reduce(struct net_device *dev, struct sk_buff *skb) + } + + out: ++ rcu_read_unlock(); + consume_skb(skb); + return NETDEV_TX_OK; + } +@@ -2102,7 +2112,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, + else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) + df = htons(IP_DF); + +- tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); ++ tos = ip_tunnel_ecn_encap(tos, old_iph, skb); + ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); + err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr), + vni, md, flags, udp_sum); +@@ -2161,7 +2171,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, + if (!info) + udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); + +- tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); ++ tos = ip_tunnel_ecn_encap(tos, old_iph, skb); + ttl = ttl ? : ip6_dst_hoplimit(ndst); + skb_scrub_packet(skb, xnet); + err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), +diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig +index 4e9fe75d7067..069f933b0add 100644 +--- a/drivers/net/wan/Kconfig ++++ b/drivers/net/wan/Kconfig +@@ -295,6 +295,7 @@ config SLIC_DS26522 + tristate "Slic Maxim ds26522 card support" + depends on SPI + depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST ++ select BITREVERSE + help + This module initializes and configures the slic maxim card + in T1 or E1 mode. +diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c +index b87fe0a01c69..3c02473a20f2 100644 +--- a/drivers/net/wan/cosa.c ++++ b/drivers/net/wan/cosa.c +@@ -903,6 +903,7 @@ static ssize_t cosa_write(struct file *file, + chan->tx_status = 1; + spin_unlock_irqrestore(&cosa->lock, flags); + up(&chan->wsem); ++ kfree(kbuf); + return -ERESTARTSYS; + } + } +diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c +index 87bf05a81db5..fc7d28edee07 100644 +--- a/drivers/net/wan/fsl_ucc_hdlc.c ++++ b/drivers/net/wan/fsl_ucc_hdlc.c +@@ -169,13 +169,17 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) + + priv->rx_skbuff = kzalloc(priv->rx_ring_size * sizeof(*priv->rx_skbuff), + GFP_KERNEL); +- if (!priv->rx_skbuff) ++ if (!priv->rx_skbuff) { ++ ret = -ENOMEM; + goto free_ucc_pram; ++ } + + priv->tx_skbuff = kzalloc(priv->tx_ring_size * sizeof(*priv->tx_skbuff), + GFP_KERNEL); +- if (!priv->tx_skbuff) ++ if (!priv->tx_skbuff) { ++ ret = -ENOMEM; + goto free_rx_skbuff; ++ } + + priv->skb_curtx = 0; + priv->skb_dirtytx = 0; +diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c +index 9bd4aa8083ce..6061bff85523 100644 +--- a/drivers/net/wan/hdlc.c ++++ b/drivers/net/wan/hdlc.c +@@ -57,7 +57,15 @@ int hdlc_change_mtu(struct net_device *dev, int new_mtu) + static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *p, struct net_device *orig_dev) + { +- struct hdlc_device *hdlc = dev_to_hdlc(dev); ++ struct hdlc_device *hdlc; ++ ++ /* First make sure "dev" is an HDLC device */ ++ if (!(dev->priv_flags & IFF_WAN_HDLC)) { ++ kfree_skb(skb); ++ return NET_RX_SUCCESS; ++ } ++ ++ hdlc = dev_to_hdlc(dev); + + if (!net_eq(dev_net(dev), &init_net)) { + kfree_skb(skb); +diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c +index a408abc25512..1587789ba934 100644 +--- a/drivers/net/wan/hdlc_cisco.c ++++ b/drivers/net/wan/hdlc_cisco.c +@@ -120,6 +120,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type, + skb_put(skb, sizeof(struct cisco_packet)); + skb->priority = TC_PRIO_CONTROL; + skb->dev = dev; ++ skb->protocol = htons(ETH_P_HDLC); 
+ skb_reset_network_header(skb); + + dev_queue_xmit(skb); +@@ -377,6 +378,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) + memcpy(&state(hdlc)->settings, &new_settings, size); + spin_lock_init(&state(hdlc)->lock); + dev->header_ops = &cisco_header_ops; ++ dev->hard_header_len = sizeof(struct hdlc_header); + dev->type = ARPHRD_CISCO; + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); + netif_dormant_on(dev); +diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c +index b6e0cfb095d3..487605d2b389 100644 +--- a/drivers/net/wan/hdlc_fr.c ++++ b/drivers/net/wan/hdlc_fr.c +@@ -275,63 +275,69 @@ static inline struct net_device **get_dev_p(struct pvc_device *pvc, + + static int fr_hard_header(struct sk_buff **skb_p, u16 dlci) + { +- u16 head_len; + struct sk_buff *skb = *skb_p; + +- switch (skb->protocol) { +- case cpu_to_be16(NLPID_CCITT_ANSI_LMI): +- head_len = 4; +- skb_push(skb, head_len); +- skb->data[3] = NLPID_CCITT_ANSI_LMI; +- break; +- +- case cpu_to_be16(NLPID_CISCO_LMI): +- head_len = 4; +- skb_push(skb, head_len); +- skb->data[3] = NLPID_CISCO_LMI; +- break; +- +- case cpu_to_be16(ETH_P_IP): +- head_len = 4; +- skb_push(skb, head_len); +- skb->data[3] = NLPID_IP; +- break; +- +- case cpu_to_be16(ETH_P_IPV6): +- head_len = 4; +- skb_push(skb, head_len); +- skb->data[3] = NLPID_IPV6; +- break; +- +- case cpu_to_be16(ETH_P_802_3): +- head_len = 10; +- if (skb_headroom(skb) < head_len) { +- struct sk_buff *skb2 = skb_realloc_headroom(skb, +- head_len); ++ if (!skb->dev) { /* Control packets */ ++ switch (dlci) { ++ case LMI_CCITT_ANSI_DLCI: ++ skb_push(skb, 4); ++ skb->data[3] = NLPID_CCITT_ANSI_LMI; ++ break; ++ ++ case LMI_CISCO_DLCI: ++ skb_push(skb, 4); ++ skb->data[3] = NLPID_CISCO_LMI; ++ break; ++ ++ default: ++ return -EINVAL; ++ } ++ ++ } else if (skb->dev->type == ARPHRD_DLCI) { ++ switch (skb->protocol) { ++ case htons(ETH_P_IP): ++ skb_push(skb, 4); ++ skb->data[3] = NLPID_IP; ++ break; ++ ++ case htons(ETH_P_IPV6): ++ skb_push(skb, 4); ++ skb->data[3] = NLPID_IPV6; ++ break; ++ ++ default: ++ skb_push(skb, 10); ++ skb->data[3] = FR_PAD; ++ skb->data[4] = NLPID_SNAP; ++ /* OUI 00-00-00 indicates an Ethertype follows */ ++ skb->data[5] = 0x00; ++ skb->data[6] = 0x00; ++ skb->data[7] = 0x00; ++ /* This should be an Ethertype: */ ++ *(__be16 *)(skb->data + 8) = skb->protocol; ++ } ++ ++ } else if (skb->dev->type == ARPHRD_ETHER) { ++ if (skb_headroom(skb) < 10) { ++ struct sk_buff *skb2 = skb_realloc_headroom(skb, 10); + if (!skb2) + return -ENOBUFS; + dev_kfree_skb(skb); + skb = *skb_p = skb2; + } +- skb_push(skb, head_len); ++ skb_push(skb, 10); + skb->data[3] = FR_PAD; + skb->data[4] = NLPID_SNAP; +- skb->data[5] = FR_PAD; ++ /* OUI 00-80-C2 stands for the 802.1 organization */ ++ skb->data[5] = 0x00; + skb->data[6] = 0x80; + skb->data[7] = 0xC2; ++ /* PID 00-07 stands for Ethernet frames without FCS */ + skb->data[8] = 0x00; +- skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */ +- break; ++ skb->data[9] = 0x07; + +- default: +- head_len = 10; +- skb_push(skb, head_len); +- skb->data[3] = FR_PAD; +- skb->data[4] = NLPID_SNAP; +- skb->data[5] = FR_PAD; +- skb->data[6] = FR_PAD; +- skb->data[7] = FR_PAD; +- *(__be16*)(skb->data + 8) = skb->protocol; ++ } else { ++ return -EINVAL; + } + + dlci_to_q922(skb->data, dlci); +@@ -427,14 +433,16 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev) + skb_put(skb, pad); + memset(skb->data + len, 0, pad); + } +- skb->protocol = cpu_to_be16(ETH_P_802_3); + } ++ 
skb->dev = dev; + if (!fr_hard_header(&skb, pvc->dlci)) { + dev->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + if (pvc->state.fecn) /* TX Congestion counter */ + dev->stats.tx_compressed++; + skb->dev = pvc->frad; ++ skb->protocol = htons(ETH_P_HDLC); ++ skb_reset_network_header(skb); + dev_queue_xmit(skb); + return NETDEV_TX_OK; + } +@@ -494,10 +502,8 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) + memset(skb->data, 0, len); + skb_reserve(skb, 4); + if (lmi == LMI_CISCO) { +- skb->protocol = cpu_to_be16(NLPID_CISCO_LMI); + fr_hard_header(&skb, LMI_CISCO_DLCI); + } else { +- skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI); + fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI); + } + data = skb_tail_pointer(skb); +@@ -557,6 +563,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) + skb_put(skb, i); + skb->priority = TC_PRIO_CONTROL; + skb->dev = dev; ++ skb->protocol = htons(ETH_P_HDLC); + skb_reset_network_header(skb); + + dev_queue_xmit(skb); +diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c +index 8a9aced850be..fe543099768c 100644 +--- a/drivers/net/wan/hdlc_ppp.c ++++ b/drivers/net/wan/hdlc_ppp.c +@@ -254,6 +254,7 @@ static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code, + + skb->priority = TC_PRIO_CONTROL; + skb->dev = dev; ++ skb->protocol = htons(ETH_P_HDLC); + skb_reset_network_header(skb); + skb_queue_tail(&tx_queue, skb); + } +@@ -386,11 +387,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, + } + + for (opt = data; len; len -= opt[1], opt += opt[1]) { +- if (len < 2 || len < opt[1]) { +- dev->stats.rx_errors++; +- kfree(out); +- return; /* bad packet, drop silently */ +- } ++ if (len < 2 || opt[1] < 2 || len < opt[1]) ++ goto err_out; + + if (pid == PID_LCP) + switch (opt[0]) { +@@ -398,6 +396,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, + continue; /* MRU always OK and > 1500 bytes? */ + + case LCP_OPTION_ACCM: /* async control character map */ ++ if (opt[1] < sizeof(valid_accm)) ++ goto err_out; + if (!memcmp(opt, valid_accm, + sizeof(valid_accm))) + continue; +@@ -409,6 +409,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, + } + break; + case LCP_OPTION_MAGIC: ++ if (len < 6) ++ goto err_out; + if (opt[1] != 6 || (!opt[2] && !opt[3] && + !opt[4] && !opt[5])) + break; /* reject invalid magic number */ +@@ -427,6 +429,11 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, + ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data); + + kfree(out); ++ return; ++ ++err_out: ++ dev->stats.rx_errors++; ++ kfree(out); + } + + static int ppp_rx(struct sk_buff *skb) +@@ -565,6 +572,13 @@ static void ppp_timer(unsigned long arg) + unsigned long flags; + + spin_lock_irqsave(&ppp->lock, flags); ++ /* mod_timer could be called after we entered this function but ++ * before we got the lock. 
++ */ ++ if (timer_pending(&proto->timer)) { ++ spin_unlock_irqrestore(&ppp->lock, flags); ++ return; ++ } + switch (proto->state) { + case STOPPING: + case REQ_SENT: +diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c +index 2f11836078ab..1be781f8ffc1 100644 +--- a/drivers/net/wan/hdlc_raw_eth.c ++++ b/drivers/net/wan/hdlc_raw_eth.c +@@ -101,6 +101,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr) + old_qlen = dev->tx_queue_len; + ether_setup(dev); + dev->tx_queue_len = old_qlen; ++ dev->priv_flags &= ~IFF_TX_SKB_SHARING; + eth_hw_addr_random(dev); + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); + netif_dormant_off(dev); +diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c +index 6676607164d6..24daa1d0e9c5 100644 +--- a/drivers/net/wan/lapbether.c ++++ b/drivers/net/wan/lapbether.c +@@ -56,6 +56,8 @@ struct lapbethdev { + struct list_head node; + struct net_device *ethdev; /* link to ethernet device */ + struct net_device *axdev; /* lapbeth device (lapb#) */ ++ bool up; ++ spinlock_t up_lock; /* Protects "up" */ + }; + + static LIST_HEAD(lapbeth_devices); +@@ -103,8 +105,9 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe + rcu_read_lock(); + lapbeth = lapbeth_get_x25_dev(dev); + if (!lapbeth) +- goto drop_unlock; +- if (!netif_running(lapbeth->axdev)) ++ goto drop_unlock_rcu; ++ spin_lock_bh(&lapbeth->up_lock); ++ if (!lapbeth->up) + goto drop_unlock; + + len = skb->data[0] + skb->data[1] * 256; +@@ -119,11 +122,14 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe + goto drop_unlock; + } + out: ++ spin_unlock_bh(&lapbeth->up_lock); + rcu_read_unlock(); + return 0; + drop_unlock: + kfree_skb(skb); + goto out; ++drop_unlock_rcu: ++ rcu_read_unlock(); + drop: + kfree_skb(skb); + return 0; +@@ -151,13 +157,17 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb) + static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, + struct net_device *dev) + { ++ struct lapbethdev *lapbeth = netdev_priv(dev); + int err; + +- /* +- * Just to be *really* sure not to send anything if the interface +- * is down, the ethernet device may have gone. ++ spin_lock_bh(&lapbeth->up_lock); ++ if (!lapbeth->up) ++ goto drop; ++ ++ /* There should be a pseudo header of 1 byte added by upper layers. ++ * Check to make sure it is there before reading it. 
+ */ +- if (!netif_running(dev)) ++ if (skb->len < 1) + goto drop; + + switch (skb->data[0]) { +@@ -182,6 +192,7 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, + goto drop; + } + out: ++ spin_unlock_bh(&lapbeth->up_lock); + return NETDEV_TX_OK; + drop: + kfree_skb(skb); +@@ -195,8 +206,6 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) + struct net_device *dev; + int size = skb->len; + +- skb->protocol = htons(ETH_P_X25); +- + ptr = skb_push(skb, 2); + + *ptr++ = size % 256; +@@ -207,6 +216,10 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) + + skb->dev = dev = lapbeth->ethdev; + ++ skb->protocol = htons(ETH_P_DEC); ++ ++ skb_reset_network_header(skb); ++ + dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0); + + dev_queue_xmit(skb); +@@ -271,6 +284,7 @@ static const struct lapb_register_struct lapbeth_callbacks = { + */ + static int lapbeth_open(struct net_device *dev) + { ++ struct lapbethdev *lapbeth = netdev_priv(dev); + int err; + + if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) { +@@ -278,15 +292,21 @@ static int lapbeth_open(struct net_device *dev) + return -ENODEV; + } + +- netif_start_queue(dev); ++ spin_lock_bh(&lapbeth->up_lock); ++ lapbeth->up = true; ++ spin_unlock_bh(&lapbeth->up_lock); ++ + return 0; + } + + static int lapbeth_close(struct net_device *dev) + { ++ struct lapbethdev *lapbeth = netdev_priv(dev); + int err; + +- netif_stop_queue(dev); ++ spin_lock_bh(&lapbeth->up_lock); ++ lapbeth->up = false; ++ spin_unlock_bh(&lapbeth->up_lock); + + if ((err = lapb_unregister(dev)) != LAPB_OK) + pr_err("lapb_unregister error: %d\n", err); +@@ -308,7 +328,7 @@ static void lapbeth_setup(struct net_device *dev) + dev->netdev_ops = &lapbeth_netdev_ops; + dev->destructor = free_netdev; + dev->type = ARPHRD_X25; +- dev->hard_header_len = 3; ++ dev->hard_header_len = 0; + dev->mtu = 1000; + dev->addr_len = 0; + } +@@ -329,12 +349,25 @@ static int lapbeth_new_device(struct net_device *dev) + if (!ndev) + goto out; + ++ /* When transmitting data: ++ * first this driver removes a pseudo header of 1 byte, ++ * then the lapb module prepends an LAPB header of at most 3 bytes, ++ * then this driver prepends a length field of 2 bytes, ++ * then the underlying Ethernet device prepends its own header. ++ */ ++ ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len ++ + dev->needed_headroom; ++ ndev->needed_tailroom = dev->needed_tailroom; ++ + lapbeth = netdev_priv(ndev); + lapbeth->axdev = ndev; + + dev_hold(dev); + lapbeth->ethdev = dev; + ++ lapbeth->up = false; ++ spin_lock_init(&lapbeth->up_lock); ++ + rc = -EIO; + if (register_netdevice(ndev)) + goto fail; +diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c +index 04b60ed59ea0..4253ccb79975 100644 +--- a/drivers/net/wan/lmc/lmc_main.c ++++ b/drivers/net/wan/lmc/lmc_main.c +@@ -923,6 +923,8 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) + break; + default: + printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name); ++ unregister_hdlc_device(dev); ++ return -EIO; + break; + } + +diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c +index eb56bb5916be..3b983f645726 100644 +--- a/drivers/net/wan/x25_asy.c ++++ b/drivers/net/wan/x25_asy.c +@@ -186,7 +186,7 @@ static inline void x25_asy_unlock(struct x25_asy *sl) + netif_wake_queue(sl->dev); + } + +-/* Send one completely decapsulated IP datagram to the IP layer. 
*/ ++/* Send an LAPB frame to the LAPB module to process. */ + + static void x25_asy_bump(struct x25_asy *sl) + { +@@ -198,13 +198,12 @@ static void x25_asy_bump(struct x25_asy *sl) + count = sl->rcount; + dev->stats.rx_bytes += count; + +- skb = dev_alloc_skb(count+1); ++ skb = dev_alloc_skb(count); + if (skb == NULL) { + netdev_warn(sl->dev, "memory squeeze, dropping packet\n"); + dev->stats.rx_dropped++; + return; + } +- skb_push(skb, 1); /* LAPB internal control */ + memcpy(skb_put(skb, count), sl->rbuff, count); + skb->protocol = x25_type_trans(skb, sl->dev); + err = lapb_data_received(skb->dev, skb); +@@ -212,7 +211,6 @@ static void x25_asy_bump(struct x25_asy *sl) + kfree_skb(skb); + printk(KERN_DEBUG "x25_asy: data received err - %d\n", err); + } else { +- netif_rx(skb); + dev->stats.rx_packets++; + } + } +@@ -358,12 +356,21 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb, + */ + + /* +- * Called when I frame data arrives. We did the work above - throw it +- * at the net layer. ++ * Called when I frame data arrive. We add a pseudo header for upper ++ * layers and pass it to upper layers. + */ + + static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb) + { ++ if (skb_cow(skb, 1)) { ++ kfree_skb(skb); ++ return NET_RX_DROP; ++ } ++ skb_push(skb, 1); ++ skb->data[0] = X25_IFACE_DATA; ++ ++ skb->protocol = x25_type_trans(skb, dev); ++ + return netif_rx(skb); + } + +@@ -659,7 +666,7 @@ static void x25_asy_unesc(struct x25_asy *sl, unsigned char s) + switch (s) { + case X25_END: + if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && +- sl->rcount > 2) ++ sl->rcount >= 2) + x25_asy_bump(sl); + clear_bit(SLF_ESCAPE, &sl->flags); + sl->rcount = 0; +diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c +index dc6fe93ce71f..e8473047b2d1 100644 +--- a/drivers/net/wimax/i2400m/op-rfkill.c ++++ b/drivers/net/wimax/i2400m/op-rfkill.c +@@ -101,7 +101,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev, + if (cmd == NULL) + goto error_alloc; + cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_RF_CONTROL); +- cmd->hdr.length = sizeof(cmd->sw_rf); ++ cmd->hdr.length = cpu_to_le16(sizeof(cmd->sw_rf)); + cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION); + cmd->sw_rf.hdr.type = cpu_to_le16(I2400M_TLV_RF_OPERATION); + cmd->sw_rf.hdr.length = cpu_to_le16(sizeof(cmd->sw_rf.status)); +diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c +index e492c7f0d311..9f4ee1d125b6 100644 +--- a/drivers/net/wireless/ath/ar5523/ar5523.c ++++ b/drivers/net/wireless/ath/ar5523/ar5523.c +@@ -1769,6 +1769,8 @@ static struct usb_device_id ar5523_id_table[] = { + AR5523_DEVICE_UX(0x0846, 0x4300), /* Netgear / WG111U */ + AR5523_DEVICE_UG(0x0846, 0x4250), /* Netgear / WG111T */ + AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */ ++ AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect ++ SMCWUSBT-G2 */ + AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */ + AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */ + AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */ +diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c +index a3c218047597..381bff3a2103 100644 +--- a/drivers/net/wireless/ath/ath10k/htt_rx.c ++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c +@@ -100,6 +100,14 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) + BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2); + + idx = 
__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); ++ ++ if (idx < 0 || idx >= htt->rx_ring.size) { ++ ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n"); ++ idx &= htt->rx_ring.size_mask; ++ ret = -ENOMEM; ++ goto fail; ++ } ++ + while (num > 0) { + skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN); + if (!skb) { +@@ -612,6 +620,7 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, + u8 preamble = 0; + u8 group_id; + u32 info1, info2, info3; ++ u32 stbc, nsts_su; + + info1 = __le32_to_cpu(rxd->ppdu_start.info1); + info2 = __le32_to_cpu(rxd->ppdu_start.info2); +@@ -655,11 +664,16 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, + TODO check this */ + bw = info2 & 3; + sgi = info3 & 1; ++ stbc = (info2 >> 3) & 1; + group_id = (info2 >> 4) & 0x3F; + + if (GROUP_ID_IS_SU_MIMO(group_id)) { + mcs = (info3 >> 4) & 0x0F; +- nss = ((info2 >> 10) & 0x07) + 1; ++ nsts_su = ((info2 >> 10) & 0x07); ++ if (stbc) ++ nss = (nsts_su >> 2) + 1; ++ else ++ nss = (nsts_su + 1); + } else { + /* Hardware doesn't decode VHT-SIG-B into Rx descriptor + * so it's impossible to decode MCS. Also since +@@ -1568,14 +1582,62 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar, + ath10k_unchain_msdu(amsdu); + } + ++static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar, ++ struct sk_buff_head *amsdu) ++{ ++ u8 *subframe_hdr; ++ struct sk_buff *first; ++ bool is_first, is_last; ++ struct htt_rx_desc *rxd; ++ struct ieee80211_hdr *hdr; ++ size_t hdr_len, crypto_len; ++ enum htt_rx_mpdu_encrypt_type enctype; ++ int bytes_aligned = ar->hw_params.decap_align_bytes; ++ ++ first = skb_peek(amsdu); ++ ++ rxd = (void *)first->data - sizeof(*rxd); ++ hdr = (void *)rxd->rx_hdr_status; ++ ++ is_first = !!(rxd->msdu_end.common.info0 & ++ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); ++ is_last = !!(rxd->msdu_end.common.info0 & ++ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); ++ ++ /* Return in case of non-aggregated msdu */ ++ if (is_first && is_last) ++ return true; ++ ++ /* First msdu flag is not set for the first msdu of the list */ ++ if (!is_first) ++ return false; ++ ++ enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), ++ RX_MPDU_START_INFO0_ENCRYPT_TYPE); ++ ++ hdr_len = ieee80211_hdrlen(hdr->frame_control); ++ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); ++ ++ subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) + ++ crypto_len; ++ ++ /* Validate if the amsdu has a proper first subframe. ++ * There are chances a single msdu can be received as amsdu when ++ * the unauthenticated amsdu flag of a QoS header ++ * gets flipped in non-SPP AMSDU's, in such cases the first ++ * subframe has llc/snap header in place of a valid da. ++ * return false if the da matches rfc1042 pattern ++ */ ++ if (ether_addr_equal(subframe_hdr, rfc1042_header)) ++ return false; ++ ++ return true; ++} ++ + static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, + struct sk_buff_head *amsdu, + struct ieee80211_rx_status *rx_status) + { +- /* FIXME: It might be a good idea to do some fuzzy-testing to drop +- * invalid/dangerous frames. 
+- */ +- + if (!rx_status->freq) { + ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n"); + return false; +@@ -1586,6 +1648,11 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, + return false; + } + ++ if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) { ++ ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n"); ++ return false; ++ } ++ + return true; + } + +diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h +index 6038b7486f1d..e04e0260035a 100644 +--- a/drivers/net/wireless/ath/ath10k/hw.h ++++ b/drivers/net/wireless/ath/ath10k/hw.h +@@ -558,7 +558,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, + + #define TARGET_10_4_TX_DBG_LOG_SIZE 1024 + #define TARGET_10_4_NUM_WDS_ENTRIES 32 +-#define TARGET_10_4_DMA_BURST_SIZE 0 ++#define TARGET_10_4_DMA_BURST_SIZE 1 + #define TARGET_10_4_MAC_AGGR_DELIM 0 + #define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 + #define TARGET_10_4_VOW_CONFIG 0 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c +index 2294ba311c47..314cac2ce087 100644 +--- a/drivers/net/wireless/ath/ath10k/mac.c ++++ b/drivers/net/wireless/ath/ath10k/mac.c +@@ -3452,23 +3452,16 @@ bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) + static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb) + { + struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; +- int ret = 0; +- +- spin_lock_bh(&ar->data_lock); + +- if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) { ++ if (skb_queue_len_lockless(q) >= ATH10K_MAX_NUM_MGMT_PENDING) { + ath10k_warn(ar, "wmi mgmt tx queue is full\n"); +- ret = -ENOSPC; +- goto unlock; ++ return -ENOSPC; + } + +- __skb_queue_tail(q, skb); ++ skb_queue_tail(q, skb); + ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); + +-unlock: +- spin_unlock_bh(&ar->data_lock); +- +- return ret; ++ return 0; + } + + static enum ath10k_mac_tx_path +@@ -4954,6 +4947,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, + + if (arvif->nohwcrypt && + !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { ++ ret = -EINVAL; + ath10k_warn(ar, "cryptmode module param needed for sw crypto\n"); + goto err; + } +@@ -6579,7 +6573,7 @@ ath10k_mac_update_bss_chan_survey(struct ath10k *ar, + struct ieee80211_channel *channel) + { + int ret; +- enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR; ++ enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ; + + lockdep_assert_held(&ar->conf_mutex); + +diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c +index 1af3fed5a72c..1a6851827968 100644 +--- a/drivers/net/wireless/ath/ath6kl/main.c ++++ b/drivers/net/wireless/ath/ath6kl/main.c +@@ -430,6 +430,9 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr, + + ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid); + ++ if (aid < 1 || aid > AP_MAX_NUM_STA) ++ return; ++ + if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) { + struct ieee80211_mgmt *mgmt = + (struct ieee80211_mgmt *) assoc_info; +diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c +index 55609fc4e50e..73eab12cb3bd 100644 +--- a/drivers/net/wireless/ath/ath6kl/wmi.c ++++ b/drivers/net/wireless/ath/ath6kl/wmi.c +@@ -2648,6 +2648,11 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class, + return -EINVAL; + } + ++ if (tsid >= 16) { ++ ath6kl_err("invalid tsid: %d\n", tsid); ++ return -EINVAL; ++ } ++ + skb = 
ath6kl_wmi_get_new_buf(sizeof(*cmd)); + if (!skb) + return -ENOMEM; +diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h +index 7bda18c61eb6..beaeb5d07c7e 100644 +--- a/drivers/net/wireless/ath/ath9k/ath9k.h ++++ b/drivers/net/wireless/ath/ath9k/ath9k.h +@@ -179,7 +179,8 @@ struct ath_frame_info { + s8 txq; + u8 keyix; + u8 rtscts_rate; +- u8 retries : 7; ++ u8 retries : 6; ++ u8 dyn_smps : 1; + u8 baw_tracked : 1; + u8 tx_power; + enum ath9k_key_type keytype:2; +diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c +index b5e12be73f2b..7c409cd43b70 100644 +--- a/drivers/net/wireless/ath/ath9k/hif_usb.c ++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c +@@ -447,10 +447,19 @@ static void hif_usb_stop(void *hif_handle) + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); + + /* The pending URBs have to be canceled. */ ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); + list_for_each_entry_safe(tx_buf, tx_buf_tmp, + &hif_dev->tx.tx_pending, list) { ++ usb_get_urb(tx_buf->urb); ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); + usb_kill_urb(tx_buf->urb); ++ list_del(&tx_buf->list); ++ usb_free_urb(tx_buf->urb); ++ kfree(tx_buf->buf); ++ kfree(tx_buf); ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); + } ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); + + usb_kill_anchored_urbs(&hif_dev->mgmt_submitted); + } +@@ -610,6 +619,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, + hif_dev->remain_skb = nskb; + spin_unlock(&hif_dev->rx_lock); + } else { ++ if (pool_index == MAX_PKT_NUM_IN_TRANSFER) { ++ dev_err(&hif_dev->udev->dev, ++ "ath9k_htc: over RX MAX_PKT_NUM\n"); ++ goto err; ++ } + nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC); + if (!nskb) { + dev_err(&hif_dev->udev->dev, +@@ -636,9 +650,9 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, + + static void ath9k_hif_usb_rx_cb(struct urb *urb) + { +- struct sk_buff *skb = (struct sk_buff *) urb->context; +- struct hif_device_usb *hif_dev = +- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); ++ struct rx_buf *rx_buf = (struct rx_buf *)urb->context; ++ struct hif_device_usb *hif_dev = rx_buf->hif_dev; ++ struct sk_buff *skb = rx_buf->skb; + int ret; + + if (!skb) +@@ -678,14 +692,15 @@ static void ath9k_hif_usb_rx_cb(struct urb *urb) + return; + free: + kfree_skb(skb); ++ kfree(rx_buf); + } + + static void ath9k_hif_usb_reg_in_cb(struct urb *urb) + { +- struct sk_buff *skb = (struct sk_buff *) urb->context; ++ struct rx_buf *rx_buf = (struct rx_buf *)urb->context; ++ struct hif_device_usb *hif_dev = rx_buf->hif_dev; ++ struct sk_buff *skb = rx_buf->skb; + struct sk_buff *nskb; +- struct hif_device_usb *hif_dev = +- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); + int ret; + + if (!skb) +@@ -725,11 +740,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) + return; + } + ++ rx_buf->skb = nskb; ++ + usb_fill_int_urb(urb, hif_dev->udev, + usb_rcvintpipe(hif_dev->udev, + USB_REG_IN_PIPE), + nskb->data, MAX_REG_IN_BUF_SIZE, +- ath9k_hif_usb_reg_in_cb, nskb, 1); ++ ath9k_hif_usb_reg_in_cb, rx_buf, 1); + } + + resubmit: +@@ -743,6 +760,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) + return; + free: + kfree_skb(skb); ++ kfree(rx_buf); + urb->context = NULL; + } + +@@ -751,27 +769,37 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev) + struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL; + unsigned long flags; + ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); + 
list_for_each_entry_safe(tx_buf, tx_buf_tmp, + &hif_dev->tx.tx_buf, list) { ++ usb_get_urb(tx_buf->urb); ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); + usb_kill_urb(tx_buf->urb); + list_del(&tx_buf->list); + usb_free_urb(tx_buf->urb); + kfree(tx_buf->buf); + kfree(tx_buf); ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); + } ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); + + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); + hif_dev->tx.flags |= HIF_USB_TX_FLUSH; + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); + ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); + list_for_each_entry_safe(tx_buf, tx_buf_tmp, + &hif_dev->tx.tx_pending, list) { ++ usb_get_urb(tx_buf->urb); ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); + usb_kill_urb(tx_buf->urb); + list_del(&tx_buf->list); + usb_free_urb(tx_buf->urb); + kfree(tx_buf->buf); + kfree(tx_buf); ++ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); + } ++ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); + + usb_kill_anchored_urbs(&hif_dev->mgmt_submitted); + } +@@ -788,7 +816,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev) + init_usb_anchor(&hif_dev->mgmt_submitted); + + for (i = 0; i < MAX_TX_URB_NUM; i++) { +- tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL); ++ tx_buf = kzalloc(sizeof(*tx_buf), GFP_KERNEL); + if (!tx_buf) + goto err; + +@@ -825,8 +853,9 @@ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev) + + static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) + { +- struct urb *urb = NULL; ++ struct rx_buf *rx_buf = NULL; + struct sk_buff *skb = NULL; ++ struct urb *urb = NULL; + int i, ret; + + init_usb_anchor(&hif_dev->rx_submitted); +@@ -834,6 +863,12 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) + + for (i = 0; i < MAX_RX_URB_NUM; i++) { + ++ rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL); ++ if (!rx_buf) { ++ ret = -ENOMEM; ++ goto err_rxb; ++ } ++ + /* Allocate URB */ + urb = usb_alloc_urb(0, GFP_KERNEL); + if (urb == NULL) { +@@ -848,11 +883,14 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) + goto err_skb; + } + ++ rx_buf->hif_dev = hif_dev; ++ rx_buf->skb = skb; ++ + usb_fill_bulk_urb(urb, hif_dev->udev, + usb_rcvbulkpipe(hif_dev->udev, + USB_WLAN_RX_PIPE), + skb->data, MAX_RX_BUF_SIZE, +- ath9k_hif_usb_rx_cb, skb); ++ ath9k_hif_usb_rx_cb, rx_buf); + + /* Anchor URB */ + usb_anchor_urb(urb, &hif_dev->rx_submitted); +@@ -878,6 +916,8 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) + err_skb: + usb_free_urb(urb); + err_urb: ++ kfree(rx_buf); ++err_rxb: + ath9k_hif_usb_dealloc_rx_urbs(hif_dev); + return ret; + } +@@ -889,14 +929,21 @@ static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev) + + static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev) + { +- struct urb *urb = NULL; ++ struct rx_buf *rx_buf = NULL; + struct sk_buff *skb = NULL; ++ struct urb *urb = NULL; + int i, ret; + + init_usb_anchor(&hif_dev->reg_in_submitted); + + for (i = 0; i < MAX_REG_IN_URB_NUM; i++) { + ++ rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL); ++ if (!rx_buf) { ++ ret = -ENOMEM; ++ goto err_rxb; ++ } ++ + /* Allocate URB */ + urb = usb_alloc_urb(0, GFP_KERNEL); + if (urb == NULL) { +@@ -911,11 +958,14 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev) + goto err_skb; + } + ++ rx_buf->hif_dev = hif_dev; ++ rx_buf->skb = skb; ++ + usb_fill_int_urb(urb, hif_dev->udev, + usb_rcvintpipe(hif_dev->udev, + 
USB_REG_IN_PIPE), + skb->data, MAX_REG_IN_BUF_SIZE, +- ath9k_hif_usb_reg_in_cb, skb, 1); ++ ath9k_hif_usb_reg_in_cb, rx_buf, 1); + + /* Anchor URB */ + usb_anchor_urb(urb, &hif_dev->reg_in_submitted); +@@ -941,6 +991,8 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev) + err_skb: + usb_free_urb(urb); + err_urb: ++ kfree(rx_buf); ++err_rxb: + ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev); + return ret; + } +@@ -971,7 +1023,7 @@ static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev) + return -ENOMEM; + } + +-static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev) ++void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev) + { + usb_kill_anchored_urbs(&hif_dev->regout_submitted); + ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev); +@@ -1338,8 +1390,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface) + + if (hif_dev->flags & HIF_USB_READY) { + ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged); +- ath9k_htc_hw_free(hif_dev->htc_handle); + ath9k_hif_usb_dev_deinit(hif_dev); ++ ath9k_destoy_wmi(hif_dev->htc_handle->drv_priv); ++ ath9k_htc_hw_free(hif_dev->htc_handle); + } + + usb_set_intfdata(interface, NULL); +diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h +index 7c2ef7ecd98b..835264c36595 100644 +--- a/drivers/net/wireless/ath/ath9k/hif_usb.h ++++ b/drivers/net/wireless/ath/ath9k/hif_usb.h +@@ -84,6 +84,11 @@ struct tx_buf { + struct list_head list; + }; + ++struct rx_buf { ++ struct sk_buff *skb; ++ struct hif_device_usb *hif_dev; ++}; ++ + #define HIF_USB_TX_STOP BIT(0) + #define HIF_USB_TX_FLUSH BIT(1) + +@@ -131,5 +136,6 @@ struct hif_device_usb { + + int ath9k_hif_usb_init(void); + void ath9k_hif_usb_exit(void); ++void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev); + + #endif /* HTC_USB_H */ +diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c +index b65c1b661ade..09d737f3461b 100644 +--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c ++++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c +@@ -246,7 +246,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset) + if (unlikely(r)) { + ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n", + reg_offset, r); +- return -EIO; ++ return -1; + } + + return be32_to_cpu(val); +@@ -931,8 +931,9 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv, + int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, + u16 devid, char *product, u32 drv_info) + { +- struct ieee80211_hw *hw; ++ struct hif_device_usb *hif_dev; + struct ath9k_htc_priv *priv; ++ struct ieee80211_hw *hw; + int ret; + + hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops); +@@ -967,7 +968,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, + return 0; + + err_init: +- ath9k_deinit_wmi(priv); ++ ath9k_stop_wmi(priv); ++ hif_dev = (struct hif_device_usb *)htc_handle->hif_dev; ++ ath9k_hif_usb_dealloc_urbs(hif_dev); ++ ath9k_destoy_wmi(priv); + err_free: + ieee80211_free_hw(hw); + return ret; +@@ -982,7 +986,7 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug) + htc_handle->drv_priv->ah->ah_flags |= AH_UNPLUGGED; + + ath9k_deinit_device(htc_handle->drv_priv); +- ath9k_deinit_wmi(htc_handle->drv_priv); ++ ath9k_stop_wmi(htc_handle->drv_priv); + ieee80211_free_hw(htc_handle->drv_priv->hw); + } + } +diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 
b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +index 52b42ecee621..6a9c9b4ef2c9 100644 +--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c ++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +@@ -972,7 +972,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, + struct ath_htc_rx_status *rxstatus; + struct ath_rx_status rx_stats; + bool decrypt_error = false; +- __be16 rs_datalen; ++ u16 rs_datalen; + bool is_phyerr; + + if (skb->len < HTC_RX_FRAME_HEADER_SIZE) { +@@ -998,9 +998,9 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, + * which are not PHY_ERROR (short radar pulses have a length of 3) + */ + if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) { +- ath_warn(common, +- "Short RX data len, dropping (dlen: %d)\n", +- rs_datalen); ++ ath_dbg(common, ANY, ++ "Short RX data len, dropping (dlen: %d)\n", ++ rs_datalen); + goto rx_next; + } + +diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c +index fd85f996c554..625823e45d8f 100644 +--- a/drivers/net/wireless/ath/ath9k/htc_hst.c ++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c +@@ -114,6 +114,9 @@ static void htc_process_conn_rsp(struct htc_target *target, + + if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) { + epid = svc_rspmsg->endpoint_id; ++ if (epid < 0 || epid >= ENDPOINT_MAX) ++ return; ++ + service_id = be16_to_cpu(svc_rspmsg->service_id); + max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len); + endpoint = &target->endpoint[epid]; +@@ -172,6 +175,7 @@ static int htc_config_pipe_credits(struct htc_target *target) + time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); + if (!time_left) { + dev_err(target->dev, "HTC credit config timeout\n"); ++ kfree_skb(skb); + return -ETIMEDOUT; + } + +@@ -208,6 +212,7 @@ static int htc_setup_complete(struct htc_target *target) + time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); + if (!time_left) { + dev_err(target->dev, "HTC start timeout\n"); ++ kfree_skb(skb); + return -ETIMEDOUT; + } + +@@ -281,6 +286,7 @@ int htc_connect_service(struct htc_target *target, + if (!time_left) { + dev_err(target->dev, "Service connection timeout for: %d\n", + service_connreq->service_id); ++ kfree_skb(skb); + return -ETIMEDOUT; + } + +@@ -340,6 +346,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle, + + if (skb) { + htc_hdr = (struct htc_frame_hdr *) skb->data; ++ if (htc_hdr->endpoint_id >= ARRAY_SIZE(htc_handle->endpoint)) ++ goto ret; + endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id]; + skb_pull(skb, sizeof(struct htc_frame_hdr)); + +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c +index e7fca78cdd96..9d664398a41b 100644 +--- a/drivers/net/wireless/ath/ath9k/hw.c ++++ b/drivers/net/wireless/ath/ath9k/hw.c +@@ -285,7 +285,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah) + + srev = REG_READ(ah, AR_SREV); + +- if (srev == -EIO) { ++ if (srev == -1) { + ath_err(ath9k_hw_common(ah), + "Failed to read SREV register"); + return false; +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c +index 58af2fe5be3c..454bf16d6b30 100644 +--- a/drivers/net/wireless/ath/ath9k/main.c ++++ b/drivers/net/wireless/ath/ath9k/main.c +@@ -302,6 +302,11 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) + hchan = ah->curchan; + } + ++ if (!hchan) { ++ fastcc = false; ++ hchan = ath9k_cmn_get_channel(sc->hw, ah, &sc->cur_chan->chandef); ++ } ++ + if (!ath_prepare_reset(sc)) + fastcc = false; + 
+diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c +index 9c16e2a6d185..f100533eb7ad 100644 +--- a/drivers/net/wireless/ath/ath9k/wmi.c ++++ b/drivers/net/wireless/ath/ath9k/wmi.c +@@ -112,14 +112,17 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv) + return wmi; + } + +-void ath9k_deinit_wmi(struct ath9k_htc_priv *priv) ++void ath9k_stop_wmi(struct ath9k_htc_priv *priv) + { + struct wmi *wmi = priv->wmi; + + mutex_lock(&wmi->op_mutex); + wmi->stopped = true; + mutex_unlock(&wmi->op_mutex); ++} + ++void ath9k_destoy_wmi(struct ath9k_htc_priv *priv) ++{ + kfree(priv->wmi); + } + +@@ -337,6 +340,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, + ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n", + wmi_cmd_to_name(cmd_id)); + mutex_unlock(&wmi->op_mutex); ++ kfree_skb(skb); + return -ETIMEDOUT; + } + +diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h +index 380175d5ecd7..d8b912206232 100644 +--- a/drivers/net/wireless/ath/ath9k/wmi.h ++++ b/drivers/net/wireless/ath/ath9k/wmi.h +@@ -179,7 +179,6 @@ struct wmi { + }; + + struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv); +-void ath9k_deinit_wmi(struct ath9k_htc_priv *priv); + int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi, + enum htc_endpoint_id *wmi_ctrl_epid); + int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, +@@ -189,6 +188,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, + void ath9k_wmi_event_tasklet(unsigned long data); + void ath9k_fatal_work(struct work_struct *work); + void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv); ++void ath9k_stop_wmi(struct ath9k_htc_priv *priv); ++void ath9k_destoy_wmi(struct ath9k_htc_priv *priv); + + #define WMI_CMD(_wmi_cmd) \ + do { \ +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c +index 0ef27d99bef3..101bd6547265 100644 +--- a/drivers/net/wireless/ath/ath9k/xmit.c ++++ b/drivers/net/wireless/ath/ath9k/xmit.c +@@ -1255,6 +1255,11 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, + is_40, is_sgi, is_sp); + if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC)) + info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC; ++ if (rix >= 8 && fi->dyn_smps) { ++ info->rates[i].RateFlags |= ++ ATH9K_RATESERIES_RTS_CTS; ++ info->flags |= ATH9K_TXDESC_CTSENA; ++ } + + info->txpower[i] = ath_get_rate_txpower(sc, bf, rix, + is_40, false); +@@ -2158,6 +2163,7 @@ static void setup_frame_info(struct ieee80211_hw *hw, + fi->keyix = an->ps_key; + else + fi->keyix = ATH9K_TXKEYIX_INVALID; ++ fi->dyn_smps = sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC; + fi->keytype = keytype; + fi->framelen = framelen; + fi->tx_power = txpower; +diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig +index 2e34baeaf764..2b782db20fde 100644 +--- a/drivers/net/wireless/ath/carl9170/Kconfig ++++ b/drivers/net/wireless/ath/carl9170/Kconfig +@@ -15,13 +15,11 @@ config CARL9170 + + config CARL9170_LEDS + bool "SoftLED Support" +- depends on CARL9170 +- select MAC80211_LEDS +- select LEDS_CLASS +- select NEW_LEDS + default y ++ depends on CARL9170 ++ depends on MAC80211_LEDS + help +- This option is necessary, if you want your device' LEDs to blink ++ This option is necessary, if you want your device's LEDs to blink. + + Say Y, unless you need the LEDs for firmware debugging. 
+ +diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c +index 88045f93a76c..62ed0977f32c 100644 +--- a/drivers/net/wireless/ath/carl9170/fw.c ++++ b/drivers/net/wireless/ath/carl9170/fw.c +@@ -351,9 +351,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) + ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); + + if (SUPP(CARL9170FW_WLANTX_CAB)) { +- if_comb_types |= +- BIT(NL80211_IFTYPE_AP) | +- BIT(NL80211_IFTYPE_P2P_GO); ++ if_comb_types |= BIT(NL80211_IFTYPE_AP); + + #ifdef CONFIG_MAC80211_MESH + if_comb_types |= +diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c +index ffb22a04beeb..202f421e0e37 100644 +--- a/drivers/net/wireless/ath/carl9170/main.c ++++ b/drivers/net/wireless/ath/carl9170/main.c +@@ -582,11 +582,10 @@ static int carl9170_init_interface(struct ar9170 *ar, + ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) && + (vif->type != NL80211_IFTYPE_AP)); + +- /* While the driver supports HW offload in a single +- * P2P client configuration, it doesn't support HW +- * offload in the favourit, concurrent P2P GO+CLIENT +- * configuration. Hence, HW offload will always be +- * disabled for P2P. ++ /* The driver used to have P2P GO+CLIENT support, ++ * but since this was dropped and we don't know if ++ * there are any gremlins lurking in the shadows, ++ * so best we keep HW offload disabled for P2P. + */ + ar->disable_offload |= vif->p2p; + +@@ -639,18 +638,6 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw, + if (vif->type == NL80211_IFTYPE_STATION) + break; + +- /* P2P GO [master] use-case +- * Because the P2P GO station is selected dynamically +- * by all participating peers of a WIFI Direct network, +- * the driver has be able to change the main interface +- * operating mode on the fly. 
+- */ +- if (main_vif->p2p && vif->p2p && +- vif->type == NL80211_IFTYPE_AP) { +- old_main = main_vif; +- break; +- } +- + err = -EBUSY; + rcu_read_unlock(); + +diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c +index ca8797c65312..86beadf0f249 100644 +--- a/drivers/net/wireless/ath/wcn36xx/main.c ++++ b/drivers/net/wireless/ath/wcn36xx/main.c +@@ -158,7 +158,7 @@ static struct ieee80211_supported_band wcn_band_5ghz = { + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, + .mcs = { + .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, +- .rx_highest = cpu_to_le16(72), ++ .rx_highest = cpu_to_le16(150), + .tx_params = IEEE80211_HT_MCS_TX_DEFINED, + } + } +diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig +index 2ab6c5951561..2d571fd709c2 100644 +--- a/drivers/net/wireless/ath/wil6210/Kconfig ++++ b/drivers/net/wireless/ath/wil6210/Kconfig +@@ -1,6 +1,7 @@ + config WIL6210 + tristate "Wilocity 60g WiFi card wil6210 support" + select WANT_DEV_COREDUMP ++ select CRC32 + depends on CFG80211 + depends on PCI + default n +diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c +index a635fc6b1722..e57a50cc1d87 100644 +--- a/drivers/net/wireless/broadcom/b43/main.c ++++ b/drivers/net/wireless/broadcom/b43/main.c +@@ -5596,7 +5596,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) + /* fill hw info */ + ieee80211_hw_set(hw, RX_INCLUDES_FCS); + ieee80211_hw_set(hw, SIGNAL_DBM); +- ++ ieee80211_hw_set(hw, MFP_CAPABLE); + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT) | +diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c +index a5557d70689f..d1afa74aa144 100644 +--- a/drivers/net/wireless/broadcom/b43/phy_n.c ++++ b/drivers/net/wireless/broadcom/b43/phy_n.c +@@ -5320,7 +5320,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev) + + for (i = 0; i < 4; i++) { + if (dev->phy.rev >= 3) +- table[i] = coef[i]; ++ coef[i] = table[i]; + else + coef[i] = 0; + } +diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c +index 9da8bd792702..fe658a9b53cb 100644 +--- a/drivers/net/wireless/broadcom/b43legacy/main.c ++++ b/drivers/net/wireless/broadcom/b43legacy/main.c +@@ -3835,6 +3835,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev) + /* fill hw info */ + ieee80211_hw_set(hw, RX_INCLUDES_FCS); + ieee80211_hw_set(hw, SIGNAL_DBM); ++ ieee80211_hw_set(hw, MFP_CAPABLE); /* Allow WPA3 in software */ + + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_AP) | +diff --git a/drivers/net/wireless/broadcom/b43legacy/xmit.c b/drivers/net/wireless/broadcom/b43legacy/xmit.c +index 35ccf400b02c..87045e30e585 100644 +--- a/drivers/net/wireless/broadcom/b43legacy/xmit.c ++++ b/drivers/net/wireless/broadcom/b43legacy/xmit.c +@@ -571,6 +571,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev, + default: + b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n", + chanstat); ++ goto drop; + } + + memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +index 59013572fbe3..d6a4a08fd3c4 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +@@ -30,7 +30,7 @@ + #define 
BRCMF_ARP_OL_PEER_AUTO_REPLY 0x00000008 + + #define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */ +-#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0002 ++#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0004 + + #define BRCMF_STA_WME 0x00000002 /* WMM association */ + #define BRCMF_STA_AUTHE 0x00000008 /* Authenticated */ +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +index ab9f136c1593..e306e5a89dd4 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +@@ -1540,6 +1540,8 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) + BRCMF_TX_IOCTL_MAX_MSG_SIZE, + msgbuf->ioctbuf, + msgbuf->ioctbuf_handle); ++ if (msgbuf->txflow_wq) ++ destroy_workqueue(msgbuf->txflow_wq); + kfree(msgbuf); + } + return -ENOMEM; +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +index b820e80d4b4c..8b56aa627487 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +@@ -1221,6 +1221,7 @@ static int brcms_bcma_probe(struct bcma_device *pdev) + { + struct brcms_info *wl; + struct ieee80211_hw *hw; ++ int ret; + + dev_info(&pdev->dev, "mfg %x core %x rev %d class %d irq %d\n", + pdev->id.manuf, pdev->id.id, pdev->id.rev, pdev->id.class, +@@ -1245,11 +1246,16 @@ static int brcms_bcma_probe(struct bcma_device *pdev) + wl = brcms_attach(pdev); + if (!wl) { + pr_err("%s: brcms_attach failed!\n", __func__); +- return -ENODEV; ++ ret = -ENODEV; ++ goto err_free_ieee80211; + } + brcms_led_register(wl); + + return 0; ++ ++err_free_ieee80211: ++ ieee80211_free_hw(hw); ++ return ret; + } + + static int brcms_suspend(struct bcma_device *pdev) +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c +index 93d4cde0eb31..c9f48ec46f4a 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c +@@ -5090,8 +5090,10 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi) + pi->pi_fptr.radioloftget = wlc_lcnphy_get_radio_loft; + pi->pi_fptr.detach = wlc_phy_detach_lcnphy; + +- if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) ++ if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) { ++ kfree(pi->u.pi_lcnphy); + return false; ++ } + + if (LCNREV_IS(pi->pubpi.phy_rev, 1)) { + if (pi_lcn->lcnphy_tempsense_option == 3) { +diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c +index dd29f46d086b..028b37ba9425 100644 +--- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c ++++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c +@@ -649,8 +649,10 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee, + } + + if (ext->alg != IW_ENCODE_ALG_NONE) { +- memcpy(sec.keys[idx], ext->key, ext->key_len); +- sec.key_sizes[idx] = ext->key_len; ++ int key_len = clamp_val(ext->key_len, 0, SCM_KEY_LEN); ++ ++ memcpy(sec.keys[idx], ext->key, key_len); ++ sec.key_sizes[idx] = key_len; + sec.flags |= (1 << idx); + if (ext->alg == IW_ENCODE_ALG_WEP) { + sec.encode_alg[idx] = SEC_ALG_WEP; +diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c +index db2373fe8ac3..55573d090503 100644 +--- a/drivers/net/wireless/intel/iwlegacy/common.c ++++ 
b/drivers/net/wireless/intel/iwlegacy/common.c +@@ -4302,8 +4302,8 @@ il_apm_init(struct il_priv *il) + * power savings, even without L1. + */ + if (il->cfg->set_l0s) { +- pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl); +- if (lctl & PCI_EXP_LNKCTL_ASPM_L1) { ++ ret = pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl); ++ if (!ret && (lctl & PCI_EXP_LNKCTL_ASPM_L1)) { + /* L1-ASPM enabled; disable(!) L0S */ + il_set_bit(il, CSR_GIO_REG, + CSR_GIO_REG_VAL_L0S_ENABLED); +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +index f4d75ffe3d8a..7f01fb91ea66 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +@@ -518,7 +518,10 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file, + const size_t bufsz = sizeof(buf); + int pos = 0; + ++ mutex_lock(&mvm->mutex); + iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os); ++ mutex_unlock(&mvm->mutex); ++ + do_div(curr_os, NSEC_PER_USEC); + diff = curr_os - curr_gp2; + pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff); +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +index d91ab2b8d667..d46efa8d7073 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +@@ -3046,9 +3046,12 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, + aux_roc_req.apply_time_max_delay = cpu_to_le32(delay); + + IWL_DEBUG_TE(mvm, +- "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", +- channel->hw_value, req_dur, duration, delay, +- dtim_interval); ++ "ROC: Requesting to remain on channel %u for %ums\n", ++ channel->hw_value, req_dur); ++ IWL_DEBUG_TE(mvm, ++ "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", ++ duration, delay, dtim_interval); ++ + /* Set the node address */ + memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN); + +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +index 6d38eec3f9d3..a78aaf17116e 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +@@ -1104,6 +1104,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk) + reprobe = container_of(wk, struct iwl_mvm_reprobe, work); + if (device_reprobe(reprobe->dev)) + dev_err(reprobe->dev, "reprobe failed!\n"); ++ put_device(reprobe->dev); + kfree(reprobe); + module_put(THIS_MODULE); + } +@@ -1202,7 +1203,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) + module_put(THIS_MODULE); + return; + } +- reprobe->dev = mvm->trans->dev; ++ reprobe->dev = get_device(mvm->trans->dev); + INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); + schedule_work(&reprobe->work); + } else if (mvm->cur_ucode == IWL_UCODE_REGULAR) { +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +index e7b873018dca..71edbf7a42ed 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +@@ -1904,18 +1904,38 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, + void *buf, int dwords) + { + unsigned long flags; +- int offs, ret = 0; ++ int offs = 0; + u32 *vals = buf; + +- if (iwl_trans_grab_nic_access(trans, &flags)) { +- iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr); +- for 
(offs = 0; offs < dwords; offs++) +- vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); +- iwl_trans_release_nic_access(trans, &flags); +- } else { +- ret = -EBUSY; ++ while (offs < dwords) { ++ /* limit the time we spin here under lock to 1/2s */ ++ unsigned long end = jiffies + HZ / 2; ++ bool resched = false; ++ ++ if (iwl_trans_grab_nic_access(trans, &flags)) { ++ iwl_write32(trans, HBUS_TARG_MEM_RADDR, ++ addr + 4 * offs); ++ ++ while (offs < dwords) { ++ vals[offs] = iwl_read32(trans, ++ HBUS_TARG_MEM_RDAT); ++ offs++; ++ ++ if (time_after(jiffies, end)) { ++ resched = true; ++ break; ++ } ++ } ++ iwl_trans_release_nic_access(trans, &flags); ++ ++ if (resched) ++ cond_resched(); ++ } else { ++ return -EBUSY; ++ } + } +- return ret; ++ ++ return 0; + } + + static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +index 174e45d78c46..24948b28e3e7 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +@@ -676,6 +676,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + ++ if (!txq) { ++ IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n"); ++ return; ++ } ++ + spin_lock_bh(&txq->lock); + while (txq->write_ptr != txq->read_ptr) { + IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", +@@ -1505,6 +1510,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, + u32 cmd_pos; + const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; + u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; ++ unsigned long flags2; + + if (WARN(!trans->wide_cmd_header && + group_id > IWL_ALWAYS_LONG_GROUP, +@@ -1588,10 +1594,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, + goto free_dup_buf; + } + +- spin_lock_bh(&txq->lock); ++ spin_lock_irqsave(&txq->lock, flags2); + + if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { +- spin_unlock_bh(&txq->lock); ++ spin_unlock_irqrestore(&txq->lock, flags2); + + IWL_ERR(trans, "No space in command queue\n"); + iwl_op_mode_cmd_queue_full(trans->op_mode); +@@ -1752,7 +1758,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, + spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); + + out: +- spin_unlock_bh(&txq->lock); ++ spin_unlock_irqrestore(&txq->lock, flags2); + free_dup_buf: + if (idx < 0) + kfree(dup_buf); +diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +index 4e91c74fcfad..de928938c7a1 100644 +--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c ++++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +@@ -1224,13 +1224,6 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev) + if (skb->len < ETH_HLEN) + goto drop; + +- ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0); +- if (!ctx) +- goto busy; +- +- memset(ctx->buf, 0, BULK_BUF_SIZE); +- buf = ctx->buf->data; +- + tx_control = 0; + + err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control, +@@ -1238,6 +1231,13 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev) + if (err) + goto drop; + ++ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0); ++ if (!ctx) ++ goto drop; ++ ++ memset(ctx->buf, 0, BULK_BUF_SIZE); ++ buf = ctx->buf->data; ++ + { + __le16 *tx_cntl = (__le16 *)buf; + *tx_cntl = cpu_to_le16(tx_control); +diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c +index 57ad56435dda..8bc0286b4f8c 100644 +--- a/drivers/net/wireless/intersil/p54/p54pci.c ++++ b/drivers/net/wireless/intersil/p54/p54pci.c +@@ -332,10 +332,12 @@ static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb) + struct p54p_desc *desc; + dma_addr_t mapping; + u32 idx, i; ++ __le32 device_addr; + + spin_lock_irqsave(&priv->lock, flags); + idx = le32_to_cpu(ring_control->host_idx[1]); + i = idx % ARRAY_SIZE(ring_control->tx_data); ++ device_addr = ((struct p54_hdr *)skb->data)->req_id; + + mapping = pci_map_single(priv->pdev, skb->data, skb->len, + PCI_DMA_TODEVICE); +@@ -349,7 +351,7 @@ static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb) + + desc = &ring_control->tx_data[i]; + desc->host_addr = cpu_to_le32(mapping); +- desc->device_addr = ((struct p54_hdr *)skb->data)->req_id; ++ desc->device_addr = device_addr; + desc->len = cpu_to_le16(skb->len); + desc->flags = 0; + +diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c +index d0c881dd5846..f1e9cbcfdc16 100644 +--- a/drivers/net/wireless/marvell/libertas/mesh.c ++++ b/drivers/net/wireless/marvell/libertas/mesh.c +@@ -797,19 +797,6 @@ static const struct attribute_group mesh_ie_group = { + .attrs = mesh_ie_attrs, + }; + +-static void lbs_persist_config_init(struct net_device *dev) +-{ +- int ret; +- ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group); +- ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group); +-} +- +-static void lbs_persist_config_remove(struct net_device *dev) +-{ +- sysfs_remove_group(&(dev->dev.kobj), &boot_opts_group); +- sysfs_remove_group(&(dev->dev.kobj), &mesh_ie_group); +-} +- + + /*************************************************************************** + * Initializing and starting, stopping mesh +@@ -1021,6 +1008,10 @@ static int lbs_add_mesh(struct lbs_private *priv) + SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent); + + mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST; ++ 
mesh_dev->sysfs_groups[0] = &lbs_mesh_attr_group; ++ mesh_dev->sysfs_groups[1] = &boot_opts_group; ++ mesh_dev->sysfs_groups[2] = &mesh_ie_group; ++ + /* Register virtual mesh interface */ + ret = register_netdev(mesh_dev); + if (ret) { +@@ -1028,19 +1019,10 @@ static int lbs_add_mesh(struct lbs_private *priv) + goto err_free_netdev; + } + +- ret = sysfs_create_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group); +- if (ret) +- goto err_unregister; +- +- lbs_persist_config_init(mesh_dev); +- + /* Everything successful */ + ret = 0; + goto done; + +-err_unregister: +- unregister_netdev(mesh_dev); +- + err_free_netdev: + free_netdev(mesh_dev); + +@@ -1063,8 +1045,6 @@ void lbs_remove_mesh(struct lbs_private *priv) + lbs_deb_enter(LBS_DEB_MESH); + netif_stop_queue(mesh_dev); + netif_carrier_off(mesh_dev); +- sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group); +- lbs_persist_config_remove(mesh_dev); + unregister_netdev(mesh_dev); + priv->mesh_dev = NULL; + kfree(mesh_dev->ieee80211_ptr); +diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +index 94901b0041ce..c597af69f48f 100644 +--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c ++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +@@ -1446,7 +1446,8 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *mac, struct station_info *sinfo) + { + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); +- static struct mwifiex_sta_node *node; ++ struct mwifiex_sta_node *node; ++ int i; + + if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && + priv->media_connected && idx == 0) { +@@ -1456,13 +1457,10 @@ mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev, + mwifiex_send_cmd(priv, HOST_CMD_APCMD_STA_LIST, + HostCmd_ACT_GEN_GET, 0, NULL, true); + +- if (node && (&node->list == &priv->sta_list)) { +- node = NULL; +- return -ENOENT; +- } +- +- node = list_prepare_entry(node, &priv->sta_list, list); +- list_for_each_entry_continue(node, &priv->sta_list, list) { ++ i = 0; ++ list_for_each_entry(node, &priv->sta_list, list) { ++ if (i++ != idx) ++ continue; + ether_addr_copy(mac, node->mac_addr); + return mwifiex_dump_station_info(priv, node, sinfo); + } +diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h +index 395d6ece2cac..341f6ed5b355 100644 +--- a/drivers/net/wireless/marvell/mwifiex/fw.h ++++ b/drivers/net/wireless/marvell/mwifiex/fw.h +@@ -921,7 +921,7 @@ struct mwifiex_tkip_param { + struct mwifiex_aes_param { + u8 pn[WPA_PN_SIZE]; + __le16 key_len; +- u8 key[WLAN_KEY_LEN_CCMP]; ++ u8 key[WLAN_KEY_LEN_CCMP_256]; + } __packed; + + struct mwifiex_wapi_param { +diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c +index b89596c18b41..313b5d9fd08e 100644 +--- a/drivers/net/wireless/marvell/mwifiex/join.c ++++ b/drivers/net/wireless/marvell/mwifiex/join.c +@@ -877,6 +877,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, + + memset(adhoc_start->ssid, 0, IEEE80211_MAX_SSID_LEN); + ++ if (req_ssid->ssid_len > IEEE80211_MAX_SSID_LEN) ++ req_ssid->ssid_len = IEEE80211_MAX_SSID_LEN; + memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len); + + mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n", +diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c +index 5fde2e2f1fea..422a8d31ed7f 100644 +--- a/drivers/net/wireless/marvell/mwifiex/scan.c 
++++ b/drivers/net/wireless/marvell/mwifiex/scan.c +@@ -1879,7 +1879,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, + chan, CFG80211_BSS_FTYPE_UNKNOWN, + bssid, timestamp, + cap_info_bitmap, beacon_period, +- ie_buf, ie_len, rssi, GFP_KERNEL); ++ ie_buf, ie_len, rssi, GFP_ATOMIC); + if (bss) { + bss_priv = (struct mwifiex_bss_priv *)bss->priv; + bss_priv->band = band; +diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c +index 486b8c75cd1f..679cc0035514 100644 +--- a/drivers/net/wireless/marvell/mwifiex/sdio.c ++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c +@@ -2049,6 +2049,8 @@ static int mwifiex_alloc_sdio_mpa_buffers(struct mwifiex_adapter *adapter, + kfree(card->mpa_rx.buf); + card->mpa_tx.buf_size = 0; + card->mpa_rx.buf_size = 0; ++ card->mpa_tx.buf = NULL; ++ card->mpa_rx.buf = NULL; + } + + return ret; +diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +index 8548027abf71..aa84fdb70983 100644 +--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c ++++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +@@ -586,6 +586,11 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv, + { + struct host_cmd_ds_802_11_key_material *key = + &resp->params.key_material; ++ int len; ++ ++ len = le16_to_cpu(key->key_param_set.key_len); ++ if (len > sizeof(key->key_param_set.key)) ++ return -EINVAL; + + if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) { + if ((le16_to_cpu(key->key_param_set.key_info) & KEY_MCAST)) { +@@ -599,9 +604,8 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv, + + memset(priv->aes_key.key_param_set.key, 0, + sizeof(key->key_param_set.key)); +- priv->aes_key.key_param_set.key_len = key->key_param_set.key_len; +- memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key, +- le16_to_cpu(priv->aes_key.key_param_set.key_len)); ++ priv->aes_key.key_param_set.key_len = cpu_to_le16(len); ++ memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key, len); + + return 0; + } +@@ -616,9 +620,14 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, + struct host_cmd_ds_command *resp) + { + struct host_cmd_ds_802_11_key_material_v2 *key_v2; +- __le16 len; ++ int len; + + key_v2 = &resp->params.key_material_v2; ++ ++ len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len); ++ if (len > sizeof(key_v2->key_param_set.key_params.aes.key)) ++ return -EINVAL; ++ + if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) { + if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) { + mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n"); +@@ -632,12 +641,11 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, + return 0; + + memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0, +- WLAN_KEY_LEN_CCMP); ++ sizeof(key_v2->key_param_set.key_params.aes.key)); + priv->aes_key_v2.key_param_set.key_params.aes.key_len = +- key_v2->key_param_set.key_params.aes.key_len; +- len = priv->aes_key_v2.key_param_set.key_params.aes.key_len; ++ cpu_to_le16(len); + memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key, +- key_v2->key_param_set.key_params.aes.key, le16_to_cpu(len)); ++ key_v2->key_param_set.key_params.aes.key, len); + + return 0; + } +diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c +index b1b400b59d86..66cd38d4f199 100644 +--- 
a/drivers/net/wireless/marvell/mwl8k.c ++++ b/drivers/net/wireless/marvell/mwl8k.c +@@ -1459,6 +1459,7 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index) + txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL); + if (txq->skb == NULL) { + pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma); ++ txq->txd = NULL; + return -ENOMEM; + } + +diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c +index 56cad16e70ca..1b68aef03fe2 100644 +--- a/drivers/net/wireless/mediatek/mt7601u/dma.c ++++ b/drivers/net/wireless/mediatek/mt7601u/dma.c +@@ -160,8 +160,7 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e) + + if (new_p) { + /* we have one extra ref from the allocator */ +- __free_pages(e->p, MT_RX_ORDER); +- ++ put_page(e->p); + e->p = new_p; + } + } +@@ -318,7 +317,6 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, + } + + e = &q->e[q->end]; +- e->skb = skb; + usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len, + mt7601u_complete_tx, q); + ret = usb_submit_urb(e->urb, GFP_ATOMIC); +@@ -336,6 +334,7 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, + + q->end = (q->end + 1) % q->entries; + q->used++; ++ e->skb = skb; + + if (q->used >= q->entries) + ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); +diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c +index da6faea092d6..80d0f64205f8 100644 +--- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c ++++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c +@@ -106,7 +106,7 @@ mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom) + { + u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1); + +- return ~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN); ++ return (u16)~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN); + } + + static void +diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +index 18d5984b78da..e73613b9f2f5 100644 +--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c ++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +@@ -5422,7 +5422,6 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw) + ret = usb_submit_urb(urb, GFP_KERNEL); + if (ret) { + usb_unanchor_urb(urb); +- usb_free_urb(urb); + goto error; + } + +@@ -5431,6 +5430,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw) + rtl8xxxu_write32(priv, REG_USB_HIMR, val32); + + error: ++ usb_free_urb(urb); + return ret; + } + +@@ -5756,6 +5756,7 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw) + struct rtl8xxxu_priv *priv = hw->priv; + struct rtl8xxxu_rx_urb *rx_urb; + struct rtl8xxxu_tx_urb *tx_urb; ++ struct sk_buff *skb; + unsigned long flags; + int ret, i; + +@@ -5806,6 +5807,13 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw) + rx_urb->hw = hw; + + ret = rtl8xxxu_submit_rx_urb(priv, rx_urb); ++ if (ret) { ++ if (ret != -ENOMEM) { ++ skb = (struct sk_buff *)rx_urb->urb.context; ++ dev_kfree_skb(skb); ++ } ++ rtl8xxxu_queue_rx_urb(priv, rx_urb); ++ } + } + exit: + /* +diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c +index 7de18ed10db8..e1992de500b0 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/base.c ++++ b/drivers/net/wireless/realtek/rtlwifi/base.c +@@ -454,9 +454,14 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) + } + } + +-static void _rtl_init_deferred_work(struct 
ieee80211_hw *hw) ++static int _rtl_init_deferred_work(struct ieee80211_hw *hw) + { + struct rtl_priv *rtlpriv = rtl_priv(hw); ++ struct workqueue_struct *wq; ++ ++ wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name); ++ if (!wq) ++ return -ENOMEM; + + /* <1> timer */ + setup_timer(&rtlpriv->works.watchdog_timer, +@@ -465,11 +470,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw) + rtl_easy_concurrent_retrytimer_callback, (unsigned long)hw); + /* <2> work queue */ + rtlpriv->works.hw = hw; +- rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name); +- if (unlikely(!rtlpriv->works.rtl_wq)) { +- pr_err("Failed to allocate work queue\n"); +- return; +- } ++ rtlpriv->works.rtl_wq = wq; + + INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq, + (void *)rtl_watchdog_wq_callback); +@@ -481,7 +482,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw) + (void *)rtl_swlps_rfon_wq_callback); + INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq, + (void *)rtl_fwevt_wq_callback); +- ++ return 0; + } + + void rtl_deinit_deferred_work(struct ieee80211_hw *hw) +@@ -573,9 +574,7 @@ int rtl_init_core(struct ieee80211_hw *hw) + rtlmac->link_state = MAC80211_NOLINK; + + /* <6> init deferred work */ +- _rtl_init_deferred_work(hw); +- +- return 0; ++ return _rtl_init_deferred_work(hw); + } + EXPORT_SYMBOL_GPL(rtl_init_core); + +diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c +index 1f02461de261..e524573aa8a0 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/usb.c ++++ b/drivers/net/wireless/realtek/rtlwifi/usb.c +@@ -752,8 +752,11 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw) + + usb_anchor_urb(urb, &rtlusb->rx_submitted); + err = usb_submit_urb(urb, GFP_KERNEL); +- if (err) ++ if (err) { ++ usb_unanchor_urb(urb); ++ usb_free_urb(urb); + goto err_out; ++ } + usb_free_urb(urb); + } + return 0; +@@ -927,10 +930,8 @@ static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw, + + WARN_ON(NULL == skb); + _urb = usb_alloc_urb(0, GFP_ATOMIC); +- if (!_urb) { +- kfree_skb(skb); ++ if (!_urb) + return NULL; +- } + _rtl_install_trx_info(rtlusb, skb, ep_num); + usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev, + ep_num), skb->data, skb->len, _rtl_tx_complete, skb); +@@ -945,7 +946,6 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + u32 ep_num; + struct urb *_urb = NULL; +- struct sk_buff *_skb = NULL; + + WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl); + if (unlikely(IS_USB_STOP(rtlusb))) { +@@ -955,8 +955,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, + return; + } + ep_num = rtlusb->ep_map.ep_mapping[qnum]; +- _skb = skb; +- _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num); ++ _urb = _rtl_usb_tx_urb_setup(hw, skb, ep_num); + if (unlikely(!_urb)) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Can't allocate urb. 
Drop skb!\n"); +diff --git a/drivers/net/wireless/st/cw1200/cw1200_sdio.c b/drivers/net/wireless/st/cw1200/cw1200_sdio.c +index d3acc85932a5..de92107549ee 100644 +--- a/drivers/net/wireless/st/cw1200/cw1200_sdio.c ++++ b/drivers/net/wireless/st/cw1200/cw1200_sdio.c +@@ -62,6 +62,7 @@ static const struct sdio_device_id cw1200_sdio_ids[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200) }, + { /* end: all zeroes */ }, + }; ++MODULE_DEVICE_TABLE(sdio, cw1200_sdio_ids); + + /* hwbus_ops implemetation */ + +diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c +index 84624c812a15..f4338bce78f4 100644 +--- a/drivers/net/wireless/st/cw1200/main.c ++++ b/drivers/net/wireless/st/cw1200/main.c +@@ -385,6 +385,7 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, + CW1200_LINK_ID_MAX, + cw1200_skb_dtor, + priv)) { ++ destroy_workqueue(priv->workqueue); + ieee80211_free_hw(hw); + return NULL; + } +@@ -396,6 +397,7 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, + for (; i > 0; i--) + cw1200_queue_deinit(&priv->tx_queue[i - 1]); + cw1200_queue_stats_deinit(&priv->tx_queue_stats); ++ destroy_workqueue(priv->workqueue); + ieee80211_free_hw(hw); + return NULL; + } +diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c +index ede31f048ef9..247f4310a38f 100644 +--- a/drivers/net/wireless/ti/wl1251/cmd.c ++++ b/drivers/net/wireless/ti/wl1251/cmd.c +@@ -465,9 +465,12 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len, + cmd->channels[i].channel = channels[i]->hw_value; + } + +- cmd->params.ssid_len = ssid_len; +- if (ssid) +- memcpy(cmd->params.ssid, ssid, ssid_len); ++ if (ssid) { ++ int len = clamp_val(ssid_len, 0, IEEE80211_MAX_SSID_LEN); ++ ++ cmd->params.ssid_len = len; ++ memcpy(cmd->params.ssid, ssid, len); ++ } + + ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd)); + if (ret < 0) { +diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c +index d0593bc1f1a9..daddeaa66bf4 100644 +--- a/drivers/net/wireless/ti/wl1251/event.c ++++ b/drivers/net/wireless/ti/wl1251/event.c +@@ -84,7 +84,7 @@ static int wl1251_event_ps_report(struct wl1251 *wl, + break; + } + +- return 0; ++ return ret; + } + + static void wl1251_event_mbox_dump(struct event_mailbox *mbox) +diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c +index 22009e14a8fc..72991d3a55f1 100644 +--- a/drivers/net/wireless/ti/wl12xx/main.c ++++ b/drivers/net/wireless/ti/wl12xx/main.c +@@ -648,7 +648,6 @@ static int wl12xx_identify_chip(struct wl1271 *wl) + wl->quirks |= WLCORE_QUIRK_LEGACY_NVS | + WLCORE_QUIRK_DUAL_PROBE_TMPL | + WLCORE_QUIRK_TKIP_HEADER_SPACE | +- WLCORE_QUIRK_START_STA_FAILS | + WLCORE_QUIRK_AP_ZERO_SESSION_ID; + wl->sr_fw_name = WL127X_FW_NAME_SINGLE; + wl->mr_fw_name = WL127X_FW_NAME_MULTI; +@@ -672,7 +671,6 @@ static int wl12xx_identify_chip(struct wl1271 *wl) + wl->quirks |= WLCORE_QUIRK_LEGACY_NVS | + WLCORE_QUIRK_DUAL_PROBE_TMPL | + WLCORE_QUIRK_TKIP_HEADER_SPACE | +- WLCORE_QUIRK_START_STA_FAILS | + WLCORE_QUIRK_AP_ZERO_SESSION_ID; + wl->plt_fw_name = WL127X_PLT_FW_NAME; + wl->sr_fw_name = WL127X_FW_NAME_SINGLE; +@@ -701,7 +699,6 @@ static int wl12xx_identify_chip(struct wl1271 *wl) + wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN | + WLCORE_QUIRK_DUAL_PROBE_TMPL | + WLCORE_QUIRK_TKIP_HEADER_SPACE | +- WLCORE_QUIRK_START_STA_FAILS | + WLCORE_QUIRK_AP_ZERO_SESSION_ID; + + wlcore_set_min_fw_ver(wl, 
WL128X_CHIP_VER, +@@ -1519,6 +1516,13 @@ static int wl12xx_get_fuse_mac(struct wl1271 *wl) + u32 mac1, mac2; + int ret; + ++ /* Device may be in ELP from the bootloader or kexec */ ++ ret = wlcore_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL); ++ if (ret < 0) ++ goto out; ++ ++ usleep_range(500000, 700000); ++ + ret = wlcore_set_partition(wl, &wl->ptable[PART_DRPW]); + if (ret < 0) + goto out; +diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c +index 17d32ce5d16b..a973dac456be 100644 +--- a/drivers/net/wireless/ti/wlcore/main.c ++++ b/drivers/net/wireless/ti/wlcore/main.c +@@ -2833,21 +2833,8 @@ static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif) + + if (is_ibss) + ret = wl12xx_cmd_role_start_ibss(wl, wlvif); +- else { +- if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) { +- /* +- * TODO: this is an ugly workaround for wl12xx fw +- * bug - we are not able to tx/rx after the first +- * start_sta, so make dummy start+stop calls, +- * and then call start_sta again. +- * this should be fixed in the fw. +- */ +- wl12xx_cmd_role_start_sta(wl, wlvif); +- wl12xx_cmd_role_stop_sta(wl, wlvif); +- } +- ++ else + ret = wl12xx_cmd_role_start_sta(wl, wlvif); +- } + + return ret; + } +diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h +index 1827546ba807..34f0ba17fac9 100644 +--- a/drivers/net/wireless/ti/wlcore/wlcore.h ++++ b/drivers/net/wireless/ti/wlcore/wlcore.h +@@ -557,9 +557,6 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip, + /* Each RX/TX transaction requires an end-of-transaction transfer */ + #define WLCORE_QUIRK_END_OF_TRANSACTION BIT(0) + +-/* the first start_role(sta) sometimes doesn't work on wl12xx */ +-#define WLCORE_QUIRK_START_STA_FAILS BIT(1) +- + /* wl127x and SPI don't support SDIO block size alignment */ + #define WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN BIT(2) + +diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h +index 3fbfd19818f1..ca2021bcac14 100644 +--- a/drivers/net/wireless/wl3501.h ++++ b/drivers/net/wireless/wl3501.h +@@ -378,16 +378,7 @@ struct wl3501_get_confirm { + u8 mib_value[100]; + }; + +-struct wl3501_join_req { +- u16 next_blk; +- u8 sig_id; +- u8 reserved; +- struct iw_mgmt_data_rset operational_rset; +- u16 reserved2; +- u16 timeout; +- u16 probe_delay; +- u8 timestamp[8]; +- u8 local_time[8]; ++struct wl3501_req { + u16 beacon_period; + u16 dtim_period; + u16 cap_info; +@@ -400,6 +391,19 @@ struct wl3501_join_req { + struct iw_mgmt_data_rset bss_basic_rset; + }; + ++struct wl3501_join_req { ++ u16 next_blk; ++ u8 sig_id; ++ u8 reserved; ++ struct iw_mgmt_data_rset operational_rset; ++ u16 reserved2; ++ u16 timeout; ++ u16 probe_delay; ++ u8 timestamp[8]; ++ u8 local_time[8]; ++ struct wl3501_req req; ++}; ++ + struct wl3501_join_confirm { + u16 next_blk; + u8 sig_id; +@@ -442,16 +446,7 @@ struct wl3501_scan_confirm { + u16 status; + char timestamp[8]; + char localtime[8]; +- u16 beacon_period; +- u16 dtim_period; +- u16 cap_info; +- u8 bss_type; +- u8 bssid[ETH_ALEN]; +- struct iw_mgmt_essid_pset ssid; +- struct iw_mgmt_ds_pset ds_pset; +- struct iw_mgmt_cf_pset cf_pset; +- struct iw_mgmt_ibss_pset ibss_pset; +- struct iw_mgmt_data_rset bss_basic_rset; ++ struct wl3501_req req; + u8 rssi; + }; + +@@ -470,8 +465,10 @@ struct wl3501_md_req { + u16 size; + u8 pri; + u8 service_class; +- u8 daddr[ETH_ALEN]; +- u8 saddr[ETH_ALEN]; ++ struct { ++ u8 daddr[ETH_ALEN]; ++ u8 saddr[ETH_ALEN]; ++ } addr; + }; + + struct 
wl3501_md_ind { +@@ -483,8 +480,10 @@ struct wl3501_md_ind { + u8 reception; + u8 pri; + u8 service_class; +- u8 daddr[ETH_ALEN]; +- u8 saddr[ETH_ALEN]; ++ struct { ++ u8 daddr[ETH_ALEN]; ++ u8 saddr[ETH_ALEN]; ++ } addr; + }; + + struct wl3501_md_confirm { +diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c +index 932f3f81e8cf..959844a10861 100644 +--- a/drivers/net/wireless/wl3501_cs.c ++++ b/drivers/net/wireless/wl3501_cs.c +@@ -468,6 +468,7 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len) + struct wl3501_md_req sig = { + .sig_id = WL3501_SIG_MD_REQ, + }; ++ size_t sig_addr_len = sizeof(sig.addr); + u8 *pdata = (char *)data; + int rc = -EIO; + +@@ -483,9 +484,9 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len) + goto out; + } + rc = 0; +- memcpy(&sig.daddr[0], pdata, 12); +- pktlen = len - 12; +- pdata += 12; ++ memcpy(&sig.addr, pdata, sig_addr_len); ++ pktlen = len - sig_addr_len; ++ pdata += sig_addr_len; + sig.data = bf; + if (((*pdata) * 256 + (*(pdata + 1))) > 1500) { + u8 addr4[ETH_ALEN] = { +@@ -588,7 +589,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas) + struct wl3501_join_req sig = { + .sig_id = WL3501_SIG_JOIN_REQ, + .timeout = 10, +- .ds_pset = { ++ .req.ds_pset = { + .el = { + .id = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET, + .len = 1, +@@ -597,7 +598,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas) + }, + }; + +- memcpy(&sig.beacon_period, &this->bss_set[stas].beacon_period, 72); ++ memcpy(&sig.req, &this->bss_set[stas].req, sizeof(sig.req)); + return wl3501_esbq_exec(this, &sig, sizeof(sig)); + } + +@@ -665,35 +666,37 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr) + if (sig.status == WL3501_STATUS_SUCCESS) { + pr_debug("success"); + if ((this->net_type == IW_MODE_INFRA && +- (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) || ++ (sig.req.cap_info & WL3501_MGMT_CAPABILITY_ESS)) || + (this->net_type == IW_MODE_ADHOC && +- (sig.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) || ++ (sig.req.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) || + this->net_type == IW_MODE_AUTO) { + if (!this->essid.el.len) + matchflag = 1; + else if (this->essid.el.len == 3 && + !memcmp(this->essid.essid, "ANY", 3)) + matchflag = 1; +- else if (this->essid.el.len != sig.ssid.el.len) ++ else if (this->essid.el.len != sig.req.ssid.el.len) + matchflag = 0; +- else if (memcmp(this->essid.essid, sig.ssid.essid, ++ else if (memcmp(this->essid.essid, sig.req.ssid.essid, + this->essid.el.len)) + matchflag = 0; + else + matchflag = 1; + if (matchflag) { + for (i = 0; i < this->bss_cnt; i++) { +- if (ether_addr_equal_unaligned(this->bss_set[i].bssid, sig.bssid)) { ++ if (ether_addr_equal_unaligned(this->bss_set[i].req.bssid, ++ sig.req.bssid)) { + matchflag = 0; + break; + } + } + } + if (matchflag && (i < 20)) { +- memcpy(&this->bss_set[i].beacon_period, +- &sig.beacon_period, 73); ++ memcpy(&this->bss_set[i].req, ++ &sig.req, sizeof(sig.req)); + this->bss_cnt++; + this->rssi = sig.rssi; ++ this->bss_set[i].rssi = sig.rssi; + } + } + } else if (sig.status == WL3501_STATUS_TIMEOUT) { +@@ -885,19 +888,19 @@ static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr) + if (this->join_sta_bss < this->bss_cnt) { + const int i = this->join_sta_bss; + memcpy(this->bssid, +- this->bss_set[i].bssid, ETH_ALEN); +- this->chan = this->bss_set[i].ds_pset.chan; ++ this->bss_set[i].req.bssid, ETH_ALEN); ++ this->chan = this->bss_set[i].req.ds_pset.chan; + 
iw_copy_mgmt_info_element(&this->keep_essid.el, +- &this->bss_set[i].ssid.el); ++ &this->bss_set[i].req.ssid.el); + wl3501_mgmt_auth(this); + } + } else { + const int i = this->join_sta_bss; + +- memcpy(&this->bssid, &this->bss_set[i].bssid, ETH_ALEN); +- this->chan = this->bss_set[i].ds_pset.chan; ++ memcpy(&this->bssid, &this->bss_set[i].req.bssid, ETH_ALEN); ++ this->chan = this->bss_set[i].req.ds_pset.chan; + iw_copy_mgmt_info_element(&this->keep_essid.el, +- &this->bss_set[i].ssid.el); ++ &this->bss_set[i].req.ssid.el); + wl3501_online(dev); + } + } else { +@@ -979,7 +982,8 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev, + } else { + skb->dev = dev; + skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */ +- skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12); ++ skb_copy_to_linear_data(skb, (unsigned char *)&sig.addr, ++ sizeof(sig.addr)); + wl3501_receive(this, skb->data, pkt_len); + skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, dev); +@@ -1574,30 +1578,30 @@ static int wl3501_get_scan(struct net_device *dev, struct iw_request_info *info, + for (i = 0; i < this->bss_cnt; ++i) { + iwe.cmd = SIOCGIWAP; + iwe.u.ap_addr.sa_family = ARPHRD_ETHER; +- memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].bssid, ETH_ALEN); ++ memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].req.bssid, ETH_ALEN); + current_ev = iwe_stream_add_event(info, current_ev, + extra + IW_SCAN_MAX_DATA, + &iwe, IW_EV_ADDR_LEN); + iwe.cmd = SIOCGIWESSID; + iwe.u.data.flags = 1; +- iwe.u.data.length = this->bss_set[i].ssid.el.len; ++ iwe.u.data.length = this->bss_set[i].req.ssid.el.len; + current_ev = iwe_stream_add_point(info, current_ev, + extra + IW_SCAN_MAX_DATA, + &iwe, +- this->bss_set[i].ssid.essid); ++ this->bss_set[i].req.ssid.essid); + iwe.cmd = SIOCGIWMODE; +- iwe.u.mode = this->bss_set[i].bss_type; ++ iwe.u.mode = this->bss_set[i].req.bss_type; + current_ev = iwe_stream_add_event(info, current_ev, + extra + IW_SCAN_MAX_DATA, + &iwe, IW_EV_UINT_LEN); + iwe.cmd = SIOCGIWFREQ; +- iwe.u.freq.m = this->bss_set[i].ds_pset.chan; ++ iwe.u.freq.m = this->bss_set[i].req.ds_pset.chan; + iwe.u.freq.e = 0; + current_ev = iwe_stream_add_event(info, current_ev, + extra + IW_SCAN_MAX_DATA, + &iwe, IW_EV_FREQ_LEN); + iwe.cmd = SIOCGIWENCODE; +- if (this->bss_set[i].cap_info & WL3501_MGMT_CAPABILITY_PRIVACY) ++ if (this->bss_set[i].req.cap_info & WL3501_MGMT_CAPABILITY_PRIVACY) + iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; + else + iwe.u.data.flags = IW_ENCODE_DISABLED; +diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h +index 5b1d2e8402d9..347c796afd4e 100644 +--- a/drivers/net/xen-netback/common.h ++++ b/drivers/net/xen-netback/common.h +@@ -140,6 +140,20 @@ struct xenvif_queue { /* Per-queue data for xenvif */ + char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */ + struct xenvif *vif; /* Parent VIF */ + ++ /* ++ * TX/RX common EOI handling. ++ * When feature-split-event-channels = 0, interrupt handler sets ++ * NETBK_COMMON_EOI, otherwise NETBK_RX_EOI and NETBK_TX_EOI are set ++ * by the RX and TX interrupt handlers. ++ * RX and TX handler threads will issue an EOI when either ++ * NETBK_COMMON_EOI or their specific bits (NETBK_RX_EOI or ++ * NETBK_TX_EOI) are set and they will reset those bits. ++ */ ++ atomic_t eoi_pending; ++#define NETBK_RX_EOI 0x01 ++#define NETBK_TX_EOI 0x02 ++#define NETBK_COMMON_EOI 0x04 ++ + /* Use NAPI for guest TX */ + struct napi_struct napi; + /* When feature-split-event-channels = 0, tx_irq = rx_irq. 
*/ +@@ -356,6 +370,7 @@ int xenvif_dealloc_kthread(void *data); + + irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data); + ++bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread); + void xenvif_rx_action(struct xenvif_queue *queue); + void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb); + +diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c +index 46008f284550..d9d06dc689ed 100644 +--- a/drivers/net/xen-netback/interface.c ++++ b/drivers/net/xen-netback/interface.c +@@ -76,12 +76,28 @@ int xenvif_schedulable(struct xenvif *vif) + !vif->disabled; + } + ++static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue) ++{ ++ bool rc; ++ ++ rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); ++ if (rc) ++ napi_schedule(&queue->napi); ++ return rc; ++} ++ + static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) + { + struct xenvif_queue *queue = dev_id; ++ int old; + +- if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)) +- napi_schedule(&queue->napi); ++ old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending); ++ WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n"); ++ ++ if (!xenvif_handle_tx_interrupt(queue)) { ++ atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending); ++ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); ++ } + + return IRQ_HANDLED; + } +@@ -115,19 +131,48 @@ static int xenvif_poll(struct napi_struct *napi, int budget) + return work_done; + } + ++static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue) ++{ ++ bool rc; ++ ++ rc = xenvif_have_rx_work(queue, false); ++ if (rc) ++ xenvif_kick_thread(queue); ++ return rc; ++} ++ + static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) + { + struct xenvif_queue *queue = dev_id; ++ int old; + +- xenvif_kick_thread(queue); ++ old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending); ++ WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n"); ++ ++ if (!xenvif_handle_rx_interrupt(queue)) { ++ atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending); ++ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); ++ } + + return IRQ_HANDLED; + } + + irqreturn_t xenvif_interrupt(int irq, void *dev_id) + { +- xenvif_tx_interrupt(irq, dev_id); +- xenvif_rx_interrupt(irq, dev_id); ++ struct xenvif_queue *queue = dev_id; ++ int old; ++ bool has_rx, has_tx; ++ ++ old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending); ++ WARN(old, "Interrupt while EOI pending\n"); ++ ++ has_tx = xenvif_handle_tx_interrupt(queue); ++ has_rx = xenvif_handle_rx_interrupt(queue); ++ ++ if (!has_rx && !has_tx) { ++ atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending); ++ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); ++ } + + return IRQ_HANDLED; + } +@@ -583,7 +628,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref, + shared = (struct xen_netif_ctrl_sring *)addr; + BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE); + +- err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn); ++ err = bind_interdomain_evtchn_to_irq_lateeoi(vif->domid, evtchn); + if (err < 0) + goto err_unmap; + +@@ -641,7 +686,7 @@ int xenvif_connect_data(struct xenvif_queue *queue, + + if (tx_evtchn == rx_evtchn) { + /* feature-split-event-channels == 0 */ +- err = bind_interdomain_evtchn_to_irqhandler( ++ err = bind_interdomain_evtchn_to_irqhandler_lateeoi( + queue->vif->domid, tx_evtchn, xenvif_interrupt, 0, + queue->name, queue); + if (err < 0) +@@ -652,7 +697,7 @@ int xenvif_connect_data(struct xenvif_queue *queue, + /* feature-split-event-channels == 1 */ + snprintf(queue->tx_irq_name, 
sizeof(queue->tx_irq_name), + "%s-tx", queue->name); +- err = bind_interdomain_evtchn_to_irqhandler( ++ err = bind_interdomain_evtchn_to_irqhandler_lateeoi( + queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0, + queue->tx_irq_name, queue); + if (err < 0) +@@ -662,7 +707,7 @@ int xenvif_connect_data(struct xenvif_queue *queue, + + snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), + "%s-rx", queue->name); +- err = bind_interdomain_evtchn_to_irqhandler( ++ err = bind_interdomain_evtchn_to_irqhandler_lateeoi( + queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0, + queue->rx_irq_name, queue); + if (err < 0) +diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c +index a469fbe1abaf..f7fd8b5a6a8c 100644 +--- a/drivers/net/xen-netback/netback.c ++++ b/drivers/net/xen-netback/netback.c +@@ -162,6 +162,10 @@ void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue) + + if (more_to_do) + napi_schedule(&queue->napi); ++ else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI, ++ &queue->eoi_pending) & ++ (NETBK_TX_EOI | NETBK_COMMON_EOI)) ++ xen_irq_lateeoi(queue->tx_irq, 0); + } + + static void tx_add_credit(struct xenvif_queue *queue) +@@ -1329,7 +1333,15 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget) + NULL, + queue->pages_to_map, + nr_mops); +- BUG_ON(ret); ++ if (ret) { ++ unsigned int i; ++ ++ netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n", ++ nr_mops, ret); ++ for (i = 0; i < nr_mops; ++i) ++ WARN_ON_ONCE(queue->tx_map_ops[i].status == ++ GNTST_okay); ++ } + } + + work_done = xenvif_tx_submit(queue); +@@ -1615,9 +1627,14 @@ static bool xenvif_ctrl_work_todo(struct xenvif *vif) + irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data) + { + struct xenvif *vif = data; ++ unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS; + +- while (xenvif_ctrl_work_todo(vif)) ++ while (xenvif_ctrl_work_todo(vif)) { + xenvif_ctrl_action(vif); ++ eoi_flag = 0; ++ } ++ ++ xen_irq_lateeoi(irq, eoi_flag); + + return IRQ_HANDLED; + } +diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c +index b1cf7c6f407a..ddfb1cfa2dd9 100644 +--- a/drivers/net/xen-netback/rx.c ++++ b/drivers/net/xen-netback/rx.c +@@ -38,10 +38,15 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) + RING_IDX prod, cons; + struct sk_buff *skb; + int needed; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&queue->rx_queue.lock, flags); + + skb = skb_peek(&queue->rx_queue); +- if (!skb) ++ if (!skb) { ++ spin_unlock_irqrestore(&queue->rx_queue.lock, flags); + return false; ++ } + + needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); + if (skb_is_gso(skb)) +@@ -49,6 +54,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) + if (skb->sw_hash) + needed++; + ++ spin_unlock_irqrestore(&queue->rx_queue.lock, flags); ++ + do { + prod = queue->rx.sring->req_prod; + cons = queue->rx.req_cons; +@@ -490,13 +497,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue) + return queue->stalled && prod - cons >= 1; + } + +-static bool xenvif_have_rx_work(struct xenvif_queue *queue) ++bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread) + { + return xenvif_rx_ring_slots_available(queue) || + (queue->vif->stall_timeout && + (xenvif_rx_queue_stalled(queue) || + xenvif_rx_queue_ready(queue))) || +- kthread_should_stop() || ++ (test_kthread && kthread_should_stop()) || + queue->vif->disabled; + } + +@@ -527,15 +534,20 @@ static void xenvif_wait_for_rx_work(struct xenvif_queue *queue) + { + DEFINE_WAIT(wait); 
+ +- if (xenvif_have_rx_work(queue)) ++ if (xenvif_have_rx_work(queue, true)) + return; + + for (;;) { + long ret; + + prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE); +- if (xenvif_have_rx_work(queue)) ++ if (xenvif_have_rx_work(queue, true)) + break; ++ if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI, ++ &queue->eoi_pending) & ++ (NETBK_RX_EOI | NETBK_COMMON_EOI)) ++ xen_irq_lateeoi(queue->rx_irq, 0); ++ + ret = schedule_timeout(xenvif_rx_queue_timeout(queue)); + if (!ret) + break; +diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c +index b44f37fff890..e6646c8a7bdb 100644 +--- a/drivers/net/xen-netback/xenbus.c ++++ b/drivers/net/xen-netback/xenbus.c +@@ -770,12 +770,14 @@ static int xen_register_credit_watch(struct xenbus_device *dev, + return -ENOMEM; + snprintf(node, maxlen, "%s/rate", dev->nodename); + vif->credit_watch.node = node; ++ vif->credit_watch.will_handle = NULL; + vif->credit_watch.callback = xen_net_rate_changed; + err = register_xenbus_watch(&vif->credit_watch); + if (err) { + pr_err("Failed to set watcher %s\n", vif->credit_watch.node); + kfree(node); + vif->credit_watch.node = NULL; ++ vif->credit_watch.will_handle = NULL; + vif->credit_watch.callback = NULL; + } + return err; +@@ -1038,11 +1040,15 @@ static void connect(struct backend_info *be) + xenvif_carrier_on(be->vif); + + unregister_hotplug_status_watch(be); +- err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, +- hotplug_status_changed, +- "%s/%s", dev->nodename, "hotplug-status"); +- if (!err) ++ if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) { ++ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, ++ NULL, hotplug_status_changed, ++ "%s/%s", dev->nodename, ++ "hotplug-status"); ++ if (err) ++ goto err; + be->have_hotplug_status_watch = 1; ++ } + + netif_tx_wake_all_queues(be->vif->dev); + +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c +index 6d391a268469..ceaf6b30d683 100644 +--- a/drivers/net/xen-netfront.c ++++ b/drivers/net/xen-netfront.c +@@ -62,6 +62,8 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644); + MODULE_PARM_DESC(max_queues, + "Maximum number of queues per virtual interface"); + ++#define XENNET_TIMEOUT (5 * HZ) ++ + static const struct ethtool_ops xennet_ethtool_ops; + + struct netfront_cb { +@@ -1355,12 +1357,15 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) + + netif_carrier_off(netdev); + +- xenbus_switch_state(dev, XenbusStateInitialising); +- wait_event(module_wq, +- xenbus_read_driver_state(dev->otherend) != +- XenbusStateClosed && +- xenbus_read_driver_state(dev->otherend) != +- XenbusStateUnknown); ++ do { ++ xenbus_switch_state(dev, XenbusStateInitialising); ++ err = wait_event_timeout(module_wq, ++ xenbus_read_driver_state(dev->otherend) != ++ XenbusStateClosed && ++ xenbus_read_driver_state(dev->otherend) != ++ XenbusStateUnknown, XENNET_TIMEOUT); ++ } while (!err); ++ + return netdev; + + exit: +@@ -2172,28 +2177,43 @@ static const struct attribute_group xennet_dev_group = { + }; + #endif /* CONFIG_SYSFS */ + +-static int xennet_remove(struct xenbus_device *dev) ++static void xennet_bus_close(struct xenbus_device *dev) + { +- struct netfront_info *info = dev_get_drvdata(&dev->dev); +- +- dev_dbg(&dev->dev, "%s\n", dev->nodename); ++ int ret; + +- if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { ++ if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) ++ return; ++ do { + xenbus_switch_state(dev, XenbusStateClosing); 
+- wait_event(module_wq, +- xenbus_read_driver_state(dev->otherend) == +- XenbusStateClosing || +- xenbus_read_driver_state(dev->otherend) == +- XenbusStateUnknown); ++ ret = wait_event_timeout(module_wq, ++ xenbus_read_driver_state(dev->otherend) == ++ XenbusStateClosing || ++ xenbus_read_driver_state(dev->otherend) == ++ XenbusStateClosed || ++ xenbus_read_driver_state(dev->otherend) == ++ XenbusStateUnknown, ++ XENNET_TIMEOUT); ++ } while (!ret); ++ ++ if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) ++ return; + ++ do { + xenbus_switch_state(dev, XenbusStateClosed); +- wait_event(module_wq, +- xenbus_read_driver_state(dev->otherend) == +- XenbusStateClosed || +- xenbus_read_driver_state(dev->otherend) == +- XenbusStateUnknown); +- } ++ ret = wait_event_timeout(module_wq, ++ xenbus_read_driver_state(dev->otherend) == ++ XenbusStateClosed || ++ xenbus_read_driver_state(dev->otherend) == ++ XenbusStateUnknown, ++ XENNET_TIMEOUT); ++ } while (!ret); ++} ++ ++static int xennet_remove(struct xenbus_device *dev) ++{ ++ struct netfront_info *info = dev_get_drvdata(&dev->dev); + ++ xennet_bus_close(dev); + xennet_disconnect_backend(info); + + if (info->netdev->reg_state == NETREG_REGISTERED) +diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c +index a466e7978466..11aa6a04ff46 100644 +--- a/drivers/nfc/nfcsim.c ++++ b/drivers/nfc/nfcsim.c +@@ -201,8 +201,7 @@ static void nfcsim_recv_wq(struct work_struct *work) + + if (!IS_ERR(skb)) + dev_kfree_skb(skb); +- +- skb = ERR_PTR(-ENODEV); ++ return; + } + + dev->cb(dev->nfc_digital_dev, dev->arg, skb); +diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c +index d9c55830b2b2..6c495664d2cb 100644 +--- a/drivers/nfc/pn533/pn533.c ++++ b/drivers/nfc/pn533/pn533.c +@@ -678,6 +678,9 @@ static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a, + if (PN533_TYPE_A_SEL_CASCADE(type_a->sel_res) != 0) + return false; + ++ if (type_a->nfcid_len > NFC_NFCID1_MAXSIZE) ++ return false; ++ + return true; + } + +diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c +index 9d9c8d57a042..64b58455e620 100644 +--- a/drivers/nfc/s3fwrn5/core.c ++++ b/drivers/nfc/s3fwrn5/core.c +@@ -209,6 +209,7 @@ int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb, + case S3FWRN5_MODE_FW: + return s3fwrn5_fw_recv_frame(ndev, skb); + default: ++ kfree_skb(skb); + return -ENODEV; + } + } +diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c +index 5f97da1947e3..e6ca1f9a7f63 100644 +--- a/drivers/nfc/s3fwrn5/firmware.c ++++ b/drivers/nfc/s3fwrn5/firmware.c +@@ -304,8 +304,10 @@ static int s3fwrn5_fw_request_firmware(struct s3fwrn5_fw_info *fw_info) + if (ret < 0) + return ret; + +- if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE) ++ if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE) { ++ release_firmware(fw->fw); + return -EINVAL; ++ } + + memcpy(fw->date, fw->fw->data + 0x00, 12); + fw->date[12] = '\0'; +diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c +index 3ed0adf6479b..5b0c065bd279 100644 +--- a/drivers/nfc/s3fwrn5/i2c.c ++++ b/drivers/nfc/s3fwrn5/i2c.c +@@ -37,8 +37,8 @@ struct s3fwrn5_i2c_phy { + struct i2c_client *i2c_dev; + struct nci_dev *ndev; + +- unsigned int gpio_en; +- unsigned int gpio_fw_wake; ++ int gpio_en; ++ int gpio_fw_wake; + + struct mutex mutex; + +diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c +index 850e75571c8e..bb1e878913f3 100644 +--- a/drivers/nfc/st95hf/core.c ++++ b/drivers/nfc/st95hf/core.c +@@ -981,7 +981,7 @@ 
static int st95hf_in_send_cmd(struct nfc_digital_dev *ddev, + rc = down_killable(&stcontext->exchange_lock); + if (rc) { + WARN(1, "Semaphore is not found up in st95hf_in_send_cmd\n"); +- return rc; ++ goto free_skb_resp; + } + + rc = st95hf_spi_send(&stcontext->spicontext, skb->data, +diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c +index 6ccba0d862df..927b574e5d59 100644 +--- a/drivers/ntb/hw/amd/ntb_hw_amd.c ++++ b/drivers/ntb/hw/amd/ntb_hw_amd.c +@@ -994,6 +994,7 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev, + + err_dma_mask: + pci_clear_master(pdev); ++ pci_release_regions(pdev); + err_pci_regions: + pci_disable_device(pdev); + err_pci_enable: +diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c +index dcb32f34a302..25b867a9281b 100644 +--- a/drivers/nvdimm/dimm_devs.c ++++ b/drivers/nvdimm/dimm_devs.c +@@ -317,16 +317,16 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr, + } + static DEVICE_ATTR_RO(state); + +-static ssize_t available_slots_show(struct device *dev, +- struct device_attribute *attr, char *buf) ++static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf) + { +- struct nvdimm_drvdata *ndd = dev_get_drvdata(dev); ++ struct device *dev; + ssize_t rc; + u32 nfree; + + if (!ndd) + return -ENXIO; + ++ dev = ndd->dev; + nvdimm_bus_lock(dev); + nfree = nd_label_nfree(ndd); + if (nfree - 1 > nfree) { +@@ -338,6 +338,18 @@ static ssize_t available_slots_show(struct device *dev, + nvdimm_bus_unlock(dev); + return rc; + } ++ ++static ssize_t available_slots_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ ssize_t rc; ++ ++ device_lock(dev); ++ rc = __available_slots_show(dev_get_drvdata(dev), buf); ++ device_unlock(dev); ++ ++ return rc; ++} + static DEVICE_ATTR_RO(available_slots); + + static struct attribute *nvdimm_attributes[] = { +diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h +index bd29e598bac1..2a820c1fdfcd 100644 +--- a/drivers/nvdimm/nd.h ++++ b/drivers/nvdimm/nd.h +@@ -29,7 +29,6 @@ enum { + * BTT instance + */ + ND_MAX_LANES = 256, +- SECTOR_SHIFT = 9, + INT_LBASIZE_ALIGNMENT = 64, + }; + +diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c +index 96ea6c76be6e..fb2461cc3c69 100644 +--- a/drivers/nvme/target/core.c ++++ b/drivers/nvme/target/core.c +@@ -205,6 +205,9 @@ static void nvmet_keep_alive_timer(struct work_struct *work) + + static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) + { ++ if (unlikely(ctrl->kato == 0)) ++ return; ++ + pr_debug("ctrl %d start keep-alive timer for %d secs\n", + ctrl->cntlid, ctrl->kato); + +@@ -214,6 +217,9 @@ static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) + + static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl) + { ++ if (unlikely(ctrl->kato == 0)) ++ return; ++ + pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid); + + cancel_delayed_work_sync(&ctrl->ka_work); +@@ -568,9 +574,20 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) + { + lockdep_assert_held(&ctrl->lock); + +- if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES || +- nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES || +- nvmet_cc_mps(ctrl->cc) != 0 || ++ /* ++ * Only I/O controllers should verify iosqes,iocqes. ++ * Strictly speaking, the spec says a discovery controller ++ * should verify iosqes,iocqes are zeroed, however that ++ * would break backwards compatibility, so don't enforce it. 
++ */ ++ if (ctrl->subsys->type != NVME_NQN_DISC && ++ (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES || ++ nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) { ++ ctrl->csts = NVME_CSTS_CFS; ++ return; ++ } ++ ++ if (nvmet_cc_mps(ctrl->cc) != 0 || + nvmet_cc_ams(ctrl->cc) != 0 || + nvmet_cc_css(ctrl->cc) != 0) { + ctrl->csts = NVME_CSTS_CFS; +@@ -585,7 +602,8 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) + * in case a host died before it enabled the controller. Hence, simply + * reset the keep alive timer when the controller is enabled. + */ +- mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ); ++ if (ctrl->kato) ++ mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ); + } + + static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl) +diff --git a/drivers/of/address.c b/drivers/of/address.c +index b87b3cdefc6d..b9c05daff9fe 100644 +--- a/drivers/of/address.c ++++ b/drivers/of/address.c +@@ -934,11 +934,13 @@ EXPORT_SYMBOL_GPL(of_dma_get_range); + */ + bool of_dma_is_coherent(struct device_node *np) + { +- struct device_node *node = of_node_get(np); ++ struct device_node *node; + + if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT)) + return true; + ++ node = of_node_get(np); ++ + while (node) { + if (of_property_read_bool(node, "dma-coherent")) { + of_node_put(node); +diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c +index 744f6250dc98..69d2eeb75b8b 100644 +--- a/drivers/of/fdt.c ++++ b/drivers/of/fdt.c +@@ -607,11 +607,11 @@ static int __init __reserved_mem_reserve_reg(unsigned long node, + + if (size && + early_init_dt_reserve_memory_arch(base, size, nomap) == 0) +- pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\n", +- uname, &base, (unsigned long)size / SZ_1M); ++ pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n", ++ uname, &base, (unsigned long)(size / SZ_1M)); + else +- pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %ld MiB\n", +- uname, &base, (unsigned long)size / SZ_1M); ++ pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n", ++ uname, &base, (unsigned long)(size / SZ_1M)); + + len -= t_len; + if (first) { +diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c +index 4a1cab5b2121..2f72b86555bc 100644 +--- a/drivers/of/of_reserved_mem.c ++++ b/drivers/of/of_reserved_mem.c +@@ -161,9 +161,9 @@ static int __init __reserved_mem_alloc_size(unsigned long node, + ret = early_init_dt_alloc_reserved_memory_arch(size, + align, start, end, nomap, &base); + if (ret == 0) { +- pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n", ++ pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n", + uname, &base, +- (unsigned long)size / SZ_1M); ++ (unsigned long)(size / SZ_1M)); + break; + } + len -= t_len; +@@ -173,8 +173,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node, + ret = early_init_dt_alloc_reserved_memory_arch(size, align, + 0, 0, nomap, &base); + if (ret == 0) +- pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n", +- uname, &base, (unsigned long)size / SZ_1M); ++ pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n", ++ uname, &base, (unsigned long)(size / SZ_1M)); + } + + if (base == 0) { +@@ -225,6 +225,16 @@ static int __init __rmem_cmp(const void *a, const void *b) + if (ra->base > rb->base) + return 1; + ++ /* ++ * Put the dynamic allocations (address == 0, size == 0) before static ++ * allocations at address 0x0 so that overlap 
detection works ++ * correctly. ++ */ ++ if (ra->size < rb->size) ++ return -1; ++ if (ra->size > rb->size) ++ return 1; ++ + return 0; + } + +@@ -242,8 +252,7 @@ static void __init __rmem_check_for_overlap(void) + + this = &reserved_mem[i]; + next = &reserved_mem[i + 1]; +- if (!(this->base && next->base)) +- continue; ++ + if (this->base + this->size > next->base) { + phys_addr_t this_end, next_end; + +diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c +index 188fab57d170..a7f542e784dd 100644 +--- a/drivers/parisc/sba_iommu.c ++++ b/drivers/parisc/sba_iommu.c +@@ -1283,7 +1283,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num) + ** (one that doesn't overlap memory or LMMIO space) in the + ** IBASE and IMASK registers. + */ +- ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE); ++ ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL; + iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1; + + if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) { +diff --git a/drivers/pci/access.c b/drivers/pci/access.c +index 7b5cf6d1181a..6f2a07567532 100644 +--- a/drivers/pci/access.c ++++ b/drivers/pci/access.c +@@ -185,17 +185,13 @@ EXPORT_SYMBOL(pci_bus_set_ops); + static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait); + + static noinline void pci_wait_cfg(struct pci_dev *dev) ++ __must_hold(&pci_lock) + { +- DECLARE_WAITQUEUE(wait, current); +- +- __add_wait_queue(&pci_cfg_wait, &wait); + do { +- set_current_state(TASK_UNINTERRUPTIBLE); + raw_spin_unlock_irq(&pci_lock); +- schedule(); ++ wait_event(pci_cfg_wait, !dev->block_cfg_access); + raw_spin_lock_irq(&pci_lock); + } while (dev->block_cfg_access); +- __remove_wait_queue(&pci_cfg_wait, &wait); + } + + /* Returns 0 on success, negative values indicate error. 
*/ +diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c +index c288e5a52575..32001787cff3 100644 +--- a/drivers/pci/bus.c ++++ b/drivers/pci/bus.c +@@ -324,12 +324,8 @@ void pci_bus_add_device(struct pci_dev *dev) + + dev->match_driver = true; + retval = device_attach(&dev->dev); +- if (retval < 0 && retval != -EPROBE_DEFER) { ++ if (retval < 0 && retval != -EPROBE_DEFER) + dev_warn(&dev->dev, "device attach failed (%d)\n", retval); +- pci_proc_detach_device(dev); +- pci_remove_sysfs_dev_files(dev); +- return; +- } + + dev->is_added = 1; + } +diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c +index 1dbd09c91a7c..736d9f58438e 100644 +--- a/drivers/pci/host/pci-aardvark.c ++++ b/drivers/pci/host/pci-aardvark.c +@@ -363,10 +363,6 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) + + advk_pcie_wait_for_link(pcie); + +- reg = PCIE_CORE_LINK_L0S_ENTRY | +- (1 << PCIE_CORE_LINK_WIDTH_SHIFT); +- advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG); +- + reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); + reg |= PCIE_CORE_CMD_MEM_ACCESS_EN | + PCIE_CORE_CMD_IO_ACCESS_EN | +diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c +index a6456b578269..b6a099371ad2 100644 +--- a/drivers/pci/host/pci-xgene-msi.c ++++ b/drivers/pci/host/pci-xgene-msi.c +@@ -393,13 +393,9 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu) + if (!msi_group->gic_irq) + continue; + +- irq_set_chained_handler(msi_group->gic_irq, +- xgene_msi_isr); +- err = irq_set_handler_data(msi_group->gic_irq, msi_group); +- if (err) { +- pr_err("failed to register GIC IRQ handler\n"); +- return -EINVAL; +- } ++ irq_set_chained_handler_and_data(msi_group->gic_irq, ++ xgene_msi_isr, msi_group); ++ + /* + * Statically allocate MSI GIC IRQs to each CPU core. 
+ * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated +diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c +index 7f6b454bca65..3ff423220df6 100644 +--- a/drivers/pci/host/pcie-rcar.c ++++ b/drivers/pci/host/pcie-rcar.c +@@ -328,11 +328,12 @@ static struct pci_ops rcar_pcie_ops = { + }; + + static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie, +- struct resource *res) ++ struct resource_entry *window) + { + /* Setup PCIe address space mappings for each resource */ + resource_size_t size; + resource_size_t res_start; ++ struct resource *res = window->res; + u32 mask; + + rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win)); +@@ -346,9 +347,9 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie, + rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win)); + + if (res->flags & IORESOURCE_IO) +- res_start = pci_pio_to_address(res->start); ++ res_start = pci_pio_to_address(res->start) - window->offset; + else +- res_start = res->start; ++ res_start = res->start - window->offset; + + rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win)); + rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F, +@@ -377,7 +378,7 @@ static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci) + switch (resource_type(res)) { + case IORESOURCE_IO: + case IORESOURCE_MEM: +- rcar_pcie_setup_window(i, pci, res); ++ rcar_pcie_setup_window(i, pci, win); + i++; + break; + case IORESOURCE_BUS: +diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c +index d44b55879c67..f154b05f467f 100644 +--- a/drivers/pci/hotplug/acpiphp_glue.c ++++ b/drivers/pci/hotplug/acpiphp_glue.c +@@ -136,13 +136,21 @@ static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev) + struct acpiphp_context *context; + + acpi_lock_hp_context(); ++ + context = acpiphp_get_context(adev); +- if (!context || context->func.parent->is_going_away) { +- acpi_unlock_hp_context(); +- return NULL; ++ if (!context) ++ goto unlock; ++ ++ if (context->func.parent->is_going_away) { ++ acpiphp_put_context(context); ++ context = NULL; ++ goto unlock; + } ++ + get_bridge(context->func.parent); + acpiphp_put_context(context); ++ ++unlock: + acpi_unlock_hp_context(); + return context; + } +@@ -530,6 +538,7 @@ static void enable_slot(struct acpiphp_slot *slot) + slot->flags &= (~SLOT_ENABLED); + continue; + } ++ pci_dev_put(dev); + } + } + +diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c +index a796301ea03f..ca9d832bd9f8 100644 +--- a/drivers/pci/hotplug/rpadlpar_sysfs.c ++++ b/drivers/pci/hotplug/rpadlpar_sysfs.c +@@ -39,12 +39,11 @@ static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr, + if (nbytes >= MAX_DRC_NAME_LEN) + return 0; + +- memcpy(drc_name, buf, nbytes); ++ strscpy(drc_name, buf, nbytes + 1); + + end = strchr(drc_name, '\n'); +- if (!end) +- end = &drc_name[nbytes]; +- *end = '\0'; ++ if (end) ++ *end = '\0'; + + rc = dlpar_add_slot(drc_name); + if (rc) +@@ -70,12 +69,11 @@ static ssize_t remove_slot_store(struct kobject *kobj, + if (nbytes >= MAX_DRC_NAME_LEN) + return 0; + +- memcpy(drc_name, buf, nbytes); ++ strscpy(drc_name, buf, nbytes + 1); + + end = strchr(drc_name, '\n'); +- if (!end) +- end = &drc_name[nbytes]; +- *end = '\0'; ++ if (end) ++ *end = '\0'; + + rc = dlpar_remove_slot(drc_name); + if (rc) +diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c +index 55ca14fbdd2a..77810f424049 100644 +--- a/drivers/pci/msi.c ++++ b/drivers/pci/msi.c +@@ -189,24 
+189,25 @@ static inline __attribute_const__ u32 msi_mask(unsigned x) + * reliably as devices without an INTx disable bit will then generate a + * level IRQ which will never be cleared. + */ +-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) ++void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) + { +- u32 mask_bits = desc->masked; ++ raw_spinlock_t *lock = &desc->dev->msi_lock; ++ unsigned long flags; + + if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit) +- return 0; ++ return; + +- mask_bits &= ~mask; +- mask_bits |= flag; ++ raw_spin_lock_irqsave(lock, flags); ++ desc->masked &= ~mask; ++ desc->masked |= flag; + pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos, +- mask_bits); +- +- return mask_bits; ++ desc->masked); ++ raw_spin_unlock_irqrestore(lock, flags); + } + + static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) + { +- desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag); ++ __pci_msi_desc_mask_irq(desc, mask, flag); + } + + static void __iomem *pci_msix_desc_addr(struct msi_desc *desc) +@@ -321,10 +322,28 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) + /* Don't touch the hardware now */ + } else if (entry->msi_attrib.is_msix) { + void __iomem *base = pci_msix_desc_addr(entry); ++ bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT); ++ ++ /* ++ * The specification mandates that the entry is masked ++ * when the message is modified: ++ * ++ * "If software changes the Address or Data value of an ++ * entry while the entry is unmasked, the result is ++ * undefined." ++ */ ++ if (unmasked) ++ __pci_msix_desc_mask_irq(entry, PCI_MSIX_ENTRY_CTRL_MASKBIT); + + writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR); + writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); + writel(msg->data, base + PCI_MSIX_ENTRY_DATA); ++ ++ if (unmasked) ++ __pci_msix_desc_mask_irq(entry, 0); ++ ++ /* Ensure that the writes are visible in the device */ ++ readl(base + PCI_MSIX_ENTRY_DATA); + } else { + int pos = dev->msi_cap; + u16 msgctl; +@@ -345,6 +364,8 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) + pci_write_config_word(dev, pos + PCI_MSI_DATA_32, + msg->data); + } ++ /* Ensure that the writes are visible in the device */ ++ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl); + } + entry->msg = *msg; + } +@@ -639,21 +660,21 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity) + /* Configure MSI capability structure */ + ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); + if (ret) { +- msi_mask_irq(entry, mask, ~mask); ++ msi_mask_irq(entry, mask, 0); + free_msi_irqs(dev); + return ret; + } + + ret = msi_verify_entries(dev); + if (ret) { +- msi_mask_irq(entry, mask, ~mask); ++ msi_mask_irq(entry, mask, 0); + free_msi_irqs(dev); + return ret; + } + + ret = populate_msi_sysfs(dev); + if (ret) { +- msi_mask_irq(entry, mask, ~mask); ++ msi_mask_irq(entry, mask, 0); + free_msi_irqs(dev); + return ret; + } +@@ -694,6 +715,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, + { + struct cpumask *curmsk, *masks = NULL; + struct msi_desc *entry; ++ void __iomem *addr; + int ret, i; + + if (affinity) { +@@ -716,6 +738,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, + + entry->msi_attrib.is_msix = 1; + entry->msi_attrib.is_64 = 1; ++ + if (entries) + entry->msi_attrib.entry_nr = entries[i].entry; + else +@@ -723,6 +746,10 @@ static int msix_setup_entries(struct pci_dev 
*dev, void __iomem *base, + entry->msi_attrib.default_irq = dev->irq; + entry->mask_base = base; + ++ addr = pci_msix_desc_addr(entry); ++ if (addr) ++ entry->masked = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL); ++ + list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); + if (masks) + curmsk++; +@@ -733,21 +760,27 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, + return ret; + } + +-static void msix_program_entries(struct pci_dev *dev, +- struct msix_entry *entries) ++static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries) + { + struct msi_desc *entry; +- int i = 0; + + for_each_pci_msi_entry(entry, dev) { +- if (entries) +- entries[i++].vector = entry->irq; +- entry->masked = readl(pci_msix_desc_addr(entry) + +- PCI_MSIX_ENTRY_VECTOR_CTRL); +- msix_mask_irq(entry, 1); ++ if (entries) { ++ entries->vector = entry->irq; ++ entries++; ++ } + } + } + ++static void msix_mask_all(void __iomem *base, int tsize) ++{ ++ u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT; ++ int i; ++ ++ for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE) ++ writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL); ++} ++ + /** + * msix_capability_init - configure device's MSI-X capability + * @dev: pointer to the pci_dev data structure of MSI-X device function +@@ -762,22 +795,34 @@ static void msix_program_entries(struct pci_dev *dev, + static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, + int nvec, bool affinity) + { +- int ret; +- u16 control; + void __iomem *base; ++ int ret, tsize; ++ u16 control; + +- /* Ensure MSI-X is disabled while it is set up */ +- pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); ++ /* ++ * Some devices require MSI-X to be enabled before the MSI-X ++ * registers can be accessed. Mask all the vectors to prevent ++ * interrupts coming in before they're fully set up. ++ */ ++ pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL | ++ PCI_MSIX_FLAGS_ENABLE); + + pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); + /* Request & Map MSI-X table region */ ++ tsize = msix_table_size(control); ++ base = msix_map_region(dev, tsize); + base = msix_map_region(dev, msix_table_size(control)); +- if (!base) +- return -ENOMEM; ++ if (!base) { ++ ret = -ENOMEM; ++ goto out_disable; ++ } ++ ++ /* Ensure that all table entries are masked. */ ++ msix_mask_all(base, tsize); + + ret = msix_setup_entries(dev, base, entries, nvec, affinity); + if (ret) +- return ret; ++ goto out_disable; + + ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); + if (ret) +@@ -788,15 +833,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, + if (ret) + goto out_free; + +- /* +- * Some devices require MSI-X to be enabled before we can touch the +- * MSI-X registers. We need to mask all the vectors to prevent +- * interrupts coming in before they're fully set up. 
+- */ +- pci_msix_clear_and_set_ctrl(dev, 0, +- PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE); +- +- msix_program_entries(dev, entries); ++ msix_update_entries(dev, entries); + + ret = populate_msi_sysfs(dev); + if (ret) +@@ -830,6 +867,9 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, + out_free: + free_msi_irqs(dev); + ++out_disable: ++ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); ++ + return ret; + } + +@@ -917,8 +957,7 @@ void pci_msi_shutdown(struct pci_dev *dev) + + /* Return the device with MSI unmasked as initial states */ + mask = msi_mask(desc->msi_attrib.multi_cap); +- /* Keep cached state to be restored */ +- __pci_msi_desc_mask_irq(desc, mask, ~mask); ++ msi_mask_irq(desc, mask, 0); + + /* Restore dev->irq to its default pin-assertion irq */ + dev->irq = desc->msi_attrib.default_irq; +@@ -1019,10 +1058,8 @@ void pci_msix_shutdown(struct pci_dev *dev) + return; + + /* Return the device with MSI-X masked as initial states */ +- for_each_pci_msi_entry(entry, dev) { +- /* Keep cached states to be restored */ ++ for_each_pci_msi_entry(entry, dev) + __pci_msix_desc_mask_irq(entry, 1); +- } + + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); + pci_intx_for_msi(dev, 1); +diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c +index 51357377efbc..ac169cf0fa02 100644 +--- a/drivers/pci/pci-label.c ++++ b/drivers/pci/pci-label.c +@@ -157,7 +157,7 @@ static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf) + len = utf16s_to_utf8s((const wchar_t *)obj->buffer.pointer, + obj->buffer.length, + UTF16_LITTLE_ENDIAN, +- buf, PAGE_SIZE); ++ buf, PAGE_SIZE - 1); + buf[len] = '\n'; + } + +diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c +index 02b96113ea87..51296c86fb7a 100644 +--- a/drivers/pci/pcie/aspm.c ++++ b/drivers/pci/pcie/aspm.c +@@ -410,16 +410,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) + + /* Setup initial capable state. Will be updated later */ + link->aspm_capable = link->aspm_support; +- /* +- * If the downstream component has pci bridge function, don't +- * do ASPM for now. +- */ +- list_for_each_entry(child, &linkbus->devices, bus_list) { +- if (pci_pcie_type(child) == PCI_EXP_TYPE_PCI_BRIDGE) { +- link->aspm_disable = ASPM_STATE_ALL; +- break; +- } +- } + + /* Get and check endpoint acceptable latencies */ + list_for_each_entry(child, &linkbus->devices, bus_list) { +@@ -843,6 +833,7 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp) + cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]); + else + cnt += sprintf(buffer + cnt, "%s ", policy_str[i]); ++ cnt += sprintf(buffer + cnt, "\n"); + return cnt; + } + +diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c +index 3008bba360f3..ec6f6213960b 100644 +--- a/drivers/pci/pcie/ptm.c ++++ b/drivers/pci/pcie/ptm.c +@@ -47,10 +47,6 @@ void pci_ptm_init(struct pci_dev *dev) + if (!pci_is_pcie(dev)) + return; + +- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM); +- if (!pos) +- return; +- + /* + * Enable PTM only on interior devices (root ports, switch ports, + * etc.) on the assumption that it causes no link traffic until an +@@ -60,6 +56,23 @@ void pci_ptm_init(struct pci_dev *dev) + pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)) + return; + ++ /* ++ * Switch Downstream Ports are not permitted to have a PTM ++ * capability; their PTM behavior is controlled by the Upstream ++ * Port (PCIe r5.0, sec 7.9.16). 
++ */ ++ ups = pci_upstream_bridge(dev); ++ if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM && ++ ups && ups->ptm_enabled) { ++ dev->ptm_granularity = ups->ptm_granularity; ++ dev->ptm_enabled = 1; ++ return; ++ } ++ ++ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM); ++ if (!pos) ++ return; ++ + pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap); + local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8; + +@@ -69,7 +82,6 @@ void pci_ptm_init(struct pci_dev *dev) + * the spec recommendation (PCIe r3.1, sec 7.32.3), select the + * furthest upstream Time Source as the PTM Root. + */ +- ups = pci_upstream_bridge(dev); + if (ups && ups->ptm_enabled) { + ctrl = PCI_PTM_CTRL_ENABLE; + if (ups->ptm_granularity == 0) +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index 16611cf3aba4..ddf5ba63b195 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -1251,7 +1251,7 @@ int pci_setup_device(struct pci_dev *dev) + /* device class may be changed after fixup */ + class = dev->class >> 8; + +- if (dev->non_compliant_bars) { ++ if (dev->non_compliant_bars && !dev->mmio_always_on) { + pci_read_config_word(dev, PCI_COMMAND, &cmd); + if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { + dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n"); +@@ -1360,13 +1360,33 @@ static void pci_configure_mps(struct pci_dev *dev) + struct pci_dev *bridge = pci_upstream_bridge(dev); + int mps, p_mps, rc; + +- if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge)) ++ if (!pci_is_pcie(dev)) + return; + + /* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */ + if (dev->is_virtfn) + return; + ++ /* ++ * For Root Complex Integrated Endpoints, program the maximum ++ * supported value unless limited by the PCIE_BUS_PEER2PEER case. ++ */ ++ if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) { ++ if (pcie_bus_config == PCIE_BUS_PEER2PEER) ++ mps = 128; ++ else ++ mps = 128 << dev->pcie_mpss; ++ rc = pcie_set_mps(dev, mps); ++ if (rc) { ++ pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n", ++ mps); ++ } ++ return; ++ } ++ ++ if (!bridge || !pci_is_pcie(bridge)) ++ return; ++ + mps = pcie_get_mps(dev); + p_mps = pcie_get_mps(bridge); + +@@ -1674,6 +1694,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) + pci_set_of_node(dev); + + if (pci_setup_device(dev)) { ++ pci_release_of_node(dev); + pci_bus_put(dev->bus); + kfree(dev); + return NULL; +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index 496296bc3581..096ba11ac105 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -2046,6 +2046,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s); + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s); + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s); + ++static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev) ++{ ++ pci_info(dev, "Disabling ASPM L0s/L1\n"); ++ pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); ++} ++ ++/* ++ * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the ++ * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected; ++ * disable both L0s and L1 for now to be safe. 
++ */ ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1); ++ + /* + * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain + * Link bit cleared after starting the link retrain process to allow this +@@ -3332,6 +3345,18 @@ static void quirk_no_bus_reset(struct pci_dev *dev) + dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; + } + ++/* ++ * Some NVIDIA GPU devices do not work with bus reset, SBR needs to be ++ * prevented for those affected devices. ++ */ ++static void quirk_nvidia_no_bus_reset(struct pci_dev *dev) ++{ ++ if ((dev->device & 0xffc0) == 0x2340) ++ quirk_no_bus_reset(dev); ++} ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, ++ quirk_nvidia_no_bus_reset); ++ + /* + * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset. + * The device will throw a Link Down error on AER-capable systems and +@@ -3345,6 +3370,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset); + ++/* ++ * Some TI KeyStone C667X devices do not support bus/hot reset. The PCIESS ++ * automatically disables LTSSM when Secondary Bus Reset is received and ++ * the device stops working. Prevent bus reset for these devices. With ++ * this change, the device can be assigned to VMs with VFIO, but it will ++ * leak state between VMs. Reference ++ * https://e2e.ti.com/support/processors/f/791/t/954382 ++ */ ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset); ++ + static void quirk_no_pm_reset(struct pci_dev *dev) + { + /* +@@ -3898,6 +3933,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182, + /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */ + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0, + quirk_dma_func1_alias); ++/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c135 */ ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9215, ++ quirk_dma_func1_alias); + /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */ + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220, + quirk_dma_func1_alias); +diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c +index 429d34c348b9..14d84d5a0f58 100644 +--- a/drivers/pci/slot.c ++++ b/drivers/pci/slot.c +@@ -303,16 +303,19 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, + slot_name = make_slot_name(name); + if (!slot_name) { + err = -ENOMEM; ++ kfree(slot); + goto err; + } + ++ INIT_LIST_HEAD(&slot->list); ++ list_add(&slot->list, &parent->slots); ++ + err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, + "%s", slot_name); +- if (err) ++ if (err) { ++ kobject_put(&slot->kobj); + goto err; +- +- INIT_LIST_HEAD(&slot->list); +- list_add(&slot->list, &parent->slots); ++ } + + down_read(&pci_bus_sem); + list_for_each_entry(dev, &parent->devices, bus_list) +@@ -328,7 +331,6 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, + mutex_unlock(&pci_slot_mutex); + return slot; + err: +- kfree(slot); + slot = ERR_PTR(err); + goto out; + } +diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c +index b91c4da68365..7958250856d3 100644 +--- a/drivers/pci/syscall.c ++++ b/drivers/pci/syscall.c +@@ -21,7 +21,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn, + u16 word; + u32 dword; + long err; +- long cfg_ret; ++ int cfg_ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; +@@ 
-47,7 +47,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn, + } + + err = -EIO; +- if (cfg_ret != PCIBIOS_SUCCESSFUL) ++ if (cfg_ret) + goto error; + + switch (len) { +@@ -105,7 +105,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn, + if (err) + break; + err = pci_user_write_config_byte(dev, off, byte); +- if (err != PCIBIOS_SUCCESSFUL) ++ if (err) + err = -EIO; + break; + +@@ -114,7 +114,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn, + if (err) + break; + err = pci_user_write_config_word(dev, off, word); +- if (err != PCIBIOS_SUCCESSFUL) ++ if (err) + err = -EIO; + break; + +@@ -123,7 +123,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn, + if (err) + break; + err = pci_user_write_config_dword(dev, off, dword); +- if (err != PCIBIOS_SUCCESSFUL) ++ if (err) + err = -EIO; + break; + +diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c +index aae7e6df99cd..ba13e3c3d6b8 100644 +--- a/drivers/pcmcia/i82092.c ++++ b/drivers/pcmcia/i82092.c +@@ -105,6 +105,7 @@ static int i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *i + for (i = 0;idev); + generic_phy = devm_phy_create(phy->dev, NULL, &ops); +- if (IS_ERR(generic_phy)) +- return PTR_ERR(generic_phy); ++ if (IS_ERR(generic_phy)) { ++ error = PTR_ERR(generic_phy); ++ goto clk_unprepare; ++ } + + phy_set_drvdata(generic_phy, phy); + + phy_provider = devm_of_phy_provider_register(phy->dev, + of_phy_simple_xlate); +- if (IS_ERR(phy_provider)) +- return PTR_ERR(phy_provider); ++ if (IS_ERR(phy_provider)) { ++ error = PTR_ERR(phy_provider); ++ goto clk_unprepare; ++ } + + usb_add_phy_dev(&phy->phy); + + return 0; ++ ++clk_unprepare: ++ pm_runtime_disable(phy->dev); ++ clk_unprepare(phy->refclk); ++ return error; + } + + static int dm816x_usb_phy_remove(struct platform_device *pdev) +diff --git a/drivers/phy/phy-s5pv210-usb2.c b/drivers/phy/phy-s5pv210-usb2.c +index 004d320767e4..bb36cfd4e3e9 100644 +--- a/drivers/phy/phy-s5pv210-usb2.c ++++ b/drivers/phy/phy-s5pv210-usb2.c +@@ -142,6 +142,10 @@ static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on) + udelay(10); + rst &= ~rstbits; + writel(rst, drv->reg_phy + S5PV210_UPHYRST); ++ /* The following delay is necessary for the reset sequence to be ++ * completed ++ */ ++ udelay(80); + } else { + pwr = readl(drv->reg_phy + S5PV210_UPHYPWR); + pwr |= phypwr; +diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c +index ddb530ee2255..9d57695e1f21 100644 +--- a/drivers/phy/phy-twl4030-usb.c ++++ b/drivers/phy/phy-twl4030-usb.c +@@ -798,7 +798,7 @@ static int twl4030_usb_remove(struct platform_device *pdev) + + usb_remove_phy(&twl->phy); + pm_runtime_get_sync(twl->dev); +- cancel_delayed_work(&twl->id_workaround_work); ++ cancel_delayed_work_sync(&twl->id_workaround_work); + device_remove_file(twl->dev, &dev_attr_vbus); + + /* set transceiver mode to power on defaults */ +diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c +index bd0e65900216..0156134dd022 100644 +--- a/drivers/phy/tegra/xusb.c ++++ b/drivers/phy/tegra/xusb.c +@@ -916,6 +916,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev) + reset: + reset_control_assert(padctl->rst); + remove: ++ platform_set_drvdata(pdev, NULL); + soc->ops->remove(padctl); + return err; + } +diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c +index 49aeba912531..23d2f0ba12db 100644 +--- 
a/drivers/pinctrl/aspeed/pinctrl-aspeed.c ++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c +@@ -387,13 +387,14 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, + static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr) + { + /* +- * The signal type is GPIO if the signal name has "GPIO" as a prefix. ++ * The signal type is GPIO if the signal name has "GPI" as a prefix. + * strncmp (rather than strcmp) is used to implement the prefix + * requirement. + * +- * expr->signal might look like "GPIOT3" in the GPIO case. ++ * expr->signal might look like "GPIOB1" in the GPIO case. ++ * expr->signal might look like "GPIT0" in the GPI case. + */ +- return strncmp(expr->signal, "GPIO", 4) == 0; ++ return strncmp(expr->signal, "GPI", 3) == 0; + } + + static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs) +diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c +index 54dad89fc9bf..d32aedfc6dd0 100644 +--- a/drivers/pinctrl/devicetree.c ++++ b/drivers/pinctrl/devicetree.c +@@ -40,6 +40,13 @@ struct pinctrl_dt_map { + static void dt_free_map(struct pinctrl_dev *pctldev, + struct pinctrl_map *map, unsigned num_maps) + { ++ int i; ++ ++ for (i = 0; i < num_maps; ++i) { ++ kfree_const(map[i].dev_name); ++ map[i].dev_name = NULL; ++ } ++ + if (pctldev) { + const struct pinctrl_ops *ops = pctldev->desc->pctlops; + ops->dt_free_map(pctldev, map, num_maps); +@@ -73,7 +80,13 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename, + + /* Initialize common mapping table entry fields */ + for (i = 0; i < num_maps; i++) { +- map[i].dev_name = dev_name(p->dev); ++ const char *devname; ++ ++ devname = kstrdup_const(dev_name(p->dev), GFP_KERNEL); ++ if (!devname) ++ goto err_free_map; ++ ++ map[i].dev_name = devname; + map[i].name = statename; + if (pctldev) + map[i].ctrl_dev_name = dev_name(pctldev->dev); +@@ -81,11 +94,8 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename, + + /* Remember the converted mapping table entries */ + dt_map = kzalloc(sizeof(*dt_map), GFP_KERNEL); +- if (!dt_map) { +- dev_err(p->dev, "failed to alloc struct pinctrl_dt_map\n"); +- dt_free_map(pctldev, map, num_maps); +- return -ENOMEM; +- } ++ if (!dt_map) ++ goto err_free_map; + + dt_map->pctldev = pctldev; + dt_map->map = map; +@@ -93,6 +103,10 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename, + list_add_tail(&dt_map->node, &p->dt_maps); + + return pinctrl_register_map(map, num_maps, false); ++ ++err_free_map: ++ dt_free_map(pctldev, map, num_maps); ++ return -ENOMEM; + } + + struct pinctrl_dev *of_pinctrl_get(struct device_node *np) +diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c +index e2cca91fd266..68108c4c3969 100644 +--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c ++++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c +@@ -642,7 +642,6 @@ int imx1_pinctrl_core_probe(struct platform_device *pdev, + + ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); + if (ret) { +- pinctrl_unregister(ipctl->pctl); + dev_err(&pdev->dev, "Failed to populate subdevices\n"); + return ret; + } +diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c +index 1e945aa77734..73d3c4122eb8 100644 +--- a/drivers/pinctrl/intel/pinctrl-baytrail.c ++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c +@@ -1017,6 +1017,21 @@ static void byt_gpio_disable_free(struct pinctrl_dev *pctl_dev, + 
pm_runtime_put(&vg->pdev->dev); + } + ++static void byt_gpio_direct_irq_check(struct byt_gpio *vg, ++ unsigned int offset) ++{ ++ void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); ++ ++ /* ++ * Before making any direction modifications, do a check if gpio is set ++ * for direct IRQ. On Bay Trail, setting GPIO to output does not make ++ * sense, so let's at least inform the caller before they shoot ++ * themselves in the foot. ++ */ ++ if (readl(conf_reg) & BYT_DIRECT_IRQ_EN) ++ dev_info_once(&vg->pdev->dev, "Potential Error: Setting GPIO with direct_irq_en to output"); ++} ++ + static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, + struct pinctrl_gpio_range *range, + unsigned int offset, +@@ -1024,7 +1039,6 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, + { + struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev); + void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); +- void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); + unsigned long flags; + u32 value; + +@@ -1035,14 +1049,8 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, + if (input) + value |= BYT_OUTPUT_EN; + else +- /* +- * Before making any direction modifications, do a check if gpio +- * is set for direct IRQ. On baytrail, setting GPIO to output +- * does not make sense, so let's at least warn the caller before +- * they shoot themselves in the foot. +- */ +- WARN(readl(conf_reg) & BYT_DIRECT_IRQ_EN, +- "Potential Error: Setting GPIO with direct_irq_en to output"); ++ byt_gpio_direct_irq_check(vg, offset); ++ + writel(value, val_reg); + + raw_spin_unlock_irqrestore(&byt_lock, flags); +@@ -1258,7 +1266,6 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, + break; + case PIN_CONFIG_INPUT_DEBOUNCE: + debounce = readl(db_reg); +- debounce &= ~BYT_DEBOUNCE_PULSE_MASK; + + if (arg) + conf |= BYT_DEBOUNCE_EN; +@@ -1267,24 +1274,31 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, + + switch (arg) { + case 375: ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK; + debounce |= BYT_DEBOUNCE_PULSE_375US; + break; + case 750: ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK; + debounce |= BYT_DEBOUNCE_PULSE_750US; + break; + case 1500: ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK; + debounce |= BYT_DEBOUNCE_PULSE_1500US; + break; + case 3000: ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK; + debounce |= BYT_DEBOUNCE_PULSE_3MS; + break; + case 6000: ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK; + debounce |= BYT_DEBOUNCE_PULSE_6MS; + break; + case 12000: ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK; + debounce |= BYT_DEBOUNCE_PULSE_12MS; + break; + case 24000: ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK; + debounce |= BYT_DEBOUNCE_PULSE_24MS; + break; + default: +@@ -1382,19 +1396,50 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) + + static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) + { +- return pinctrl_gpio_direction_input(chip->base + offset); ++ struct byt_gpio *vg = gpiochip_get_data(chip); ++ void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); ++ unsigned long flags; ++ u32 reg; ++ ++ raw_spin_lock_irqsave(&byt_lock, flags); ++ ++ reg = readl(val_reg); ++ reg &= ~BYT_DIR_MASK; ++ reg |= BYT_OUTPUT_EN; ++ writel(reg, val_reg); ++ ++ raw_spin_unlock_irqrestore(&byt_lock, flags); ++ return 0; + } + ++/* ++ * Note despite the temptation this MUST NOT be converted into a call to ++ * pinctrl_gpio_direction_output() + byt_gpio_set() that does not work this ++ * MUST be done as a single BYT_VAL_REG 
register write. ++ * See the commit message of the commit adding this comment for details. ++ */ + static int byt_gpio_direction_output(struct gpio_chip *chip, + unsigned int offset, int value) + { +- int ret = pinctrl_gpio_direction_output(chip->base + offset); ++ struct byt_gpio *vg = gpiochip_get_data(chip); ++ void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); ++ unsigned long flags; ++ u32 reg; + +- if (ret) +- return ret; ++ raw_spin_lock_irqsave(&byt_lock, flags); + +- byt_gpio_set(chip, offset, value); ++ byt_gpio_direct_irq_check(vg, offset); + ++ reg = readl(val_reg); ++ reg &= ~BYT_DIR_MASK; ++ if (value) ++ reg |= BYT_LEVEL; ++ else ++ reg &= ~BYT_LEVEL; ++ ++ writel(reg, val_reg); ++ ++ raw_spin_unlock_irqrestore(&byt_lock, flags); + return 0; + } + +diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c +index 04d6fd2be08c..8d0cff3146b8 100644 +--- a/drivers/pinctrl/intel/pinctrl-merrifield.c ++++ b/drivers/pinctrl/intel/pinctrl-merrifield.c +@@ -731,6 +731,10 @@ static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin, + mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK; + bits |= BUFCFG_PU_EN; + ++ /* Set default strength value in case none is given */ ++ if (arg == 1) ++ arg = 20000; ++ + switch (arg) { + case 50000: + bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT; +@@ -751,6 +755,10 @@ static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin, + mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK; + bits |= BUFCFG_PD_EN; + ++ /* Set default strength value in case none is given */ ++ if (arg == 1) ++ arg = 20000; ++ + switch (arg) { + case 50000: + bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT; +diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c +index a5b7bd3c9bac..c762caba551f 100644 +--- a/drivers/pinctrl/pinctrl-amd.c ++++ b/drivers/pinctrl/pinctrl-amd.c +@@ -140,7 +140,7 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset, + pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF); + pin_reg &= ~BIT(DB_TMR_LARGE_OFF); + } else if (debounce < 250000) { +- time = debounce / 15600; ++ time = debounce / 15625; + pin_reg |= time & DB_TMR_OUT_MASK; + pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF); + pin_reg |= BIT(DB_TMR_LARGE_OFF); +@@ -150,14 +150,14 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset, + pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF); + pin_reg |= BIT(DB_TMR_LARGE_OFF); + } else { +- pin_reg &= ~DB_CNTRl_MASK; ++ pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF); + ret = -EINVAL; + } + } else { + pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF); + pin_reg &= ~BIT(DB_TMR_LARGE_OFF); + pin_reg &= ~DB_TMR_OUT_MASK; +- pin_reg &= ~DB_CNTRl_MASK; ++ pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF); + } + writel(pin_reg, gpio_dev->base + offset * 4); + spin_unlock_irqrestore(&gpio_dev->lock, flags); +@@ -404,7 +404,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) + pin_reg &= ~BIT(LEVEL_TRIG_OFF); + pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); + pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF; +- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF; + irq_set_handler_locked(d, handle_edge_irq); + break; + +@@ -412,7 +411,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) + pin_reg &= ~BIT(LEVEL_TRIG_OFF); + pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); + pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF; +- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF; + irq_set_handler_locked(d, handle_edge_irq); + 
break; + +@@ -420,7 +418,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) + pin_reg &= ~BIT(LEVEL_TRIG_OFF); + pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); + pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF; +- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF; + irq_set_handler_locked(d, handle_edge_irq); + break; + +@@ -428,8 +425,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) + pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF; + pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); + pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF; +- pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF); +- pin_reg |= DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF; + irq_set_handler_locked(d, handle_level_irq); + break; + +@@ -437,8 +432,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) + pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF; + pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); + pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF; +- pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF); +- pin_reg |= DB_TYPE_PRESERVE_HIGH_GLITCH << DB_CNTRL_OFF; + irq_set_handler_locked(d, handle_level_irq); + break; + +@@ -903,6 +896,7 @@ static int amd_gpio_remove(struct platform_device *pdev) + static const struct acpi_device_id amd_gpio_acpi_match[] = { + { "AMD0030", 0 }, + { "AMDI0030", 0}, ++ { "AMDI0031", 0}, + { }, + }; + MODULE_DEVICE_TABLE(acpi, amd_gpio_acpi_match); +diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h +index e8bbb20779d0..83597e1d6dcd 100644 +--- a/drivers/pinctrl/pinctrl-amd.h ++++ b/drivers/pinctrl/pinctrl-amd.h +@@ -250,7 +250,7 @@ static const struct amd_pingroup kerncz_groups[] = { + { + .name = "uart0", + .pins = uart0_pins, +- .npins = 9, ++ .npins = 5, + }, + { + .name = "uart1", +diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c +index 0b0fc2eb48e0..adcdb0585d39 100644 +--- a/drivers/pinctrl/pinctrl-falcon.c ++++ b/drivers/pinctrl/pinctrl-falcon.c +@@ -438,24 +438,28 @@ static int pinctrl_falcon_probe(struct platform_device *pdev) + + /* load and remap the pad resources of the different banks */ + for_each_compatible_node(np, NULL, "lantiq,pad-falcon") { +- struct platform_device *ppdev = of_find_device_by_node(np); + const __be32 *bank = of_get_property(np, "lantiq,bank", NULL); + struct resource res; ++ struct platform_device *ppdev; + u32 avail; + int pins; + + if (!of_device_is_available(np)) + continue; + +- if (!ppdev) { +- dev_err(&pdev->dev, "failed to find pad pdev\n"); +- continue; +- } + if (!bank || *bank >= PORTS) + continue; + if (of_address_to_resource(np, 0, &res)) + continue; ++ ++ ppdev = of_find_device_by_node(np); ++ if (!ppdev) { ++ dev_err(&pdev->dev, "failed to find pad pdev\n"); ++ continue; ++ } ++ + falcon_info.clk[*bank] = clk_get(&ppdev->dev, NULL); ++ put_device(&ppdev->dev); + if (IS_ERR(falcon_info.clk[*bank])) { + dev_err(&ppdev->dev, "failed to get clock\n"); + return PTR_ERR(falcon_info.clk[*bank]); +diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c +index 417cd3bd7e0c..17827a88b85e 100644 +--- a/drivers/pinctrl/pinctrl-rockchip.c ++++ b/drivers/pinctrl/pinctrl-rockchip.c +@@ -1815,7 +1815,9 @@ static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned offset) + if (!bank->domain) + return -ENXIO; + ++ clk_enable(bank->clk); + virq = irq_create_mapping(bank->domain, offset); ++ clk_disable(bank->clk); + + return (virq) ? 
: -ENXIO; + } +@@ -2365,12 +2367,15 @@ static int __maybe_unused rockchip_pinctrl_suspend(struct device *dev) + static int __maybe_unused rockchip_pinctrl_resume(struct device *dev) + { + struct rockchip_pinctrl *info = dev_get_drvdata(dev); +- int ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX, +- rk3288_grf_gpio6c_iomux | +- GPIO6C6_SEL_WRITE_ENABLE); ++ int ret; + +- if (ret) +- return ret; ++ if (info->ctrl->type == RK3288) { ++ ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX, ++ rk3288_grf_gpio6c_iomux | ++ GPIO6C6_SEL_WRITE_ENABLE); ++ if (ret) ++ return ret; ++ } + + return pinctrl_force_default(info->pctl_dev); + } +diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c +index bfdf720db270..8769a579ecb1 100644 +--- a/drivers/pinctrl/pinctrl-single.c ++++ b/drivers/pinctrl/pinctrl-single.c +@@ -1078,7 +1078,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np, + + /* If pinconf isn't supported, don't parse properties in below. */ + if (!PCS_HAS_PINCONF) +- return 0; ++ return -ENOTSUPP; + + /* cacluate how much properties are supported in current node */ + for (i = 0; i < ARRAY_SIZE(prop2); i++) { +@@ -1090,7 +1090,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np, + nconfs++; + } + if (!nconfs) +- return 0; ++ return -ENOTSUPP; + + func->conf = devm_kzalloc(pcs->dev, + sizeof(struct pcs_conf_vals) * nconfs, +@@ -1203,9 +1203,12 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, + + if (PCS_HAS_PINCONF) { + res = pcs_parse_pinconf(pcs, np, function, map); +- if (res) ++ if (res == 0) ++ *num_maps = 2; ++ else if (res == -ENOTSUPP) ++ *num_maps = 1; ++ else + goto free_pingroups; +- *num_maps = 2; + } else { + *num_maps = 1; + } +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c +index cef505bfaca2..7e9d5cdc9d88 100644 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c +@@ -1,6 +1,6 @@ + /* + * Copyright (c) 2013, Sony Mobile Communications AB. +- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -967,7 +967,7 @@ static void gpio_muxed_to_pdc(struct irq_domain *pdc_domain, struct irq_data *d) + struct msm_pdc_mux_output *pdc_out = + &pctrl->soc->pdc_mux_out[j]; + +- if (pdc_out->mux == mux) ++ if (pdc_out->mux == gpio) + break; + if (pdc_out->mux) + continue; +diff --git a/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c b/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c +index 59f89d61e393..17ae16dabf5f 100644 +--- a/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c ++++ b/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2017-2018, 2020, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2017-2018, 2020-2021, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -1153,7 +1153,7 @@ static struct msm_gpio_mux_input sdxpoorwills_mux_in[] = { + {28, 54}, + {29, 55}, + {30, 56, 1}, +- {31, 57}, ++ {31, 57, 1}, + {32, 60}, + {33, 61}, + {34, 64}, +@@ -1162,7 +1162,7 @@ static struct msm_gpio_mux_input sdxpoorwills_mux_in[] = { + {37, 71}, + {38, 75}, + {39, 76}, +- {40, 78}, ++ {40, 78, 1}, + {41, 79}, + {42, 80}, + {43, 82}, +diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c +index e8aee6d88a40..6a23136bc813 100644 +--- a/drivers/pinctrl/samsung/pinctrl-exynos.c ++++ b/drivers/pinctrl/samsung/pinctrl-exynos.c +@@ -289,6 +289,7 @@ struct exynos_eint_gpio_save { + u32 eint_con; + u32 eint_fltcon0; + u32 eint_fltcon1; ++ u32 eint_mask; + }; + + /* +@@ -585,10 +586,13 @@ static void exynos_pinctrl_suspend_bank( + + 2 * bank->eint_offset); + save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4); ++ save->eint_mask = readl(regs + bank->irq_chip->eint_mask ++ + bank->eint_offset); + + pr_debug("%s: save con %#010x\n", bank->name, save->eint_con); + pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0); + pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1); ++ pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask); + } + + static void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata) +@@ -617,6 +621,9 @@ static void exynos_pinctrl_resume_bank( + pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name, + readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4), save->eint_fltcon1); ++ pr_debug("%s: mask %#010x => %#010x\n", bank->name, ++ readl(regs + bank->irq_chip->eint_mask ++ + bank->eint_offset), save->eint_mask); + + writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset); +@@ -624,6 +631,8 @@ static void exynos_pinctrl_resume_bank( + + 2 * bank->eint_offset); + writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4); ++ writel(save->eint_mask, regs + bank->irq_chip->eint_mask ++ + bank->eint_offset); + } + + static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata) +diff --git a/drivers/platform/msm/ep_pcie/ep_pcie.c b/drivers/platform/msm/ep_pcie/ep_pcie.c +index ecff4c4e32be..e7dfdc5911e8 100644 +--- a/drivers/platform/msm/ep_pcie/ep_pcie.c ++++ b/drivers/platform/msm/ep_pcie/ep_pcie.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2015, 2017-2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -102,6 +102,16 @@ struct ep_pcie_hw *ep_pcie_get_phandle(u32 id) + } + EXPORT_SYMBOL(ep_pcie_get_phandle); + ++int ep_pcie_configure_inactivity_timer(struct ep_pcie_hw *phandle, ++ struct ep_pcie_inactivity *param) ++{ ++ if (WARN_ON(!phandle)) ++ return -EINVAL; ++ ++ return phandle->configure_inactivity_timer(param); ++} ++EXPORT_SYMBOL(ep_pcie_configure_inactivity_timer); ++ + int ep_pcie_register_event(struct ep_pcie_hw *phandle, + struct ep_pcie_register_event *reg) + { +@@ -170,10 +180,11 @@ int ep_pcie_trigger_msi(struct ep_pcie_hw *phandle, u32 idx) + } + EXPORT_SYMBOL(ep_pcie_trigger_msi); + +-int ep_pcie_wakeup_host(struct ep_pcie_hw *phandle) ++int ep_pcie_wakeup_host(struct ep_pcie_hw *phandle, ++ enum ep_pcie_event event) + { + if (phandle) +- return phandle->wakeup_host(); ++ return phandle->wakeup_host(event); + + pr_err("ep_pcie:%s: the input driver handle is NULL.", + __func__); +diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_com.h b/drivers/platform/msm/ep_pcie/ep_pcie_com.h +index 710c180d27ea..29716677598d 100644 +--- a/drivers/platform/msm/ep_pcie/ep_pcie_com.h ++++ b/drivers/platform/msm/ep_pcie/ep_pcie_com.h +@@ -27,15 +27,20 @@ + #define PCIE20_PARF_SYS_CTRL 0x00 + #define PCIE20_PARF_DB_CTRL 0x10 + #define PCIE20_PARF_PM_CTRL 0x20 ++#define PCIE20_PARF_PM_CTRL_REQ_NOT_ENTR_L1_BIT_MASK BIT(5) + #define PCIE20_PARF_PM_STTS 0x24 ++#define PCIE20_PARF_PM_STTS_LINKST_IN_L1SUB BIT(8) + #define PCIE20_PARF_PHY_CTRL 0x40 + #define PCIE20_PARF_PHY_REFCLK 0x4C + #define PCIE20_PARF_CONFIG_BITS 0x50 + #define PCIE20_PARF_TEST_BUS 0xE4 + #define PCIE20_PARF_MHI_BASE_ADDR_LOWER 0x178 + #define PCIE20_PARF_MHI_BASE_ADDR_UPPER 0x17c ++#define PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER 0x180 ++#define PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER_RESET_MASK 0x80000000 + #define PCIE20_PARF_MSI_GEN 0x188 + #define PCIE20_PARF_DEBUG_INT_EN 0x190 ++#define PCIE20_PARF_DEBUG_INT_EN_L1SUB_TIMEOUT_BIT_MASK BIT(0) + #define PCIE20_PARF_MHI_IPA_DBS 0x198 + #define PCIE20_PARF_MHI_IPA_CDB_TARGET_LOWER 0x19C + #define PCIE20_PARF_MHI_IPA_EDB_TARGET_LOWER 0x1A0 +@@ -48,11 +53,27 @@ + #define PCIE20_PARF_INT_ALL_STATUS 0x224 + #define PCIE20_PARF_INT_ALL_CLEAR 0x228 + #define PCIE20_PARF_INT_ALL_MASK 0x22C ++ ++#define PCIE20_PARF_CLKREQ_OVERRIDE 0x2B0 ++#define PCIE20_PARF_CLKREQ_IN_OVERRIDE_STS BIT(5) ++#define PCIE20_PARF_CLKREQ_OE_OVERRIDE_STS BIT(4) ++#define PCIE20_PARF_CLKREQ_IN_OVERRIDE_VAL BIT(3) ++#define PCIE20_PARF_CLKREQ_OE_OVERRIDE_VAL BIT(2) ++#define PCIE20_PARF_CLKREQ_IN_OVERRIDE_ENABLE BIT(1) ++#define PCIE20_PARF_CLKREQ_OE_OVERRIDE_ENABLE BIT(0) ++ + #define PCIE20_PARF_SLV_ADDR_MSB_CTRL 0x2C0 + #define PCIE20_PARF_DBI_BASE_ADDR 0x350 + #define PCIE20_PARF_DBI_BASE_ADDR_HI 0x354 + #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358 + #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE_HI 0x35C ++ ++#define PCIE20_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS 0x4D0 ++#define PCIE20_PARF_L1SS_SLEEP_MHI_FWD_DISABLE BIT(5) ++#define PCIE20_PARF_L1SS_SLEEP_MHI_FWD_ENABLE BIT(4) ++ ++#define PCIE20_PARF_L1SS_SLEEP_MODE_HANDLER_CONFIG 0x4D4 ++ + #define PCIE20_PARF_ATU_BASE_ADDR 0x634 + #define PCIE20_PARF_ATU_BASE_ADDR_HI 0x638 + #define PCIE20_PARF_BUS_DISCONNECT_CTRL 0x648 +@@ -60,10 +81,11 @@ + + #define PCIE20_PARF_DEVICE_TYPE 0x1000 + +-#define PCIE20_ELBI_VERSION 0x00 +-#define PCIE20_ELBI_SYS_CTRL 0x04 +-#define PCIE20_ELBI_SYS_STTS 0x08 +-#define 
PCIE20_ELBI_CS2_ENABLE 0xA4 ++#define PCIE20_ELBI_VERSION 0x00 ++#define PCIE20_ELBI_SYS_CTRL 0x04 ++#define PCIE20_ELBI_SYS_CTRL_CLK_PM_EN_BIT_MASK BIT(7) ++#define PCIE20_ELBI_SYS_STTS 0x08 ++#define PCIE20_ELBI_CS2_ENABLE 0xA4 + + #define PCIE20_DEVICE_ID_VENDOR_ID 0x00 + #define PCIE20_MASK_DEVICE_ID GENMASK(31, 16) +@@ -132,6 +154,16 @@ + #define PCIE20_BHI_VERSION_UPPER 0x204 + #define PCIE20_BHI_INTVEC 0x220 + ++#define PCIE20_PARF_DBG_CNT_PM_LINKST_IN_L2 0xC04 ++#define PCIE20_PARF_DBG_CNT_PM_LINKST_IN_L1SUB 0xC08 ++#define PCIE20_PARF_DBG_CNT_PM_LINKST_IN_L1 0xC0C ++#define PCIE20_PARF_DBG_CNT_PM_LINKST_IN_L0S 0xC10 ++ ++#define PCIE20_PHY_PCS_STATUS4 0x820 ++ ++#define PCIE20_PHY_PCS_START_OFFSET 0x600 ++#define PCIE20_PHY_PCS_END_OFFSET 0xE60 ++ + #define PCIE20_AUX_CLK_FREQ_REG 0xB40 + + #define PERST_TIMEOUT_US_MIN 1000 +@@ -150,9 +182,15 @@ + #define PHY_READY_TIMEOUT_COUNT 30000 + #define MSI_EXIT_L1SS_WAIT 10 + #define MSI_EXIT_L1SS_WAIT_MAX_COUNT 100 ++#define D3HOT_L1SS_WAIT 10 ++#define D3HOT_L1SS_WAIT_MAX_COUNT 1000 + #define XMLH_LINK_UP 0x400 + #define PARF_XMLH_LINK_UP 0x40000000 + ++#define D3HOT_SLEEP_ENTRY_EXIT_MAX_COUNT 100 ++#define D3HOT_SLEEP_ENTRY_EXIT_TIMEOUT_US_MIN 1000 ++#define D3HOT_SLEEP_ENTRY_EXIT_TIMEOUT_US_MAX 1100 ++ + #define MAX_PROP_SIZE 32 + #define MAX_MSG_LEN 80 + #define MAX_NAME_LEN 80 +@@ -338,6 +376,7 @@ struct ep_pcie_dev_t { + bool active_config; + bool aggregated_irq; + bool mhi_a7_irq; ++ bool m2_autonomous; + u32 dbi_base_reg; + u32 slv_space_reg; + u32 phy_status_reg; +@@ -386,6 +425,19 @@ struct ep_pcie_dev_t { + struct work_struct handle_perst_work; + struct work_struct handle_bme_work; + struct work_struct handle_d3cold_work; ++ atomic_t ep_pcie_dev_wake; ++ struct work_struct handle_d3hot_sleep_work; ++ struct work_struct sched_inact_timer; ++ struct workqueue_struct *d3hot_sleep_wq; ++ int clk_ref_count; ++ spinlock_t d3hot_sleep_lock; ++ bool gdsc_disabled; ++ bool in_d3hot_sleep; ++ bool d3hot_sleep_in_progress; ++ bool clkreq_wake_in_progress; ++ bool ep_wake_in_progress; ++ bool inact_timer_enabled; ++ bool l1ss_sleep_mode_enabled; + }; + + extern struct ep_pcie_dev_t ep_pcie_dev; +@@ -426,6 +478,7 @@ extern int ep_pcie_core_register_event(struct ep_pcie_register_event *reg); + extern int ep_pcie_get_debug_mask(void); + extern void ep_pcie_phy_init(struct ep_pcie_dev_t *dev); + extern bool ep_pcie_phy_is_ready(struct ep_pcie_dev_t *dev); ++extern void ep_pcie_phy_update_pcs(struct ep_pcie_dev_t *dev); + extern void ep_pcie_reg_dump(struct ep_pcie_dev_t *dev, u32 sel, bool linkdown); + extern void ep_pcie_debugfs_init(struct ep_pcie_dev_t *ep_dev); + extern void ep_pcie_debugfs_exit(void); +diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_core.c b/drivers/platform/msm/ep_pcie/ep_pcie_core.c +index 10fa11a2519e..d21e396c0351 100644 +--- a/drivers/platform/msm/ep_pcie/ep_pcie_core.c ++++ b/drivers/platform/msm/ep_pcie/ep_pcie_core.c +@@ -37,10 +37,25 @@ + + #include "ep_pcie_com.h" + ++#define PCIE_MHI_STATUS(n) ((n) + 0x148) ++#define TCSR_PERST_SEPARATION_ENABLE 0x270 ++#define PCIE_ISSUE_WAKE 1 ++#define PCIE_MHI_FWD_STATUS_MIN 5000 ++#define PCIE_MHI_FWD_STATUS_MAX 5100 ++#define PCIE_MHI_FWD_COUNT 200 ++#define PCIE_L1SUB_AHB_TIMEOUT_MIN 100 ++#define PCIE_L1SUB_AHB_TIMEOUT_MAX 120 ++/* 1 sec inactivity timer at 19.2 MHz aux clk */ ++#define PCIE_EP_TIMER_US 19200000 ++ + /* debug mask sys interface */ + static int ep_pcie_debug_mask; + static int ep_pcie_debug_keep_resource; + static u32 ep_pcie_bar0_address; ++static 
bool m2_enabled; ++static u32 clkreq_irq; ++static ktime_t d3hot_sleep_sched_time; ++ + module_param_named(debug_mask, ep_pcie_debug_mask, + int, 0664); + module_param_named(debug_keep_resource, ep_pcie_debug_keep_resource, +@@ -58,7 +73,7 @@ static struct ep_pcie_vreg_info_t ep_pcie_vreg_info[EP_PCIE_MAX_VREG] = { + static struct ep_pcie_gpio_info_t ep_pcie_gpio_info[EP_PCIE_MAX_GPIO] = { + {"perst-gpio", 0, 0, 0, 1}, + {"wake-gpio", 0, 1, 0, 1}, +- {"clkreq-gpio", 0, 1, 0, 0}, ++ {"clkreq-gpio", 0, 0, 0, 1}, + {"mdm2apstatus-gpio", 0, 1, 1, 0} + }; + +@@ -106,6 +121,9 @@ static const struct ep_pcie_irq_info_t ep_pcie_irq_info[EP_PCIE_MAX_IRQ] = { + {"int_global", 0} + }; + ++static int ep_pcie_core_config_inact_timer(struct ep_pcie_inactivity *param); ++static int ep_pcie_core_abort_d3hot_sleep(struct ep_pcie_dev_t *dev); ++ + int ep_pcie_get_debug_mask(void) + { + return ep_pcie_debug_mask; +@@ -337,14 +355,28 @@ static int ep_pcie_clk_init(struct ep_pcie_dev_t *dev) + int i, rc = 0; + struct ep_pcie_clk_info_t *info; + +- EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); ++ EP_PCIE_DBG(dev, "PCIe V%d, Clk ref count %d\n", ++ dev->rev, dev->clk_ref_count); + +- rc = regulator_enable(dev->gdsc); ++ if (dev->clk_ref_count) ++ dump_stack(); + +- if (rc) { +- EP_PCIE_ERR(dev, "PCIe V%d: fail to enable GDSC for %s\n", +- dev->rev, dev->pdev->name); +- return rc; ++ if (dev->gdsc_disabled) { ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: Enabling gdsc\n", ++ dev->rev); ++ rc = regulator_enable(dev->gdsc); ++ if (rc) { ++ EP_PCIE_ERR(dev, ++ "PCIe V%d: fail to enable GDSC for %s\n", ++ dev->rev, dev->pdev->name); ++ return rc; ++ } ++ dev->gdsc_disabled = false; ++ } else { ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: gdsc already enabled\n", ++ dev->rev); + } + + if (dev->bus_client) { +@@ -405,6 +437,7 @@ static int ep_pcie_clk_init(struct ep_pcie_dev_t *dev) + } + + regulator_disable(dev->gdsc); ++ dev->gdsc_disabled = true; + } + + return rc; +@@ -415,25 +448,44 @@ static void ep_pcie_clk_deinit(struct ep_pcie_dev_t *dev) + int i; + int rc; + +- EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); ++ EP_PCIE_DBG(dev, "PCIe V%d, Clock ref count %d\n", ++ dev->rev, dev->clk_ref_count); + +- for (i = EP_PCIE_MAX_CLK - 1; i >= 0; i--) ++ if (!dev->in_d3hot_sleep) { ++ for (i = EP_PCIE_MAX_CLK - 1; i >= 0; i--) + if (dev->clk[i].hdl) + clk_disable_unprepare(dev->clk[i].hdl); + +- if (dev->bus_client) { +- rc = msm_bus_scale_client_update_request(dev->bus_client, 0); +- if (rc) +- EP_PCIE_ERR(dev, +- "PCIe V%d: fail to relinquish bus bandwidth:%d.\n", ++ if (dev->bus_client) { ++ rc = msm_bus_scale_client_update_request( ++ dev->bus_client, 0); ++ if (rc) ++ EP_PCIE_ERR(dev, ++ "PCIe V%d: fail to relinquish bus bandwidth:%d\n", + dev->rev, rc); +- else +- EP_PCIE_DBG(dev, ++ else ++ EP_PCIE_DBG(dev, + "PCIe V%d: relinquish bus bandwidth.\n", + dev->rev); ++ } ++ } else { ++ EP_PCIE_DBG(dev, ++ "PCIe V%d, In d3hot sleep, clks already disabled\n", ++ dev->rev); + } + +- regulator_disable(dev->gdsc); ++ if (!dev->perst_deast) { ++ if (dev->gdsc_disabled) { ++ EP_PCIE_ERR(dev, ++ "PCIe V%d: gdsc already disabled\n", ++ dev->rev); ++ } ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: Disabling gdsc regulator\n", ++ dev->rev); ++ regulator_disable(dev->gdsc); ++ dev->gdsc_disabled = true; ++ } + } + + static int ep_pcie_pipe_clk_init(struct ep_pcie_dev_t *dev) +@@ -441,7 +493,8 @@ static int ep_pcie_pipe_clk_init(struct ep_pcie_dev_t *dev) + int i, rc = 0; + struct ep_pcie_clk_info_t *info; + +- EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); ++ EP_PCIE_DBG(dev, 
"PCIe V%d, Clock ref count %d\n", ++ dev->rev, dev->clk_ref_count); + + for (i = 0; i < EP_PCIE_MAX_PIPE_CLK; i++) { + info = &dev->pipeclk[i]; +@@ -493,12 +546,25 @@ static void ep_pcie_pipe_clk_deinit(struct ep_pcie_dev_t *dev) + { + int i; + +- EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); ++ EP_PCIE_DBG(dev, "PCIe V%d, Clock ref count %d\n", ++ dev->rev, dev->clk_ref_count); ++ ++ if (!dev->in_d3hot_sleep) { ++ ++ if (dev->clk_ref_count != 1) ++ dump_stack(); + +- for (i = 0; i < EP_PCIE_MAX_PIPE_CLK; i++) ++ for (i = 0; i < EP_PCIE_MAX_PIPE_CLK; i++) + if (dev->pipeclk[i].hdl) + clk_disable_unprepare( +- dev->pipeclk[i].hdl); ++ dev->pipeclk[i].hdl); ++ ++ dev->clk_ref_count--; ++ } else { ++ EP_PCIE_DBG(dev, ++ "PCIe V%d, In d3hot sleep, pipe_clk already disabled\n", ++ dev->rev); ++ } + } + + static void ep_pcie_bar_init(struct ep_pcie_dev_t *dev) +@@ -734,6 +800,17 @@ static void ep_pcie_core_init(struct ep_pcie_dev_t *dev, bool configured) + ep_pcie_write_mask(dev->dm_core + PCIE20_L1SUB_CAPABILITY, 0, + 0x1f); + ++ /* ++ * CLK_PM_EN must be set to be able to enable clock power ++ * management capability in the link capability register ++ */ ++ ep_pcie_write_mask(dev->elbi + PCIE20_ELBI_SYS_CTRL, 0, ++ PCIE20_ELBI_SYS_CTRL_CLK_PM_EN_BIT_MASK); ++ ++ EP_PCIE_DBG(dev, "PCIe V%d: PCIE20_ELBI_SYS_CTRL: 0x%x\n", ++ dev->rev, readl_relaxed(dev->elbi + ++ PCIE20_ELBI_SYS_CTRL)); ++ + /* Enable Clock Power Management */ + ep_pcie_write_reg_field(dev->dm_core, PCIE20_LINK_CAPABILITIES, + PCIE20_MASK_CLOCK_POWER_MAN, 0x1); +@@ -748,12 +825,13 @@ static void ep_pcie_core_init(struct ep_pcie_dev_t *dev, bool configured) + PCIE20_MASK_ACK_N_FTS, 0x80); + + EP_PCIE_DBG(dev, +- "After program: CLASS_CODE_REVISION_ID:0x%x; HDR_TYPE:0x%x; L1SUB_CAPABILITY:0x%x; PARF_SYS_CTRL:0x%x\n", ++ "After program: CLASS_CODE_REVISION_ID:0x%x; HDR_TYPE:0x%x; L1SUB_CAPABILITY:0x%x; PARF_SYS_CTRL:0x%x; LINK_CAP:0x%x\n", + readl_relaxed(dev->dm_core + + PCIE20_CLASS_CODE_REVISION_ID), + readl_relaxed(dev->dm_core + PCIE20_BIST_HDR_TYPE), + readl_relaxed(dev->dm_core + PCIE20_L1SUB_CAPABILITY), +- readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL)); ++ readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL), ++ readl_relaxed(dev->dm_core + PCIE20_LINK_CAPABILITIES)); + + /* Configure BARs */ + ep_pcie_bar_init(dev); +@@ -767,6 +845,7 @@ static void ep_pcie_core_init(struct ep_pcie_dev_t *dev, bool configured) + BIT(EP_PCIE_INT_EVT_BME) | + BIT(EP_PCIE_INT_EVT_PM_TURNOFF) | + BIT(EP_PCIE_INT_EVT_DSTATE_CHANGE) | ++ BIT(EP_PCIE_INT_EVT_L1SUB_TIMEOUT) | + BIT(EP_PCIE_INT_EVT_LINK_UP)); + if (!dev->mhi_a7_irq) + ep_pcie_write_mask(dev->parf + +@@ -780,7 +859,6 @@ static void ep_pcie_core_init(struct ep_pcie_dev_t *dev, bool configured) + + if (dev->active_config) { + ep_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x14); +- + EP_PCIE_DBG2(dev, "PCIe V%d: Enable L1.\n", dev->rev); + ep_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0); + } +@@ -1304,6 +1382,293 @@ static void ep_pcie_enumeration_complete(struct ep_pcie_dev_t *dev) + spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags); + } + ++static bool ep_pcie_core_get_clkreq_status(void) ++{ ++ struct ep_pcie_dev_t *dev = &ep_pcie_dev; ++ ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: PCIe get clkreq status\n", dev->rev); ++ ++ return ((readl_relaxed(dev->parf + ++ PCIE20_PARF_CLKREQ_OVERRIDE) & BIT(5)) ? 
false : true); ++} ++ ++static int ep_pcie_core_clkreq_override(bool config) ++{ ++ struct ep_pcie_dev_t *dev = &ep_pcie_dev; ++ ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: PCIe clockreq override config:%d\n", ++ dev->rev, config); ++ ++ if (config) { ++ ep_pcie_write_reg_field(dev->parf, PCIE20_PARF_CLKREQ_OVERRIDE, ++ PCIE20_PARF_CLKREQ_IN_OVERRIDE_VAL, BIT(3)); ++ ep_pcie_write_reg_field(dev->parf, PCIE20_PARF_CLKREQ_OVERRIDE, ++ PCIE20_PARF_CLKREQ_IN_OVERRIDE_ENABLE, BIT(1)); ++ } else { ++ ep_pcie_write_reg_field(dev->parf, PCIE20_PARF_CLKREQ_OVERRIDE, ++ PCIE20_PARF_CLKREQ_IN_OVERRIDE_ENABLE, 0); ++ ep_pcie_write_reg_field(dev->parf, PCIE20_PARF_CLKREQ_OVERRIDE, ++ PCIE20_PARF_CLKREQ_IN_OVERRIDE_VAL, 0); ++ } ++ ++ return 0; ++} ++ ++static int ep_pcie_core_config_inact_timer(struct ep_pcie_inactivity *param) ++{ ++ struct ep_pcie_dev_t *dev = &ep_pcie_dev; ++ ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: PCIe config inact timer\n", dev->rev); ++ ++ if (!param->enable) { ++ EP_PCIE_DBG(&ep_pcie_dev, ++ "PCIe V%d: timer value being disabled:0x%x\n", ++ ep_pcie_dev.rev, param->enable); ++ ep_pcie_write_reg_field(dev->parf, ++ PCIE20_PARF_DEBUG_INT_EN, ++ PCIE20_PARF_DEBUG_INT_EN_L1SUB_TIMEOUT_BIT_MASK, 0); ++ return 0; ++ } ++ ++ if (param->timer_us & BIT(31)) { ++ EP_PCIE_DBG(&ep_pcie_dev, ++ "PCIe V%d: timer value is a 31 bit value:0x%x\n", ++ ep_pcie_dev.rev, param->timer_us); ++ return -EINVAL; ++ } ++ ++ EP_PCIE_DBG(&ep_pcie_dev, ++ "PCIe V%d: timer value being programmed:0x%x\n", ++ ep_pcie_dev.rev, param->timer_us); ++ ++ ep_pcie_write_reg_field(dev->parf, ++ PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER, ++ PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER_RESET_MASK, 0x1); ++ ++ usleep_range(PCIE_L1SUB_AHB_TIMEOUT_MIN, PCIE_L1SUB_AHB_TIMEOUT_MAX); ++ ep_pcie_write_reg(dev->parf, ++ PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER, ++ param->timer_us); ++ usleep_range(PCIE_L1SUB_AHB_TIMEOUT_MIN, PCIE_L1SUB_AHB_TIMEOUT_MAX); ++ ++ ep_pcie_write_reg_field(dev->parf, ++ PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER, ++ PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER_RESET_MASK, 0x0); ++ ++ /* Enable L1SUB timeout bit to enable corresponding aggregated irq */ ++ ep_pcie_write_reg_field(dev->parf, ++ PCIE20_PARF_DEBUG_INT_EN, ++ PCIE20_PARF_DEBUG_INT_EN_L1SUB_TIMEOUT_BIT_MASK, ++ BIT(0)); ++ ++ return 0; ++} ++ ++ ++static int ep_pcie_core_config_l1ss_sleep_mode(bool config) ++{ ++ struct ep_pcie_dev_t *dev = &ep_pcie_dev; ++ ++ EP_PCIE_DBG(dev, ++ "PCIe V%d:PCIe l1ss sleep mode:%d\n", dev->rev, config); ++ ++ if (config) ++ ep_pcie_write_reg_field(dev->parf, ++ PCIE20_PARF_L1SS_SLEEP_MODE_HANDLER_CONFIG, ++ BIT(0), 1); ++ else ++ ep_pcie_write_reg_field(dev->parf, ++ PCIE20_PARF_L1SS_SLEEP_MODE_HANDLER_CONFIG, ++ BIT(0), 0); ++ ++ return 0; ++} ++ ++static int ep_pcie_core_d3hot_sleep_config_disable(void) ++{ ++ int rc = 0, mhi_fwd_status = 0, mhi_fwd_status_cnt = 0; ++ struct ep_pcie_dev_t *dev = &ep_pcie_dev; ++ ++ EP_PCIE_DBG(dev, "PCIe V%d: Ungate CLKREQ#\n", dev->rev); ++ ++ /* Undo CLKREQ# override */ ++ rc = ep_pcie_core_clkreq_override(false); ++ if (rc < 0) { ++ EP_PCIE_ERR(dev, "PCIe V%d: CLKREQ# override config failed\n", ++ dev->rev); ++ return rc; ++ } ++ ++ /* Disable L1ss sleep mode */ ++ if (dev->l1ss_sleep_mode_enabled) { ++ EP_PCIE_DBG(dev, "PCIe V%d:Disable L1ss\n", dev->rev); ++ ++ rc = ep_pcie_core_config_l1ss_sleep_mode(false); ++ if (rc < 0) { ++ EP_PCIE_ERR(dev, ++ "PCIe V%d: L1ss sleep deconfig failed\n", dev->rev); ++ return rc; ++ } ++ ++ /* Check MHI_FWD status */ ++ while (!mhi_fwd_status && ++ mhi_fwd_status_cnt <= 
PCIE_MHI_FWD_COUNT) { ++ mhi_fwd_status = (readl_relaxed(dev->parf + ++ PCIE20_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS) & ++ PCIE20_PARF_L1SS_SLEEP_MHI_FWD_DISABLE); ++ mhi_fwd_status_cnt++; ++ usleep_range(PCIE_MHI_FWD_STATUS_MIN, ++ PCIE_MHI_FWD_STATUS_MAX); ++ } ++ ++ if (mhi_fwd_status >= PCIE_MHI_FWD_COUNT) { ++ EP_PCIE_ERR(dev, "PCIe V%d: MHI FWD status not set\n", ++ dev->rev); ++ return -EINVAL; ++ } ++ } ++ ++ return 0; ++} ++ ++static int ep_pcie_core_d3hot_sleep_config_enable(void) ++{ ++ int rc, ret, mhi_fwd_status = 1, mhi_fwd_status_cnt = 0; ++ struct ep_pcie_dev_t *dev = &ep_pcie_dev; ++ ++ EP_PCIE_DBG(dev, "PCIe V%d: Gate CLKREQ#\n", dev->rev); ++ ++ /* Enable CLKREQ# override */ ++ rc = ep_pcie_core_clkreq_override(true); ++ if (rc < 0) { ++ EP_PCIE_ERR(dev, "PCIe V%d: CLKREQ# override config failed\n", ++ dev->rev); ++ return rc; ++ } ++ ++ if (ep_pcie_core_get_clkreq_status()) { ++ EP_PCIE_DBG(dev, "PCIe V%d: CLKREQ status is set\n", dev->rev); ++ rc = -EINVAL; ++ goto disable_clkreq; ++ } ++ ++ if (dev->l1ss_sleep_mode_enabled) { ++ /* Enter L1ss sleep mode */ ++ rc = ep_pcie_core_config_l1ss_sleep_mode(true); ++ if (rc < 0) { ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: L1ss sleep config failed\n", ++ dev->rev); ++ goto disable_clkreq; ++ } ++ ++ /* Check MHI_FWD status */ ++ while (mhi_fwd_status && ++ mhi_fwd_status_cnt <= PCIE_MHI_FWD_COUNT) { ++ mhi_fwd_status = (readl_relaxed(dev->parf + ++ PCIE20_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS) & ++ PCIE20_PARF_L1SS_SLEEP_MHI_FWD_ENABLE); ++ mhi_fwd_status_cnt++; ++ usleep_range(PCIE_MHI_FWD_STATUS_MIN, ++ PCIE_MHI_FWD_STATUS_MAX); ++ } ++ ++ if (mhi_fwd_status >= PCIE_MHI_FWD_COUNT) { ++ EP_PCIE_DBG(dev, "PCIe V%d: MHI FWD status not set\n", ++ dev->rev); ++ rc = -EINVAL; ++ goto disable_l1ss_sleep_mode; ++ } ++ ++ m2_enabled = true; ++ } ++ ++ return 0; ++ ++disable_l1ss_sleep_mode: ++ if (dev->l1ss_sleep_mode_enabled) { ++ ret = ep_pcie_core_config_l1ss_sleep_mode(false); ++ if (ret) ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: Disable L1ss sleep mode with %d\n", ++ dev->rev, ret); ++ } ++disable_clkreq: ++ ret = ep_pcie_core_clkreq_override(false); ++ if (ret) ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: CLKREQ# override config failed %d\n", ++ dev->rev, ret); ++ ++ return rc; ++} ++ ++static void ep_pcie_core_enable_l1(struct ep_pcie_dev_t *dev) ++{ ++ /* ++ * Configure and enable L1(ss) here immediately after link up ++ * check in HLOS during EP PCIE driver probe. The phy sequence ++ * programmed in PBL is deemed good enough to support L1(ss) for ++ * SDX24. ++ * The below register writes to support L1(ss) are the same we ++ * do when link is initialized by HLOS (as opposed to PBL now) - ++ * writing them here before L1(ss) is enabled to be consistent. 
++ */ ++ ++ /* Enable CS for RO(CS) register writes */ ++ ep_pcie_write_mask(dev->dm_core + PCIE20_MISC_CONTROL_1, 0, ++ BIT(0)); ++ /* Set the Endpoint L0s Acceptable Latency to 1us (max) */ ++ ep_pcie_write_reg_field(dev->dm_core, ++ PCIE20_DEVICE_CAPABILITIES, ++ PCIE20_MASK_EP_L0S_ACCPT_LATENCY, 0x7); ++ ++ /* Set the Endpoint L1 Acceptable Latency to 2 us (max) */ ++ ep_pcie_write_reg_field(dev->dm_core, ++ PCIE20_DEVICE_CAPABILITIES, ++ PCIE20_MASK_EP_L1_ACCPT_LATENCY, 0x7); ++ ++ /* Set the L0s Exit Latency to 2us-4us = 0x6 */ ++ ep_pcie_write_reg_field(dev->dm_core, PCIE20_LINK_CAPABILITIES, ++ PCIE20_MASK_L1_EXIT_LATENCY, 0x6); ++ ++ /* Set the L1 Exit Latency to be 32us-64 us = 0x6 */ ++ ep_pcie_write_reg_field(dev->dm_core, PCIE20_LINK_CAPABILITIES, ++ PCIE20_MASK_L0S_EXIT_LATENCY, 0x6); ++ ++ /* ++ * CLK_PM_EN must be set to be able to enable clock power ++ * management capability in the link capability register ++ */ ++ ep_pcie_write_mask(dev->elbi + PCIE20_ELBI_SYS_CTRL, 0, ++ PCIE20_ELBI_SYS_CTRL_CLK_PM_EN_BIT_MASK); ++ ++ EP_PCIE_DBG(dev, "PCIe V%d: PCIE20_ELBI_SYS_CTRL: 0x%x\n", ++ dev->rev, readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_CTRL)); ++ ++ /* Enable Clock Power Management */ ++ ep_pcie_write_reg_field(dev->dm_core, PCIE20_LINK_CAPABILITIES, ++ PCIE20_MASK_CLOCK_POWER_MAN, 0x1); ++ ++ EP_PCIE_DBG(dev, ++ "After program: PCIE20_LINK_CAPABILITIES:0x%x\n", ++ readl_relaxed(dev->dm_core + PCIE20_LINK_CAPABILITIES)); ++ ++ /* Disable CS for RO(CS) register writes */ ++ ep_pcie_write_mask(dev->dm_core + PCIE20_MISC_CONTROL_1, BIT(0), ++ 0); ++ ++ /* Enable L1 by clearing REQ_NOT_ENTR_L1 bit */ ++ ep_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, ++ PCIE20_PARF_PM_CTRL_REQ_NOT_ENTR_L1_BIT_MASK, 0); ++ ++ EP_PCIE_DBG(dev, "PCIe V%d: PARF_PM_CTRL: 0x%x\n", ++ dev->rev, readl_relaxed(dev->parf + PCIE20_PARF_PM_CTRL)); ++} ++ + int ep_pcie_core_enable_endpoint(enum ep_pcie_options opt) + { + int ret = 0; +@@ -1388,6 +1753,7 @@ int ep_pcie_core_enable_endpoint(enum ep_pcie_options opt) + EP_PCIE_INFO(dev, + "PCIe V%d: link initialized by bootloader for LE PCIe endpoint; skip link training in HLOS.\n", + dev->rev); ++ ep_pcie_core_enable_l1(dev); + /* + * Read and save the subsystem id set in PBL + * (needed for restore during D3->D0) +@@ -1559,7 +1925,7 @@ int ep_pcie_core_enable_endpoint(enum ep_pcie_options opt) + EP_PCIE_INFO(dev, + "PCIe V%d: link initialized for LE PCIe endpoint\n", + dev->rev); +- place_marker( ++ update_marker( + "PCIe - link initialized for LE PCIe endpoint\n"); + } + +@@ -1650,11 +2016,13 @@ int ep_pcie_core_disable_endpoint(void) + goto out; + } + +- dev->link_status = EP_PCIE_LINK_DISABLED; +- dev->power_on = false; ++ if (!m2_enabled) { ++ dev->link_status = EP_PCIE_LINK_DISABLED; ++ dev->power_on = false; + +- EP_PCIE_DBG(dev, "PCIe V%d: shut down the link.\n", +- dev->rev); ++ EP_PCIE_DBG(dev, "PCIe V%d: shut down the link\n", ++ dev->rev); ++ } + + ep_pcie_pipe_clk_deinit(dev); + ep_pcie_clk_deinit(dev); +@@ -1805,6 +2173,12 @@ static irqreturn_t ep_pcie_handle_pm_turnoff_irq(int irq, void *data) + EP_PCIE_DBG2(dev, + "PCIe V%d: No. %ld PM_TURNOFF is received.\n", + dev->rev, dev->pm_to_counter); ++ /* ++ * The below write to exit from L1 was earlier done in D3 handler, ++ * but that would prevent power saving on platforms that do not ++ * support D3 cold. 
++ */ ++ ep_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, 0, BIT(1)); + EP_PCIE_DBG2(dev, "PCIe V%d: Put the link into L23.\n", dev->rev); + ep_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, 0, BIT(2)); + +@@ -1813,11 +2187,80 @@ static irqreturn_t ep_pcie_handle_pm_turnoff_irq(int irq, void *data) + return IRQ_HANDLED; + } + ++static void ep_pcie_core_log_l1_debug_regs(struct ep_pcie_dev_t *dev) ++{ ++ /* Log register values for debug purpose */ ++ EP_PCIE_DBG(dev, "PCIe V%d: PCIE20_CAP_LINKCTRLSTATUS: 0x%x\n", ++ dev->rev, ++ readl_relaxed(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS)); ++ EP_PCIE_DBG(dev, "PCIe V%d: PM_STTS: 0x%x PM_CTRL: 0x%x\n", ++ dev->rev, ++ readl_relaxed(dev->parf + PCIE20_PARF_PM_STTS), ++ readl_relaxed(dev->parf + PCIE20_PARF_PM_CTRL)); ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: L1SUB_CAPABILITY:0x%x; PHY_PCS_STATUS4:0x%x\n", ++ dev->rev, ++ readl_relaxed(dev->dm_core + PCIE20_L1SUB_CAPABILITY), ++ readl_relaxed(dev->phy + PCIE20_PHY_PCS_STATUS4)); ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: L2_CNT: 0x%x; L1SUB_CNT: 0x%x; L1_CNT: 0x%x; L0S: 0x%x;\n", ++ dev->rev, ++ readl_relaxed(dev->mmio + PCIE20_PARF_DBG_CNT_PM_LINKST_IN_L2), ++ readl_relaxed(dev->mmio + ++ PCIE20_PARF_DBG_CNT_PM_LINKST_IN_L1SUB), ++ readl_relaxed(dev->mmio + PCIE20_PARF_DBG_CNT_PM_LINKST_IN_L1), ++ readl_relaxed(dev->mmio + ++ PCIE20_PARF_DBG_CNT_PM_LINKST_IN_L0S)); ++ EP_PCIE_DBG(dev, "PCIe V%d: PCIE20_PARF_CLKREQ_OVERRIDE: 0x%x\n", ++ dev->rev, ++ readl_relaxed(dev->parf + PCIE20_PARF_CLKREQ_OVERRIDE)); ++} ++ ++static void ep_pcie_update_phy_seq_in_l1ss(struct ep_pcie_dev_t *dev) ++{ ++ u32 count = 0; ++ u32 val; ++ ++ /* ++ * Following sequence was provided by PHY design team to update the ++ * phy sequence : ++ * PBL -> Boot up (done) -> update PCS settings only -> go to L1ss ++ * -> update all PHY settings -> come out of L1ss (this will happen ++ * upon subsequent link activity) ++ */ ++ ++ /* Update PCS phy registers */ ++ ep_pcie_phy_update_pcs(dev); ++ /* Wait until link enters L1ss */ ++ do { ++ val = readl_relaxed(dev->parf + PCIE20_PARF_PM_STTS); ++ if (val & PCIE20_PARF_PM_STTS_LINKST_IN_L1SUB) ++ break; ++ udelay(D3HOT_L1SS_WAIT); ++ count++; ++ } while (count < D3HOT_L1SS_WAIT_MAX_COUNT); ++ ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: PCIE20_PARF_PM_STTS: 0x%x; PCS_PCS_STATUS4: 0x%x\n", ++ dev->rev, ++ readl_relaxed(dev->parf + PCIE20_PARF_PM_STTS), ++ readl_relaxed(dev->phy + PCIE20_PHY_PCS_STATUS4)); ++ ++ if (count >= D3HOT_L1SS_WAIT_MAX_COUNT) { ++ EP_PCIE_DBG(dev, "PCIe V%d: Link did not enter L1ss\n", ++ dev->rev); ++ return; ++ } ++ /* Update full phy sequence */ ++ ep_pcie_phy_init(dev); ++} ++ + static irqreturn_t ep_pcie_handle_dstate_change_irq(int irq, void *data) + { + struct ep_pcie_dev_t *dev = data; + unsigned long irqsave_flags; + u32 dstate; ++ struct ep_pcie_inactivity inact_param; + + spin_lock_irqsave(&dev->isr_lock, irqsave_flags); + +@@ -1833,7 +2276,15 @@ static irqreturn_t ep_pcie_handle_dstate_change_irq(int irq, void *data) + EP_PCIE_DBG(dev, + "PCIe V%d: No. %ld change to D3 state.\n", + dev->rev, dev->d3_counter); +- ep_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, 0, BIT(1)); ++ /* ++ * On some customer platforms the RC may not support ++ * D3 cold. So we try to update the latest phy sequence ++ * when we get the first D3 hot. We wait for the link to be ++ * in L1ss to minimize the possibility of traffic on the link ++ * while the phy sequence is updated. 
++ */ ++ if (dev->d3_counter == 1) ++ ep_pcie_update_phy_seq_in_l1ss(dev); + + if (dev->enumerated) + ep_pcie_notify_event(dev, EP_PCIE_EVENT_PM_D3_HOT); +@@ -1841,13 +2292,39 @@ static irqreturn_t ep_pcie_handle_dstate_change_irq(int irq, void *data) + EP_PCIE_DBG(dev, + "PCIe V%d: do not notify client about this D3 hot event since enumeration by HLOS is not done yet.\n", + dev->rev); ++ ep_pcie_core_log_l1_debug_regs(dev); ++ /* ++ * Hold a wakelock since the mhi wakelock will be released while ++ * processing M3, and we might miss the L1ss inactivity timer ++ * interrupt if apps goes into suspend. ++ */ ++ if (!atomic_read(&dev->ep_pcie_dev_wake)) { ++ pm_stay_awake(&dev->pdev->dev); ++ atomic_set(&dev->ep_pcie_dev_wake, 1); ++ EP_PCIE_DBG(dev, "PCIe V%d: Acquired wakelock\n", ++ dev->rev); ++ } ++ queue_work(dev->d3hot_sleep_wq, &dev->sched_inact_timer); + } else if (dstate == 0) { + dev->l23_ready = false; + dev->d0_counter++; + EP_PCIE_DBG(dev, + "PCIe V%d: No. %ld change to D0 state.\n", + dev->rev, dev->d0_counter); ++ /* ++ * Disable inactivity timer if it was enabled since ++ * we got a D0 before it fired ++ */ ++ if (dev->inact_timer_enabled) { ++ inact_param.enable = false; ++ ep_pcie_core_config_inact_timer(&inact_param); ++ dev->inact_timer_enabled = false; ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: Disabled inactivity timer\n", ++ dev->rev); ++ } + ep_pcie_notify_event(dev, EP_PCIE_EVENT_PM_D0); ++ ep_pcie_core_log_l1_debug_regs(dev); + } else { + EP_PCIE_ERR(dev, + "PCIe V%d:invalid D state change to 0x%x.\n", +@@ -1911,6 +2388,78 @@ static void handle_perst_func(struct work_struct *work) + ep_pcie_enumeration(dev); + } + ++static void handle_d3hot_sleep_func(struct work_struct *work) ++{ ++ struct ep_pcie_dev_t *dev = container_of(work, struct ep_pcie_dev_t, ++ handle_d3hot_sleep_work); ++ uint32_t val; ++ int rc; ++ ktime_t time; ++ unsigned long flags; ++ bool d3hot_sleep = false; ++ ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: d3hot_sleep start delta %d ms\n", ++ dev->rev, ++ (int)(ktime_ms_delta(ktime_get(), d3hot_sleep_sched_time))); ++ ++ spin_lock_irqsave(&dev->d3hot_sleep_lock, flags); ++ if (dev->ep_wake_in_progress) { ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: EP wake in progress, exiting\n", dev->rev); ++ spin_unlock_irqrestore(&dev->d3hot_sleep_lock, flags); ++ return; ++ } ++ dev->d3hot_sleep_in_progress = true; ++ spin_unlock_irqrestore(&dev->d3hot_sleep_lock, flags); ++ ++ /* Check if link is in L1ss */ ++ val = readl_relaxed(dev->parf + PCIE20_PARF_PM_STTS); ++ if (val & PCIE20_PARF_PM_STTS_LINKST_IN_L1SUB) { ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: Link in L1ss\n", dev->rev); ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: Clkreq gpio val is %d\n", ++ dev->rev, ++ (gpio_get_value(dev->gpio[EP_PCIE_GPIO_CLKREQ].num))); ++ ++ rc = ep_pcie_core_d3hot_sleep_config_enable(); ++ if (rc) { ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: D3hot sleep config failed\n", ++ dev->rev); ++ goto exit; ++ } ++ ++ time = ktime_get(); ++ ep_pcie_pipe_clk_deinit(dev); ++ ep_pcie_clk_deinit(dev); ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: Clk deinit time %d ms\n", ++ dev->rev, (int)ktime_ms_delta(ktime_get(), time)); ++ ++ enable_irq(clkreq_irq); ++ d3hot_sleep = true; ++ } else { ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: Link not in L1ss\n", dev->rev); ++ } ++ ++exit: ++ spin_lock_irqsave(&dev->d3hot_sleep_lock, flags); ++ dev->d3hot_sleep_in_progress = false; ++ if (d3hot_sleep) ++ dev->in_d3hot_sleep = true; ++ spin_unlock_irqrestore(&dev->d3hot_sleep_lock, flags); ++ ++ /* Release wakelock to allow apps suspend */ ++ if 
(atomic_read(&dev->ep_pcie_dev_wake)) { ++ pm_relax(&dev->pdev->dev); ++ atomic_set(&dev->ep_pcie_dev_wake, 0); ++ EP_PCIE_DBG(dev, "PCIe V%d: Released wakelock\n", dev->rev); ++ } ++} ++ + static void handle_d3cold_func(struct work_struct *work) + { + struct ep_pcie_dev_t *dev = container_of(work, struct ep_pcie_dev_t, +@@ -1920,6 +2469,11 @@ static void handle_d3cold_func(struct work_struct *work) + "PCIe V%d: shutdown PCIe link due to PERST assertion before BME is set.\n", + dev->rev); + ep_pcie_core_disable_endpoint(); ++ if (atomic_read(&dev->ep_pcie_dev_wake)) { ++ pm_relax(&dev->pdev->dev); ++ atomic_set(&dev->ep_pcie_dev_wake, 0); ++ EP_PCIE_DBG(dev, "PCIe V%d: Released wakelock\n", dev->rev); ++ } + dev->no_notify = false; + } + +@@ -1983,17 +2537,158 @@ static irqreturn_t ep_pcie_handle_perst_irq(int irq, void *data) + } + + out: ++ /* Set trigger type based on the next expected value of perst gpio */ ++ irq_set_irq_type(gpio_to_irq(dev->gpio[EP_PCIE_GPIO_PERST].num), ++ (perst ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH)); ++ + spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags); + + return IRQ_HANDLED; + } + ++static int ep_pcie_core_abort_d3hot_sleep(struct ep_pcie_dev_t *dev) ++{ ++ int ret = 0; ++ unsigned long flags; ++ struct ep_pcie_inactivity inact_param; ++ ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: Entry\n", dev->rev); ++ ++ if (dev->inact_timer_enabled) { ++ /* Disable inactivity timer */ ++ inact_param.enable = false; ++ ep_pcie_core_config_inact_timer(&inact_param); ++ dev->inact_timer_enabled = false; ++ } ++ ++ if (dev->in_d3hot_sleep) { ++ /* ++ * Disable clkreq irq which was enabled when entering ++ * D3hot sleep ++ */ ++ disable_irq_nosync(clkreq_irq); ++ ++ ret = ep_pcie_clk_init(dev); ++ if (ret) { ++ EP_PCIE_ERR(dev, "PCIe V%d: failed to enable clocks\n", ++ dev->rev); ++ goto exit; ++ } ++ ret = ep_pcie_pipe_clk_init(dev); ++ if (ret) { ++ EP_PCIE_ERR(dev, ++ "PCIe V%d: failed to enable pipe clocks\n", ++ dev->rev); ++ goto exit; ++ } ++ ret = ep_pcie_core_d3hot_sleep_config_disable(); ++ if (ret) { ++ EP_PCIE_ERR(dev, ++ "PCIe V%d: failed to disable d3hot sleep: %d\n", ++ dev->rev, ret); ++ goto exit; ++ } ++ ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: Disabled d3 hot sleep\n", dev->rev); ++ } ++ ++exit: ++ spin_lock_irqsave(&dev->d3hot_sleep_lock, flags); ++ if (!ret) ++ dev->in_d3hot_sleep = false; ++ dev->ep_wake_in_progress = false; ++ spin_unlock_irqrestore(&dev->d3hot_sleep_lock, flags); ++ ++ return ret; ++} ++ ++static irqreturn_t ep_pcie_handle_clkreq_irq(int irq, void *data) ++{ ++ struct ep_pcie_dev_t *dev = data; ++ int ret; ++ ktime_t time; ++ u32 clkreq_val; ++ unsigned long flags; ++ ++ EP_PCIE_DBG(dev, "PCIe V%d: received clkreq irq\n", dev->rev); ++ ++ clkreq_val = gpio_get_value(dev->gpio[EP_PCIE_GPIO_CLKREQ].num); ++ ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: Clkreq gpio val is %d\n", ++ dev->rev, clkreq_val); ++ /* ++ * Clkreq is active low, so the gpio value should be 0 if RC ++ * asserts it. Sometimes we see the value is 1 which might be ++ * due to noise, ignore in such cases. 
++ */ ++ if (clkreq_val) { ++ EP_PCIE_DBG(dev, "PCIe V%d: clkreq gpio is 1, ignoring\n", ++ dev->rev); ++ return IRQ_HANDLED; ++ } ++ ++ spin_lock_irqsave(&dev->d3hot_sleep_lock, flags); ++ if (dev->ep_wake_in_progress) { ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: EP initiated wake in progress, returning\n", ++ dev->rev); ++ spin_unlock_irqrestore(&dev->d3hot_sleep_lock, flags); ++ return IRQ_HANDLED; ++ } ++ dev->clkreq_wake_in_progress = true; ++ spin_unlock_irqrestore(&dev->d3hot_sleep_lock, flags); ++ ++ disable_irq_nosync(clkreq_irq); ++ ++ /* enable clocks and ungate clkreq */ ++ EP_PCIE_DBG(dev, "PCIe V%d: Enabling clocks in irq\n", dev->rev); ++ ++ time = ktime_get(); ++ ret = ep_pcie_clk_init(dev); ++ if (ret) { ++ EP_PCIE_ERR(dev, "PCIe V%d: failed to enable clocks\n", ++ dev->rev); ++ goto exit; ++ } ++ ret = ep_pcie_pipe_clk_init(dev); ++ if (ret) { ++ EP_PCIE_ERR(dev, "PCIe V%d: failed to enable pipe clocks\n", ++ dev->rev); ++ goto exit; ++ } ++ ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: Clk init time %d ms\n", ++ dev->rev, (int)ktime_ms_delta(ktime_get(), time)); ++ ++ ret = ep_pcie_core_d3hot_sleep_config_disable(); ++ if (ret) { ++ EP_PCIE_ERR(dev, ++ "PCIe V%d: failed to disable d3hot sleep: %d\n", ++ dev->rev, ret); ++ goto exit; ++ } ++ ++exit: ++ spin_lock_irqsave(&dev->d3hot_sleep_lock, flags); ++ dev->clkreq_wake_in_progress = false; ++ if (!ret) ++ dev->in_d3hot_sleep = false; ++ spin_unlock_irqrestore(&dev->d3hot_sleep_lock, flags); ++ ++ return IRQ_HANDLED; ++} ++ + static irqreturn_t ep_pcie_handle_global_irq(int irq, void *data) + { + struct ep_pcie_dev_t *dev = data; + int i; + u32 status = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_STATUS); + u32 mask = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK); ++ struct ep_pcie_inactivity inact_param; + + ep_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_CLEAR, 0, status); + +@@ -2042,9 +2737,22 @@ static irqreturn_t ep_pcie_handle_global_irq(int irq, void *data) + dev->rev); + ep_pcie_handle_linkup_irq(irq, data); + break; ++ case EP_PCIE_INT_EVT_L1SUB_TIMEOUT: ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: L1sub inactivity timer fired\n", ++ dev->rev); ++ /* Disable inactivity timer */ ++ inact_param.enable = false; ++ ep_pcie_core_config_inact_timer(&inact_param); ++ dev->inact_timer_enabled = false; ++ /* Schedule D3 hot sleep work */ ++ d3hot_sleep_sched_time = ktime_get(); ++ queue_work(dev->d3hot_sleep_wq, ++ &dev->handle_d3hot_sleep_work); ++ break; + default: + EP_PCIE_ERR(dev, +- "PCIe V%d: Unexpected event %d is caught!\n", ++ "PCIe V%d: Unexpected event %d\n", + dev->rev, i); + } + } +@@ -2053,6 +2761,48 @@ static irqreturn_t ep_pcie_handle_global_irq(int irq, void *data) + return IRQ_HANDLED; + } + ++static void enable_inact_timer(struct work_struct *work) ++{ ++ struct ep_pcie_dev_t *dev = container_of(work, struct ep_pcie_dev_t, ++ sched_inact_timer); ++ int ret; ++ unsigned long flags; ++ struct ep_pcie_inactivity inact_param; ++ ++ EP_PCIE_DBG(dev, "PCIe V%d: Entry\n", dev->rev); ++ ++ spin_lock_irqsave(&dev->d3hot_sleep_lock, flags); ++ if (dev->ep_wake_in_progress) { ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: EP wakeup pending, not enabling L1ss inact timer\n", ++ dev->rev); ++ spin_unlock_irqrestore(&dev->d3hot_sleep_lock, flags); ++ /* ++ * Release wakelock here since we did not enable the ++ * inactivity timer ++ */ ++ if (atomic_read(&dev->ep_pcie_dev_wake)) { ++ pm_relax(&dev->pdev->dev); ++ atomic_set(&dev->ep_pcie_dev_wake, 0); ++ EP_PCIE_DBG(dev, "PCIe V%d: Released wakelock\n", ++ dev->rev); ++ } ++ return; ++ } ++ 
spin_unlock_irqrestore(&dev->d3hot_sleep_lock, flags); ++ ++ inact_param.enable = true; ++ inact_param.timer_us = PCIE_EP_TIMER_US; ++ ret = ep_pcie_core_config_inact_timer(&inact_param); ++ if (ret) { ++ EP_PCIE_ERR(dev, ++ "PCIe V%d: Failed to enable inact timer\n", dev->rev); ++ return; ++ } ++ ++ dev->inact_timer_enabled = true; ++} ++ + int32_t ep_pcie_irq_init(struct ep_pcie_dev_t *dev) + { + int ret; +@@ -2065,6 +2815,16 @@ int32_t ep_pcie_irq_init(struct ep_pcie_dev_t *dev) + INIT_WORK(&dev->handle_perst_work, handle_perst_func); + INIT_WORK(&dev->handle_bme_work, handle_bme_func); + INIT_WORK(&dev->handle_d3cold_work, handle_d3cold_func); ++ INIT_WORK(&dev->sched_inact_timer, enable_inact_timer); ++ ++ dev->d3hot_sleep_wq = alloc_workqueue("d3hot_sleep_wq", ++ WQ_HIGHPRI, 0); ++ if (!dev->d3hot_sleep_wq) { ++ EP_PCIE_ERR(dev, ++ "PCIe V%d: Cannot alloc d3hot sleep wq\n", dev->rev); ++ return -ENOMEM; ++ } ++ INIT_WORK(&dev->handle_d3hot_sleep_work, handle_d3hot_sleep_func); + + if (dev->aggregated_irq) { + ret = devm_request_irq(pdev, +@@ -2166,11 +2926,19 @@ int32_t ep_pcie_irq_init(struct ep_pcie_dev_t *dev) + } + + perst_irq: ++ /* ++ * Check initial state of perst gpio to set the trigger type ++ * based on the next expected level of the gpio ++ */ ++ if (gpio_get_value(dev->gpio[EP_PCIE_GPIO_PERST].num) == 1) ++ dev->perst_deast = true; ++ + /* register handler for PERST interrupt */ + perst_irq = gpio_to_irq(dev->gpio[EP_PCIE_GPIO_PERST].num); + ret = devm_request_irq(pdev, perst_irq, + ep_pcie_handle_perst_irq, +- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, ++ ((dev->perst_deast ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH) ++ | IRQF_EARLY_RESUME), + "ep_pcie_perst", dev); + if (ret) { + EP_PCIE_ERR(dev, +@@ -2187,6 +2955,30 @@ int32_t ep_pcie_irq_init(struct ep_pcie_dev_t *dev) + return ret; + } + ++ /* register handler for clkreq interrupt */ ++ clkreq_irq = gpio_to_irq(dev->gpio[EP_PCIE_GPIO_CLKREQ].num); ++ EP_PCIE_ERR(dev, ++ "PCIe V%d: Register for CLKREQ interrupt %d\n", ++ dev->rev, clkreq_irq); ++ irq_set_status_flags(clkreq_irq, IRQ_NOAUTOEN); ++ ret = devm_request_threaded_irq(pdev, clkreq_irq, NULL, ++ ep_pcie_handle_clkreq_irq, ++ IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_EARLY_RESUME, ++ "ep_pcie_clkreq", dev); ++ if (ret) { ++ EP_PCIE_ERR(dev, ++ "PCIe V%d: Unable to request CLKREQ interrupt %d\n", ++ dev->rev, clkreq_irq); ++ return ret; ++ } ++ ret = enable_irq_wake(clkreq_irq); ++ if (ret) { ++ EP_PCIE_ERR(dev, ++ "PCIe V%d: Unable to enable wake for clkreq interrupt %d\n", ++ dev->rev, clkreq_irq); ++ return ret; ++ } ++ + return 0; + } + +@@ -2514,9 +3306,107 @@ int ep_pcie_core_trigger_msi(u32 idx) + return EP_PCIE_ERROR; + } + +-int ep_pcie_core_wakeup_host(void) ++static void ep_pcie_core_issue_inband_pme(void) + { + struct ep_pcie_dev_t *dev = &ep_pcie_dev; ++ unsigned long irqsave_flags; ++ u32 pm_ctrl = 0; ++ ++ spin_lock_irqsave(&dev->isr_lock, irqsave_flags); ++ ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: request to assert inband wake\n", ++ dev->rev); ++ ++ pm_ctrl = readl_relaxed(dev->parf + PCIE20_PARF_PM_CTRL); ++ ep_pcie_write_reg(dev->parf, PCIE20_PARF_PM_CTRL, ++ (pm_ctrl | BIT(4))); ++ ep_pcie_write_reg(dev->parf, PCIE20_PARF_PM_CTRL, pm_ctrl); ++ ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: completed assert for inband wake\n", ++ dev->rev); ++ ++ spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags); ++} ++ ++static int ep_pcie_core_wakeup_host(enum ep_pcie_event event) ++{ ++ struct ep_pcie_dev_t *dev = &ep_pcie_dev; ++ int ret; ++ unsigned long flags; ++ bool 
wait_for_wake = false; ++ bool wait_for_sleep = false; ++ int retries = 0; ++ ++ if (event == EP_PCIE_EVENT_PM_D3_HOT) { ++ /* ++ * Check if we are entering D3hot sleep or ++ * exiting out of it ++ */ ++ spin_lock_irqsave(&dev->d3hot_sleep_lock, flags); ++ if (dev->in_d3hot_sleep) { ++ if (dev->clkreq_wake_in_progress) ++ wait_for_wake = true; ++ } else { ++ if (dev->d3hot_sleep_in_progress) ++ wait_for_sleep = true; ++ } ++ dev->ep_wake_in_progress = true; ++ spin_unlock_irqrestore(&dev->d3hot_sleep_lock, flags); ++ if (wait_for_wake || wait_for_sleep) { ++ /* ++ * Either wake from clkreq interrupt is in progress OR ++ * D3hot sleep is in progress ++ */ ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: EP wake : wait for D3hot sleep %s\n", ++ dev->rev, (wait_for_wake ? "exit" : "entry")); ++ while (retries < D3HOT_SLEEP_ENTRY_EXIT_MAX_COUNT) { ++ usleep_range( ++ D3HOT_SLEEP_ENTRY_EXIT_TIMEOUT_US_MIN, ++ D3HOT_SLEEP_ENTRY_EXIT_TIMEOUT_US_MAX); ++ /* Check if clkreq based wake is complete */ ++ if (wait_for_wake && ++ !dev->clkreq_wake_in_progress) ++ break; ++ /* Check if d3hot sleep is complete */ ++ if (wait_for_sleep && ++ !dev->d3hot_sleep_in_progress) ++ break; ++ retries++; ++ } ++ if (retries == D3HOT_SLEEP_ENTRY_EXIT_MAX_COUNT) { ++ EP_PCIE_ERR(dev, ++ "PCIe V%d: d3 hot sleep entry/exit timeout\n", ++ dev->rev); ++ return EP_PCIE_ERROR; ++ } ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: EP wake : wait for D3hot sleep %s complete\n", ++ dev->rev, (wait_for_wake ? "exit" : "entry")); ++ } ++ if (wait_for_wake) { ++ /* We already exited out of D3hot sleep above */ ++ spin_lock_irqsave(&dev->d3hot_sleep_lock, flags); ++ dev->ep_wake_in_progress = false; ++ spin_unlock_irqrestore(&dev->d3hot_sleep_lock, flags); ++ } else { ++ /* ++ * Either we are in D3hot sleep OR ++ * we haven't started the D3hot sleep process yet ++ */ ++ ret = ep_pcie_core_abort_d3hot_sleep(dev); ++ if (ret) { ++ EP_PCIE_ERR(dev, ++ "PCIe V%d: Failed to wake from d3 hot sleep\n", ++ dev->rev); ++ return ret; ++ } ++ } ++ ep_pcie_core_issue_inband_pme(); ++ return 0; ++ } + + if (dev->perst_deast && !dev->l23_ready) { + EP_PCIE_ERR(dev, +@@ -2572,6 +3462,7 @@ struct ep_pcie_hw hw_drv = { + .enable_endpoint = ep_pcie_core_enable_endpoint, + .disable_endpoint = ep_pcie_core_disable_endpoint, + .mask_irq_event = ep_pcie_core_mask_irq_event, ++ .configure_inactivity_timer = ep_pcie_core_config_inact_timer, + }; + + static int ep_pcie_probe(struct platform_device *pdev) +@@ -2690,6 +3581,13 @@ static int ep_pcie_probe(struct platform_device *pdev) + + ep_pcie_dev.rev = 1711211; + ep_pcie_dev.pdev = pdev; ++ ep_pcie_dev.m2_autonomous = ++ of_property_read_bool((&pdev->dev)->of_node, ++ "qcom,pcie-m2-autonomous"); ++ EP_PCIE_DBG(&ep_pcie_dev, ++ "PCIe V%d: MHI M2 autonomous is %s enabled\n", ++ ep_pcie_dev.rev, ep_pcie_dev.m2_autonomous ? 
"" : "not"); ++ + memcpy(ep_pcie_dev.vreg, ep_pcie_vreg_info, + sizeof(ep_pcie_vreg_info)); + memcpy(ep_pcie_dev.gpio, ep_pcie_gpio_info, +@@ -2705,6 +3603,9 @@ static int ep_pcie_probe(struct platform_device *pdev) + memcpy(ep_pcie_dev.irq, ep_pcie_irq_info, + sizeof(ep_pcie_irq_info)); + ++ ep_pcie_dev.gdsc_disabled = true; ++ ep_pcie_dev.l1ss_sleep_mode_enabled = false; ++ + ret = ep_pcie_get_resources(&ep_pcie_dev, + ep_pcie_dev.pdev); + if (ret) { +@@ -2733,6 +3634,14 @@ static int ep_pcie_probe(struct platform_device *pdev) + goto irq_failure; + } + ++ /* ++ * Wakelock is required since the L1ss inactivity ++ * timer interrupt is not wake capable ++ */ ++ device_init_wakeup(&ep_pcie_dev.pdev->dev, true); ++ /* Initialize wake flag to 0 */ ++ atomic_set(&ep_pcie_dev.ep_pcie_dev_wake, 0); ++ + if (ep_pcie_dev.perst_enum && + !gpio_get_value(ep_pcie_dev.gpio[EP_PCIE_GPIO_PERST].num)) { + EP_PCIE_DBG2(&ep_pcie_dev, +@@ -2837,6 +3746,7 @@ static int __init ep_pcie_init(void) + mutex_init(&ep_pcie_dev.ext_mtx); + spin_lock_init(&ep_pcie_dev.ext_lock); + spin_lock_init(&ep_pcie_dev.isr_lock); ++ spin_lock_init(&ep_pcie_dev.d3hot_sleep_lock); + + ep_pcie_debugfs_init(&ep_pcie_dev); + +diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_dbg.c b/drivers/platform/msm/ep_pcie/ep_pcie_dbg.c +index a6ec33f4827c..9bf08e8101ba 100644 +--- a/drivers/platform/msm/ep_pcie/ep_pcie_dbg.c ++++ b/drivers/platform/msm/ep_pcie/ep_pcie_dbg.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2015-2017, 2019 The Linux Foundation. All rights reserved. ++/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -357,7 +357,7 @@ static ssize_t ep_pcie_cmd_debug(struct file *file, + ep_pcie_config_outbound_iatu(phandle, entries, 2); + break; + case 13: /* wake up the host */ +- ep_pcie_wakeup_host(phandle); ++ ep_pcie_wakeup_host(phandle, EP_PCIE_EVENT_PM_D3_HOT); + break; + case 14: /* Configure routing of doorbells */ + ep_pcie_config_db_routing(phandle, chdb_cfg, erdb_cfg); +diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_phy.c b/drivers/platform/msm/ep_pcie/ep_pcie_phy.c +index f813bb93cfa2..6f15c6910ad1 100644 +--- a/drivers/platform/msm/ep_pcie/ep_pcie_phy.c ++++ b/drivers/platform/msm/ep_pcie/ep_pcie_phy.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -163,3 +163,32 @@ bool ep_pcie_phy_is_ready(struct ep_pcie_dev_t *dev) + else + return true; + } ++ ++void ep_pcie_phy_update_pcs(struct ep_pcie_dev_t *dev) ++{ ++ int i; ++ struct ep_pcie_phy_info_t *phy_init; ++ ++ if (dev->phy_init_len && dev->phy_init) { ++ EP_PCIE_DBG(dev, ++ "PCIe V%d: PHY V%d: Update PCS settings\n", ++ dev->rev, dev->phy_rev); ++ ++ phy_init = dev->phy_init; ++ for (i = 0; i < dev->phy_init_len; i++) { ++ if (phy_init->offset < PCIE20_PHY_PCS_START_OFFSET || ++ phy_init->offset >= PCIE20_PHY_PCS_END_OFFSET) { ++ phy_init++; ++ continue; ++ } ++ ep_pcie_write_reg(dev->phy, ++ phy_init->offset, ++ phy_init->val); ++ if (phy_init->delay) ++ usleep_range(phy_init->delay, ++ phy_init->delay + 1); ++ phy_init++; ++ } ++ return; ++ } ++} +diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c +index d69988a132d3..af5f06c234dd 100644 +--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c ++++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2012-2018,2020 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h +index 92f566a0458a..984ab1740b4e 100644 +--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h ++++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2012-2018,2020 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c +index 3df8f49e5f0b..a920c20bc186 100644 +--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c ++++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -208,6 +208,9 @@ static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip, + /* only body (no header) */ + tbl_mem.size = tbl->sz[rlt] - + ipahal_get_hw_tbl_hdr_width(); ++ /* Add prefetech buf size. */ ++ tbl_mem.size += ++ ipahal_get_hw_prefetch_buf_size(); + if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) { + IPAERR("fail to alloc sys tbl of size %d\n", + tbl_mem.size); +diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c +index e4164561cb51..b74a49095ae7 100644 +--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c ++++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2012-2019 The Linux Foundation. All rights reserved. ++/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -62,7 +62,7 @@ static int ipa3_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem) + } + + static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem, +- u32 hdr_base_addr) ++ u64 hdr_base_addr) + { + struct ipa3_hdr_proc_ctx_entry *entry; + int ret; +@@ -97,7 +97,8 @@ static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem, + hdr_base_addr, + entry->hdr->offset_entry, + &entry->l2tp_params, +- &entry->generic_params); ++ &entry->generic_params, ++ ipa3_ctx->use_64_bit_dma_mask); + if (ret) + return ret; + } +@@ -114,10 +115,10 @@ static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem, + * + * Returns: 0 on success, negative on failure + */ +-static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr, ++static int ipa3_generate_hdr_proc_ctx_hw_tbl(u64 hdr_sys_addr, + struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem) + { +- u32 hdr_base_addr; ++ u64 hdr_base_addr; + gfp_t flag = GFP_KERNEL; + + mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4; +diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +index 83750e3d808f..3fabf5ba5e1f 100644 +--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h ++++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +@@ -390,6 +390,8 @@ + (ipa3_ctx->ipa_config_is_auto)) + #define IPA_RULE_CNT_MAX 512 + ++#define IPA_RULE_CNT_MAX 512 ++ + struct ipa3_active_client_htable_entry { + struct hlist_node list; + char id_string[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN]; +diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c +index cc15baa7511b..13b62c04090e 100644 +--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c ++++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c +@@ -256,11 +256,12 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client, + ep->gsi_evt_ring_hdl = *params->cached_gsi_evt_ring_hdl; + } + +- if (params->ev_ctx_host->wp == params->ev_ctx_host->rbase) { +- IPA_MHI_ERR("event ring wp is not updated. base=wp=0x%llx\n", +- params->ev_ctx_host->wp); +- goto fail_alloc_ch; +- } ++ /** ++ * compare host evt ring wp with base ptr condition was added to check ++ * whether MHI driver ring db or not, but in wrap around case wp and ++ * base ptr can be same so removing it. ++ * if evt-ring has no credit, gsi will crash. ++ */ + + IPA_MHI_DBG("Ring event db: evt_ring_hdl=%lu host_wp=0x%llx\n", + ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp); +diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c +index 22459d4a8fac..d5ae0940180f 100644 +--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c ++++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -168,6 +168,9 @@ static int ipa_translate_rt_tbl_to_hw_fmt(enum ipa_ip_type ip, + /* only body (no header) */ + tbl_mem.size = tbl->sz[rlt] - + ipahal_get_hw_tbl_hdr_width(); ++ /* Add prefetech buf size. 
*/ ++ tbl_mem.size += ++ ipahal_get_hw_prefetch_buf_size(); + if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) { + IPAERR_RL("fail to alloc sys tbl of size %d\n", + tbl_mem.size); +diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c +index 1f245e702de7..642e00232f78 100644 +--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c ++++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -1191,6 +1191,13 @@ static void ipahal_cp_hdr_to_hw_buff_v3(void *const base, u32 offset, + memcpy(base + offset, hdr, hdr_len); + } + ++/* Header address update logic. */ ++#define IPAHAL_CP_PROC_CTX_HEADER_UPDATE(hdr_lsb, hdr_msb, addr) \ ++ do { \ ++ hdr_lsb = lower_32_bits(addr); \ ++ hdr_msb = upper_32_bits(addr); \ ++ } while (0) ++ + /* + * ipahal_cp_proc_ctx_to_hw_buff_v3() - copy processing context to + * base address and offset given. +@@ -1205,26 +1212,33 @@ static void ipahal_cp_hdr_to_hw_buff_v3(void *const base, u32 offset, + * @offset_entry: offset from hdr_base_addr in table + * @l2tp_params: l2tp parameters + * @generic_params: generic proc_ctx params ++ * @is_64: Indicates whether header base address/dma base address is 64 bit. + */ + static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type, + void *const base, u32 offset, + u32 hdr_len, bool is_hdr_proc_ctx, +- dma_addr_t phys_base, u32 hdr_base_addr, ++ dma_addr_t phys_base, u64 hdr_base_addr, + struct ipa_hdr_offset_entry *offset_entry, + struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params, +- struct ipa_eth_II_to_eth_II_ex_procparams *generic_params){ ++ struct ipa_eth_II_to_eth_II_ex_procparams *generic_params, ++ bool is_64) ++{ ++ u64 hdr_addr; ++ + if (type == IPA_HDR_PROC_NONE) { + struct ipa_hw_hdr_proc_ctx_add_hdr_seq *ctx; + + ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_seq *) + (base + offset); + ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD; +- ctx->hdr_add.tlv.length = 1; ++ ctx->hdr_add.tlv.length = is_64 ? 2 : 1; + ctx->hdr_add.tlv.value = hdr_len; +- ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base : ++ hdr_addr = is_hdr_proc_ctx ? phys_base : + hdr_base_addr + offset_entry->offset; +- IPAHAL_DBG("header address 0x%x\n", +- ctx->hdr_add.hdr_addr); ++ IPAHAL_DBG("header address 0x%llx\n", ++ hdr_addr); ++ IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr, ++ ctx->hdr_add.hdr_addr_hi, hdr_addr); + ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END; + ctx->end.length = 0; + ctx->end.value = 0; +@@ -1234,12 +1248,14 @@ static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type, + ctx = (struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq *) + (base + offset); + ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD; +- ctx->hdr_add.tlv.length = 1; ++ ctx->hdr_add.tlv.length = is_64 ? 2 : 1; + ctx->hdr_add.tlv.value = hdr_len; +- ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base : ++ hdr_addr = is_hdr_proc_ctx ? 
phys_base : + hdr_base_addr + offset_entry->offset; +- IPAHAL_DBG("header address 0x%x\n", +- ctx->hdr_add.hdr_addr); ++ IPAHAL_DBG("header address 0x%llx\n", ++ hdr_addr); ++ IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr, ++ ctx->hdr_add.hdr_addr_hi, hdr_addr); + ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD; + ctx->l2tp_params.tlv.length = 1; + ctx->l2tp_params.tlv.value = +@@ -1261,12 +1277,14 @@ static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type, + ctx = (struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq *) + (base + offset); + ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD; +- ctx->hdr_add.tlv.length = 1; ++ ctx->hdr_add.tlv.length = is_64 ? 2 : 1; + ctx->hdr_add.tlv.value = hdr_len; +- ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base : ++ hdr_addr = is_hdr_proc_ctx ? phys_base : + hdr_base_addr + offset_entry->offset; +- IPAHAL_DBG("header address 0x%x length %d\n", +- ctx->hdr_add.hdr_addr, ctx->hdr_add.tlv.value); ++ IPAHAL_DBG("header address 0x%llx length %d\n", ++ hdr_addr, ctx->hdr_add.tlv.value); ++ IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr, ++ ctx->hdr_add.hdr_addr_hi, hdr_addr); + ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD; + ctx->l2tp_params.tlv.length = 1; + ctx->l2tp_params.tlv.value = +@@ -1324,12 +1342,14 @@ static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type, + ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *) + (base + offset); + ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD; +- ctx->hdr_add.tlv.length = 1; ++ ctx->hdr_add.tlv.length = is_64 ? 2 : 1; + ctx->hdr_add.tlv.value = hdr_len; +- ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base : ++ hdr_addr = is_hdr_proc_ctx ? phys_base : + hdr_base_addr + offset_entry->offset; +- IPAHAL_DBG("header address 0x%x\n", +- ctx->hdr_add.hdr_addr); ++ IPAHAL_DBG("header address 0x%llx\n", ++ hdr_addr); ++ IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr, ++ ctx->hdr_add.hdr_addr_hi, hdr_addr); + ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD; + ctx->cmd.length = 0; + switch (type) { +@@ -1411,11 +1431,11 @@ struct ipahal_hdr_funcs { + int (*ipahal_cp_proc_ctx_to_hw_buff)(enum ipa_hdr_proc_type type, + void *const base, u32 offset, u32 hdr_len, + bool is_hdr_proc_ctx, dma_addr_t phys_base, +- u32 hdr_base_addr, ++ u64 hdr_base_addr, + struct ipa_hdr_offset_entry *offset_entry, + struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params, + struct ipa_eth_II_to_eth_II_ex_procparams +- *generic_params); ++ *generic_params, bool is_64); + + int (*ipahal_get_proc_ctx_needed_len)(enum ipa_hdr_proc_type type); + }; +@@ -1482,18 +1502,20 @@ void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *const hdr, + * @offset_entry: offset from hdr_base_addr in table + * @l2tp_params: l2tp parameters + * @generic_params: generic proc_ctx params ++ * @is_64: Indicates whether header base address/dma base address is 64 bit. 
+ */ + int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type, + void *const base, u32 offset, u32 hdr_len, + bool is_hdr_proc_ctx, dma_addr_t phys_base, +- u32 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry, ++ u64 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry, + struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params, +- struct ipa_eth_II_to_eth_II_ex_procparams *generic_params) ++ struct ipa_eth_II_to_eth_II_ex_procparams *generic_params, ++ bool is_64) + { + IPAHAL_DBG( +- "type %d, base %p, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %d, offset_entry %p\n" ++ "type %d, base %pK, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %llu, offset_entry %pK, bool %d\n" + , type, base, offset, hdr_len, is_hdr_proc_ctx, +- hdr_base_addr, offset_entry); ++ hdr_base_addr, offset_entry, is_64); + + if (!base || + !hdr_len || +@@ -1501,7 +1523,7 @@ int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type, + (!is_hdr_proc_ctx && !offset_entry) || + (!is_hdr_proc_ctx && !hdr_base_addr)) { + IPAHAL_ERR( +- "invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%u is_hdr_proc_ctx:%d offset_entry:%pK\n" ++ "invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%llu is_hdr_proc_ctx:%d offset_entry:%pK\n" + , hdr_len, &phys_base, hdr_base_addr + , is_hdr_proc_ctx, offset_entry); + return -EINVAL; +@@ -1510,7 +1532,7 @@ int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type, + return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset, + hdr_len, is_hdr_proc_ctx, phys_base, + hdr_base_addr, offset_entry, l2tp_params, +- generic_params); ++ generic_params, is_64); + } + + /* +diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h +index f8720b0c4e14..ec214e39a5f1 100644 +--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h ++++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2016-2017, 2019, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -636,14 +636,16 @@ void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *hdr, u32 hdr_len); + * @offset_entry: offset from hdr_base_addr in table + * @l2tp_params: l2tp parameters + * @generic_params: generic proc_ctx params ++ * @is_64: Indicates whether header base address/dma base address is 64 bit. + */ + int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type, + void *base, u32 offset, u32 hdr_len, + bool is_hdr_proc_ctx, dma_addr_t phys_base, +- u32 hdr_base_addr, ++ u64 hdr_base_addr, + struct ipa_hdr_offset_entry *offset_entry, + struct ipa_l2tp_hdr_proc_ctx_params *l2tp_params, +- struct ipa_eth_II_to_eth_II_ex_procparams *generic_params); ++ struct ipa_eth_II_to_eth_II_ex_procparams *generic_params, ++ bool is_64); + + /* + * ipahal_get_proc_ctx_needed_len() - calculates the needed length for addition +diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c +index 06bd062a612c..e99969aad0b3 100644 +--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c ++++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -49,7 +49,9 @@ + * @flt_generate_eq: Generate flt equation attributes from rule attributes + * @rt_parse_hw_rule: Parse rt rule read from H/W + * @flt_parse_hw_rule: Parse flt rule read from H/W +- * @eq_bitfield: Array of the bit fields of the support equations ++ * @eq_bitfield: Array of the bit fields of the support equations. ++ * 0xFF means the equation is not supported ++ * @prefetech_buf_size: Prefetch buf size; + */ + struct ipahal_fltrt_obj { + bool support_hash; +@@ -79,6 +81,7 @@ struct ipahal_fltrt_obj { + int (*rt_parse_hw_rule)(u8 *addr, struct ipahal_rt_rule_entry *rule); + int (*flt_parse_hw_rule)(u8 *addr, struct ipahal_flt_rule_entry *rule); + u8 eq_bitfield[IPA_EQ_MAX]; ++ u32 prefetech_buf_size; + }; + + +@@ -495,6 +498,7 @@ static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = { + [IPA_FL_EQ] = 14, + [IPA_IS_FRAG] = 15, + }, ++ IPA3_0_HW_RULE_PREFETCH_BUF_SIZE, + }, + + /* IPAv4 */ +@@ -539,6 +543,7 @@ static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = { + [IPA_FL_EQ] = 14, + [IPA_IS_FRAG] = 15, + }, ++ IPA3_0_HW_RULE_PREFETCH_BUF_SIZE, + }, + }; + +@@ -3034,6 +3039,12 @@ u32 ipahal_get_lcl_tbl_addr_alignment(void) + return ipahal_fltrt_objs[ipahal_ctx->hw_type].lcladdr_alignment; + } + ++/* Get the H/W (flt/rt) prefetch buf size */ ++u32 ipahal_get_hw_prefetch_buf_size(void) ++{ ++ return ipahal_fltrt_objs[ipahal_ctx->hw_type].prefetech_buf_size; ++} ++ + /* + * Rule priority is used to distinguish rules order + * at the integrated table consisting from hashable and +diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h +index 3ee883b6fb20..03d6d8fa3569 100644 +--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h ++++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -139,6 +139,9 @@ u32 ipahal_get_hw_tbl_hdr_width(void); + */ + u32 ipahal_get_lcl_tbl_addr_alignment(void); + ++/* Get the H/W (flt/rt) prefetch buf size */ ++u32 ipahal_get_hw_prefetch_buf_size(void); ++ + /* + * Rule priority is used to distinguish rules order + * at the integrated table consisting from hashable and +diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h +index 645383a8f1cf..c419f15c8195 100644 +--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h ++++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -50,6 +50,7 @@ enum ipa_fltrt_equations { + #define IPA3_0_HW_TBL_ADDR_MASK (127) + #define IPA3_0_HW_RULE_BUF_SIZE (256) + #define IPA3_0_HW_RULE_START_ALIGNMENT (7) ++#define IPA3_0_HW_RULE_PREFETCH_BUF_SIZE (128) + + + /* +diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h +index d7e1541555b5..eb97ee2b2481 100644 +--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h ++++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -659,6 +659,7 @@ struct ipa_hw_hdr_proc_ctx_tlv { + struct ipa_hw_hdr_proc_ctx_hdr_add { + struct ipa_hw_hdr_proc_ctx_tlv tlv; + u32 hdr_addr; ++ u32 hdr_addr_hi; + }; + + /** +diff --git a/drivers/platform/msm/mhi_dev/mhi.c b/drivers/platform/msm/mhi_dev/mhi.c +index 1c5dcaf2670d..e01c1b16eca1 100644 +--- a/drivers/platform/msm/mhi_dev/mhi.c ++++ b/drivers/platform/msm/mhi_dev/mhi.c +@@ -808,7 +808,7 @@ static void mhi_hwc_cb(void *priv, enum ipa_mhi_event_type event, + + mhi_update_state_info(MHI_STATE_CONNECTED); + mhi_log(MHI_MSG_CRITICAL, "Device in M0 State\n"); +- place_marker("MHI - Device in M0 State\n"); ++ update_marker("MHI - Device in M0 State\n"); + + if (!mhi_ctx->mhi_int) + ep_pcie_mask_irq_event(mhi_ctx->phandle, +@@ -1608,6 +1608,14 @@ static void mhi_update_state_info_all(enum mhi_ctrl_info info) + + mhi_ctx->ctrl_info = info; + for (i = 0; i < MHI_MAX_SOFTWARE_CHANNELS; ++i) { ++ /* ++ * Skip channel state info change ++ * if channel is already in the desired state. ++ */ ++ if (channel_state_info[i].ctrl_info == info || ++ (info == MHI_STATE_DISCONNECTED && ++ channel_state_info[i].ctrl_info == MHI_STATE_CONFIGURED)) ++ continue; + channel_state_info[i].ctrl_info = info; + /* Notify kernel clients */ + mhi_dev_trigger_cb(i); +diff --git a/drivers/platform/msm/mhi_dev/mhi_sm.c b/drivers/platform/msm/mhi_dev/mhi_sm.c +index 4ec1c02e1111..24bb7e9be13a 100644 +--- a/drivers/platform/msm/mhi_dev/mhi_sm.c ++++ b/drivers/platform/msm/mhi_dev/mhi_sm.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + #include "mhi_hwio.h" + #include "mhi_sm.h" + #include +@@ -597,15 +598,22 @@ static int mhi_sm_change_to_M3(void) + static int mhi_sm_wakeup_host(enum mhi_dev_event event) + { + int res = 0; ++ enum ep_pcie_event pcie_event; + + MHI_SM_FUNC_ENTRY(); + + if (mhi_sm_ctx->mhi_state == MHI_DEV_M3_STATE) { + /* +- * ep_pcie driver is responsible to send the right wakeup +- * event, assert WAKE#, according to Link state +- */ +- res = ep_pcie_wakeup_host(mhi_sm_ctx->mhi_dev->phandle); ++ * Check and send D3_HOT to enable waking up the host ++ * using inband PME. 
++ */ ++ if (mhi_sm_ctx->d_state == MHI_SM_EP_PCIE_D3_HOT_STATE) ++ pcie_event = EP_PCIE_EVENT_PM_D3_HOT; ++ else ++ pcie_event = EP_PCIE_EVENT_PM_D3_COLD; ++ ++ res = ep_pcie_wakeup_host(mhi_sm_ctx->mhi_dev->phandle, ++ pcie_event); + if (res) { + MHI_SM_ERR("Failed to wakeup MHI host, returned %d\n", + res); +@@ -921,7 +929,8 @@ int mhi_dev_sm_init(struct mhi_dev *mhi_dev) + + /*init debugfs*/ + mhi_sm_debugfs_init(); +- mhi_sm_ctx->mhi_sm_wq = create_singlethread_workqueue("mhi_sm_wq"); ++ mhi_sm_ctx->mhi_sm_wq = alloc_workqueue( ++ "mhi_sm_wq", WQ_HIGHPRI | WQ_UNBOUND, 1); + if (!mhi_sm_ctx->mhi_sm_wq) { + MHI_SM_ERR("Failed to create singlethread_workqueue: sm_wq\n"); + res = -ENOMEM; +diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c +index df998f658f6d..43769a8fc00a 100644 +--- a/drivers/platform/msm/qcom-geni-se.c ++++ b/drivers/platform/msm/qcom-geni-se.c +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2017-2019,2021 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -356,6 +356,8 @@ static int geni_se_select_fifo_mode(void __iomem *base) + static int geni_se_select_dma_mode(void __iomem *base) + { + unsigned int geni_dma_mode = 0; ++ unsigned int common_geni_m_irq_en; ++ int proto = get_se_proto(base); + + geni_write_reg(0, base, SE_GSI_EVENT_EN); + geni_write_reg(0xFFFFFFFF, base, SE_GENI_M_IRQ_CLEAR); +@@ -363,8 +365,13 @@ static int geni_se_select_dma_mode(void __iomem *base) + geni_write_reg(0xFFFFFFFF, base, SE_DMA_TX_IRQ_CLR); + geni_write_reg(0xFFFFFFFF, base, SE_DMA_RX_IRQ_CLR); + geni_write_reg(0xFFFFFFFF, base, SE_IRQ_EN); +- geni_write_reg(0x00000000, base, SE_GENI_M_IRQ_EN); +- geni_write_reg(0x00000000, base, SE_GENI_S_IRQ_EN); ++ ++ /* Do not disable all GENI interrupts */ ++ common_geni_m_irq_en = geni_read_reg(base, SE_GENI_M_IRQ_EN); ++ if (proto != UART) ++ common_geni_m_irq_en &= ~(M_TX_FIFO_WATERMARK_EN | ++ M_RX_FIFO_WATERMARK_EN); ++ geni_write_reg(common_geni_m_irq_en, base, SE_GENI_M_IRQ_EN); + + geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN); + geni_dma_mode |= GENI_DMA_MODE_EN; +diff --git a/drivers/platform/msm/sps/bam.c b/drivers/platform/msm/sps/bam.c +index c9c52f76b9fb..1f285f4e4449 100644 +--- a/drivers/platform/msm/sps/bam.c ++++ b/drivers/platform/msm/sps/bam.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2011-2017, 2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -2169,13 +2169,16 @@ void print_bam_pipe_desc_fifo(void *virt_addr, u32 pipe_index, u32 option) + u32 pipe = pipe_index; + u32 desc_fifo_addr; + u32 desc_fifo_size; +- u32 *desc_fifo; ++ u32 __iomem *desc_fifo; + int i; + char desc_info[MAX_MSG_LEN]; ++ struct sps_bam *dev; + + if (base == NULL) + return; + ++ dev = to_sps_bam_dev(virt_addr); ++ + desc_fifo_addr = bam_read_reg(base, P_DESC_FIFO_ADDR, pipe); + desc_fifo_size = bam_read_reg_field(base, P_FIFO_SIZES, pipe, + P_DESC_FIFO_SIZE); +@@ -2197,7 +2200,14 @@ void print_bam_pipe_desc_fifo(void *virt_addr, u32 pipe_index, u32 option) + "BAM_P_DESC_FIFO_SIZE: 0x%x (%d)\n\n", + desc_fifo_addr, desc_fifo_size, desc_fifo_size); + +- desc_fifo = (u32 *) phys_to_virt(desc_fifo_addr); ++ if (dev->props.options & SPS_BAM_SMMU_EN) { ++ struct sps_pipe *pipe_indx = dev->pipes[pipe_index]; ++ ++ SPS_DUMP("%s", "SMMU is enabled\n"); ++ desc_fifo = pipe_indx->map->desc.base; ++ } else { ++ desc_fifo = (u32 __iomem *) phys_to_virt(desc_fifo_addr); ++ } + + if (option == 100) { + SPS_DUMP("%s", +diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c +index 1515c9480f89..ec3cbb7844bc 100644 +--- a/drivers/platform/x86/acer-wmi.c ++++ b/drivers/platform/x86/acer-wmi.c +@@ -124,6 +124,7 @@ static const struct key_entry acer_wmi_keymap[] __initconst = { + {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */ + {KE_IGNORE, 0x81, {KEY_SLEEP} }, + {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad Toggle */ ++ {KE_IGNORE, 0x84, {KEY_KBDILLUMTOGGLE} }, /* Automatic Keyboard background light toggle */ + {KE_KEY, KEY_TOUCHPAD_ON, {KEY_TOUCHPAD_ON} }, + {KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} }, + {KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} }, +@@ -228,6 +229,7 @@ static int mailled = -1; + static int brightness = -1; + static int threeg = -1; + static int force_series; ++static int force_caps = -1; + static bool ec_raw_mode; + static bool has_type_aa; + static u16 commun_func_bitmap; +@@ -237,11 +239,13 @@ module_param(mailled, int, 0444); + module_param(brightness, int, 0444); + module_param(threeg, int, 0444); + module_param(force_series, int, 0444); ++module_param(force_caps, int, 0444); + module_param(ec_raw_mode, bool, 0444); + MODULE_PARM_DESC(mailled, "Set initial state of Mail LED"); + MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness"); + MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware"); + MODULE_PARM_DESC(force_series, "Force a different laptop series"); ++MODULE_PARM_DESC(force_caps, "Force the capability bitmask to this value"); + MODULE_PARM_DESC(ec_raw_mode, "Enable EC raw mode"); + + struct acer_data { +@@ -2197,7 +2201,7 @@ static int __init acer_wmi_init(void) + } + /* WMID always provides brightness methods */ + interface->capability |= ACER_CAP_BRIGHTNESS; +- } else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa) { ++ } else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa && force_caps == -1) { + pr_err("No WMID device detection method found\n"); + return -ENODEV; + } +@@ -2227,6 +2231,9 @@ static int __init acer_wmi_init(void) + if (acpi_video_get_backlight_type() != acpi_backlight_vendor) + interface->capability &= ~ACER_CAP_BRIGHTNESS; + ++ if (force_caps != -1) ++ interface->capability = force_caps; ++ + if (wmi_has_guid(WMID_GUID3)) { + if (ec_raw_mode) { + if (ACPI_FAILURE(acer_wmi_enable_ec_raw())) { 
+diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c +index abd9d83f6009..403d966223ee 100644 +--- a/drivers/platform/x86/hp_accel.c ++++ b/drivers/platform/x86/hp_accel.c +@@ -101,6 +101,9 @@ MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids); + static int lis3lv02d_acpi_init(struct lis3lv02d *lis3) + { + struct acpi_device *dev = lis3->bus_priv; ++ if (!lis3->init_required) ++ return 0; ++ + if (acpi_evaluate_object(dev->handle, METHOD_NAME__INI, + NULL, NULL) != AE_OK) + return -EINVAL; +@@ -366,6 +369,7 @@ static int lis3lv02d_add(struct acpi_device *device) + } + + /* call the core layer do its init */ ++ lis3_dev.init_required = true; + ret = lis3lv02d_init_device(&lis3_dev); + if (ret) + return ret; +@@ -413,11 +417,27 @@ static int lis3lv02d_suspend(struct device *dev) + + static int lis3lv02d_resume(struct device *dev) + { ++ lis3_dev.init_required = false; ++ lis3lv02d_poweron(&lis3_dev); ++ return 0; ++} ++ ++static int lis3lv02d_restore(struct device *dev) ++{ ++ lis3_dev.init_required = true; + lis3lv02d_poweron(&lis3_dev); + return 0; + } + +-static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume); ++static const struct dev_pm_ops hp_accel_pm = { ++ .suspend = lis3lv02d_suspend, ++ .resume = lis3lv02d_resume, ++ .freeze = lis3lv02d_suspend, ++ .thaw = lis3lv02d_resume, ++ .poweroff = lis3lv02d_suspend, ++ .restore = lis3lv02d_restore, ++}; ++ + #define HP_ACCEL_PM (&hp_accel_pm) + #else + #define HP_ACCEL_PM NULL +diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c +index 12dbb5063376..a5c645b9e3f2 100644 +--- a/drivers/platform/x86/intel-hid.c ++++ b/drivers/platform/x86/intel-hid.c +@@ -264,7 +264,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv) + return AE_OK; + + if (acpi_match_device_ids(dev, ids) == 0) +- if (acpi_create_platform_device(dev, NULL)) ++ if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL))) + dev_info(&dev->dev, + "intel-hid: created platform device\n"); + +diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c +index a74340dff530..1cf2a38add5f 100644 +--- a/drivers/platform/x86/intel-vbtn.c ++++ b/drivers/platform/x86/intel-vbtn.c +@@ -168,7 +168,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv) + return AE_OK; + + if (acpi_match_device_ids(dev, ids) == 0) +- if (acpi_create_platform_device(dev, NULL)) ++ if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL))) + dev_info(&dev->dev, + "intel-vbtn: created platform device\n"); + +diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c +index b7dfe06261f1..9865d11eda75 100644 +--- a/drivers/platform/x86/intel_punit_ipc.c ++++ b/drivers/platform/x86/intel_punit_ipc.c +@@ -330,6 +330,7 @@ static const struct acpi_device_id punit_ipc_acpi_ids[] = { + { "INT34D4", 0 }, + { } + }; ++MODULE_DEVICE_TABLE(acpi, punit_ipc_acpi_ids); + + static struct platform_driver intel_punit_ipc_driver = { + .probe = intel_punit_ipc_probe, +diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c +index 60ee94e57242..c8a8bcda7b84 100644 +--- a/drivers/platform/x86/thinkpad_acpi.c ++++ b/drivers/platform/x86/thinkpad_acpi.c +@@ -2476,7 +2476,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, + */ + static int hotkey_kthread(void *data) + { +- struct tp_nvram_state s[2]; ++ struct tp_nvram_state s[2] = { 0 }; + u32 poll_mask, event_mask; + unsigned int si, so; + unsigned long t; 
+@@ -6044,6 +6044,7 @@ enum thermal_access_mode { + enum { /* TPACPI_THERMAL_TPEC_* */ + TP_EC_THERMAL_TMP0 = 0x78, /* ACPI EC regs TMP 0..7 */ + TP_EC_THERMAL_TMP8 = 0xC0, /* ACPI EC regs TMP 8..15 */ ++ TP_EC_FUNCREV = 0xEF, /* ACPI EC Functional revision */ + TP_EC_THERMAL_TMP_NA = -128, /* ACPI EC sensor not available */ + + TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */ +@@ -6242,7 +6243,7 @@ static const struct attribute_group thermal_temp_input8_group = { + + static int __init thermal_init(struct ibm_init_struct *iibm) + { +- u8 t, ta1, ta2; ++ u8 t, ta1, ta2, ver = 0; + int i; + int acpi_tmp7; + int res; +@@ -6257,7 +6258,14 @@ static int __init thermal_init(struct ibm_init_struct *iibm) + * 0x78-0x7F, 0xC0-0xC7. Registers return 0x00 for + * non-implemented, thermal sensors return 0x80 when + * not available ++ * The above rule is unfortunately flawed. This has been seen with ++ * 0xC2 (power supply ID) causing thermal control problems. ++ * The EC version can be determined by offset 0xEF and at least for ++ * version 3 the Lenovo firmware team confirmed that registers 0xC0-0xC7 ++ * are not thermal registers. + */ ++ if (!acpi_ec_read(TP_EC_FUNCREV, &ver)) ++ pr_warn("Thinkpad ACPI EC unable to access EC version\n"); + + ta1 = ta2 = 0; + for (i = 0; i < 8; i++) { +@@ -6267,11 +6275,13 @@ static int __init thermal_init(struct ibm_init_struct *iibm) + ta1 = 0; + break; + } +- if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) { +- ta2 |= t; +- } else { +- ta1 = 0; +- break; ++ if (ver < 3) { ++ if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) { ++ ta2 |= t; ++ } else { ++ ta1 = 0; ++ break; ++ } + } + } + if (ta1 == 0) { +@@ -6287,9 +6297,12 @@ static int __init thermal_init(struct ibm_init_struct *iibm) + thermal_read_mode = TPACPI_THERMAL_NONE; + } + } else { +- thermal_read_mode = +- (ta2 != 0) ? +- TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8; ++ if (ver >= 3) ++ thermal_read_mode = TPACPI_THERMAL_TPEC_8; ++ else ++ thermal_read_mode = ++ (ta2 != 0) ? 
++ TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8; + } + } else if (acpi_tmp7) { + if (tpacpi_is_ibm() && +@@ -6640,8 +6653,10 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle) + list_for_each_entry(child, &device->children, node) { + acpi_status status = acpi_evaluate_object(child->handle, "_BCL", + NULL, &buffer); +- if (ACPI_FAILURE(status)) ++ if (ACPI_FAILURE(status)) { ++ buffer.length = ACPI_ALLOCATE_BUFFER; + continue; ++ } + + obj = (union acpi_object *)buffer.pointer; + if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) { +diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c +index 79a228937072..90b17cf74e9f 100644 +--- a/drivers/platform/x86/toshiba_acpi.c ++++ b/drivers/platform/x86/toshiba_acpi.c +@@ -1497,7 +1497,7 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf, + struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file)); + char *buffer; + char *cmd; +- int lcd_out, crt_out, tv_out; ++ int lcd_out = -1, crt_out = -1, tv_out = -1; + int remain = count; + int value; + int ret; +@@ -1534,7 +1534,6 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf, + + kfree(cmd); + +- lcd_out = crt_out = tv_out = -1; + ret = get_video_status(dev, &video_out); + if (!ret) { + unsigned int new_video_out = video_out; +@@ -2867,6 +2866,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev) + + if (!dev->info_supported && !dev->system_event_supported) { + pr_warn("No hotkey query interface found\n"); ++ error = -EINVAL; + goto err_remove_filter; + } + +diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c +index 04ca990e8f6c..dcfc7025f384 100644 +--- a/drivers/power/reset/at91-sama5d2_shdwc.c ++++ b/drivers/power/reset/at91-sama5d2_shdwc.c +@@ -36,7 +36,7 @@ + + #define AT91_SHDW_MR 0x04 /* Shut Down Mode Register */ + #define AT91_SHDW_WKUPDBC_SHIFT 24 +-#define AT91_SHDW_WKUPDBC_MASK GENMASK(31, 16) ++#define AT91_SHDW_WKUPDBC_MASK GENMASK(26, 24) + #define AT91_SHDW_WKUPDBC(x) (((x) << AT91_SHDW_WKUPDBC_SHIFT) \ + & AT91_SHDW_WKUPDBC_MASK) + +diff --git a/drivers/power/reset/gpio-poweroff.c b/drivers/power/reset/gpio-poweroff.c +index be3d81ff51cc..a44e3427fdeb 100644 +--- a/drivers/power/reset/gpio-poweroff.c ++++ b/drivers/power/reset/gpio-poweroff.c +@@ -84,6 +84,7 @@ static const struct of_device_id of_gpio_poweroff_match[] = { + { .compatible = "gpio-poweroff", }, + {}, + }; ++MODULE_DEVICE_TABLE(of, of_gpio_poweroff_match); + + static struct platform_driver gpio_poweroff_driver = { + .probe = gpio_poweroff_probe, +diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c +index e9e749f87517..8fb43c4438e6 100644 +--- a/drivers/power/reset/vexpress-poweroff.c ++++ b/drivers/power/reset/vexpress-poweroff.c +@@ -150,6 +150,7 @@ static struct platform_driver vexpress_reset_driver = { + .driver = { + .name = "vexpress-reset", + .of_match_table = vexpress_reset_of_match, ++ .suppress_bind_attrs = true, + }, + }; + +diff --git a/drivers/power/supply/88pm860x_battery.c b/drivers/power/supply/88pm860x_battery.c +index 63c57dc82ac1..4eda5065b5bb 100644 +--- a/drivers/power/supply/88pm860x_battery.c ++++ b/drivers/power/supply/88pm860x_battery.c +@@ -436,7 +436,7 @@ static void pm860x_init_battery(struct pm860x_battery_info *info) + int ret; + int data; + int bat_remove; +- int soc; ++ int soc = 0; + + /* measure enable on GPADC1 */ + data = MEAS1_GP1; +@@ -499,7 +499,9 @@ static void 
pm860x_init_battery(struct pm860x_battery_info *info) + } + mutex_unlock(&info->lock); + +- calc_soc(info, OCV_MODE_ACTIVE, &soc); ++ ret = calc_soc(info, OCV_MODE_ACTIVE, &soc); ++ if (ret < 0) ++ goto out; + + data = pm860x_reg_read(info->i2c, PM8607_POWER_UP_LOG); + bat_remove = data & BAT_WU_LOG; +diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig +index a5cbb7110ee3..aac0ed71a9ca 100644 +--- a/drivers/power/supply/Kconfig ++++ b/drivers/power/supply/Kconfig +@@ -424,7 +424,7 @@ config CHARGER_BQ24257 + tristate "TI BQ24250/24251/24257 battery charger driver" + depends on I2C + depends on GPIOLIB || COMPILE_TEST +- depends on REGMAP_I2C ++ select REGMAP_I2C + help + Say Y to enable support for the TI BQ24250, BQ24251, and BQ24257 battery + chargers. +@@ -490,7 +490,8 @@ config BATTERY_GOLDFISH + + config BATTERY_RT5033 + tristate "RT5033 fuel gauge support" +- depends on MFD_RT5033 ++ depends on I2C ++ select REGMAP_I2C + help + This adds support for battery fuel gauge in Richtek RT5033 PMIC. + The fuelgauge calculates and determines the battery state of charge +diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c +index 6ffdc18f2599..9f17d81767ea 100644 +--- a/drivers/power/supply/ab8500_btemp.c ++++ b/drivers/power/supply/ab8500_btemp.c +@@ -1181,6 +1181,7 @@ static const struct of_device_id ab8500_btemp_match[] = { + { .compatible = "stericsson,ab8500-btemp", }, + { }, + }; ++MODULE_DEVICE_TABLE(of, ab8500_btemp_match); + + static struct platform_driver ab8500_btemp_driver = { + .probe = ab8500_btemp_probe, +diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c +index 48a11fd86a7f..56b502331433 100644 +--- a/drivers/power/supply/ab8500_charger.c ++++ b/drivers/power/supply/ab8500_charger.c +@@ -409,6 +409,14 @@ static void ab8500_enable_disable_sw_fallback(struct ab8500_charger *di, + static void ab8500_power_supply_changed(struct ab8500_charger *di, + struct power_supply *psy) + { ++ /* ++ * This happens if we get notifications or interrupts and ++ * the platform has been configured not to support one or ++ * other type of charging. ++ */ ++ if (!psy) ++ return; ++ + if (di->autopower_cfg) { + if (!di->usb.charger_connected && + !di->ac.charger_connected && +@@ -435,7 +443,15 @@ static void ab8500_charger_set_usb_connected(struct ab8500_charger *di, + if (!connected) + di->flags.vbus_drop_end = false; + +- sysfs_notify(&di->usb_chg.psy->dev.kobj, NULL, "present"); ++ /* ++ * Sometimes the platform is configured not to support ++ * USB charging and no psy has been created, but we still ++ * will get these notifications. 
++ */ ++ if (di->usb_chg.psy) { ++ sysfs_notify(&di->usb_chg.psy->dev.kobj, NULL, ++ "present"); ++ } + + if (connected) { + mutex_lock(&di->charger_attached_mutex); +@@ -3736,6 +3752,7 @@ static const struct of_device_id ab8500_charger_match[] = { + { .compatible = "stericsson,ab8500-charger", }, + { }, + }; ++MODULE_DEVICE_TABLE(of, ab8500_charger_match); + + static struct platform_driver ab8500_charger_driver = { + .probe = ab8500_charger_probe, +diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c +index ea8c26a108f0..d6079e892e11 100644 +--- a/drivers/power/supply/ab8500_fg.c ++++ b/drivers/power/supply/ab8500_fg.c +@@ -3229,6 +3229,7 @@ static const struct of_device_id ab8500_fg_match[] = { + { .compatible = "stericsson,ab8500-fg", }, + { }, + }; ++MODULE_DEVICE_TABLE(of, ab8500_fg_match); + + static struct platform_driver ab8500_fg_driver = { + .probe = ab8500_fg_probe, +diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c +index 13f23c00538b..043836e6d347 100644 +--- a/drivers/power/supply/charger-manager.c ++++ b/drivers/power/supply/charger-manager.c +@@ -1489,6 +1489,7 @@ static const struct of_device_id charger_manager_match[] = { + }, + {}, + }; ++MODULE_DEVICE_TABLE(of, charger_manager_match); + + static struct charger_desc *of_cm_parse_desc(struct device *dev) + { +diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c +index f627b39f64bf..b77fd751945d 100644 +--- a/drivers/power/supply/generic-adc-battery.c ++++ b/drivers/power/supply/generic-adc-battery.c +@@ -384,7 +384,7 @@ static int gab_remove(struct platform_device *pdev) + } + + kfree(adc_bat->psy_desc.properties); +- cancel_delayed_work(&adc_bat->bat_work); ++ cancel_delayed_work_sync(&adc_bat->bat_work); + return 0; + } + +diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c +index cd614fe69d14..e800beff1f8f 100644 +--- a/drivers/power/supply/lp8788-charger.c ++++ b/drivers/power/supply/lp8788-charger.c +@@ -532,7 +532,7 @@ static int lp8788_set_irqs(struct platform_device *pdev, + + ret = request_threaded_irq(virq, NULL, + lp8788_charger_irq_thread, +- 0, name, pchg); ++ IRQF_ONESHOT, name, pchg); + if (ret) + break; + } +@@ -603,27 +603,14 @@ static void lp8788_setup_adc_channel(struct device *dev, + return; + + /* ADC channel for battery voltage */ +- chan = iio_channel_get(dev, pdata->adc_vbatt); ++ chan = devm_iio_channel_get(dev, pdata->adc_vbatt); + pchg->chan[LP8788_VBATT] = IS_ERR(chan) ? NULL : chan; + + /* ADC channel for battery temperature */ +- chan = iio_channel_get(dev, pdata->adc_batt_temp); ++ chan = devm_iio_channel_get(dev, pdata->adc_batt_temp); + pchg->chan[LP8788_BATT_TEMP] = IS_ERR(chan) ? 
NULL : chan; + } + +-static void lp8788_release_adc_channel(struct lp8788_charger *pchg) +-{ +- int i; +- +- for (i = 0; i < LP8788_NUM_CHG_ADC; i++) { +- if (!pchg->chan[i]) +- continue; +- +- iio_channel_release(pchg->chan[i]); +- pchg->chan[i] = NULL; +- } +-} +- + static ssize_t lp8788_show_charger_status(struct device *dev, + struct device_attribute *attr, char *buf) + { +@@ -744,7 +731,6 @@ static int lp8788_charger_remove(struct platform_device *pdev) + lp8788_irq_unregister(pdev, pchg); + sysfs_remove_group(&pdev->dev.kobj, &lp8788_attr_group); + lp8788_psy_unregister(pchg); +- lp8788_release_adc_channel(pchg); + + return 0; + } +diff --git a/drivers/power/supply/pm2301_charger.c b/drivers/power/supply/pm2301_charger.c +index 78561b6884fc..9ef218d76aa9 100644 +--- a/drivers/power/supply/pm2301_charger.c ++++ b/drivers/power/supply/pm2301_charger.c +@@ -1098,7 +1098,7 @@ static int pm2xxx_wall_charger_probe(struct i2c_client *i2c_client, + ret = request_threaded_irq(gpio_to_irq(pm2->pdata->gpio_irq_number), + NULL, + pm2xxx_charger_irq[0].isr, +- pm2->pdata->irq_type, ++ pm2->pdata->irq_type | IRQF_ONESHOT, + pm2xxx_charger_irq[0].name, pm2); + + if (ret != 0) { +diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c +index 18b49eec3bd1..2c0b562d81cc 100644 +--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c ++++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c +@@ -4168,6 +4168,9 @@ static int fg_psy_get_property(struct power_supply *psy, + case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG: + rc = fg_get_time_to_full(chip, &pval->intval); + break; ++ case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: ++ rc = fg_get_time_to_full(chip, &pval->intval); ++ break; + case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: + rc = fg_get_time_to_empty(chip, &pval->intval); + break; +@@ -4427,6 +4430,7 @@ static enum power_supply_property fg_psy_props[] = { + POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW, + POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, ++ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, + POWER_SUPPLY_PROP_SOC_REPORTING_READY, + POWER_SUPPLY_PROP_DEBUG_BATTERY, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, +diff --git a/drivers/power/supply/qcom/qpnp-linear-charger.c b/drivers/power/supply/qcom/qpnp-linear-charger.c +index 9b400803ca58..11af02dd0e8b 100644 +--- a/drivers/power/supply/qcom/qpnp-linear-charger.c ++++ b/drivers/power/supply/qcom/qpnp-linear-charger.c +@@ -1704,7 +1704,9 @@ static int qpnp_batt_power_set_property(struct power_supply *psy, + return -EINVAL; + } + +- power_supply_changed(chip->batt_psy); ++ if (chip->bat_if_base && chip->batt_psy) ++ power_supply_changed(chip->batt_psy); ++ + return rc; + } + +@@ -2667,7 +2669,12 @@ static irqreturn_t qpnp_lbc_usbin_valid_irq_handler(int irq, void *_chip) + else + extcon_set_cable_state_(chip->extcon, + EXTCON_USB, false); +- power_supply_changed(chip->usb_psy); ++ } ++ ++ power_supply_changed(chip->usb_psy); ++ if (chip->bat_if_base) { ++ pr_debug("power supply changed batt_psy\n"); ++ power_supply_changed(chip->batt_psy); + } + + return IRQ_HANDLED; +diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c +index 222dc0c0706e..9211c49649ef 100644 +--- a/drivers/power/supply/qcom/qpnp-smb2.c ++++ b/drivers/power/supply/qcom/qpnp-smb2.c +@@ -238,6 +238,7 @@ static int __audio_headset_drp_wait_ms = 100; + module_param_named( + audio_headset_drp_wait_ms, __audio_headset_drp_wait_ms, int, 0600 + ); ++static bool shutdown_when_usbabsent; + + #define MICRO_1P5A 1500000 + 
#define MICRO_P1A 100000 +@@ -557,6 +558,9 @@ static int smb2_parse_dt(struct smb2 *chip) + chg->ufp_only_mode = of_property_read_bool(node, + "qcom,ufp-only-mode"); + ++ shutdown_when_usbabsent = of_property_read_bool(node, ++ "qcom,shutdown-enable"); ++ + return 0; + } + +@@ -1262,6 +1266,7 @@ static enum power_supply_property smb2_batt_props[] = { + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, ++ POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE, + POWER_SUPPLY_PROP_OP_DISABLE_CHARGE, + }; +@@ -1408,6 +1413,7 @@ static int smb2_batt_get_prop(struct power_supply *psy, + case POWER_SUPPLY_PROP_CHARGE_COUNTER: + case POWER_SUPPLY_PROP_CHARGE_FULL: + case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: ++ case POWER_SUPPLY_PROP_CYCLE_COUNT: + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + case POWER_SUPPLY_PROP_TEMP: + case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: +@@ -3038,6 +3044,11 @@ static int smb2_probe(struct platform_device *pdev) + } + usb_present = val.intval; + ++ if (shutdown_when_usbabsent && !usb_present) { ++ pr_err("ARGlass: no usb present, shutting down\n"); ++ orderly_poweroff(true); ++ } ++ + rc = smblib_get_prop_batt_present(chg, &val); + if (rc < 0) { + pr_err("Couldn't get batt present rc=%d\n", rc); +diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c +index db7350fecf8d..0f093d800290 100644 +--- a/drivers/power/supply/qcom/smb-lib.c ++++ b/drivers/power/supply/qcom/smb-lib.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. ++/* Copyright (c) 2016-2019, 2021 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -4787,6 +4787,11 @@ static int typec_try_sink(struct smb_charger *chg) + goto try_sink_exit; + } + ++ if (stat & TYPEC_VBUS_ERROR_STATUS_BIT) { ++ pr_err_ratelimited("vbus-error, vbus not detected - exit sink entry\n"); ++ goto try_sink_exit; ++ } ++ + /* + * Ensure sink since drp may put us in source if other + * side switches back to Rd +@@ -4879,8 +4884,6 @@ static void typec_sink_insertion(struct smb_charger *chg) + + static void typec_sink_removal(struct smb_charger *chg) + { +- smblib_set_charge_param(chg, &chg->param.freq_boost, +- chg->chg_freq.freq_above_otg_threshold); + chg->boost_current_ua = 0; + } + +@@ -5055,6 +5058,7 @@ static void smblib_handle_typec_removal(struct smb_charger *chg) + smblib_err(chg, "Couldn't clear exit_sink_based_on_cc rc=%d\n", + rc); + ++ smblib_set_opt_freq_buck(chg, chg->chg_freq.freq_removal); + typec_sink_removal(chg); + smblib_update_usb_type(chg); + +@@ -5081,6 +5085,8 @@ static void smblib_handle_typec_insertion(struct smb_charger *chg) + + if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT) { + typec_sink_insertion(chg); ++ smblib_set_charge_param(chg, &chg->param.freq_boost, ++ chg->chg_freq.freq_above_otg_threshold); + } else { + rc = smblib_request_dpdm(chg, true); + if (rc < 0) +diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h +index 8b4fd41ff345..efbf1a431975 100644 +--- a/drivers/power/supply/qcom/smb-lib.h ++++ b/drivers/power/supply/qcom/smb-lib.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. ++/* Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -732,4 +732,5 @@ int smblib_get_prop_batt_temp(struct smb_charger *chg, + + int smblib_init(struct smb_charger *chg); + int smblib_deinit(struct smb_charger *chg); ++void orderly_poweroff(bool force); + #endif /* __SMB2_CHARGER_H */ +diff --git a/drivers/power/supply/rt5033_battery.c b/drivers/power/supply/rt5033_battery.c +index bcdd83048492..9310b85f3405 100644 +--- a/drivers/power/supply/rt5033_battery.c ++++ b/drivers/power/supply/rt5033_battery.c +@@ -167,9 +167,16 @@ static const struct i2c_device_id rt5033_battery_id[] = { + }; + MODULE_DEVICE_TABLE(i2c, rt5033_battery_id); + ++static const struct of_device_id rt5033_battery_of_match[] = { ++ { .compatible = "richtek,rt5033-battery", }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, rt5033_battery_of_match); ++ + static struct i2c_driver rt5033_battery_driver = { + .driver = { + .name = "rt5033-battery", ++ .of_match_table = rt5033_battery_of_match, + }, + .probe = rt5033_battery_probe, + .remove = rt5033_battery_remove, +diff --git a/drivers/power/supply/s3c_adc_battery.c b/drivers/power/supply/s3c_adc_battery.c +index 0ffe5cd3abf6..06b412c43aa7 100644 +--- a/drivers/power/supply/s3c_adc_battery.c ++++ b/drivers/power/supply/s3c_adc_battery.c +@@ -392,7 +392,7 @@ static int s3c_adc_bat_remove(struct platform_device *pdev) + gpio_free(pdata->gpio_charge_finished); + } + +- cancel_delayed_work(&bat_work); ++ cancel_delayed_work_sync(&bat_work); + + if (pdata->exit) + pdata->exit(); +diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c +index 072c5189bd6d..0655dbdc7000 100644 +--- a/drivers/power/supply/smb347-charger.c ++++ b/drivers/power/supply/smb347-charger.c +@@ -1141,6 +1141,7 @@ static bool smb347_volatile_reg(struct device *dev, unsigned int reg) + switch (reg) { + case IRQSTAT_A: + case IRQSTAT_C: ++ case IRQSTAT_D: + case IRQSTAT_E: + case IRQSTAT_F: + case STAT_A: +diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c +index 57246cdbd042..925abec45380 100644 +--- a/drivers/power/supply/test_power.c ++++ b/drivers/power/supply/test_power.c +@@ -344,6 +344,7 @@ static int param_set_ac_online(const char *key, const struct kernel_param *kp) + static int param_get_ac_online(char *buffer, const struct kernel_param *kp) + { + strcpy(buffer, map_get_key(map_ac_online, ac_online, "unknown")); ++ strcat(buffer, "\n"); + return strlen(buffer); + } + +@@ -357,6 +358,7 @@ static int param_set_usb_online(const char *key, const struct kernel_param *kp) + static int param_get_usb_online(char *buffer, const struct kernel_param *kp) + { + strcpy(buffer, map_get_key(map_ac_online, usb_online, "unknown")); ++ strcat(buffer, "\n"); + return strlen(buffer); + } + +@@ -371,6 +373,7 @@ static int param_set_battery_status(const char *key, + static int param_get_battery_status(char *buffer, const struct kernel_param *kp) + { + strcpy(buffer, map_get_key(map_status, battery_status, "unknown")); ++ strcat(buffer, "\n"); + return strlen(buffer); + } + +@@ -385,6 +388,7 @@ static int param_set_battery_health(const char *key, + static int param_get_battery_health(char *buffer, const struct kernel_param *kp) + { + strcpy(buffer, map_get_key(map_health, battery_health, "unknown")); ++ strcat(buffer, "\n"); + return strlen(buffer); + } + +@@ -400,6 +404,7 @@ static int param_get_battery_present(char *buffer, + const struct kernel_param *kp) + { + 
strcpy(buffer, map_get_key(map_present, battery_present, "unknown")); ++ strcat(buffer, "\n"); + return strlen(buffer); + } + +@@ -417,6 +422,7 @@ static int param_get_battery_technology(char *buffer, + { + strcpy(buffer, + map_get_key(map_technology, battery_technology, "unknown")); ++ strcat(buffer, "\n"); + return strlen(buffer); + } + +diff --git a/drivers/power/supply/tps65090-charger.c b/drivers/power/supply/tps65090-charger.c +index 1b4b5e09538e..297bf58f0d4f 100644 +--- a/drivers/power/supply/tps65090-charger.c ++++ b/drivers/power/supply/tps65090-charger.c +@@ -311,7 +311,7 @@ static int tps65090_charger_probe(struct platform_device *pdev) + + if (irq != -ENXIO) { + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, +- tps65090_charger_isr, 0, "tps65090-charger", cdata); ++ tps65090_charger_isr, IRQF_ONESHOT, "tps65090-charger", cdata); + if (ret) { + dev_err(cdata->dev, + "Unable to register irq %d err %d\n", irq, +diff --git a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c +index 9fd019f9b88c..a6b4eb61b4bb 100644 +--- a/drivers/power/supply/tps65217_charger.c ++++ b/drivers/power/supply/tps65217_charger.c +@@ -238,7 +238,7 @@ static int tps65217_charger_probe(struct platform_device *pdev) + if (irq != -ENXIO) { + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, + tps65217_charger_irq, +- 0, "tps65217-charger", ++ IRQF_ONESHOT, "tps65217-charger", + charger); + if (ret) { + dev_err(charger->dev, +diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c +index 5b10b50f8686..5c064df7d81f 100644 +--- a/drivers/powercap/powercap_sys.c ++++ b/drivers/powercap/powercap_sys.c +@@ -379,9 +379,9 @@ static void create_power_zone_common_attributes( + &dev_attr_max_energy_range_uj.attr; + if (power_zone->ops->get_energy_uj) { + if (power_zone->ops->reset_energy_uj) +- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO; ++ dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUSR; + else +- dev_attr_energy_uj.attr.mode = S_IRUGO; ++ dev_attr_energy_uj.attr.mode = S_IRUSR; + power_zone->zone_dev_attrs[count++] = + &dev_attr_energy_uj.attr; + } +diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c +index 8c3f5adf1bc6..2d7618375662 100644 +--- a/drivers/ps3/ps3stor_lib.c ++++ b/drivers/ps3/ps3stor_lib.c +@@ -201,7 +201,7 @@ int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler) + dev->bounce_lpar = ps3_mm_phys_to_lpar(__pa(dev->bounce_buf)); + dev->bounce_dma = dma_map_single(&dev->sbd.core, dev->bounce_buf, + dev->bounce_size, DMA_BIDIRECTIONAL); +- if (!dev->bounce_dma) { ++ if (dma_mapping_error(&dev->sbd.core, dev->bounce_dma)) { + dev_err(&dev->sbd.core, "%s:%u: map DMA region failed\n", + __func__, __LINE__); + error = -ENODEV; +diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c +index 31b01035d0ab..8cfba3614e60 100644 +--- a/drivers/pwm/pwm-bcm-iproc.c ++++ b/drivers/pwm/pwm-bcm-iproc.c +@@ -85,8 +85,6 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + u64 tmp, multi, rate; + u32 value, prescale; + +- rate = clk_get_rate(ip->clk); +- + value = readl(ip->base + IPROC_PWM_CTRL_OFFSET); + + if (value & BIT(IPROC_PWM_CTRL_EN_SHIFT(pwm->hwpwm))) +@@ -99,6 +97,13 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + else + state->polarity = PWM_POLARITY_INVERSED; + ++ rate = clk_get_rate(ip->clk); ++ if (rate == 0) { ++ state->period = 0; ++ state->duty_cycle = 0; ++ return; ++ } ++ + value = readl(ip->base + 
IPROC_PWM_PRESCALE_OFFSET); + prescale = value >> IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm); + prescale &= IPROC_PWM_PRESCALE_MAX; +diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c +index 744d56197286..1cc671797953 100644 +--- a/drivers/pwm/pwm-rockchip.c ++++ b/drivers/pwm/pwm-rockchip.c +@@ -366,7 +366,6 @@ static int rockchip_pwm_probe(struct platform_device *pdev) + + ret = pwmchip_add(&pc->chip); + if (ret < 0) { +- clk_unprepare(pc->clk); + dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); + } + +diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c +index 6c6b44fd3f43..2d11ac277de8 100644 +--- a/drivers/pwm/pwm-spear.c ++++ b/drivers/pwm/pwm-spear.c +@@ -231,10 +231,6 @@ static int spear_pwm_probe(struct platform_device *pdev) + static int spear_pwm_remove(struct platform_device *pdev) + { + struct spear_pwm_chip *pc = platform_get_drvdata(pdev); +- int i; +- +- for (i = 0; i < NUM_PWM; i++) +- pwm_disable(&pc->chip.pwms[i]); + + /* clk was prepared in probe, hence unprepare it here */ + clk_unprepare(pc->clk); +diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c +index 7e8906d6ab7a..d76698ce6472 100644 +--- a/drivers/pwm/pwm-tegra.c ++++ b/drivers/pwm/pwm-tegra.c +@@ -228,7 +228,6 @@ static int tegra_pwm_probe(struct platform_device *pdev) + static int tegra_pwm_remove(struct platform_device *pdev) + { + struct tegra_pwm_chip *pc = platform_get_drvdata(pdev); +- unsigned int i; + int err; + + if (WARN_ON(!pc)) +@@ -238,18 +237,6 @@ static int tegra_pwm_remove(struct platform_device *pdev) + if (err < 0) + return err; + +- for (i = 0; i < pc->chip.npwm; i++) { +- struct pwm_device *pwm = &pc->chip.pwms[i]; +- +- if (!pwm_is_enabled(pwm)) +- if (clk_prepare_enable(pc->clk) < 0) +- continue; +- +- pwm_writel(pc, i, 0); +- +- clk_disable_unprepare(pc->clk); +- } +- + reset_control_assert(pc->rst); + clk_disable_unprepare(pc->clk); + +diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig +index d6d2f20c4597..21df2816def7 100644 +--- a/drivers/rapidio/Kconfig ++++ b/drivers/rapidio/Kconfig +@@ -25,7 +25,7 @@ config RAPIDIO_ENABLE_RX_TX_PORTS + config RAPIDIO_DMA_ENGINE + bool "DMA Engine support for RapidIO" + depends on RAPIDIO +- select DMADEVICES ++ depends on DMADEVICES + select DMA_ENGINE + help + Say Y here if you want to use DMA Engine frameork for RapidIO data +diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c +index ebe8e8dc4677..c246d3a2fc5f 100644 +--- a/drivers/rapidio/devices/rio_mport_cdev.c ++++ b/drivers/rapidio/devices/rio_mport_cdev.c +@@ -901,15 +901,16 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, + if (pinned < 0) { + rmcd_error("get_user_pages err=%ld", pinned); + nr_pages = 0; +- } else ++ } else { + rmcd_error("pinned %ld out of %ld pages", + pinned, nr_pages); ++ /* ++ * Set nr_pages up to mean "how many pages to unpin, in ++ * the error handler: ++ */ ++ nr_pages = pinned; ++ } + ret = -EFAULT; +- /* +- * Set nr_pages up to mean "how many pages to unpin, in +- * the error handler: +- */ +- nr_pages = pinned; + goto err_pg; + } + +@@ -1739,6 +1740,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, + struct rio_dev *rdev; + struct rio_switch *rswitch = NULL; + struct rio_mport *mport; ++ struct device *dev; + size_t size; + u32 rval; + u32 swpinfo = 0; +@@ -1753,8 +1755,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, + rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name, + dev_info.comptag, dev_info.destid, 
dev_info.hopcount); + +- if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) { ++ dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name); ++ if (dev) { + rmcd_debug(RDEV, "device %s already exists", dev_info.name); ++ put_device(dev); + return -EEXIST; + } + +diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c +index b29fc258eeba..ab57a1eb519f 100644 +--- a/drivers/rapidio/rio_cm.c ++++ b/drivers/rapidio/rio_cm.c +@@ -2136,6 +2136,14 @@ static int riocm_add_mport(struct device *dev, + return -ENODEV; + } + ++ cm->rx_wq = create_workqueue(DRV_NAME "/rxq"); ++ if (!cm->rx_wq) { ++ rio_release_inb_mbox(mport, cmbox); ++ rio_release_outb_mbox(mport, cmbox); ++ kfree(cm); ++ return -ENOMEM; ++ } ++ + /* + * Allocate and register inbound messaging buffers to be ready + * to receive channel and system management requests +@@ -2146,15 +2154,6 @@ static int riocm_add_mport(struct device *dev, + cm->rx_slots = RIOCM_RX_RING_SIZE; + mutex_init(&cm->rx_lock); + riocm_rx_fill(cm, RIOCM_RX_RING_SIZE); +- cm->rx_wq = create_workqueue(DRV_NAME "/rxq"); +- if (!cm->rx_wq) { +- riocm_error("failed to allocate IBMBOX_%d on %s", +- cmbox, mport->name); +- rio_release_outb_mbox(mport, cmbox); +- kfree(cm); +- return -ENOMEM; +- } +- + INIT_WORK(&cm->rx_work, rio_ibmsg_handler); + + cm->tx_slot = 0; +diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c +index a3ade9e4ef47..86776d45b68e 100644 +--- a/drivers/regulator/axp20x-regulator.c ++++ b/drivers/regulator/axp20x-regulator.c +@@ -415,7 +415,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) + static int axp20x_regulator_parse_dt(struct platform_device *pdev) + { + struct device_node *np, *regulators; +- int ret; ++ int ret = 0; + u32 dcdcfreq = 0; + + np = of_node_get(pdev->dev.parent->of_node); +@@ -430,13 +430,12 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev) + ret = axp20x_set_dcdc_freq(pdev, dcdcfreq); + if (ret < 0) { + dev_err(&pdev->dev, "Error setting dcdc frequency: %d\n", ret); +- return ret; + } +- + of_node_put(regulators); + } + +- return 0; ++ of_node_put(np); ++ return ret; + } + + static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 workmode) +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c +index 0d8130450dd3..095ec338de13 100644 +--- a/drivers/regulator/core.c ++++ b/drivers/regulator/core.c +@@ -1052,7 +1052,6 @@ static int _regulator_do_enable(struct regulator_dev *rdev); + /** + * set_machine_constraints - sets regulator constraints + * @rdev: regulator source +- * @constraints: constraints to apply + * + * Allows platform initialisation code to define and constrain + * regulator circuits e.g. valid voltage/current ranges, etc. NOTE: +@@ -1060,21 +1059,11 @@ static int _regulator_do_enable(struct regulator_dev *rdev); + * regulator operations to proceed i.e. set_voltage, set_current_limit, + * set_mode. 
+ */ +-static int set_machine_constraints(struct regulator_dev *rdev, +- const struct regulation_constraints *constraints) ++static int set_machine_constraints(struct regulator_dev *rdev) + { + int ret = 0; + const struct regulator_ops *ops = rdev->desc->ops; + +- if (constraints) +- rdev->constraints = kmemdup(constraints, sizeof(*constraints), +- GFP_KERNEL); +- else +- rdev->constraints = kzalloc(sizeof(*constraints), +- GFP_KERNEL); +- if (!rdev->constraints) +- return -ENOMEM; +- + ret = machine_constraints_voltage(rdev, rdev->constraints); + if (ret != 0) + return ret; +@@ -1218,7 +1207,7 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, + const char *consumer_dev_name, + const char *supply) + { +- struct regulator_map *node; ++ struct regulator_map *node, *new_node; + int has_dev; + + if (supply == NULL) +@@ -1229,6 +1218,22 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, + else + has_dev = 0; + ++ new_node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL); ++ if (new_node == NULL) ++ return -ENOMEM; ++ ++ new_node->regulator = rdev; ++ new_node->supply = supply; ++ ++ if (has_dev) { ++ new_node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL); ++ if (new_node->dev_name == NULL) { ++ kfree(new_node); ++ return -ENOMEM; ++ } ++ } ++ ++ mutex_lock(®ulator_list_mutex); + list_for_each_entry(node, ®ulator_map_list, list) { + if (node->dev_name && consumer_dev_name) { + if (strcmp(node->dev_name, consumer_dev_name) != 0) +@@ -1246,26 +1251,19 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, + node->regulator->desc->name, + supply, + dev_name(&rdev->dev), rdev_get_name(rdev)); +- return -EBUSY; ++ goto fail; + } + +- node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL); +- if (node == NULL) +- return -ENOMEM; +- +- node->regulator = rdev; +- node->supply = supply; +- +- if (has_dev) { +- node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL); +- if (node->dev_name == NULL) { +- kfree(node); +- return -ENOMEM; +- } +- } ++ list_add(&new_node->list, ®ulator_map_list); ++ mutex_unlock(®ulator_list_mutex); + +- list_add(&node->list, ®ulator_map_list); + return 0; ++ ++fail: ++ mutex_unlock(®ulator_list_mutex); ++ kfree(new_node->dev_name); ++ kfree(new_node); ++ return -EBUSY; + } + + static void unset_regulator_supplies(struct regulator_dev *rdev) +@@ -1579,6 +1577,15 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) + } + } + ++ if (r == rdev) { ++ dev_err(dev, "Supply for %s (%s) resolved to itself\n", ++ rdev->desc->name, rdev->supply_name); ++ if (!have_full_constraints()) ++ return -EINVAL; ++ r = dummy_regulator_rdev; ++ get_device(&r->dev); ++ } ++ + /* Recursively resolve the supply of the supply */ + ret = regulator_resolve_supply(r); + if (ret < 0) { +@@ -3244,6 +3251,8 @@ static int _regulator_get_voltage(struct regulator_dev *rdev) + ret = rdev->desc->fixed_uV; + } else if (rdev->supply) { + ret = _regulator_get_voltage(rdev->supply->rdev); ++ } else if (rdev->supply_name) { ++ return -EPROBE_DEFER; + } else { + return -EINVAL; + } +@@ -4317,7 +4326,6 @@ struct regulator_dev * + regulator_register(const struct regulator_desc *regulator_desc, + const struct regulator_config *cfg) + { +- const struct regulation_constraints *constraints = NULL; + const struct regulator_init_data *init_data; + struct regulator_config *config = NULL; + static atomic_t regulator_no = ATOMIC_INIT(-1); +@@ -4417,40 +4425,51 @@ regulator_register(const struct regulator_desc *regulator_desc, + + /* set regulator 
constraints */ + if (init_data) +- constraints = &init_data->constraints; ++ rdev->constraints = kmemdup(&init_data->constraints, ++ sizeof(*rdev->constraints), ++ GFP_KERNEL); ++ else ++ rdev->constraints = kzalloc(sizeof(*rdev->constraints), ++ GFP_KERNEL); ++ if (!rdev->constraints) { ++ ret = -ENOMEM; ++ goto wash; ++ } + + if (init_data && init_data->supply_regulator) + rdev->supply_name = init_data->supply_regulator; + else if (regulator_desc->supply_name) + rdev->supply_name = regulator_desc->supply_name; + +- /* +- * Attempt to resolve the regulator supply, if specified, +- * but don't return an error if we fail because we will try +- * to resolve it again later as more regulators are added. +- */ +- if (regulator_resolve_supply(rdev)) +- rdev_dbg(rdev, "unable to resolve supply\n"); +- +- ret = set_machine_constraints(rdev, constraints); ++ ret = set_machine_constraints(rdev); ++ if (ret == -EPROBE_DEFER) { ++ /* Regulator might be in bypass mode and so needs its supply ++ * to set the constraints */ ++ /* FIXME: this currently triggers a chicken-and-egg problem ++ * when creating -SUPPLY symlink in sysfs to a regulator ++ * that is just being created */ ++ ret = regulator_resolve_supply(rdev); ++ if (!ret) ++ ret = set_machine_constraints(rdev); ++ else ++ rdev_dbg(rdev, "unable to resolve supply early: %pe\n", ++ ERR_PTR(ret)); ++ } + if (ret < 0) + goto wash; + + /* add consumers devices */ + if (init_data) { +- mutex_lock(®ulator_list_mutex); + for (i = 0; i < init_data->num_consumer_supplies; i++) { + ret = set_consumer_device_supply(rdev, + init_data->consumer_supplies[i].dev_name, + init_data->consumer_supplies[i].supply); + if (ret < 0) { +- mutex_unlock(®ulator_list_mutex); + dev_err(dev, "Failed to set supply %s\n", + init_data->consumer_supplies[i].supply); + goto unset_supplies; + } + } +- mutex_unlock(®ulator_list_mutex); + } + + if (!rdev->desc->ops->get_voltage && +diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c +index 9ececfef42d6..bd91c95f73e0 100644 +--- a/drivers/regulator/da9052-regulator.c ++++ b/drivers/regulator/da9052-regulator.c +@@ -258,7 +258,8 @@ static int da9052_regulator_set_voltage_time_sel(struct regulator_dev *rdev, + case DA9052_ID_BUCK3: + case DA9052_ID_LDO2: + case DA9052_ID_LDO3: +- ret = (new_sel - old_sel) * info->step_uV / 6250; ++ ret = DIV_ROUND_UP(abs(new_sel - old_sel) * info->step_uV, ++ 6250); + break; + } + +diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c +index 6d17357b3a24..5f5f63eb8c76 100644 +--- a/drivers/regulator/ti-abb-regulator.c ++++ b/drivers/regulator/ti-abb-regulator.c +@@ -342,8 +342,17 @@ static int ti_abb_set_voltage_sel(struct regulator_dev *rdev, unsigned sel) + return ret; + } + +- /* If data is exactly the same, then just update index, no change */ + info = &abb->info[sel]; ++ /* ++ * When Linux kernel is starting up, we are'nt sure of the ++ * Bias configuration that bootloader has configured. ++ * So, we get to know the actual setting the first time ++ * we are asked to transition. 
++ */ ++ if (abb->current_info_idx == -EINVAL) ++ goto just_set_abb; ++ ++ /* If data is exactly the same, then just update index, no change */ + oinfo = &abb->info[abb->current_info_idx]; + if (!memcmp(info, oinfo, sizeof(*info))) { + dev_dbg(dev, "%s: Same data new idx=%d, old idx=%d\n", __func__, +@@ -351,6 +360,7 @@ static int ti_abb_set_voltage_sel(struct regulator_dev *rdev, unsigned sel) + goto out; + } + ++just_set_abb: + ret = ti_abb_set_opp(rdev, abb, info); + + out: +diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c +index 2e0caaaa766a..72fc33bba99c 100644 +--- a/drivers/remoteproc/qcom_q6v5_pil.c ++++ b/drivers/remoteproc/qcom_q6v5_pil.c +@@ -193,6 +193,12 @@ static int q6v5_load(struct rproc *rproc, const struct firmware *fw) + { + struct q6v5 *qproc = rproc->priv; + ++ /* MBA is restricted to a maximum size of 1M */ ++ if (fw->size > qproc->mba_size || fw->size > SZ_1M) { ++ dev_err(qproc->dev, "MBA firmware load failed\n"); ++ return -EINVAL; ++ } ++ + memcpy(qproc->mba_region, fw->data, fw->size); + + return 0; +diff --git a/drivers/reset/core.c b/drivers/reset/core.c +index d0ebca301afc..c15c898fe0dc 100644 +--- a/drivers/reset/core.c ++++ b/drivers/reset/core.c +@@ -263,7 +263,10 @@ static struct reset_control *__reset_control_get_internal( + if (!rstc) + return ERR_PTR(-ENOMEM); + +- try_module_get(rcdev->owner); ++ if (!try_module_get(rcdev->owner)) { ++ kfree(rstc); ++ return ERR_PTR(-ENODEV); ++ } + + rstc->rcdev = rcdev; + list_add(&rstc->list, &rcdev->reset_control_head); +diff --git a/drivers/reset/reset-ti-syscon.c b/drivers/reset/reset-ti-syscon.c +index 1799fd423901..54ae04333d75 100644 +--- a/drivers/reset/reset-ti-syscon.c ++++ b/drivers/reset/reset-ti-syscon.c +@@ -58,8 +58,8 @@ struct ti_syscon_reset_data { + unsigned int nr_controls; + }; + +-#define to_ti_syscon_reset_data(rcdev) \ +- container_of(rcdev, struct ti_syscon_reset_data, rcdev) ++#define to_ti_syscon_reset_data(_rcdev) \ ++ container_of(_rcdev, struct ti_syscon_reset_data, rcdev) + + /** + * ti_syscon_reset_assert() - assert device reset +diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c +index 182fdd00e290..ecd61573dd31 100644 +--- a/drivers/rtc/rtc-max77686.c ++++ b/drivers/rtc/rtc-max77686.c +@@ -718,8 +718,8 @@ static int max77686_init_rtc_regmap(struct max77686_rtc_info *info) + + add_rtc_irq: + ret = regmap_add_irq_chip(info->rtc_regmap, info->rtc_irq, +- IRQF_TRIGGER_FALLING | IRQF_ONESHOT | +- IRQF_SHARED, 0, info->drv_data->rtc_irq_chip, ++ IRQF_ONESHOT | IRQF_SHARED, ++ 0, info->drv_data->rtc_irq_chip, + &info->rtc_irq_data); + if (ret < 0) { + dev_err(info->dev, "Failed to add RTC irq chip: %d\n", ret); +diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c +index 31e7e23cc5be..9396b69f75e8 100644 +--- a/drivers/rtc/rtc-proc.c ++++ b/drivers/rtc/rtc-proc.c +@@ -26,8 +26,8 @@ static bool is_rtc_hctosys(struct rtc_device *rtc) + int size; + char name[NAME_SIZE]; + +- size = scnprintf(name, NAME_SIZE, "rtc%d", rtc->id); +- if (size > NAME_SIZE) ++ size = snprintf(name, NAME_SIZE, "rtc%d", rtc->id); ++ if (size >= NAME_SIZE) + return false; + + return !strncmp(name, CONFIG_RTC_HCTOSYS_DEVICE, NAME_SIZE); +diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c +index d08da371912c..93b1d8d9d2e9 100644 +--- a/drivers/rtc/rtc-rx8010.c ++++ b/drivers/rtc/rtc-rx8010.c +@@ -423,16 +423,26 @@ static int rx8010_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) + } + } + +-static struct rtc_class_ops rx8010_rtc_ops 
= { ++static const struct rtc_class_ops rx8010_rtc_ops_default = { + .read_time = rx8010_get_time, + .set_time = rx8010_set_time, + .ioctl = rx8010_ioctl, + }; + ++static const struct rtc_class_ops rx8010_rtc_ops_alarm = { ++ .read_time = rx8010_get_time, ++ .set_time = rx8010_set_time, ++ .ioctl = rx8010_ioctl, ++ .read_alarm = rx8010_read_alarm, ++ .set_alarm = rx8010_set_alarm, ++ .alarm_irq_enable = rx8010_alarm_irq_enable, ++}; ++ + static int rx8010_probe(struct i2c_client *client, + const struct i2c_device_id *id) + { + struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); ++ const struct rtc_class_ops *rtc_ops; + struct rx8010_data *rx8010; + int err = 0; + +@@ -463,16 +473,16 @@ static int rx8010_probe(struct i2c_client *client, + + if (err) { + dev_err(&client->dev, "unable to request IRQ\n"); +- client->irq = 0; +- } else { +- rx8010_rtc_ops.read_alarm = rx8010_read_alarm; +- rx8010_rtc_ops.set_alarm = rx8010_set_alarm; +- rx8010_rtc_ops.alarm_irq_enable = rx8010_alarm_irq_enable; ++ return err; + } ++ ++ rtc_ops = &rx8010_rtc_ops_alarm; ++ } else { ++ rtc_ops = &rx8010_rtc_ops_default; + } + + rx8010->rtc = devm_rtc_device_register(&client->dev, client->name, +- &rx8010_rtc_ops, THIS_MODULE); ++ rtc_ops, THIS_MODULE); + + if (IS_ERR(rx8010->rtc)) { + dev_err(&client->dev, "unable to register the class device\n"); +diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c +index 0da246505f70..b72d4243bfb9 100644 +--- a/drivers/s390/block/dasd.c ++++ b/drivers/s390/block/dasd.c +@@ -3399,8 +3399,6 @@ void dasd_generic_remove(struct ccw_device *cdev) + struct dasd_device *device; + struct dasd_block *block; + +- cdev->handler = NULL; +- + device = dasd_device_from_cdev(cdev); + if (IS_ERR(device)) { + dasd_remove_sysfs_files(cdev); +@@ -3419,6 +3417,7 @@ void dasd_generic_remove(struct ccw_device *cdev) + * no quite down yet. + */ + dasd_set_target_state(device, DASD_STATE_NEW); ++ cdev->handler = NULL; + /* dasd_delete_device destroys the device reference. 
*/ + block = device->block; + dasd_delete_device(device); +diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c +index f40d606f86c9..2002684a68b3 100644 +--- a/drivers/s390/block/dasd_alias.c ++++ b/drivers/s390/block/dasd_alias.c +@@ -255,7 +255,6 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) + return; + device->discipline->get_uid(device, &uid); + spin_lock_irqsave(&lcu->lock, flags); +- list_del_init(&device->alias_list); + /* make sure that the workers don't use this device */ + if (device == lcu->suc_data.device) { + spin_unlock_irqrestore(&lcu->lock, flags); +@@ -282,6 +281,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) + + spin_lock_irqsave(&aliastree.lock, flags); + spin_lock(&lcu->lock); ++ list_del_init(&device->alias_list); + if (list_empty(&lcu->grouplist) && + list_empty(&lcu->active_devices) && + list_empty(&lcu->inactive_devices)) { +@@ -461,11 +461,19 @@ static int read_unit_address_configuration(struct dasd_device *device, + spin_unlock_irqrestore(&lcu->lock, flags); + + rc = dasd_sleep_on(cqr); +- if (rc && !suborder_not_supported(cqr)) { ++ if (!rc) ++ goto out; ++ ++ if (suborder_not_supported(cqr)) { ++ /* suborder not supported or device unusable for IO */ ++ rc = -EOPNOTSUPP; ++ } else { ++ /* IO failed but should be retried */ + spin_lock_irqsave(&lcu->lock, flags); + lcu->flags |= NEED_UAC_UPDATE; + spin_unlock_irqrestore(&lcu->lock, flags); + } ++out: + dasd_kfree_request(cqr, cqr->memdev); + return rc; + } +@@ -502,6 +510,14 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu) + return rc; + + spin_lock_irqsave(&lcu->lock, flags); ++ /* ++ * there is another update needed skip the remaining handling ++ * the data might already be outdated ++ * but especially do not add the device to an LCU with pending ++ * update ++ */ ++ if (lcu->flags & NEED_UAC_UPDATE) ++ goto out; + lcu->pav = NO_PAV; + for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) { + switch (lcu->uac->unit[i].ua_type) { +@@ -520,6 +536,7 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu) + alias_list) { + _add_device_to_lcu(lcu, device, refdev); + } ++out: + spin_unlock_irqrestore(&lcu->lock, flags); + return 0; + } +@@ -624,6 +641,7 @@ int dasd_alias_add_device(struct dasd_device *device) + } + if (lcu->flags & UPDATE_PENDING) { + list_move(&device->alias_list, &lcu->active_devices); ++ private->pavgroup = NULL; + _schedule_lcu_update(lcu, device); + } + spin_unlock_irqrestore(&lcu->lock, flags); +diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c +index 68d6ee7ae504..3e8b5906548f 100644 +--- a/drivers/s390/char/sclp_vt220.c ++++ b/drivers/s390/char/sclp_vt220.c +@@ -34,8 +34,8 @@ + #define SCLP_VT220_MINOR 65 + #define SCLP_VT220_DRIVER_NAME "sclp_vt220" + #define SCLP_VT220_DEVICE_NAME "ttysclp" +-#define SCLP_VT220_CONSOLE_NAME "ttyS" +-#define SCLP_VT220_CONSOLE_INDEX 1 /* console=ttyS1 */ ++#define SCLP_VT220_CONSOLE_NAME "ttysclp" ++#define SCLP_VT220_CONSOLE_INDEX 0 /* console=ttysclp0 */ + + /* Representation of a single write request */ + struct sclp_vt220_request { +diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c +index 876c7e6e3a99..6b8e7086c807 100644 +--- a/drivers/s390/cio/chp.c ++++ b/drivers/s390/cio/chp.c +@@ -254,6 +254,9 @@ static ssize_t chp_status_write(struct device *dev, + if (!num_args) + return count; + ++ /* Wait until previous actions have settled. 
*/ ++ css_wait_for_slow_path(); ++ + if (!strncasecmp(cmd, "on", 2) || !strcmp(cmd, "1")) { + mutex_lock(&cp->lock); + error = s390_vary_chpid(cp->chpid, 1); +diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c +index 67903c93328b..6e695bc96b88 100644 +--- a/drivers/s390/cio/chsc.c ++++ b/drivers/s390/cio/chsc.c +@@ -769,8 +769,6 @@ int chsc_chp_vary(struct chp_id chpid, int on) + { + struct channel_path *chp = chpid_to_chp(chpid); + +- /* Wait until previous actions have settled. */ +- css_wait_for_slow_path(); + /* + * Redo PathVerification on the devices the chpid connects to + */ +diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c +index 39a2b0cde9e4..d81fdcd6a1fe 100644 +--- a/drivers/s390/cio/css.c ++++ b/drivers/s390/cio/css.c +@@ -529,6 +529,11 @@ static int slow_eval_known_fn(struct subchannel *sch, void *data) + rc = css_evaluate_known_subchannel(sch, 1); + if (rc == -EAGAIN) + css_schedule_eval(sch->schid); ++ /* ++ * The loop might take long time for platforms with lots of ++ * known devices. Allow scheduling here. ++ */ ++ cond_resched(); + } + return 0; + } +diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h +index 7e70f9298cc1..11f6ebd04545 100644 +--- a/drivers/s390/cio/qdio.h ++++ b/drivers/s390/cio/qdio.h +@@ -376,7 +376,6 @@ static inline int multicast_outbound(struct qdio_q *q) + extern u64 last_ai_time; + + /* prototypes for thin interrupt */ +-void qdio_setup_thinint(struct qdio_irq *irq_ptr); + int qdio_establish_thinint(struct qdio_irq *irq_ptr); + void qdio_shutdown_thinint(struct qdio_irq *irq_ptr); + void tiqdio_add_input_queues(struct qdio_irq *irq_ptr); +diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c +index d0090c5c88e7..a64615a10352 100644 +--- a/drivers/s390/cio/qdio_setup.c ++++ b/drivers/s390/cio/qdio_setup.c +@@ -479,7 +479,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data) + setup_queues(irq_ptr, init_data); + + setup_qib(irq_ptr, init_data); +- qdio_setup_thinint(irq_ptr); + set_impl_params(irq_ptr, init_data->qib_param_field_format, + init_data->qib_param_field, + init_data->input_slib_elements, +diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c +index debe69adfc70..aecb6445a567 100644 +--- a/drivers/s390/cio/qdio_thinint.c ++++ b/drivers/s390/cio/qdio_thinint.c +@@ -268,17 +268,19 @@ int __init tiqdio_register_thinints(void) + + int qdio_establish_thinint(struct qdio_irq *irq_ptr) + { ++ int rc; ++ + if (!is_thinint_irq(irq_ptr)) + return 0; +- return set_subchannel_ind(irq_ptr, 0); +-} + +-void qdio_setup_thinint(struct qdio_irq *irq_ptr) +-{ +- if (!is_thinint_irq(irq_ptr)) +- return; + irq_ptr->dsci = get_indicator(); + DBF_HEX(&irq_ptr->dsci, sizeof(void *)); ++ ++ rc = set_subchannel_ind(irq_ptr, 0); ++ if (rc) ++ put_indicator(irq_ptr->dsci); ++ ++ return rc; + } + + void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) +diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c +index 51152681aba6..c878c8796616 100644 +--- a/drivers/s390/net/qeth_l2_main.c ++++ b/drivers/s390/net/qeth_l2_main.c +@@ -1675,6 +1675,10 @@ static void qeth_bridge_state_change(struct qeth_card *card, + int extrasize; + + QETH_CARD_TEXT(card, 2, "brstchng"); ++ if (qports->num_entries == 0) { ++ QETH_CARD_TEXT(card, 2, "BPempty"); ++ return; ++ } + if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { + QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length); + return; +diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c +index 
c7be7bb37209..b9b4491d732a 100644 +--- a/drivers/scsi/BusLogic.c ++++ b/drivers/scsi/BusLogic.c +@@ -3081,11 +3081,11 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command, + ccb->opcode = BLOGIC_INITIATOR_CCB_SG; + ccb->datalen = count * sizeof(struct blogic_sg_seg); + if (blogic_multimaster_type(adapter)) +- ccb->data = (void *)((unsigned int) ccb->dma_handle + ++ ccb->data = (unsigned int) ccb->dma_handle + + ((unsigned long) &ccb->sglist - +- (unsigned long) ccb)); ++ (unsigned long) ccb); + else +- ccb->data = ccb->sglist; ++ ccb->data = virt_to_32bit_virt(ccb->sglist); + + scsi_for_each_sg(command, sg, count, i) { + ccb->sglist[i].segbytes = sg_dma_len(sg); +diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h +index b53ec2f1e8cd..5c950a7a1b1c 100644 +--- a/drivers/scsi/BusLogic.h ++++ b/drivers/scsi/BusLogic.h +@@ -821,7 +821,7 @@ struct blogic_ccb { + unsigned char cdblen; /* Byte 2 */ + unsigned char sense_datalen; /* Byte 3 */ + u32 datalen; /* Bytes 4-7 */ +- void *data; /* Bytes 8-11 */ ++ u32 data; /* Bytes 8-11 */ + unsigned char:8; /* Byte 12 */ + unsigned char:8; /* Byte 13 */ + enum blogic_adapter_status adapter_status; /* Byte 14 */ +diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c +index 867b864f5047..4bca37d52bad 100644 +--- a/drivers/scsi/FlashPoint.c ++++ b/drivers/scsi/FlashPoint.c +@@ -40,7 +40,7 @@ struct sccb_mgr_info { + u16 si_per_targ_ultra_nego; + u16 si_per_targ_no_disc; + u16 si_per_targ_wide_nego; +- u16 si_flags; ++ u16 si_mflags; + unsigned char si_card_family; + unsigned char si_bustype; + unsigned char si_card_model[3]; +@@ -1070,22 +1070,22 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo) + ScamFlg = + (unsigned char)FPT_utilEERead(ioport, SCAM_CONFIG / 2); + +- pCardInfo->si_flags = 0x0000; ++ pCardInfo->si_mflags = 0x0000; + + if (i & 0x01) +- pCardInfo->si_flags |= SCSI_PARITY_ENA; ++ pCardInfo->si_mflags |= SCSI_PARITY_ENA; + + if (!(i & 0x02)) +- pCardInfo->si_flags |= SOFT_RESET; ++ pCardInfo->si_mflags |= SOFT_RESET; + + if (i & 0x10) +- pCardInfo->si_flags |= EXTENDED_TRANSLATION; ++ pCardInfo->si_mflags |= EXTENDED_TRANSLATION; + + if (ScamFlg & SCAM_ENABLED) +- pCardInfo->si_flags |= FLAG_SCAM_ENABLED; ++ pCardInfo->si_mflags |= FLAG_SCAM_ENABLED; + + if (ScamFlg & SCAM_LEVEL2) +- pCardInfo->si_flags |= FLAG_SCAM_LEVEL2; ++ pCardInfo->si_mflags |= FLAG_SCAM_LEVEL2; + + j = (RD_HARPOON(ioport + hp_bm_ctrl) & ~SCSI_TERM_ENA_L); + if (i & 0x04) { +@@ -1101,7 +1101,7 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo) + + if (!(RD_HARPOON(ioport + hp_page_ctrl) & NARROW_SCSI_CARD)) + +- pCardInfo->si_flags |= SUPPORT_16TAR_32LUN; ++ pCardInfo->si_mflags |= SUPPORT_16TAR_32LUN; + + pCardInfo->si_card_family = HARPOON_FAMILY; + pCardInfo->si_bustype = BUSTYPE_PCI; +@@ -1137,15 +1137,15 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo) + + if (pCardInfo->si_card_model[1] == '3') { + if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7)) +- pCardInfo->si_flags |= LOW_BYTE_TERM; ++ pCardInfo->si_mflags |= LOW_BYTE_TERM; + } else if (pCardInfo->si_card_model[2] == '0') { + temp = RD_HARPOON(ioport + hp_xfer_pad); + WR_HARPOON(ioport + hp_xfer_pad, (temp & ~BIT(4))); + if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7)) +- pCardInfo->si_flags |= LOW_BYTE_TERM; ++ pCardInfo->si_mflags |= LOW_BYTE_TERM; + WR_HARPOON(ioport + hp_xfer_pad, (temp | BIT(4))); + if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7)) +- pCardInfo->si_flags |= HIGH_BYTE_TERM; ++ pCardInfo->si_mflags 
|= HIGH_BYTE_TERM; + WR_HARPOON(ioport + hp_xfer_pad, temp); + } else { + temp = RD_HARPOON(ioport + hp_ee_ctrl); +@@ -1163,9 +1163,9 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo) + WR_HARPOON(ioport + hp_ee_ctrl, temp); + WR_HARPOON(ioport + hp_xfer_pad, temp2); + if (!(temp3 & BIT(7))) +- pCardInfo->si_flags |= LOW_BYTE_TERM; ++ pCardInfo->si_mflags |= LOW_BYTE_TERM; + if (!(temp3 & BIT(6))) +- pCardInfo->si_flags |= HIGH_BYTE_TERM; ++ pCardInfo->si_mflags |= HIGH_BYTE_TERM; + } + + ARAM_ACCESS(ioport); +@@ -1272,7 +1272,7 @@ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info + WR_HARPOON(ioport + hp_arb_id, pCardInfo->si_id); + CurrCard->ourId = pCardInfo->si_id; + +- i = (unsigned char)pCardInfo->si_flags; ++ i = (unsigned char)pCardInfo->si_mflags; + if (i & SCSI_PARITY_ENA) + WR_HARPOON(ioport + hp_portctrl_1, (HOST_MODE8 | CHK_SCSI_P)); + +@@ -1286,14 +1286,14 @@ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info + j |= SCSI_TERM_ENA_H; + WR_HARPOON(ioport + hp_ee_ctrl, j); + +- if (!(pCardInfo->si_flags & SOFT_RESET)) { ++ if (!(pCardInfo->si_mflags & SOFT_RESET)) { + + FPT_sresb(ioport, thisCard); + + FPT_scini(thisCard, pCardInfo->si_id, 0); + } + +- if (pCardInfo->si_flags & POST_ALL_UNDERRRUNS) ++ if (pCardInfo->si_mflags & POST_ALL_UNDERRRUNS) + CurrCard->globalFlags |= F_NO_FILTER; + + if (pCurrNvRam) { +diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c +index 065f11a1964d..39deea8601d6 100644 +--- a/drivers/scsi/aacraid/aachba.c ++++ b/drivers/scsi/aacraid/aachba.c +@@ -1929,13 +1929,13 @@ static int aac_read(struct scsi_cmnd * scsicmd) + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | + SAM_STAT_CHECK_CONDITION; + set_sense(&dev->fsa_dev[cid].sense_data, +- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, ++ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE, + ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); + memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, + min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), + SCSI_SENSE_BUFFERSIZE)); + scsicmd->scsi_done(scsicmd); +- return 1; ++ return 0; + } + + dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n", +@@ -2023,13 +2023,13 @@ static int aac_write(struct scsi_cmnd * scsicmd) + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | + SAM_STAT_CHECK_CONDITION; + set_sense(&dev->fsa_dev[cid].sense_data, +- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, ++ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE, + ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); + memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, + min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), + SCSI_SENSE_BUFFERSIZE)); + scsicmd->scsi_done(scsicmd); +- return 1; ++ return 0; + } + + dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n", +diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c +index def3208dd290..9b5832b46dec 100644 +--- a/drivers/scsi/aic7xxx/aic7xxx_core.c ++++ b/drivers/scsi/aic7xxx/aic7xxx_core.c +@@ -500,7 +500,7 @@ ahc_inq(struct ahc_softc *ahc, u_int port) + return ((ahc_inb(ahc, port)) + | (ahc_inb(ahc, port+1) << 8) + | (ahc_inb(ahc, port+2) << 16) +- | (ahc_inb(ahc, port+3) << 24) ++ | (((uint64_t)ahc_inb(ahc, port+3)) << 24) + | (((uint64_t)ahc_inb(ahc, port+4)) << 32) + | (((uint64_t)ahc_inb(ahc, port+5)) << 40) + | (((uint64_t)ahc_inb(ahc, port+6)) << 48) +diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c +index 12b88294d667..76ad20e49126 100644 +--- 
a/drivers/scsi/arm/acornscsi.c ++++ b/drivers/scsi/arm/acornscsi.c +@@ -2913,8 +2913,10 @@ static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id) + + ashost->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); + ashost->fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); +- if (!ashost->base || !ashost->fast) ++ if (!ashost->base || !ashost->fast) { ++ ret = -ENOMEM; + goto out_put; ++ } + + host->irq = ec->irq; + ashost->host = host; +diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c +index edce5f3cfdba..93ba83e3148e 100644 +--- a/drivers/scsi/arm/cumana_2.c ++++ b/drivers/scsi/arm/cumana_2.c +@@ -454,7 +454,7 @@ static int cumanascsi2_probe(struct expansion_card *ec, + + if (info->info.scsi.dma != NO_DMA) + free_dma(info->info.scsi.dma); +- free_irq(ec->irq, host); ++ free_irq(ec->irq, info); + + out_release: + fas216_release(host); +diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c +index e93e047f4316..65bb34ce93b9 100644 +--- a/drivers/scsi/arm/eesox.c ++++ b/drivers/scsi/arm/eesox.c +@@ -575,7 +575,7 @@ static int eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id) + + if (info->info.scsi.dma != NO_DMA) + free_dma(info->info.scsi.dma); +- free_irq(ec->irq, host); ++ free_irq(ec->irq, info); + + out_remove: + fas216_remove(host); +diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c +index 79aa88911b7f..b5e4a25ea1ef 100644 +--- a/drivers/scsi/arm/powertec.c ++++ b/drivers/scsi/arm/powertec.c +@@ -382,7 +382,7 @@ static int powertecscsi_probe(struct expansion_card *ec, + + if (info->info.scsi.dma != NO_DMA) + free_dma(info->info.scsi.dma); +- free_irq(ec->irq, host); ++ free_irq(ec->irq, info); + + out_release: + fas216_release(host); +diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c +index 741cc96379cb..628bf2e6a526 100644 +--- a/drivers/scsi/be2iscsi/be_main.c ++++ b/drivers/scsi/be2iscsi/be_main.c +@@ -5847,6 +5847,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, + pci_disable_msix(phba->pcidev); + pci_dev_put(phba->pcidev); + iscsi_host_free(phba->shost); ++ pci_disable_pcie_error_reporting(pcidev); + pci_set_drvdata(pcidev, NULL); + disable_pci: + pci_release_regions(pcidev); +diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig +index d401a096dfc7..2eb2476852b1 100644 +--- a/drivers/scsi/bnx2fc/Kconfig ++++ b/drivers/scsi/bnx2fc/Kconfig +@@ -4,6 +4,7 @@ config SCSI_BNX2X_FCOE + depends on (IPV6 || IPV6=n) + depends on LIBFC + depends on LIBFCOE ++ depends on MMU + select NETDEVICES + select ETHERNET + select NET_VENDOR_BROADCOM +diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig +index ba30ff86d581..b27a3738d940 100644 +--- a/drivers/scsi/bnx2i/Kconfig ++++ b/drivers/scsi/bnx2i/Kconfig +@@ -3,6 +3,7 @@ config SCSI_BNX2_ISCSI + depends on NET + depends on PCI + depends on (IPV6 || IPV6=n) ++ depends on MMU + select SCSI_ISCSI_ATTRS + select NETDEVICES + select ETHERNET +diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c +index dab195f04da7..06ca0495f3e8 100644 +--- a/drivers/scsi/csiostor/csio_hw.c ++++ b/drivers/scsi/csiostor/csio_hw.c +@@ -1973,7 +1973,7 @@ static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info, + FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), + FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), + FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); +- ret = EINVAL; ++ ret = -EINVAL; + goto bye; + } + +diff --git 
a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c +index 60c288526355..dce885276235 100644 +--- a/drivers/scsi/device_handler/scsi_dh_alua.c ++++ b/drivers/scsi/device_handler/scsi_dh_alua.c +@@ -564,10 +564,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) + * even though it shouldn't according to T10. + * The retry without rtpg_ext_hdr_req set + * handles this. ++ * Note: some arrays return a sense key of ILLEGAL_REQUEST ++ * with ASC 00h if they don't support the extended header. + */ + if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) && +- sense_hdr.sense_key == ILLEGAL_REQUEST && +- sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) { ++ sense_hdr.sense_key == ILLEGAL_REQUEST) { + pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP; + goto retry; + } +@@ -657,8 +658,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) + rcu_read_lock(); + list_for_each_entry_rcu(h, + &tmp_pg->dh_list, node) { +- /* h->sdev should always be valid */ +- BUG_ON(!h->sdev); ++ if (!h->sdev) ++ continue; + h->sdev->access_state = desc[0]; + } + rcu_read_unlock(); +@@ -704,7 +705,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) + pg->expiry = 0; + rcu_read_lock(); + list_for_each_entry_rcu(h, &pg->dh_list, node) { +- BUG_ON(!h->sdev); ++ if (!h->sdev) ++ continue; + h->sdev->access_state = + (pg->state & SCSI_ACCESS_STATE_MASK); + if (pg->pref) +@@ -1149,7 +1151,6 @@ static void alua_bus_detach(struct scsi_device *sdev) + spin_lock(&h->pg_lock); + pg = h->pg; + rcu_assign_pointer(h->pg, NULL); +- h->sdev = NULL; + spin_unlock(&h->pg_lock); + if (pg) { + spin_lock_irq(&pg->lock); +@@ -1158,6 +1159,7 @@ static void alua_bus_detach(struct scsi_device *sdev) + kref_put(&pg->kref, release_port_group); + } + sdev->handler_data = NULL; ++ synchronize_rcu(); + kfree(h); + } + +diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c +index 06fbd0b0c68a..6ddb3e9f21ba 100644 +--- a/drivers/scsi/device_handler/scsi_dh_rdac.c ++++ b/drivers/scsi/device_handler/scsi_dh_rdac.c +@@ -526,8 +526,8 @@ static int initialize_controller(struct scsi_device *sdev, + if (!h->ctlr) + err = SCSI_DH_RES_TEMP_UNAVAIL; + else { +- list_add_rcu(&h->node, &h->ctlr->dh_list); + h->sdev = sdev; ++ list_add_rcu(&h->node, &h->ctlr->dh_list); + } + spin_unlock(&list_lock); + } +@@ -852,11 +852,11 @@ static void rdac_bus_detach( struct scsi_device *sdev ) + spin_lock(&list_lock); + if (h->ctlr) { + list_del_rcu(&h->node); +- h->sdev = NULL; + kref_put(&h->ctlr->kref, release_controller); + } + spin_unlock(&list_lock); + sdev->handler_data = NULL; ++ synchronize_rcu(); + kfree(h); + } + +diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c +index 3c2f34db937b..f5f3a8113bc5 100644 +--- a/drivers/scsi/fcoe/fcoe_ctlr.c ++++ b/drivers/scsi/fcoe/fcoe_ctlr.c +@@ -267,9 +267,9 @@ static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new) + WARN_ON(!fcf_dev); + new->fcf_dev = NULL; + fcoe_fcf_device_delete(fcf_dev); +- kfree(new); + mutex_unlock(&cdev->lock); + } ++ kfree(new); + } + + /** +diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c +index 58ce9020d69c..389c13e1c978 100644 +--- a/drivers/scsi/fnic/fnic_main.c ++++ b/drivers/scsi/fnic/fnic_main.c +@@ -735,6 +735,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + for (i = 0; i < FNIC_IO_LOCKS; i++) + spin_lock_init(&fnic->io_req_lock[i]); + ++ err = -ENOMEM; + fnic->io_req_pool = 
mempool_create_slab_pool(2, fnic_io_req_cache); + if (!fnic->io_req_pool) + goto err_out_free_resources; +diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h +index 3fd8b83ffbf9..8039c809cef2 100644 +--- a/drivers/scsi/gdth.h ++++ b/drivers/scsi/gdth.h +@@ -177,9 +177,6 @@ + #define MSG_SIZE 34 /* size of message structure */ + #define MSG_REQUEST 0 /* async. event: message */ + +-/* cacheservice defines */ +-#define SECTOR_SIZE 0x200 /* always 512 bytes per sec. */ +- + /* DPMEM constants */ + #define DPMEM_MAGIC 0xC0FFEE11 + #define IC_HEADER_BYTES 48 +diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c +index 258a3f9a2519..b6a9773326e3 100644 +--- a/drivers/scsi/hosts.c ++++ b/drivers/scsi/hosts.c +@@ -265,12 +265,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, + + device_enable_async_suspend(&shost->shost_dev); + ++ get_device(&shost->shost_gendev); + error = device_add(&shost->shost_dev); + if (error) + goto out_del_gendev; + +- get_device(&shost->shost_gendev); +- + if (shost->transportt->host_size) { + shost->shost_data = kzalloc(shost->transportt->host_size, + GFP_KERNEL); +@@ -307,6 +306,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, + out_del_dev: + device_del(&shost->shost_dev); + out_del_gendev: ++ /* ++ * Host state is SHOST_RUNNING so we have to explicitly release ++ * ->shost_dev. ++ */ ++ put_device(&shost->shost_dev); + device_del(&shost->shost_gendev); + out_destroy_freelist: + device_disable_async_suspend(&shost->shost_gendev); +@@ -368,7 +372,7 @@ static void scsi_host_dev_release(struct device *dev) + + ida_simple_remove(&host_index_ida, shost->host_no); + +- if (parent) ++ if (shost->shost_state != SHOST_CREATED) + put_device(parent); + kfree(shost); + } +@@ -421,8 +425,10 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) + mutex_init(&shost->scan_mutex); + + index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL); +- if (index < 0) +- goto fail_kfree; ++ if (index < 0) { ++ kfree(shost); ++ return NULL; ++ } + shost->host_no = index; + + shost->dma_channel = 0xff; +@@ -509,7 +515,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) + shost_printk(KERN_WARNING, shost, + "error handler thread failed to spawn, error = %ld\n", + PTR_ERR(shost->ehandler)); +- goto fail_index_remove; ++ shost->ehandler = NULL; ++ goto fail; + } + + shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d", +@@ -518,17 +525,18 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) + if (!shost->tmf_work_q) { + shost_printk(KERN_WARNING, shost, + "failed to create tmf workq\n"); +- goto fail_kthread; ++ goto fail; + } + scsi_proc_hostdir_add(shost->hostt); + return shost; ++ fail: ++ /* ++ * Host state is still SHOST_CREATED and that is enough to release ++ * ->shost_gendev. scsi_host_dev_release() will free ++ * dev_name(&shost->shost_dev). 
++ */ ++ put_device(&shost->shost_gendev); + +- fail_kthread: +- kthread_stop(shost->ehandler); +- fail_index_remove: +- ida_simple_remove(&host_index_ida, shost->host_no); +- fail_kfree: +- kfree(shost); + return NULL; + } + EXPORT_SYMBOL(scsi_host_alloc); +diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c +index b82df8cdf962..7f1d6d52d48b 100644 +--- a/drivers/scsi/hpsa.c ++++ b/drivers/scsi/hpsa.c +@@ -8937,7 +8937,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) + /* hook into SCSI subsystem */ + rc = hpsa_scsi_add_host(h); + if (rc) +- goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ ++ goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */ + + /* Monitor the controller for firmware lockups */ + h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; +@@ -8949,6 +8949,8 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) + h->heartbeat_sample_interval); + return 0; + ++clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */ ++ kfree(h->lastlogicals); + clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ + hpsa_free_performant_mode(h); + h->access.set_intr_mask(h, HPSA_INTR_OFF); +diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c +index 54dea767dfde..7865feb8e5e8 100644 +--- a/drivers/scsi/ibmvscsi/ibmvfc.c ++++ b/drivers/scsi/ibmvscsi/ibmvfc.c +@@ -2891,8 +2891,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev) + unsigned long flags = 0; + + spin_lock_irqsave(shost->host_lock, flags); +- if (sdev->type == TYPE_DISK) ++ if (sdev->type == TYPE_DISK) { + sdev->allow_restart = 1; ++ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); ++ } + spin_unlock_irqrestore(shost->host_lock, flags); + return 0; + } +@@ -4804,6 +4806,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) + if (IS_ERR(vhost->work_thread)) { + dev_err(dev, "Couldn't create kernel thread: %ld\n", + PTR_ERR(vhost->work_thread)); ++ rc = PTR_ERR(vhost->work_thread); + goto free_host_mem; + } + +diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c +index e1730227b448..f299839698a3 100644 +--- a/drivers/scsi/ibmvscsi/ibmvscsi.c ++++ b/drivers/scsi/ibmvscsi/ibmvscsi.c +@@ -425,6 +425,8 @@ static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue, + int rc = 0; + struct vio_dev *vdev = to_vio_dev(hostdata->dev); + ++ set_adapter_info(hostdata); ++ + /* Re-enable the CRQ */ + do { + if (rc) +diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c +index d453667612f8..15d64f96e623 100644 +--- a/drivers/scsi/iscsi_boot_sysfs.c ++++ b/drivers/scsi/iscsi_boot_sysfs.c +@@ -360,7 +360,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset, + boot_kobj->kobj.kset = boot_kset->kset; + if (kobject_init_and_add(&boot_kobj->kobj, &iscsi_boot_ktype, + NULL, name, index)) { +- kfree(boot_kobj); ++ kobject_put(&boot_kobj->kobj); + return NULL; + } + boot_kobj->data = data; +diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c +index 9aaa74e349cc..65f0dbfc3a45 100644 +--- a/drivers/scsi/jazz_esp.c ++++ b/drivers/scsi/jazz_esp.c +@@ -170,7 +170,9 @@ static int esp_jazz_probe(struct platform_device *dev) + if (!esp->command_block) + goto fail_unmap_regs; + +- host->irq = platform_get_irq(dev, 0); ++ host->irq = err = platform_get_irq(dev, 0); ++ if (err < 0) ++ goto fail_unmap_command_block; + err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp); + if (err < 0) + 
goto fail_unmap_command_block; +diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c +index 880a9068ca12..ef06af4e3611 100644 +--- a/drivers/scsi/libfc/fc_disc.c ++++ b/drivers/scsi/libfc/fc_disc.c +@@ -595,8 +595,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, + mutex_lock(&disc->disc_mutex); + if (PTR_ERR(fp) == -FC_EX_CLOSED) + goto out; +- if (IS_ERR(fp)) +- goto redisc; ++ if (IS_ERR(fp)) { ++ mutex_lock(&disc->disc_mutex); ++ fc_disc_restart(disc); ++ mutex_unlock(&disc->disc_mutex); ++ goto out; ++ } + + cp = fc_frame_payload_get(fp, sizeof(*cp)); + if (!cp) +@@ -621,7 +625,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, + new_rdata->disc_id = disc->disc_id; + lport->tt.rport_login(new_rdata); + } +- goto out; ++ goto free_fp; + } + rdata->disc_id = disc->disc_id; + lport->tt.rport_login(rdata); +@@ -635,6 +639,8 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, + redisc: + fc_disc_restart(disc); + } ++free_fp: ++ fc_frame_free(fp); + out: + mutex_unlock(&disc->disc_mutex); + kref_put(&rdata->kref, lport->tt.rport_destroy); +diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c +index d0a86ef80652..59fd6101f188 100644 +--- a/drivers/scsi/libfc/fc_exch.c ++++ b/drivers/scsi/libfc/fc_exch.c +@@ -1585,8 +1585,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) + rc = fc_exch_done_locked(ep); + WARN_ON(fc_seq_exch(sp) != ep); + spin_unlock_bh(&ep->ex_lock); +- if (!rc) ++ if (!rc) { + fc_exch_delete(ep); ++ } else { ++ FC_EXCH_DBG(ep, "ep is completed already," ++ "hence skip calling the resp\n"); ++ goto skip_resp; ++ } + } + + /* +@@ -1605,6 +1610,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) + if (!fc_invoke_resp(ep, sp, fp)) + fc_frame_free(fp); + ++skip_resp: + fc_exch_release(ep); + return; + rel: +@@ -1848,10 +1854,16 @@ static void fc_exch_reset(struct fc_exch *ep) + + fc_exch_hold(ep); + +- if (!rc) ++ if (!rc) { + fc_exch_delete(ep); ++ } else { ++ FC_EXCH_DBG(ep, "ep is completed already," ++ "hence skip calling the resp\n"); ++ goto skip_resp; ++ } + + fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED)); ++skip_resp: + fc_seq_set_resp(sp, NULL, ep->arg); + fc_exch_release(ep); + } +diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c +index ae93f45f9cd8..a36817fb0673 100644 +--- a/drivers/scsi/libfc/fc_lport.c ++++ b/drivers/scsi/libfc/fc_lport.c +@@ -1751,7 +1751,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, + + if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) { + FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " +- "lport->mfs:%hu\n", mfs, lport->mfs); ++ "lport->mfs:%u\n", mfs, lport->mfs); + fc_lport_error(lport, fp); + goto out; + } +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c +index c4336b01db23..30e954bb6c81 100644 +--- a/drivers/scsi/libiscsi.c ++++ b/drivers/scsi/libiscsi.c +@@ -570,8 +570,8 @@ static void iscsi_complete_task(struct iscsi_task *task, int state) + if (conn->task == task) + conn->task = NULL; + +- if (conn->ping_task == task) +- conn->ping_task = NULL; ++ if (READ_ONCE(conn->ping_task) == task) ++ WRITE_ONCE(conn->ping_task, NULL); + + /* release get from queueing */ + __iscsi_put_task(task); +@@ -780,6 +780,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, + task->conn->session->age); + } + ++ if (unlikely(READ_ONCE(conn->ping_task) == INVALID_SCSI_TASK)) ++ 
WRITE_ONCE(conn->ping_task, task); ++ + if (!ihost->workq) { + if (iscsi_prep_mgmt_task(conn, task)) + goto free_task; +@@ -987,8 +990,11 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) + struct iscsi_nopout hdr; + struct iscsi_task *task; + +- if (!rhdr && conn->ping_task) +- return -EINVAL; ++ if (!rhdr) { ++ if (READ_ONCE(conn->ping_task)) ++ return -EINVAL; ++ WRITE_ONCE(conn->ping_task, INVALID_SCSI_TASK); ++ } + + memset(&hdr, 0, sizeof(struct iscsi_nopout)); + hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE; +@@ -1003,11 +1009,12 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) + + task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); + if (!task) { ++ if (!rhdr) ++ WRITE_ONCE(conn->ping_task, NULL); + iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); + return -EIO; + } else if (!rhdr) { + /* only track our nops */ +- conn->ping_task = task; + conn->last_ping = jiffies; + } + +@@ -1020,7 +1027,7 @@ static int iscsi_nop_out_rsp(struct iscsi_task *task, + struct iscsi_conn *conn = task->conn; + int rc = 0; + +- if (conn->ping_task != task) { ++ if (READ_ONCE(conn->ping_task) != task) { + /* + * If this is not in response to one of our + * nops then it must be from userspace. +@@ -1377,7 +1384,6 @@ void iscsi_session_failure(struct iscsi_session *session, + enum iscsi_err err) + { + struct iscsi_conn *conn; +- struct device *dev; + + spin_lock_bh(&session->frwd_lock); + conn = session->leadconn; +@@ -1386,10 +1392,8 @@ void iscsi_session_failure(struct iscsi_session *session, + return; + } + +- dev = get_device(&conn->cls_conn->dev); ++ iscsi_get_conn(conn->cls_conn); + spin_unlock_bh(&session->frwd_lock); +- if (!dev) +- return; + /* + * if the host is being removed bypass the connection + * recovery initialization because we are going to kill +@@ -1399,7 +1403,7 @@ void iscsi_session_failure(struct iscsi_session *session, + iscsi_conn_error_event(conn->cls_conn, err); + else + iscsi_conn_failure(conn, err); +- put_device(dev); ++ iscsi_put_conn(conn->cls_conn); + } + EXPORT_SYMBOL_GPL(iscsi_session_failure); + +@@ -1561,14 +1565,9 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) + } + rc = iscsi_prep_scsi_cmd_pdu(conn->task); + if (rc) { +- if (rc == -ENOMEM || rc == -EACCES) { +- spin_lock_bh(&conn->taskqueuelock); +- list_add_tail(&conn->task->running, +- &conn->cmdqueue); +- conn->task = NULL; +- spin_unlock_bh(&conn->taskqueuelock); +- goto done; +- } else ++ if (rc == -ENOMEM || rc == -EACCES) ++ fail_scsi_task(conn->task, DID_IMM_RETRY); ++ else + fail_scsi_task(conn->task, DID_ABORT); + spin_lock_bh(&conn->taskqueuelock); + continue; +@@ -1960,7 +1959,7 @@ static void iscsi_start_tx(struct iscsi_conn *conn) + */ + static int iscsi_has_ping_timed_out(struct iscsi_conn *conn) + { +- if (conn->ping_task && ++ if (READ_ONCE(conn->ping_task) && + time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + + (conn->ping_timeout * HZ), jiffies)) + return 1; +@@ -2095,7 +2094,7 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) + * Checking the transport already or nop from a cmd timeout still + * running + */ +- if (conn->ping_task) { ++ if (READ_ONCE(conn->ping_task)) { + task->have_checked_conn = true; + rc = BLK_EH_RESET_TIMER; + goto done; +@@ -3361,125 +3360,125 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session, + + switch(param) { + case ISCSI_PARAM_FAST_ABORT: +- len = sprintf(buf, "%d\n", session->fast_abort); ++ len = 
sysfs_emit(buf, "%d\n", session->fast_abort); + break; + case ISCSI_PARAM_ABORT_TMO: +- len = sprintf(buf, "%d\n", session->abort_timeout); ++ len = sysfs_emit(buf, "%d\n", session->abort_timeout); + break; + case ISCSI_PARAM_LU_RESET_TMO: +- len = sprintf(buf, "%d\n", session->lu_reset_timeout); ++ len = sysfs_emit(buf, "%d\n", session->lu_reset_timeout); + break; + case ISCSI_PARAM_TGT_RESET_TMO: +- len = sprintf(buf, "%d\n", session->tgt_reset_timeout); ++ len = sysfs_emit(buf, "%d\n", session->tgt_reset_timeout); + break; + case ISCSI_PARAM_INITIAL_R2T_EN: +- len = sprintf(buf, "%d\n", session->initial_r2t_en); ++ len = sysfs_emit(buf, "%d\n", session->initial_r2t_en); + break; + case ISCSI_PARAM_MAX_R2T: +- len = sprintf(buf, "%hu\n", session->max_r2t); ++ len = sysfs_emit(buf, "%hu\n", session->max_r2t); + break; + case ISCSI_PARAM_IMM_DATA_EN: +- len = sprintf(buf, "%d\n", session->imm_data_en); ++ len = sysfs_emit(buf, "%d\n", session->imm_data_en); + break; + case ISCSI_PARAM_FIRST_BURST: +- len = sprintf(buf, "%u\n", session->first_burst); ++ len = sysfs_emit(buf, "%u\n", session->first_burst); + break; + case ISCSI_PARAM_MAX_BURST: +- len = sprintf(buf, "%u\n", session->max_burst); ++ len = sysfs_emit(buf, "%u\n", session->max_burst); + break; + case ISCSI_PARAM_PDU_INORDER_EN: +- len = sprintf(buf, "%d\n", session->pdu_inorder_en); ++ len = sysfs_emit(buf, "%d\n", session->pdu_inorder_en); + break; + case ISCSI_PARAM_DATASEQ_INORDER_EN: +- len = sprintf(buf, "%d\n", session->dataseq_inorder_en); ++ len = sysfs_emit(buf, "%d\n", session->dataseq_inorder_en); + break; + case ISCSI_PARAM_DEF_TASKMGMT_TMO: +- len = sprintf(buf, "%d\n", session->def_taskmgmt_tmo); ++ len = sysfs_emit(buf, "%d\n", session->def_taskmgmt_tmo); + break; + case ISCSI_PARAM_ERL: +- len = sprintf(buf, "%d\n", session->erl); ++ len = sysfs_emit(buf, "%d\n", session->erl); + break; + case ISCSI_PARAM_TARGET_NAME: +- len = sprintf(buf, "%s\n", session->targetname); ++ len = sysfs_emit(buf, "%s\n", session->targetname); + break; + case ISCSI_PARAM_TARGET_ALIAS: +- len = sprintf(buf, "%s\n", session->targetalias); ++ len = sysfs_emit(buf, "%s\n", session->targetalias); + break; + case ISCSI_PARAM_TPGT: +- len = sprintf(buf, "%d\n", session->tpgt); ++ len = sysfs_emit(buf, "%d\n", session->tpgt); + break; + case ISCSI_PARAM_USERNAME: +- len = sprintf(buf, "%s\n", session->username); ++ len = sysfs_emit(buf, "%s\n", session->username); + break; + case ISCSI_PARAM_USERNAME_IN: +- len = sprintf(buf, "%s\n", session->username_in); ++ len = sysfs_emit(buf, "%s\n", session->username_in); + break; + case ISCSI_PARAM_PASSWORD: +- len = sprintf(buf, "%s\n", session->password); ++ len = sysfs_emit(buf, "%s\n", session->password); + break; + case ISCSI_PARAM_PASSWORD_IN: +- len = sprintf(buf, "%s\n", session->password_in); ++ len = sysfs_emit(buf, "%s\n", session->password_in); + break; + case ISCSI_PARAM_IFACE_NAME: +- len = sprintf(buf, "%s\n", session->ifacename); ++ len = sysfs_emit(buf, "%s\n", session->ifacename); + break; + case ISCSI_PARAM_INITIATOR_NAME: +- len = sprintf(buf, "%s\n", session->initiatorname); ++ len = sysfs_emit(buf, "%s\n", session->initiatorname); + break; + case ISCSI_PARAM_BOOT_ROOT: +- len = sprintf(buf, "%s\n", session->boot_root); ++ len = sysfs_emit(buf, "%s\n", session->boot_root); + break; + case ISCSI_PARAM_BOOT_NIC: +- len = sprintf(buf, "%s\n", session->boot_nic); ++ len = sysfs_emit(buf, "%s\n", session->boot_nic); + break; + case ISCSI_PARAM_BOOT_TARGET: +- len = sprintf(buf, "%s\n", 
session->boot_target); ++ len = sysfs_emit(buf, "%s\n", session->boot_target); + break; + case ISCSI_PARAM_AUTO_SND_TGT_DISABLE: +- len = sprintf(buf, "%u\n", session->auto_snd_tgt_disable); ++ len = sysfs_emit(buf, "%u\n", session->auto_snd_tgt_disable); + break; + case ISCSI_PARAM_DISCOVERY_SESS: +- len = sprintf(buf, "%u\n", session->discovery_sess); ++ len = sysfs_emit(buf, "%u\n", session->discovery_sess); + break; + case ISCSI_PARAM_PORTAL_TYPE: +- len = sprintf(buf, "%s\n", session->portal_type); ++ len = sysfs_emit(buf, "%s\n", session->portal_type); + break; + case ISCSI_PARAM_CHAP_AUTH_EN: +- len = sprintf(buf, "%u\n", session->chap_auth_en); ++ len = sysfs_emit(buf, "%u\n", session->chap_auth_en); + break; + case ISCSI_PARAM_DISCOVERY_LOGOUT_EN: +- len = sprintf(buf, "%u\n", session->discovery_logout_en); ++ len = sysfs_emit(buf, "%u\n", session->discovery_logout_en); + break; + case ISCSI_PARAM_BIDI_CHAP_EN: +- len = sprintf(buf, "%u\n", session->bidi_chap_en); ++ len = sysfs_emit(buf, "%u\n", session->bidi_chap_en); + break; + case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL: +- len = sprintf(buf, "%u\n", session->discovery_auth_optional); ++ len = sysfs_emit(buf, "%u\n", session->discovery_auth_optional); + break; + case ISCSI_PARAM_DEF_TIME2WAIT: +- len = sprintf(buf, "%d\n", session->time2wait); ++ len = sysfs_emit(buf, "%d\n", session->time2wait); + break; + case ISCSI_PARAM_DEF_TIME2RETAIN: +- len = sprintf(buf, "%d\n", session->time2retain); ++ len = sysfs_emit(buf, "%d\n", session->time2retain); + break; + case ISCSI_PARAM_TSID: +- len = sprintf(buf, "%u\n", session->tsid); ++ len = sysfs_emit(buf, "%u\n", session->tsid); + break; + case ISCSI_PARAM_ISID: +- len = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n", ++ len = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n", + session->isid[0], session->isid[1], + session->isid[2], session->isid[3], + session->isid[4], session->isid[5]); + break; + case ISCSI_PARAM_DISCOVERY_PARENT_IDX: +- len = sprintf(buf, "%u\n", session->discovery_parent_idx); ++ len = sysfs_emit(buf, "%u\n", session->discovery_parent_idx); + break; + case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: + if (session->discovery_parent_type) +- len = sprintf(buf, "%s\n", ++ len = sysfs_emit(buf, "%s\n", + session->discovery_parent_type); + else +- len = sprintf(buf, "\n"); ++ len = sysfs_emit(buf, "\n"); + break; + default: + return -ENOSYS; +@@ -3511,16 +3510,16 @@ int iscsi_conn_get_addr_param(struct sockaddr_storage *addr, + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_HOST_PARAM_IPADDRESS: + if (sin) +- len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr); ++ len = sysfs_emit(buf, "%pI4\n", &sin->sin_addr.s_addr); + else +- len = sprintf(buf, "%pI6\n", &sin6->sin6_addr); ++ len = sysfs_emit(buf, "%pI6\n", &sin6->sin6_addr); + break; + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_LOCAL_PORT: + if (sin) +- len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port)); ++ len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin->sin_port)); + else +- len = sprintf(buf, "%hu\n", ++ len = sysfs_emit(buf, "%hu\n", + be16_to_cpu(sin6->sin6_port)); + break; + default: +@@ -3539,88 +3538,88 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, + + switch(param) { + case ISCSI_PARAM_PING_TMO: +- len = sprintf(buf, "%u\n", conn->ping_timeout); ++ len = sysfs_emit(buf, "%u\n", conn->ping_timeout); + break; + case ISCSI_PARAM_RECV_TMO: +- len = sprintf(buf, "%u\n", conn->recv_timeout); ++ len = sysfs_emit(buf, "%u\n", conn->recv_timeout); + break; + case ISCSI_PARAM_MAX_RECV_DLENGTH: +- len = 
sprintf(buf, "%u\n", conn->max_recv_dlength); ++ len = sysfs_emit(buf, "%u\n", conn->max_recv_dlength); + break; + case ISCSI_PARAM_MAX_XMIT_DLENGTH: +- len = sprintf(buf, "%u\n", conn->max_xmit_dlength); ++ len = sysfs_emit(buf, "%u\n", conn->max_xmit_dlength); + break; + case ISCSI_PARAM_HDRDGST_EN: +- len = sprintf(buf, "%d\n", conn->hdrdgst_en); ++ len = sysfs_emit(buf, "%d\n", conn->hdrdgst_en); + break; + case ISCSI_PARAM_DATADGST_EN: +- len = sprintf(buf, "%d\n", conn->datadgst_en); ++ len = sysfs_emit(buf, "%d\n", conn->datadgst_en); + break; + case ISCSI_PARAM_IFMARKER_EN: +- len = sprintf(buf, "%d\n", conn->ifmarker_en); ++ len = sysfs_emit(buf, "%d\n", conn->ifmarker_en); + break; + case ISCSI_PARAM_OFMARKER_EN: +- len = sprintf(buf, "%d\n", conn->ofmarker_en); ++ len = sysfs_emit(buf, "%d\n", conn->ofmarker_en); + break; + case ISCSI_PARAM_EXP_STATSN: +- len = sprintf(buf, "%u\n", conn->exp_statsn); ++ len = sysfs_emit(buf, "%u\n", conn->exp_statsn); + break; + case ISCSI_PARAM_PERSISTENT_PORT: +- len = sprintf(buf, "%d\n", conn->persistent_port); ++ len = sysfs_emit(buf, "%d\n", conn->persistent_port); + break; + case ISCSI_PARAM_PERSISTENT_ADDRESS: +- len = sprintf(buf, "%s\n", conn->persistent_address); ++ len = sysfs_emit(buf, "%s\n", conn->persistent_address); + break; + case ISCSI_PARAM_STATSN: +- len = sprintf(buf, "%u\n", conn->statsn); ++ len = sysfs_emit(buf, "%u\n", conn->statsn); + break; + case ISCSI_PARAM_MAX_SEGMENT_SIZE: +- len = sprintf(buf, "%u\n", conn->max_segment_size); ++ len = sysfs_emit(buf, "%u\n", conn->max_segment_size); + break; + case ISCSI_PARAM_KEEPALIVE_TMO: +- len = sprintf(buf, "%u\n", conn->keepalive_tmo); ++ len = sysfs_emit(buf, "%u\n", conn->keepalive_tmo); + break; + case ISCSI_PARAM_LOCAL_PORT: +- len = sprintf(buf, "%u\n", conn->local_port); ++ len = sysfs_emit(buf, "%u\n", conn->local_port); + break; + case ISCSI_PARAM_TCP_TIMESTAMP_STAT: +- len = sprintf(buf, "%u\n", conn->tcp_timestamp_stat); ++ len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_stat); + break; + case ISCSI_PARAM_TCP_NAGLE_DISABLE: +- len = sprintf(buf, "%u\n", conn->tcp_nagle_disable); ++ len = sysfs_emit(buf, "%u\n", conn->tcp_nagle_disable); + break; + case ISCSI_PARAM_TCP_WSF_DISABLE: +- len = sprintf(buf, "%u\n", conn->tcp_wsf_disable); ++ len = sysfs_emit(buf, "%u\n", conn->tcp_wsf_disable); + break; + case ISCSI_PARAM_TCP_TIMER_SCALE: +- len = sprintf(buf, "%u\n", conn->tcp_timer_scale); ++ len = sysfs_emit(buf, "%u\n", conn->tcp_timer_scale); + break; + case ISCSI_PARAM_TCP_TIMESTAMP_EN: +- len = sprintf(buf, "%u\n", conn->tcp_timestamp_en); ++ len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_en); + break; + case ISCSI_PARAM_IP_FRAGMENT_DISABLE: +- len = sprintf(buf, "%u\n", conn->fragment_disable); ++ len = sysfs_emit(buf, "%u\n", conn->fragment_disable); + break; + case ISCSI_PARAM_IPV4_TOS: +- len = sprintf(buf, "%u\n", conn->ipv4_tos); ++ len = sysfs_emit(buf, "%u\n", conn->ipv4_tos); + break; + case ISCSI_PARAM_IPV6_TC: +- len = sprintf(buf, "%u\n", conn->ipv6_traffic_class); ++ len = sysfs_emit(buf, "%u\n", conn->ipv6_traffic_class); + break; + case ISCSI_PARAM_IPV6_FLOW_LABEL: +- len = sprintf(buf, "%u\n", conn->ipv6_flow_label); ++ len = sysfs_emit(buf, "%u\n", conn->ipv6_flow_label); + break; + case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6: +- len = sprintf(buf, "%u\n", conn->is_fw_assigned_ipv6); ++ len = sysfs_emit(buf, "%u\n", conn->is_fw_assigned_ipv6); + break; + case ISCSI_PARAM_TCP_XMIT_WSF: +- len = sprintf(buf, "%u\n", conn->tcp_xmit_wsf); ++ len = 
sysfs_emit(buf, "%u\n", conn->tcp_xmit_wsf); + break; + case ISCSI_PARAM_TCP_RECV_WSF: +- len = sprintf(buf, "%u\n", conn->tcp_recv_wsf); ++ len = sysfs_emit(buf, "%u\n", conn->tcp_recv_wsf); + break; + case ISCSI_PARAM_LOCAL_IPADDR: +- len = sprintf(buf, "%s\n", conn->local_ipaddr); ++ len = sysfs_emit(buf, "%s\n", conn->local_ipaddr); + break; + default: + return -ENOSYS; +@@ -3638,13 +3637,13 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, + + switch (param) { + case ISCSI_HOST_PARAM_NETDEV_NAME: +- len = sprintf(buf, "%s\n", ihost->netdev); ++ len = sysfs_emit(buf, "%s\n", ihost->netdev); + break; + case ISCSI_HOST_PARAM_HWADDRESS: +- len = sprintf(buf, "%s\n", ihost->hwaddress); ++ len = sysfs_emit(buf, "%s\n", ihost->hwaddress); + break; + case ISCSI_HOST_PARAM_INITIATOR_NAME: +- len = sprintf(buf, "%s\n", ihost->initiatorname); ++ len = sysfs_emit(buf, "%s\n", ihost->initiatorname); + break; + default: + return -ENOSYS; +diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c +index 87f5e694dbed..8800ff615bdd 100644 +--- a/drivers/scsi/libsas/sas_ata.c ++++ b/drivers/scsi/libsas/sas_ata.c +@@ -219,15 +219,17 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) + memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); + task->total_xfer_len = qc->nbytes; + task->num_scatter = qc->n_elem; ++ task->data_dir = qc->dma_dir; ++ } else if (qc->tf.protocol == ATA_PROT_NODATA) { ++ task->data_dir = DMA_NONE; + } else { + for_each_sg(qc->sg, sg, qc->n_elem, si) + xfer += sg_dma_len(sg); + + task->total_xfer_len = xfer; + task->num_scatter = si; ++ task->data_dir = qc->dma_dir; + } +- +- task->data_dir = qc->dma_dir; + task->scatter = qc->sg; + task->ata_task.retry_count = 1; + task->task_state_flags = SAS_TASK_STATE_PENDING; +diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c +index d3c5297c6c89..30e0730f613e 100644 +--- a/drivers/scsi/libsas/sas_port.c ++++ b/drivers/scsi/libsas/sas_port.c +@@ -41,7 +41,7 @@ static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy + + static void sas_resume_port(struct asd_sas_phy *phy) + { +- struct domain_device *dev; ++ struct domain_device *dev, *n; + struct asd_sas_port *port = phy->port; + struct sas_ha_struct *sas_ha = phy->ha; + struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt); +@@ -60,7 +60,7 @@ static void sas_resume_port(struct asd_sas_phy *phy) + * 1/ presume every device came back + * 2/ force the next revalidation to check all expander phys + */ +- list_for_each_entry(dev, &port->dev_list, dev_list_node) { ++ list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) { + int i, rc; + + rc = sas_notify_lldd_dev_found(dev); +diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c +index 52afbcff362f..b7940fffca63 100644 +--- a/drivers/scsi/lpfc/lpfc_ct.c ++++ b/drivers/scsi/lpfc/lpfc_ct.c +@@ -1541,8 +1541,8 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, sizeof(struct lpfc_name)); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName, + sizeof(struct lpfc_name)); +@@ -1558,8 +1558,8 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- 
memset(ae, 0, 256); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + /* This string MUST be consistent with other FC platforms + * supported by Broadcom. +@@ -1583,8 +1583,8 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 256); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + strncpy(ae->un.AttrString, phba->SerialNumber, + sizeof(ae->un.AttrString)); +@@ -1605,8 +1605,8 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 256); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + strncpy(ae->un.AttrString, phba->ModelName, + sizeof(ae->un.AttrString)); +@@ -1626,8 +1626,8 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 256); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + strncpy(ae->un.AttrString, phba->ModelDesc, + sizeof(ae->un.AttrString)); +@@ -1649,8 +1649,8 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t i, j, incr, size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 256); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + /* Convert JEDEC ID to ascii for hardware version */ + incr = vp->rev.biuRev; +@@ -1679,8 +1679,8 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 256); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + strncpy(ae->un.AttrString, lpfc_release_version, + sizeof(ae->un.AttrString)); +@@ -1701,8 +1701,8 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 256); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); +@@ -1726,8 +1726,8 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 256); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); + len = strnlen(ae->un.AttrString, +@@ -1746,8 +1746,8 @@ lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 256); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s", + init_utsname()->sysname, +@@ -1769,7 +1769,7 @@ lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; ++ ae = &ad->AttrValue; + + ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE); + size = FOURBYTES + sizeof(uint32_t); +@@ -1785,8 +1785,8 @@ lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 256); ++ 
ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + len = lpfc_vport_symbolic_node_name(vport, + ae->un.AttrString, 256); +@@ -1804,7 +1804,7 @@ lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; ++ ae = &ad->AttrValue; + + /* Nothing is defined for this currently */ + ae->un.AttrInt = cpu_to_be32(0); +@@ -1821,7 +1821,7 @@ lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; ++ ae = &ad->AttrValue; + + /* Each driver instance corresponds to a single port */ + ae->un.AttrInt = cpu_to_be32(1); +@@ -1838,8 +1838,8 @@ lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, sizeof(struct lpfc_name)); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + memcpy(&ae->un.AttrWWN, &vport->fabric_nodename, + sizeof(struct lpfc_name)); +@@ -1857,8 +1857,8 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 256); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); + len = strnlen(ae->un.AttrString, +@@ -1877,7 +1877,7 @@ lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; ++ ae = &ad->AttrValue; + + /* Driver doesn't have access to this information */ + ae->un.AttrInt = cpu_to_be32(0); +@@ -1894,8 +1894,8 @@ lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 256); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + strncpy(ae->un.AttrString, "EMULEX", + sizeof(ae->un.AttrString)); +@@ -1916,8 +1916,8 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 32); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */ + ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */ +@@ -1936,7 +1936,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; ++ ae = &ad->AttrValue; + + ae->un.AttrInt = 0; + if (!(phba->hba_flag & HBA_FCOE_MODE)) { +@@ -1986,7 +1986,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; ++ ae = &ad->AttrValue; + + if (!(phba->hba_flag & HBA_FCOE_MODE)) { + switch (phba->fc_linkspeed) { +@@ -2050,7 +2050,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; ++ ae = &ad->AttrValue; + + hsp = (struct serv_parm *)&vport->fc_sparam; + ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) | +@@ -2070,8 +2070,8 @@ lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; 
+- memset(ae, 0, 256); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), + "/sys/class/scsi_host/host%d", shost->host_no); +@@ -2091,8 +2091,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 256); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s", + init_utsname()->nodename); +@@ -2112,8 +2112,8 @@ lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, sizeof(struct lpfc_name)); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName, + sizeof(struct lpfc_name)); +@@ -2130,8 +2130,8 @@ lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, sizeof(struct lpfc_name)); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName, + sizeof(struct lpfc_name)); +@@ -2148,8 +2148,8 @@ lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 256); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256); + len += (len & 3) ? (4 - (len & 3)) : 4; +@@ -2167,7 +2167,7 @@ lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; ++ ae = &ad->AttrValue; + if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) + ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT); + else +@@ -2185,7 +2185,7 @@ lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; ++ ae = &ad->AttrValue; + ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3); + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); +@@ -2200,8 +2200,8 @@ lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, sizeof(struct lpfc_name)); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + memcpy(&ae->un.AttrWWN, &vport->fabric_portname, + sizeof(struct lpfc_name)); +@@ -2218,8 +2218,8 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 32); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */ + ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */ +@@ -2237,7 +2237,7 @@ lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; ++ ae = &ad->AttrValue; + /* Link Up - operational */ + ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE); + size = FOURBYTES + sizeof(uint32_t); +@@ -2253,7 +2253,7 @@ lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t 
size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; ++ ae = &ad->AttrValue; + vport->fdmi_num_disc = lpfc_find_map_node(vport); + ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc); + size = FOURBYTES + sizeof(uint32_t); +@@ -2269,7 +2269,7 @@ lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; ++ ae = &ad->AttrValue; + ae->un.AttrInt = cpu_to_be32(vport->fc_myDID); + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); +@@ -2284,8 +2284,8 @@ lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 256); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + strncpy(ae->un.AttrString, "Smart SAN Initiator", + sizeof(ae->un.AttrString)); +@@ -2305,8 +2305,8 @@ lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 256); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName, + sizeof(struct lpfc_name)); +@@ -2326,8 +2326,8 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 256); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + strncpy(ae->un.AttrString, "Smart SAN Version 2.0", + sizeof(ae->un.AttrString)); +@@ -2348,8 +2348,8 @@ lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t len, size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; +- memset(ae, 0, 256); ++ ae = &ad->AttrValue; ++ memset(ae, 0, sizeof(*ae)); + + strncpy(ae->un.AttrString, phba->ModelName, + sizeof(ae->un.AttrString)); +@@ -2368,7 +2368,7 @@ lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; ++ ae = &ad->AttrValue; + + /* SRIOV (type 3) is not supported */ + if (vport->vpi) +@@ -2388,7 +2388,7 @@ lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; ++ ae = &ad->AttrValue; + ae->un.AttrInt = cpu_to_be32(0); + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); +@@ -2403,7 +2403,7 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, + struct lpfc_fdmi_attr_entry *ae; + uint32_t size; + +- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; ++ ae = &ad->AttrValue; + ae->un.AttrInt = cpu_to_be32(1); + size = FOURBYTES + sizeof(uint32_t); + ad->AttrLen = cpu_to_be16(size); +@@ -2551,7 +2551,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + /* Registered Port List */ + /* One entry (port) per adapter */ + rh->rpl.EntryCnt = cpu_to_be32(1); +- memcpy(&rh->rpl.pe, &phba->pport->fc_sparam.portName, ++ memcpy(&rh->rpl.pe.PortName, ++ &phba->pport->fc_sparam.portName, + sizeof(struct lpfc_name)); + + /* point to the HBA attribute block */ +diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c +index a63542bac153..feb00585a7a4 100644 +--- a/drivers/scsi/lpfc/lpfc_debugfs.c ++++ b/drivers/scsi/lpfc/lpfc_debugfs.c +@@ -1061,7 +1061,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const 
char __user *buf, + memset(dstbuf, 0, 33); + size = (nbytes < 32) ? nbytes : 32; + if (copy_from_user(dstbuf, buf, size)) +- return 0; ++ return -EFAULT; + + if (dent == phba->debug_InjErrLBA) { + if ((buf[0] == 'o') && (buf[1] == 'f') && (buf[2] == 'f')) +@@ -1069,7 +1069,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, + } + + if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp))) +- return 0; ++ return -EINVAL; + + if (dent == phba->debug_writeGuard) + phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp; +diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c +index 4901bf24916b..f17adfe1326b 100644 +--- a/drivers/scsi/lpfc/lpfc_els.c ++++ b/drivers/scsi/lpfc/lpfc_els.c +@@ -1159,6 +1159,15 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + phba->fcf.fcf_redisc_attempted = 0; /* reset */ + goto out; + } ++ } else if (vport->port_state > LPFC_FLOGI && ++ vport->fc_flag & FC_PT2PT) { ++ /* ++ * In a p2p topology, it is possible that discovery has ++ * already progressed, and this completion can be ignored. ++ * Recheck the indicated topology. ++ */ ++ if (!sp->cmn.fPort) ++ goto out; + } + + flogifail: +@@ -3865,7 +3874,9 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + out: + if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) { + spin_lock_irq(shost->host_lock); +- ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); ++ if (mbox) ++ ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; ++ ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; + spin_unlock_irq(shost->host_lock); + + /* If the node is not being used by another discovery thread, +@@ -7606,6 +7617,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + spin_lock_irq(shost->host_lock); + if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { + spin_unlock_irq(shost->host_lock); ++ if (newnode) ++ lpfc_nlp_put(ndlp); + goto dropit; + } + spin_unlock_irq(shost->host_lock); +diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h +index 3b970d370600..daab21f940fb 100644 +--- a/drivers/scsi/lpfc/lpfc_hw.h ++++ b/drivers/scsi/lpfc/lpfc_hw.h +@@ -1289,25 +1289,8 @@ struct fc_rdp_res_frame { + /* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */ + #define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */ + +-/* +- * Registered Port List Format +- */ +-struct lpfc_fdmi_reg_port_list { +- uint32_t EntryCnt; +- uint32_t pe; /* Variable-length array */ +-}; +- +- + /* Definitions for HBA / Port attribute entries */ + +-struct lpfc_fdmi_attr_def { /* Defined in TLV format */ +- /* Structure is in Big Endian format */ +- uint32_t AttrType:16; +- uint32_t AttrLen:16; +- uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */ +-}; +- +- + /* Attribute Entry */ + struct lpfc_fdmi_attr_entry { + union { +@@ -1318,7 +1301,13 @@ struct lpfc_fdmi_attr_entry { + } un; + }; + +-#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry) ++struct lpfc_fdmi_attr_def { /* Defined in TLV format */ ++ /* Structure is in Big Endian format */ ++ uint32_t AttrType:16; ++ uint32_t AttrLen:16; ++ /* Marks start of Value (ATTRIBUTE_ENTRY) */ ++ struct lpfc_fdmi_attr_entry AttrValue; ++} __packed; + + /* + * HBA Attribute Block +@@ -1342,13 +1331,20 @@ struct lpfc_fdmi_hba_ident { + struct lpfc_name PortName; + }; + ++/* ++ * Registered Port List Format ++ */ ++struct lpfc_fdmi_reg_port_list { ++ uint32_t EntryCnt; ++ struct lpfc_fdmi_port_entry pe; ++} __packed; ++ + /* + * Register HBA(RHBA) + */ + struct lpfc_fdmi_reg_hba { + struct 
lpfc_fdmi_hba_ident hi; +- struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */ +-/* struct lpfc_fdmi_attr_block ab; */ ++ struct lpfc_fdmi_reg_port_list rpl; + }; + + /* +diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c +index fefef2884d59..30b5f65b29d1 100644 +--- a/drivers/scsi/lpfc/lpfc_nportdisc.c ++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c +@@ -1606,8 +1606,6 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, + ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; + + lpfc_issue_els_logo(vport, ndlp, 0); +- ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; +- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + return ndlp->nlp_state; + } + +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c +index 1c34dc335549..0e7915ecb85a 100644 +--- a/drivers/scsi/lpfc/lpfc_sli.c ++++ b/drivers/scsi/lpfc/lpfc_sli.c +@@ -15252,7 +15252,6 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, + if (cmd_iocbq) { + ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1; + lpfc_nlp_put(ndlp); +- lpfc_nlp_not_used(ndlp); + lpfc_sli_release_iocbq(phba, cmd_iocbq); + } + +@@ -15648,6 +15647,10 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) + list_add_tail(&iocbq->list, &first_iocbq->list); + } + } ++ /* Free the sequence's header buffer */ ++ if (!first_iocbq) ++ lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf); ++ + return first_iocbq; + } + +diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c +index e18bbc66e83b..77cb16d8dfd3 100644 +--- a/drivers/scsi/lpfc/lpfc_vport.c ++++ b/drivers/scsi/lpfc/lpfc_vport.c +@@ -624,27 +624,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport) + vport->port_state < LPFC_VPORT_READY) + return -EAGAIN; + } ++ + /* +- * This is a bit of a mess. We want to ensure the shost doesn't get +- * torn down until we're done with the embedded lpfc_vport structure. +- * +- * Beyond holding a reference for this function, we also need a +- * reference for outstanding I/O requests we schedule during delete +- * processing. But once we scsi_remove_host() we can no longer obtain +- * a reference through scsi_host_get(). +- * +- * So we take two references here. We release one reference at the +- * bottom of the function -- after delinking the vport. And we +- * release the other at the completion of the unreg_vpi that get's +- * initiated after we've disposed of all other resources associated +- * with the port. ++ * Take early refcount for outstanding I/O requests we schedule during ++ * delete processing for unreg_vpi. Always keep this before ++ * scsi_remove_host() as we can no longer obtain a reference through ++ * scsi_host_get() after scsi_host_remove as shost is set to SHOST_DEL. 
+ */ + if (!scsi_host_get(shost)) + return VPORT_INVAL; +- if (!scsi_host_get(shost)) { +- scsi_host_put(shost); +- return VPORT_INVAL; +- } ++ + lpfc_free_sysfs_attr(vport); + + lpfc_debugfs_terminate(vport); +@@ -792,8 +781,9 @@ lpfc_vport_delete(struct fc_vport *fc_vport) + if (!(vport->vpi_state & LPFC_VPI_REGISTERED) || + lpfc_mbx_unreg_vpi(vport)) + scsi_host_put(shost); +- } else ++ } else { + scsi_host_put(shost); ++ } + + lpfc_free_vpi(phba, vport->vpi); + vport->work_port_events = 0; +diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c +index 4cf9ed96414f..d61df49e4e1b 100644 +--- a/drivers/scsi/megaraid/megaraid_mm.c ++++ b/drivers/scsi/megaraid/megaraid_mm.c +@@ -250,7 +250,7 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval) + mimd_t mimd; + uint32_t adapno; + int iterator; +- ++ bool is_found; + + if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) { + *rval = -EFAULT; +@@ -266,12 +266,16 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval) + + adapter = NULL; + iterator = 0; ++ is_found = false; + + list_for_each_entry(adapter, &adapters_list_g, list) { +- if (iterator++ == adapno) break; ++ if (iterator++ == adapno) { ++ is_found = true; ++ break; ++ } + } + +- if (!adapter) { ++ if (!is_found) { + *rval = -ENODEV; + return NULL; + } +@@ -739,6 +743,7 @@ ioctl_done(uioc_t *kioc) + uint32_t adapno; + int iterator; + mraid_mmadp_t* adapter; ++ bool is_found; + + /* + * When the kioc returns from driver, make sure it still doesn't +@@ -761,19 +766,23 @@ ioctl_done(uioc_t *kioc) + iterator = 0; + adapter = NULL; + adapno = kioc->adapno; ++ is_found = false; + + con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed " + "ioctl that was timedout before\n")); + + list_for_each_entry(adapter, &adapters_list_g, list) { +- if (iterator++ == adapno) break; ++ if (iterator++ == adapno) { ++ is_found = true; ++ break; ++ } + } + + kioc->timedout = 0; + +- if (adapter) { ++ if (is_found) + mraid_mm_dealloc_kioc( adapter, kioc ); +- } ++ + } + else { + wake_up(&wait_q); +diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c +index 1753e42826dd..a880abf5abaa 100644 +--- a/drivers/scsi/mesh.c ++++ b/drivers/scsi/mesh.c +@@ -1044,6 +1044,8 @@ static void handle_error(struct mesh_state *ms) + while ((in_8(&mr->bus_status1) & BS1_RST) != 0) + udelay(1); + printk("done\n"); ++ if (ms->dma_started) ++ halt_dma(ms); + handle_reset(ms); + /* request_q is empty, no point in mesh_start() */ + return; +@@ -1356,7 +1358,8 @@ static void halt_dma(struct mesh_state *ms) + ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd), + ms->tgts[ms->conn_tgt].data_goes_out); + } +- scsi_dma_unmap(cmd); ++ if (cmd) ++ scsi_dma_unmap(cmd); + ms->dma_started = 0; + } + +@@ -1711,6 +1714,9 @@ static int mesh_host_reset(struct scsi_cmnd *cmd) + + spin_lock_irqsave(ms->host->host_lock, flags); + ++ if (ms->dma_started) ++ halt_dma(ms); ++ + /* Reset the controller & dbdma channel */ + out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */ + out_8(&mr->exception, 0xff); /* clear all exception bits */ +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c +index 7a340d597344..b583436c8e20 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c +@@ -3166,7 +3166,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) + ioc->scsi_lookup = NULL; + } + kfree(ioc->hpr_lookup); ++ ioc->hpr_lookup = NULL; + kfree(ioc->internal_lookup); ++ ioc->internal_lookup = NULL; + if (ioc->chain_lookup) { + for (i 
= 0; i < ioc->chain_depth; i++) { + if (ioc->chain_lookup[i].chain_buffer) +@@ -4475,7 +4477,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) + + r = _base_handshake_req_reply_wait(ioc, + sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request, +- sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10); ++ sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30); + + if (r != 0) { + pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +index f0a3bb4961e5..e832a67bf638 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +@@ -5199,8 +5199,10 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) + handle, parent_handle, (unsigned long long) + sas_expander->sas_address, sas_expander->num_phys); + +- if (!sas_expander->num_phys) ++ if (!sas_expander->num_phys) { ++ rc = -1; + goto out_fail; ++ } + sas_expander->phy = kcalloc(sas_expander->num_phys, + sizeof(struct _sas_phy), GFP_KERNEL); + if (!sas_expander->phy) { +diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c +index 39285070f3b5..17ec51f9d988 100644 +--- a/drivers/scsi/mvumi.c ++++ b/drivers/scsi/mvumi.c +@@ -2476,6 +2476,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba) + if (IS_ERR(mhba->dm_thread)) { + dev_err(&mhba->pdev->dev, + "failed to create device scan thread\n"); ++ ret = PTR_ERR(mhba->dm_thread); + mutex_unlock(&mhba->sas_discovery_mutex); + goto fail_create_thread; + } +diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c +index 9fc675f57e33..f54115d74f51 100644 +--- a/drivers/scsi/pm8001/pm8001_init.c ++++ b/drivers/scsi/pm8001/pm8001_init.c +@@ -1061,7 +1061,8 @@ static int pm8001_pci_probe(struct pci_dev *pdev, + + pm8001_init_sas_add(pm8001_ha); + /* phy setting support for motherboard controller */ +- if (pm8001_configure_phy_settings(pm8001_ha)) ++ rc = pm8001_configure_phy_settings(pm8001_ha); ++ if (rc) + goto err_out_shost; + + pm8001_post_sas_ha_init(shost, chip); +diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c +index e64a13f0bce1..61a2da30f94b 100644 +--- a/drivers/scsi/pm8001/pm8001_sas.c ++++ b/drivers/scsi/pm8001/pm8001_sas.c +@@ -795,7 +795,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) +- return res; ++ goto ex_err; + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_dev; + ccb->ccb_tag = ccb_tag; +diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c +index 33f4181ba9f7..591e2e89ae9f 100644 +--- a/drivers/scsi/qla2xxx/qla_attr.c ++++ b/drivers/scsi/qla2xxx/qla_attr.c +@@ -1909,6 +1909,8 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost) + vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); + + if (IS_FWI2_CAPABLE(ha)) { ++ int rval; ++ + stats = dma_alloc_coherent(&ha->pdev->dev, + sizeof(*stats), &stats_dma, GFP_KERNEL); + if (!stats) { +@@ -1918,7 +1920,11 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost) + } + + /* reset firmware statistics */ +- qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0); ++ rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0); ++ if (rval != QLA_SUCCESS) ++ ql_log(ql_log_warn, vha, 0x70de, ++ "Resetting ISP statistics failed: rval = %d\n", ++ rval); + + dma_free_coherent(&ha->pdev->dev, sizeof(*stats), + stats, stats_dma); +diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c +index 104e13ae3428..9ec18463b452 
100644 +--- a/drivers/scsi/qla2xxx/qla_nx.c ++++ b/drivers/scsi/qla2xxx/qla_nx.c +@@ -1102,7 +1102,8 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr, + return ret; + } + +- if (qla82xx_flash_set_write_enable(ha)) ++ ret = qla82xx_flash_set_write_enable(ha); ++ if (ret < 0) + goto done_write; + + qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data); +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c +index b889caa556a0..6ef7a094ee51 100644 +--- a/drivers/scsi/qla2xxx/qla_target.c ++++ b/drivers/scsi/qla2xxx/qla_target.c +@@ -1224,6 +1224,7 @@ void qlt_stop_phase2(struct qla_tgt *tgt) + "Waiting for %d IRQ commands to complete (tgt %p)", + tgt->irq_cmd_count, tgt); + ++ mutex_lock(&tgt->ha->optrom_mutex); + mutex_lock(&vha->vha_tgt.tgt_mutex); + spin_lock_irqsave(&ha->hardware_lock, flags); + while ((tgt->irq_cmd_count != 0) || (tgt->atio_irq_cmd_count != 0)) { +@@ -1235,6 +1236,7 @@ void qlt_stop_phase2(struct qla_tgt *tgt) + tgt->tgt_stopped = 1; + spin_unlock_irqrestore(&ha->hardware_lock, flags); + mutex_unlock(&vha->vha_tgt.tgt_mutex); ++ mutex_unlock(&tgt->ha->optrom_mutex); + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished", + tgt); +diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h +index 07ea4fcf4f88..983ec09da650 100644 +--- a/drivers/scsi/qla2xxx/qla_target.h ++++ b/drivers/scsi/qla2xxx/qla_target.h +@@ -112,7 +112,6 @@ + (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \ + QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0)) + #endif +-#endif + + #define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \ + ? le16_to_cpu((iocb)->u.isp2x.target.extended) \ +@@ -323,6 +322,7 @@ struct ctio_to_2xxx { + #ifndef CTIO_RET_TYPE + #define CTIO_RET_TYPE 0x17 /* CTIO return entry */ + #define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */ ++#endif + + struct fcp_hdr { + uint8_t r_ctl; +diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c +index 9c2c7fe61280..ba83c36b76bd 100644 +--- a/drivers/scsi/qla2xxx/qla_tmpl.c ++++ b/drivers/scsi/qla2xxx/qla_tmpl.c +@@ -878,7 +878,8 @@ qla27xx_template_checksum(void *p, ulong size) + static inline int + qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp) + { +- return qla27xx_template_checksum(tmp, tmp->template_size) == 0; ++ return qla27xx_template_checksum(tmp, ++ le32_to_cpu(tmp->template_size)) == 0; + } + + static inline int +@@ -894,7 +895,7 @@ qla27xx_execute_fwdt_template(struct scsi_qla_host *vha) + ulong len; + + if (qla27xx_fwdt_template_valid(tmp)) { +- len = tmp->template_size; ++ len = le32_to_cpu(tmp->template_size); + tmp = memcpy(vha->hw->fw_dump, tmp, len); + ql27xx_edit_template(vha, tmp); + qla27xx_walk_template(vha, tmp, tmp, &len); +@@ -910,7 +911,7 @@ qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha) + ulong len = 0; + + if (qla27xx_fwdt_template_valid(tmp)) { +- len = tmp->template_size; ++ len = le32_to_cpu(tmp->template_size); + qla27xx_walk_template(vha, tmp, NULL, &len); + } + +@@ -922,7 +923,7 @@ qla27xx_fwdt_template_size(void *p) + { + struct qla27xx_fwdt_template *tmp = p; + +- return tmp->template_size; ++ return le32_to_cpu(tmp->template_size); + } + + ulong +diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h +index 141c1c5e73f4..2d3e1a8349b3 100644 +--- a/drivers/scsi/qla2xxx/qla_tmpl.h ++++ b/drivers/scsi/qla2xxx/qla_tmpl.h +@@ -13,7 +13,7 @@ + struct __packed qla27xx_fwdt_template { + uint32_t template_type; + uint32_t 
entry_offset; +- uint32_t template_size; ++ __le32 template_size; + uint32_t reserved_1; + + uint32_t entry_count; +diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +index abdd6f93c8fe..324cddd4656e 100644 +--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c ++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +@@ -855,6 +855,7 @@ static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item, + + atomic_set(&tpg->lport_tpg_enabled, 0); + qlt_stop_phase1(vha->vha_tgt.qla_tgt); ++ qlt_stop_phase2(vha->vha_tgt.qla_tgt); + } + + return count; +@@ -1019,6 +1020,7 @@ static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item, + + atomic_set(&tpg->lport_tpg_enabled, 0); + qlt_stop_phase1(vha->vha_tgt.qla_tgt); ++ qlt_stop_phase2(vha->vha_tgt.qla_tgt); + } + + return count; +diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c +index 3fda5836aac6..f10088a1d38c 100644 +--- a/drivers/scsi/qla4xxx/ql4_os.c ++++ b/drivers/scsi/qla4xxx/ql4_os.c +@@ -1223,7 +1223,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len) + le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error); + exit_host_stats: + if (ql_iscsi_stats) +- dma_free_coherent(&ha->pdev->dev, host_stats_size, ++ dma_free_coherent(&ha->pdev->dev, stats_size, + ql_iscsi_stats, iscsi_stats_dma); + + ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n", +diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c +index d7118d3767c3..99bfb003be3f 100644 +--- a/drivers/scsi/scsi_debug.c ++++ b/drivers/scsi/scsi_debug.c +@@ -4986,6 +4986,12 @@ static int __init scsi_debug_init(void) + pr_err("submit_queues must be 1 or more\n"); + return -EINVAL; + } ++ ++ if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) { ++ pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE); ++ return -EINVAL; ++ } ++ + sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue), + GFP_KERNEL); + if (sdebug_q_arr == NULL) +diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c +index d596b76eea64..aad9195b356a 100644 +--- a/drivers/scsi/scsi_devinfo.c ++++ b/drivers/scsi/scsi_devinfo.c +@@ -451,7 +451,8 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, + /* + * vendor strings must be an exact match + */ +- if (vmax != strlen(devinfo->vendor) || ++ if (vmax != strnlen(devinfo->vendor, ++ sizeof(devinfo->vendor)) || + memcmp(devinfo->vendor, vskip, vmax)) + continue; + +@@ -459,7 +460,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, + * @model specifies the full string, and + * must be larger or equal to devinfo->model + */ +- mlen = strlen(devinfo->model); ++ mlen = strnlen(devinfo->model, sizeof(devinfo->model)); + if (mmax < mlen || memcmp(devinfo->model, mskip, mlen)) + continue; + return devinfo; +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c +index 3ccc858318fe..d135bd3051a6 100644 +--- a/drivers/scsi/scsi_lib.c ++++ b/drivers/scsi/scsi_lib.c +@@ -919,6 +919,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) + case 0x07: /* operation in progress */ + case 0x08: /* Long write in progress */ + case 0x09: /* self test in progress */ ++ case 0x11: /* notify (enable spinup) required */ + case 0x14: /* space allocation in progress */ + action = ACTION_DELAYED_RETRY; + break; +@@ -1033,10 +1034,10 @@ int scsi_init_io(struct scsi_cmnd *cmd) + struct scsi_device *sdev = cmd->device; + struct request *rq = cmd->request; + bool is_mq = 
(rq->mq_ctx != NULL); +- int error; ++ int error = BLKPREP_KILL; + + if (WARN_ON_ONCE(!rq->nr_phys_segments)) +- return -EINVAL; ++ goto err_exit; + + error = scsi_init_sgtable(rq, &cmd->sdb); + if (error) +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c +index 5de47410fdbd..6d3520387f1a 100644 +--- a/drivers/scsi/scsi_scan.c ++++ b/drivers/scsi/scsi_scan.c +@@ -460,7 +460,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, + error = shost->hostt->target_alloc(starget); + + if(error) { +- dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error); ++ if (error != -ENXIO) ++ dev_err(dev, "target allocation failed, error %d\n", error); + /* don't want scsi_target_reap to do the final + * put because it will be under the host lock */ + scsi_target_destroy(starget); +@@ -1733,15 +1734,16 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost) + */ + static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost) + { +- struct async_scan_data *data; ++ struct async_scan_data *data = NULL; + unsigned long flags; + + if (strncmp(scsi_scan_type, "sync", 4) == 0) + return NULL; + ++ mutex_lock(&shost->scan_mutex); + if (shost->async_scan) { + shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__); +- return NULL; ++ goto err; + } + + data = kmalloc(sizeof(*data), GFP_KERNEL); +@@ -1752,7 +1754,6 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost) + goto err; + init_completion(&data->prev_finished); + +- mutex_lock(&shost->scan_mutex); + spin_lock_irqsave(shost->host_lock, flags); + shost->async_scan = 1; + spin_unlock_irqrestore(shost->host_lock, flags); +@@ -1767,6 +1768,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost) + return data; + + err: ++ mutex_unlock(&shost->scan_mutex); + kfree(data); + return NULL; + } +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c +index 42b97f119623..8d10b35caed5 100644 +--- a/drivers/scsi/scsi_transport_iscsi.c ++++ b/drivers/scsi/scsi_transport_iscsi.c +@@ -119,7 +119,11 @@ show_transport_handle(struct device *dev, struct device_attribute *attr, + char *buf) + { + struct iscsi_internal *priv = dev_to_iscsi_internal(dev); +- return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport)); ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EACCES; ++ return sysfs_emit(buf, "%llu\n", ++ (unsigned long long)iscsi_handle(priv->iscsi_transport)); + } + static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL); + +@@ -129,7 +133,7 @@ show_transport_##name(struct device *dev, \ + struct device_attribute *attr,char *buf) \ + { \ + struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \ +- return sprintf(buf, format"\n", priv->iscsi_transport->name); \ ++ return sysfs_emit(buf, format"\n", priv->iscsi_transport->name);\ + } \ + static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL); + +@@ -170,7 +174,7 @@ static ssize_t + show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf) + { + struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); +- return sprintf(buf, "%llu\n", (unsigned long long) ep->id); ++ return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id); + } + static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL); + +@@ -423,39 +427,10 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, + struct device *dev = container_of(kobj, struct device, kobj); + struct iscsi_iface *iface = iscsi_dev_to_iface(dev); + 
struct iscsi_transport *t = iface->transport; +- int param; +- int param_type; ++ int param = -1; + + if (attr == &dev_attr_iface_enabled.attr) + param = ISCSI_NET_PARAM_IFACE_ENABLE; +- else if (attr == &dev_attr_iface_vlan_id.attr) +- param = ISCSI_NET_PARAM_VLAN_ID; +- else if (attr == &dev_attr_iface_vlan_priority.attr) +- param = ISCSI_NET_PARAM_VLAN_PRIORITY; +- else if (attr == &dev_attr_iface_vlan_enabled.attr) +- param = ISCSI_NET_PARAM_VLAN_ENABLED; +- else if (attr == &dev_attr_iface_mtu.attr) +- param = ISCSI_NET_PARAM_MTU; +- else if (attr == &dev_attr_iface_port.attr) +- param = ISCSI_NET_PARAM_PORT; +- else if (attr == &dev_attr_iface_ipaddress_state.attr) +- param = ISCSI_NET_PARAM_IPADDR_STATE; +- else if (attr == &dev_attr_iface_delayed_ack_en.attr) +- param = ISCSI_NET_PARAM_DELAYED_ACK_EN; +- else if (attr == &dev_attr_iface_tcp_nagle_disable.attr) +- param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE; +- else if (attr == &dev_attr_iface_tcp_wsf_disable.attr) +- param = ISCSI_NET_PARAM_TCP_WSF_DISABLE; +- else if (attr == &dev_attr_iface_tcp_wsf.attr) +- param = ISCSI_NET_PARAM_TCP_WSF; +- else if (attr == &dev_attr_iface_tcp_timer_scale.attr) +- param = ISCSI_NET_PARAM_TCP_TIMER_SCALE; +- else if (attr == &dev_attr_iface_tcp_timestamp_en.attr) +- param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN; +- else if (attr == &dev_attr_iface_cache_id.attr) +- param = ISCSI_NET_PARAM_CACHE_ID; +- else if (attr == &dev_attr_iface_redirect_en.attr) +- param = ISCSI_NET_PARAM_REDIRECT_EN; + else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr) + param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO; + else if (attr == &dev_attr_iface_header_digest.attr) +@@ -492,6 +467,38 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, + param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN; + else if (attr == &dev_attr_iface_initiator_name.attr) + param = ISCSI_IFACE_PARAM_INITIATOR_NAME; ++ ++ if (param != -1) ++ return t->attr_is_visible(ISCSI_IFACE_PARAM, param); ++ ++ if (attr == &dev_attr_iface_vlan_id.attr) ++ param = ISCSI_NET_PARAM_VLAN_ID; ++ else if (attr == &dev_attr_iface_vlan_priority.attr) ++ param = ISCSI_NET_PARAM_VLAN_PRIORITY; ++ else if (attr == &dev_attr_iface_vlan_enabled.attr) ++ param = ISCSI_NET_PARAM_VLAN_ENABLED; ++ else if (attr == &dev_attr_iface_mtu.attr) ++ param = ISCSI_NET_PARAM_MTU; ++ else if (attr == &dev_attr_iface_port.attr) ++ param = ISCSI_NET_PARAM_PORT; ++ else if (attr == &dev_attr_iface_ipaddress_state.attr) ++ param = ISCSI_NET_PARAM_IPADDR_STATE; ++ else if (attr == &dev_attr_iface_delayed_ack_en.attr) ++ param = ISCSI_NET_PARAM_DELAYED_ACK_EN; ++ else if (attr == &dev_attr_iface_tcp_nagle_disable.attr) ++ param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE; ++ else if (attr == &dev_attr_iface_tcp_wsf_disable.attr) ++ param = ISCSI_NET_PARAM_TCP_WSF_DISABLE; ++ else if (attr == &dev_attr_iface_tcp_wsf.attr) ++ param = ISCSI_NET_PARAM_TCP_WSF; ++ else if (attr == &dev_attr_iface_tcp_timer_scale.attr) ++ param = ISCSI_NET_PARAM_TCP_TIMER_SCALE; ++ else if (attr == &dev_attr_iface_tcp_timestamp_en.attr) ++ param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN; ++ else if (attr == &dev_attr_iface_cache_id.attr) ++ param = ISCSI_NET_PARAM_CACHE_ID; ++ else if (attr == &dev_attr_iface_redirect_en.attr) ++ param = ISCSI_NET_PARAM_REDIRECT_EN; + else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + if (attr == &dev_attr_ipv4_iface_ipaddress.attr) + param = ISCSI_NET_PARAM_IPV4_ADDR; +@@ -582,32 +589,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, + return 0; + } + +- switch 
(param) { +- case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: +- case ISCSI_IFACE_PARAM_HDRDGST_EN: +- case ISCSI_IFACE_PARAM_DATADGST_EN: +- case ISCSI_IFACE_PARAM_IMM_DATA_EN: +- case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: +- case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: +- case ISCSI_IFACE_PARAM_PDU_INORDER_EN: +- case ISCSI_IFACE_PARAM_ERL: +- case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: +- case ISCSI_IFACE_PARAM_FIRST_BURST: +- case ISCSI_IFACE_PARAM_MAX_R2T: +- case ISCSI_IFACE_PARAM_MAX_BURST: +- case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: +- case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: +- case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: +- case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: +- case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: +- case ISCSI_IFACE_PARAM_INITIATOR_NAME: +- param_type = ISCSI_IFACE_PARAM; +- break; +- default: +- param_type = ISCSI_NET_PARAM; +- } +- +- return t->attr_is_visible(param_type, param); ++ return t->attr_is_visible(ISCSI_NET_PARAM, param); + } + + static struct attribute *iscsi_iface_attrs[] = { +@@ -2323,6 +2305,18 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn) + } + EXPORT_SYMBOL_GPL(iscsi_destroy_conn); + ++void iscsi_put_conn(struct iscsi_cls_conn *conn) ++{ ++ put_device(&conn->dev); ++} ++EXPORT_SYMBOL_GPL(iscsi_put_conn); ++ ++void iscsi_get_conn(struct iscsi_cls_conn *conn) ++{ ++ get_device(&conn->dev); ++} ++EXPORT_SYMBOL_GPL(iscsi_get_conn); ++ + /* + * iscsi interface functions + */ +@@ -2782,6 +2776,9 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) + struct iscsi_cls_session *session; + int err = 0, value = 0; + ++ if (ev->u.set_param.len > PAGE_SIZE) ++ return -EINVAL; ++ + session = iscsi_session_lookup(ev->u.set_param.sid); + conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid); + if (!conn || !session) +@@ -2929,6 +2926,9 @@ iscsi_set_host_param(struct iscsi_transport *transport, + if (!transport->set_host_param) + return -ENOSYS; + ++ if (ev->u.set_host_param.len > PAGE_SIZE) ++ return -EINVAL; ++ + shost = scsi_host_lookup(ev->u.set_host_param.host_no); + if (!shost) { + printk(KERN_ERR "set_host_param could not find host no %u\n", +@@ -3191,7 +3191,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport, + pr_err("%s could not find host no %u\n", + __func__, ev->u.set_flashnode.host_no); + err = -ENODEV; +- goto put_host; ++ goto exit_set_fnode; + } + + idx = ev->u.set_flashnode.flashnode_idx; +@@ -3515,6 +3515,7 @@ static int + iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) + { + int err = 0; ++ u32 pdu_len; + struct iscsi_uevent *ev = nlmsg_data(nlh); + struct iscsi_transport *transport = NULL; + struct iscsi_internal *priv; +@@ -3522,6 +3523,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) + struct iscsi_cls_conn *conn; + struct iscsi_endpoint *ep = NULL; + ++ if (!netlink_capable(skb, CAP_SYS_ADMIN)) ++ return -EPERM; ++ + if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE) + *group = ISCSI_NL_GRP_UIP; + else +@@ -3627,6 +3631,14 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) + err = -EINVAL; + break; + case ISCSI_UEVENT_SEND_PDU: ++ pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev); ++ ++ if ((ev->u.send_pdu.hdr_size > pdu_len) || ++ (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) { ++ err = -EINVAL; ++ break; ++ } ++ + conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid); + if (conn) + ev->r.retcode = transport->send_pdu(conn, +@@ -4031,7 +4043,7 @@ 
show_priv_session_state(struct device *dev, struct device_attribute *attr, + char *buf) + { + struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); +- return sprintf(buf, "%s\n", iscsi_session_state_name(session->state)); ++ return sysfs_emit(buf, "%s\n", iscsi_session_state_name(session->state)); + } + static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state, + NULL); +@@ -4040,7 +4052,7 @@ show_priv_session_creator(struct device *dev, struct device_attribute *attr, + char *buf) + { + struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); +- return sprintf(buf, "%d\n", session->creator); ++ return sysfs_emit(buf, "%d\n", session->creator); + } + static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator, + NULL); +@@ -4049,7 +4061,7 @@ show_priv_session_target_id(struct device *dev, struct device_attribute *attr, + char *buf) + { + struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); +- return sprintf(buf, "%d\n", session->target_id); ++ return sysfs_emit(buf, "%d\n", session->target_id); + } + static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO, + show_priv_session_target_id, NULL); +@@ -4062,8 +4074,8 @@ show_priv_session_##field(struct device *dev, \ + struct iscsi_cls_session *session = \ + iscsi_dev_to_session(dev->parent); \ + if (session->field == -1) \ +- return sprintf(buf, "off\n"); \ +- return sprintf(buf, format"\n", session->field); \ ++ return sysfs_emit(buf, "off\n"); \ ++ return sysfs_emit(buf, format"\n", session->field); \ + } + + #define iscsi_priv_session_attr_store(field) \ +diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c +index 319868f3f674..083cd11ce7d7 100644 +--- a/drivers/scsi/scsi_transport_spi.c ++++ b/drivers/scsi/scsi_transport_spi.c +@@ -353,7 +353,7 @@ store_spi_transport_##field(struct device *dev, \ + struct spi_transport_attrs *tp \ + = (struct spi_transport_attrs *)&starget->starget_data; \ + \ +- if (i->f->set_##field) \ ++ if (!i->f->set_##field) \ + return -EINVAL; \ + val = simple_strtoul(buf, NULL, 0); \ + if (val > tp->max_##field) \ +diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c +index b0f5220ae23a..fad68cb028d6 100644 +--- a/drivers/scsi/sni_53c710.c ++++ b/drivers/scsi/sni_53c710.c +@@ -71,6 +71,7 @@ static int snirm710_probe(struct platform_device *dev) + struct NCR_700_Host_Parameters *hostdata; + struct Scsi_Host *host; + struct resource *res; ++ int rc; + + res = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!res) +@@ -96,7 +97,9 @@ static int snirm710_probe(struct platform_device *dev) + goto out_kfree; + host->this_id = 7; + host->base = base; +- host->irq = platform_get_irq(dev, 0); ++ host->irq = rc = platform_get_irq(dev, 0); ++ if (rc < 0) ++ goto out_put_host; + if(request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "snirm710", host)) { + printk(KERN_ERR "snirm710: request_irq failed!\n"); + goto out_put_host; +diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c +index cc484cb287d2..9b63e46edffc 100644 +--- a/drivers/scsi/sr.c ++++ b/drivers/scsi/sr.c +@@ -216,6 +216,8 @@ static unsigned int sr_get_events(struct scsi_device *sdev) + return DISK_EVENT_EJECT_REQUEST; + else if (med->media_event_code == 2) + return DISK_EVENT_MEDIA_CHANGE; ++ else if (med->media_event_code == 3) ++ return DISK_EVENT_MEDIA_CHANGE; + return 0; + } + +@@ -745,7 +747,7 @@ static int sr_probe(struct device *dev) + cd->cdi.disk = disk; + + if (register_cdrom(&cd->cdi)) +- goto fail_put; ++ goto fail_minor; + + 
/* + * Initialize block layer runtime PM stuffs before the +@@ -763,6 +765,10 @@ static int sr_probe(struct device *dev) + + return 0; + ++fail_minor: ++ spin_lock(&sr_index_lock); ++ clear_bit(minor, sr_index_bits); ++ spin_unlock(&sr_index_lock); + fail_put: + put_disk(disk); + fail_free: +diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c +index 618422ea3a41..0d58227431e4 100644 +--- a/drivers/scsi/st.c ++++ b/drivers/scsi/st.c +@@ -1267,8 +1267,8 @@ static int st_open(struct inode *inode, struct file *filp) + spin_lock(&st_use_lock); + if (STp->in_use) { + spin_unlock(&st_use_lock); +- scsi_tape_put(STp); + DEBC_printk(STp, "Device already in use.\n"); ++ scsi_tape_put(STp); + return (-EBUSY); + } + +diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c +index d50c5ed8f428..167ae2d29e47 100644 +--- a/drivers/scsi/sun3x_esp.c ++++ b/drivers/scsi/sun3x_esp.c +@@ -233,7 +233,9 @@ static int esp_sun3x_probe(struct platform_device *dev) + if (!esp->command_block) + goto fail_unmap_regs_dma; + +- host->irq = platform_get_irq(dev, 0); ++ host->irq = err = platform_get_irq(dev, 0); ++ if (err < 0) ++ goto fail_unmap_command_block; + err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, + "SUN3X ESP", esp); + if (err < 0) +diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h +index 9d1343987ebf..34a65b3e3f1b 100644 +--- a/drivers/scsi/ufs/ufs_quirks.h ++++ b/drivers/scsi/ufs/ufs_quirks.h +@@ -22,6 +22,7 @@ + + #define MAX_MODEL_LEN 16 + ++#define UFS_VENDOR_MICRON 0x12C + #define UFS_VENDOR_TOSHIBA 0x198 + #define UFS_VENDOR_SAMSUNG 0x1CE + #define UFS_VENDOR_SKHYNIX 0x1AD +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index a302ec1dc12d..57cf40b21d23 100755 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ -1545,6 +1545,7 @@ static void ufshcd_ungate_work(struct work_struct *work) + int ufshcd_hold(struct ufs_hba *hba, bool async) + { + int rc = 0; ++ bool flush_result; + unsigned long flags; + + if (!ufshcd_is_clkgating_allowed(hba)) +@@ -1576,7 +1577,9 @@ int ufshcd_hold(struct ufs_hba *hba, bool async) + break; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); +- flush_work(&hba->clk_gating.ungate_work); ++ flush_result = flush_work(&hba->clk_gating.ungate_work); ++ if (hba->clk_gating.is_suspended && !flush_result) ++ goto out; + spin_lock_irqsave(hba->host->host_lock, flags); + goto start; + } +@@ -2566,6 +2569,8 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) + /* Write UIC Cmd */ + ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, + REG_UIC_COMMAND); ++ /* Make sure that UIC command is committed immediately */ ++ wmb(); + } + + /** +@@ -4500,6 +4505,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) + u8 status; + int ret; + bool reenable_intr = false; ++ int wait_retries = 6; /* Allows 3secs max wait time */ + + mutex_lock(&hba->uic_cmd_mutex); + init_completion(&uic_async_done); +@@ -4525,11 +4531,42 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) + goto out; + } + ++more_wait: + if (!wait_for_completion_timeout(hba->uic_async_done, + msecs_to_jiffies(UIC_CMD_TIMEOUT))) { ++ u32 intr_status = 0; ++ s64 ts_since_last_intr; ++ + dev_err(hba->dev, + "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n", + cmd->command, cmd->argument3); ++ /* ++ * The controller must have triggered interrupt but ISR couldn't ++ * run due to interrupt starvation. 
++ * Or ISR must have executed just after the timeout ++ * (which clears IS registers) ++ * If either of these two cases is true, then ++ * wait for little more time for completion. ++ */ ++ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); ++ ts_since_last_intr = ktime_ms_delta(ktime_get(), ++ hba->ufs_stats.last_intr_ts); ++ ++ if ((intr_status & UFSHCD_UIC_PWR_MASK) || ++ ((hba->ufs_stats.last_intr_status & UFSHCD_UIC_PWR_MASK) && ++ (ts_since_last_intr < (s64)UIC_CMD_TIMEOUT))) { ++ dev_info(hba->dev, "IS:0x%08x last_intr_sts:0x%08x last_intr_ts:%lld, retry-cnt:%d\n", ++ intr_status, hba->ufs_stats.last_intr_status, ++ hba->ufs_stats.last_intr_ts, wait_retries); ++ if (wait_retries--) ++ goto more_wait; ++ ++ /* ++ * If same state continues event after more wait time, ++ * something must be hogging CPU. ++ */ ++ WARN_ON(1); ++ } + ret = -ETIMEDOUT; + goto out; + } +@@ -6867,7 +6904,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) + */ + static irqreturn_t ufshcd_intr(int irq, void *__hba) + { +- u32 intr_status, enabled_intr_status; ++ u32 intr_status, enabled_intr_status = 0; + irqreturn_t retval = IRQ_NONE; + struct ufs_hba *hba = __hba; + int retries = hba->nutrs; +@@ -6883,7 +6920,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) + * read, make sure we handle them by checking the interrupt status + * again in a loop until we process all of the reqs before returning. + */ +- do { ++ while (intr_status && retries--) { + enabled_intr_status = + intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); + if (intr_status) +@@ -6892,7 +6929,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) + retval |= ufshcd_sl_intr(hba, enabled_intr_status); + + intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); +- } while (intr_status && --retries); ++ } + + if (retval == IRQ_NONE) { + dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n", +@@ -7030,20 +7067,17 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) + { + struct Scsi_Host *host; + struct ufs_hba *hba; +- unsigned int tag; + u32 pos; + int err; +- u8 resp = 0xF; +- struct ufshcd_lrb *lrbp; ++ u8 resp = 0xF, lun; + unsigned long flags; + + host = cmd->device->host; + hba = shost_priv(host); +- tag = cmd->request->tag; + + ufshcd_print_cmd_log(hba); +- lrbp = &hba->lrb[tag]; +- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp); ++ lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); ++ err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp); + if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { + if (!err) + err = resp; +@@ -7052,7 +7086,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) + + /* clear the commands that were pending for corresponding LUN */ + for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { +- if (hba->lrb[pos].lun == lrbp->lun) { ++ if (hba->lrb[pos].lun == lun) { + err = ufshcd_clear_cmd(hba, pos); + if (err) + break; +diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c +index df6fabcce4f7..4d2172c115c6 100644 +--- a/drivers/scsi/vmw_pvscsi.c ++++ b/drivers/scsi/vmw_pvscsi.c +@@ -577,7 +577,13 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter, + case BTSTAT_SUCCESS: + case BTSTAT_LINKED_COMMAND_COMPLETED: + case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG: +- /* If everything went fine, let's move on.. */ ++ /* ++ * Commands like INQUIRY may transfer less data than ++ * requested by the initiator via bufflen. 
Set residual ++ * count to make upper layer aware of the actual amount ++ * of data returned. ++ */ ++ scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen); + cmd->result = (DID_OK << 16); + break; + +diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c +index 91f5c951850f..44463afb8015 100644 +--- a/drivers/soc/fsl/qbman/qman.c ++++ b/drivers/soc/fsl/qbman/qman.c +@@ -146,7 +146,7 @@ struct qm_eqcr_entry { + u32 tag; + struct qm_fd fd; + u8 __reserved3[32]; +-} __packed; ++} __packed __aligned(8); + #define QM_EQCR_VERB_VBIT 0x80 + #define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */ + #define QM_EQCR_VERB_CMD_ENQUEUE 0x01 +diff --git a/drivers/soc/qcom/bgcom_interface.c b/drivers/soc/qcom/bgcom_interface.c +index f09ed0cd2887..bb0e06ec6e39 100644 +--- a/drivers/soc/qcom/bgcom_interface.c ++++ b/drivers/soc/qcom/bgcom_interface.c +@@ -134,9 +134,9 @@ static void bgcom_load_twm_bg_work(struct work_struct *work) + } else { + dev->bg_twm_wear_load = true; + dev->pil_h = subsystem_get_with_fwname("bg-wear", +- "bg-twm-wear"); ++ "bg-twm"); + if (!dev->pil_h) +- pr_err("failed to load bg-twm-wear\n"); ++ pr_err("failed to load bg-twm\n"); + } + } + +@@ -614,7 +614,7 @@ static int ssr_bg_cb(struct notifier_block *this, + break; + case SUBSYS_AFTER_SHUTDOWN: + if (dev->pending_bg_twm_wear_load) { +- /* Load bg-twm-wear */ ++ /* Load bg-twm */ + dev->pending_bg_twm_wear_load = false; + queue_work(dev->bgdaemon_wq, + &dev->bgdaemon_load_twm_bg_work); +diff --git a/drivers/soc/qcom/bgcom_spi.c b/drivers/soc/qcom/bgcom_spi.c +index 56e05fdd702d..737a7a10b243 100644 +--- a/drivers/soc/qcom/bgcom_spi.c ++++ b/drivers/soc/qcom/bgcom_spi.c +@@ -161,7 +161,7 @@ int bgcom_set_spi_state(enum bgcom_spi_state state) + { + struct bg_spi_priv *bg_spi = container_of(bg_com_drv, + struct bg_spi_priv, lhandle); +- struct device spi_dev = bg_spi->spi->master->dev; ++ const struct device spi_dev = bg_spi->spi->master->dev; + ktime_t time_start, delta; + s64 time_elapsed; + +@@ -177,7 +177,7 @@ int bgcom_set_spi_state(enum bgcom_spi_state state) + while (!pm_runtime_status_suspended(spi_dev.parent)) { + delta = ktime_sub(ktime_get(), time_start); + time_elapsed = ktime_to_ms(delta); +- BUG_ON(time_elapsed > 5 * MSEC_PER_SEC); ++ WARN_ON(time_elapsed > 5 * MSEC_PER_SEC); + msleep(100); + } + } +@@ -277,7 +277,9 @@ static int bgcom_transfer(void *handle, uint8_t *tx_buf, + tx_xfer->rx_buf = rx_buf; + + tx_xfer->len = txn_len; ++ pm_runtime_get_sync(spi->master->dev.parent); + ret = spi_sync(spi, &bg_spi->msg1); ++ pm_runtime_put_sync_suspend(spi->master->dev.parent); + mutex_unlock(&bg_spi->xfer_mutex); + + if (ret) +diff --git a/drivers/soc/qcom/boot_marker.c b/drivers/soc/qcom/boot_marker.c +index c506aa3cd5a4..a43f550665d3 100644 +--- a/drivers/soc/qcom/boot_marker.c ++++ b/drivers/soc/qcom/boot_marker.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2016, 2019, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2016, 2019-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -41,6 +41,27 @@ struct boot_marker { + static struct dentry *dent_bkpi, *dent_bkpi_status, *dent_mpm_timer; + static struct boot_marker boot_marker_list; + ++/* ++ * Caller is expected to hold the list spinlock. 
++ */ ++static void delete_boot_marker(const char *name) ++{ ++ struct boot_marker *marker; ++ struct boot_marker *temp_addr; ++ ++ list_for_each_entry_safe(marker, temp_addr, &boot_marker_list.list, ++ list) { ++ if (strnstr(marker->marker_name, name, ++ strlen(marker->marker_name))) { ++ list_del(&marker->list); ++ kfree(marker); ++ } ++ } ++} ++ ++/* ++ * Caller is expected to hold the list spinlock. ++ */ + static void _create_boot_marker(const char *name, + unsigned long long int timer_value) + { +@@ -59,13 +80,39 @@ static void _create_boot_marker(const char *name, + sizeof(new_boot_marker->marker_name)); + new_boot_marker->timer_value = timer_value; + +- spin_lock(&boot_marker_list.slock); + list_add_tail(&(new_boot_marker->list), &(boot_marker_list.list)); ++} ++ ++/* ++ * Update existing boot marker. Delete existing boot marker and add it ++ * to the tail of boot marker list (to keep timestamp in order). Used to ++ * avoid duplicate boot markers. ++ */ ++void update_marker(const char *name) ++{ ++ struct boot_marker *marker; ++ struct boot_marker *temp_addr; ++ ++ unsigned long long timer_value = msm_timer_get_sclk_ticks(); ++ ++ spin_lock(&boot_marker_list.slock); ++ list_for_each_entry_safe(marker, temp_addr, &boot_marker_list.list, ++ list) { ++ if (strnstr(marker->marker_name, name, ++ strlen(marker->marker_name))) { ++ delete_boot_marker(marker->marker_name); ++ break; ++ } ++ } ++ ++ _create_boot_marker(name, timer_value); + spin_unlock(&boot_marker_list.slock); + } ++EXPORT_SYMBOL(update_marker); + + static void set_bootloader_stats(void) + { ++ spin_lock(&boot_marker_list.slock); + _create_boot_marker("M - APPSBL Start - ", + readl_relaxed(&boot_stats->bootloader_start)); + _create_boot_marker("M - APPSBL Display Init - ", +@@ -78,11 +125,14 @@ static void set_bootloader_stats(void) + readl_relaxed(&boot_stats->bootloader_checksum)); + _create_boot_marker("M - APPSBL End - ", + readl_relaxed(&boot_stats->bootloader_end)); ++ spin_unlock(&boot_marker_list.slock); + } + + void place_marker(const char *name) + { ++ spin_lock(&boot_marker_list.slock); + _create_boot_marker((char *) name, msm_timer_get_sclk_ticks()); ++ spin_unlock(&boot_marker_list.slock); + } + EXPORT_SYMBOL(place_marker); + +diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c +index 945d02fc55e1..49b415e39c01 100644 +--- a/drivers/soc/qcom/icnss.c ++++ b/drivers/soc/qcom/icnss.c +@@ -4851,6 +4851,9 @@ static int icnss_probe(struct platform_device *pdev) + return -EEXIST; + } + ++ if (of_property_read_bool(pdev->dev.of_node, "qcom,icnss-disable")) ++ return -ENODEV; ++ + icnss_pr_dbg("Platform driver probe\n"); + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); +diff --git a/drivers/soc/qcom/ipc_router_mhi_dev_xprt.c b/drivers/soc/qcom/ipc_router_mhi_dev_xprt.c +index 7cf264456c35..d51aca45d1ca 100644 +--- a/drivers/soc/qcom/ipc_router_mhi_dev_xprt.c ++++ b/drivers/soc/qcom/ipc_router_mhi_dev_xprt.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2019, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2019, 2021 The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -28,7 +28,7 @@ if (ipc_router_mhi_dev_xprt_debug_mask) \ + + #define MODULE_NAME "ipc_router_mhi_dev_xprt" + #define XPRT_NAME_LEN 32 +-#define IPC_ROUTER_MHI_XPRT_MAX_PKT_SIZE 8192 ++#define IPC_ROUTER_MHI_XPRT_MAX_PKT_SIZE 24576 + #define MHI_IPCR_ASYNC_TIMEOUT msecs_to_jiffies(1000) + #define MAX_IPCR_WR_REQ 128 + +diff --git a/drivers/soc/qcom/ipc_router_mhi_xprt.c b/drivers/soc/qcom/ipc_router_mhi_xprt.c +index fc11f68d5ac2..738d4f77b72b 100644 +--- a/drivers/soc/qcom/ipc_router_mhi_xprt.c ++++ b/drivers/soc/qcom/ipc_router_mhi_xprt.c +@@ -359,10 +359,12 @@ static void qcom_mhi_ipc_router_remove(struct mhi_device *mhi_dev) + { + struct ipc_router_mhi_xprt *mhi_xprtp = dev_get_drvdata(&mhi_dev->dev); + ++ init_completion(&mhi_xprtp->sft_close_complete); + msm_ipc_router_xprt_notify(&mhi_xprtp->xprt, + IPC_ROUTER_XPRT_EVENT_CLOSE, NULL); + D("%s: Notified IPC Router of %s CLOSE\n", + __func__, mhi_xprtp->xprt.name); ++ wait_for_completion(&mhi_xprtp->sft_close_complete); + dev_set_drvdata(&mhi_dev->dev, NULL); + } + +diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c +index 6542861a2f48..4d9878e38102 100644 +--- a/drivers/soc/qcom/memshare/msm_memshare.c ++++ b/drivers/soc/qcom/memshare/msm_memshare.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2013-2018,2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -16,6 +16,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -52,10 +53,11 @@ struct memshare_driver { + + struct memshare_child { + struct device *dev; ++ int client_id; + }; + + static struct memshare_driver *memsh_drv; +-static struct memshare_child *memsh_child; ++static struct memshare_child *memsh_child[MAX_CLIENTS]; + static struct mem_blocks memblock[MAX_CLIENTS]; + static uint32_t num_clients; + static struct msg_desc mem_share_svc_alloc_req_desc = { +@@ -357,6 +359,7 @@ static int modem_notifier_cb(struct notifier_block *this, unsigned long code, + int dest_vmids[1] = {VMID_HLOS}; + int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC}; + struct notif_data *notifdata = NULL; ++ struct memshare_child *client_node = NULL; + + mutex_lock(&memsh_drv->mem_share); + +@@ -393,6 +396,7 @@ static int modem_notifier_cb(struct notifier_block *this, unsigned long code, + case SUBSYS_AFTER_POWERUP: + pr_debug("memshare: Modem has booted up\n"); + for (i = 0; i < MAX_CLIENTS; i++) { ++ client_node = memsh_child[i]; + size = memblock[i].size; + if (memblock[i].free_memory > 0 && + bootup_request >= 2) { +@@ -442,7 +446,7 @@ static int modem_notifier_cb(struct notifier_block *this, unsigned long code, + */ + size += MEMSHARE_GUARD_BYTES; + } +- dma_free_attrs(memsh_drv->dev, ++ dma_free_attrs(client_node->dev, + size, memblock[i].virtual_addr, + memblock[i].phy_addr, + attrs); +@@ -464,29 +468,29 @@ static struct notifier_block nb = { + .notifier_call = modem_notifier_cb, + }; + +-static void shared_hyp_mapping(int client_id) ++static void shared_hyp_mapping(int index) + { + int ret; + u32 source_vmlist[1] = {VMID_HLOS}; + int dest_vmids[1] = {VMID_MSS_MSA}; + int dest_perms[1] = {PERM_READ|PERM_WRITE}; + +- if (client_id == DHMS_MEM_CLIENT_INVALID) { ++ 
if (index >= MAX_CLIENTS) { + pr_err("memshare: %s, Invalid Client\n", __func__); + return; + } + +- ret = hyp_assign_phys(memblock[client_id].phy_addr, +- memblock[client_id].size, ++ ret = hyp_assign_phys(memblock[index].phy_addr, ++ memblock[index].size, + source_vmlist, 1, dest_vmids, + dest_perms, 1); + + if (ret != 0) { + pr_err("memshare: hyp_assign_phys failed size=%u err=%d\n", +- memblock[client_id].size, ret); ++ memblock[index].size, ret); + return; + } +- memblock[client_id].hyp_mapping = 1; ++ memblock[index].hyp_mapping = 1; + } + + static int handle_alloc_req(void *req_h, void *req, void *conn_h) +@@ -535,8 +539,9 @@ static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h) + { + struct mem_alloc_generic_req_msg_v01 *alloc_req; + struct mem_alloc_generic_resp_msg_v01 *alloc_resp; +- int rc, resp = 0; +- int client_id; ++ struct memshare_child *client_node = NULL; ++ int rc, resp = 0, i; ++ int index = DHMS_MEM_CLIENT_INVALID; + uint32_t size = 0; + + mutex_lock(&memsh_drv->mem_share); +@@ -551,53 +556,63 @@ static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h) + } + alloc_resp->resp.result = QMI_RESULT_FAILURE_V01; + alloc_resp->resp.error = QMI_ERR_NO_MEMORY_V01; +- client_id = check_client(alloc_req->client_id, alloc_req->proc_id, ++ index = check_client(alloc_req->client_id, alloc_req->proc_id, + CHECK); + +- if (client_id >= MAX_CLIENTS) { +- pr_err("memshare: %s client not found, requested client: %d, proc_id: %d\n", +- __func__, alloc_req->client_id, +- alloc_req->proc_id); ++ if (index >= MAX_CLIENTS) { ++ pr_err( ++ "memshare_alloc: client not found for index: %d, requested client: %d, proc_id: %d\n", ++ index, alloc_req->client_id, alloc_req->proc_id); + kfree(alloc_resp); + alloc_resp = NULL; + mutex_unlock(&memsh_drv->mem_share); + return -EINVAL; + } + +- if (!memblock[client_id].allotted) { ++ for (i = 0; i < MAX_CLIENTS; i++) { ++ if (memsh_child[i]->client_id == alloc_req->client_id) { ++ client_node = memsh_child[i]; ++ pr_info( ++ "memshare_alloc: found client with client_id: %d, index: %d\n", ++ alloc_req->client_id, index); ++ break; ++ } ++ } ++ ++ if (!memblock[index].allotted) { + if (alloc_req->client_id == 1 && alloc_req->num_bytes > 0) + size = alloc_req->num_bytes + MEMSHARE_GUARD_BYTES; + else + size = alloc_req->num_bytes; +- rc = memshare_alloc(memsh_drv->dev, size, +- &memblock[client_id]); ++ rc = memshare_alloc(client_node->dev, size, ++ &memblock[index]); + if (rc) { + pr_err("memshare: %s,Unable to allocate memory for requested client\n", + __func__); + resp = 1; + } + if (!resp) { +- memblock[client_id].free_memory += 1; +- memblock[client_id].allotted = 1; +- memblock[client_id].size = alloc_req->num_bytes; +- memblock[client_id].peripheral = alloc_req->proc_id; ++ memblock[index].free_memory += 1; ++ memblock[index].allotted = 1; ++ memblock[index].size = alloc_req->num_bytes; ++ memblock[index].peripheral = alloc_req->proc_id; + } + } + pr_debug("memshare: In %s, free memory count for client id: %d = %d", +- __func__, memblock[client_id].client_id, +- memblock[client_id].free_memory); ++ __func__, memblock[index].client_id, ++ memblock[index].free_memory); + +- memblock[client_id].sequence_id = alloc_req->sequence_id; +- memblock[client_id].alloc_request = 1; ++ memblock[index].sequence_id = alloc_req->sequence_id; ++ memblock[index].alloc_request = 1; + +- fill_alloc_response(alloc_resp, client_id, &resp); ++ fill_alloc_response(alloc_resp, index, &resp); + /* + * Perform the Hypervisor mapping in 
order to avoid XPU viloation + * to the allocated region for Modem Clients + */ +- if (!memblock[client_id].hyp_mapping && +- memblock[client_id].allotted) +- shared_hyp_mapping(client_id); ++ if (!memblock[index].hyp_mapping && ++ memblock[index].allotted) ++ shared_hyp_mapping(index); + mutex_unlock(&memsh_drv->mem_share); + pr_debug("memshare: alloc_resp.num_bytes :%d, alloc_resp.resp.result :%lx\n", + alloc_resp->dhms_mem_alloc_addr_info[0].num_bytes, +@@ -646,8 +661,9 @@ static int handle_free_generic_req(void *req_h, void *req, void *conn_h) + { + struct mem_free_generic_req_msg_v01 *free_req; + struct mem_free_generic_resp_msg_v01 free_resp; +- int rc, flag = 0, ret = 0, size = 0; +- uint32_t client_id; ++ struct memshare_child *client_node = NULL; ++ int rc, flag = 0, ret = 0, size = 0, i; ++ int index = DHMS_MEM_CLIENT_INVALID; + u32 source_vmlist[1] = {VMID_MSS_MSA}; + int dest_vmids[1] = {VMID_HLOS}; + int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC}; +@@ -660,29 +676,38 @@ static int handle_free_generic_req(void *req_h, void *req, void *conn_h) + free_resp.resp.result = QMI_RESULT_FAILURE_V01; + pr_debug("memshare: Client id: %d proc id: %d\n", free_req->client_id, + free_req->proc_id); +- client_id = check_client(free_req->client_id, free_req->proc_id, FREE); +- if (client_id == DHMS_MEM_CLIENT_INVALID) { +- pr_err("memshare: %s, Invalid client request to free memory\n", +- __func__); ++ index = check_client(free_req->client_id, free_req->proc_id, FREE); ++ ++ if (index >= MAX_CLIENTS) { ++ pr_err("memshare_free: invalid client request to free memory\n"); + flag = 1; +- } else if (!memblock[client_id].guarantee && +- !memblock[client_id].client_request && +- memblock[client_id].allotted) { +- pr_debug("memshare: %s:client_id:%d - size: %d", +- __func__, client_id, memblock[client_id].size); +- ret = hyp_assign_phys(memblock[client_id].phy_addr, +- memblock[client_id].size, source_vmlist, 1, ++ } ++ ++ for (i = 0; i < MAX_CLIENTS; i++) { ++ if (memsh_child[i]->client_id == free_req->client_id) { ++ client_node = memsh_child[i]; ++ break; ++ } ++ } ++ ++ if (!flag && !memblock[index].guarantee && ++ !memblock[index].client_request && ++ memblock[index].allotted) { ++ pr_debug("memshare: %s: index: %d - size: %d", ++ __func__, index, memblock[index].size); ++ ret = hyp_assign_phys(memblock[index].phy_addr, ++ memblock[index].size, source_vmlist, 1, + dest_vmids, dest_perms, 1); +- if (ret && memblock[client_id].hyp_mapping == 1) { ++ if (ret && memblock[index].hyp_mapping == 1) { + /* + * This is an error case as hyp mapping was successful + * earlier but during unmap it lead to failure. 
+ */ + pr_err("memshare: %s, failed to unmap the region for client id:%d\n", +- __func__, client_id); ++ __func__, index); + } +- size = memblock[client_id].size; +- if (memblock[client_id].client_id == 1) { ++ size = memblock[index].size; ++ if (memblock[index].client_id == 1) { + /* + * Check if the client id + * is of diag so that free +@@ -692,14 +717,14 @@ static int handle_free_generic_req(void *req_h, void *req, void *conn_h) + */ + size += MEMSHARE_GUARD_BYTES; + } +- dma_free_attrs(memsh_drv->dev, size, +- memblock[client_id].virtual_addr, +- memblock[client_id].phy_addr, ++ dma_free_attrs(client_node->dev, size, ++ memblock[index].virtual_addr, ++ memblock[index].phy_addr, + attrs); +- free_client(client_id); ++ free_client(index); + } else { + pr_err("memshare: %s, Request came for a guaranteed client (client_id: %d) cannot free up the memory\n", +- __func__, client_id); ++ __func__, index); + } + + if (flag) { +@@ -724,7 +749,7 @@ static int handle_free_generic_req(void *req_h, void *req, void *conn_h) + + static int handle_query_size_req(void *req_h, void *req, void *conn_h) + { +- int rc, client_id; ++ int rc, index = DHMS_MEM_CLIENT_INVALID; + struct mem_query_size_req_msg_v01 *query_req; + struct mem_query_size_rsp_msg_v01 *query_resp; + +@@ -738,10 +763,10 @@ static int handle_query_size_req(void *req_h, void *req, void *conn_h) + } + pr_debug("memshare: query request client id: %d proc _id: %d\n", + query_req->client_id, query_req->proc_id); +- client_id = check_client(query_req->client_id, query_req->proc_id, ++ index = check_client(query_req->client_id, query_req->proc_id, + CHECK); + +- if (client_id >= MAX_CLIENTS) { ++ if (index >= MAX_CLIENTS) { + pr_err("memshare: %s client not found, requested client: %d, proc_id: %d\n", + __func__, query_req->client_id, + query_req->proc_id); +@@ -751,9 +776,9 @@ static int handle_query_size_req(void *req_h, void *req, void *conn_h) + return -EINVAL; + } + +- if (memblock[client_id].size) { ++ if (memblock[index].size) { + query_resp->size_valid = 1; +- query_resp->size = memblock[client_id].size; ++ query_resp->size = memblock[index].size; + } else { + query_resp->size_valid = 1; + query_resp->size = 0; +@@ -963,6 +988,7 @@ static int memshare_child_probe(struct platform_device *pdev) + uint32_t size, client_id; + const char *name; + struct memshare_child *drv; ++ struct device_node *mem_node; + + drv = devm_kzalloc(&pdev->dev, sizeof(struct memshare_child), + GFP_KERNEL); +@@ -971,8 +997,7 @@ static int memshare_child_probe(struct platform_device *pdev) + return -ENOMEM; + + drv->dev = &pdev->dev; +- memsh_child = drv; +- platform_set_drvdata(pdev, memsh_child); ++ platform_set_drvdata(pdev, drv); + + rc = of_property_read_u32(pdev->dev.of_node, "qcom,peripheral-size", + &size); +@@ -1015,6 +1040,21 @@ static int memshare_child_probe(struct platform_device *pdev) + + memblock[num_clients].size = size; + memblock[num_clients].client_id = client_id; ++ drv->client_id = client_id; ++ ++ mem_node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0); ++ of_node_put(mem_node); ++ if (mem_node) { ++ rc = of_reserved_mem_device_init(&pdev->dev); ++ if (rc) { ++ pr_err("memshare: Failed to initialize memory region rc: %d\n", ++ rc); ++ return rc; ++ } ++ pr_info("memshare: Memory allocation from shared DMA pool\n"); ++ } else { ++ pr_info("memshare: Continuing with allocation from CMA\n"); ++ } + + /* + * Memshare allocation for guaranteed clients +@@ -1022,12 +1062,17 @@ static int memshare_child_probe(struct platform_device 
*pdev) + if (memblock[num_clients].guarantee && size > 0) { + if (client_id == 1) + size += MEMSHARE_GUARD_BYTES; +- rc = memshare_alloc(memsh_child->dev, ++ rc = memshare_alloc(drv->dev, + size, + &memblock[num_clients]); + if (rc) { + pr_err("memshare: %s, Unable to allocate memory for guaranteed clients, rc: %d\n", + __func__, rc); ++ mem_node = of_parse_phandle(pdev->dev.of_node, ++ "memory-region", 0); ++ of_node_put(mem_node); ++ if (mem_node) ++ of_reserved_mem_device_release(&pdev->dev); + return rc; + } + memblock[num_clients].allotted = 1; +@@ -1051,6 +1096,7 @@ static int memshare_child_probe(struct platform_device *pdev) + memblock[num_clients].file_created = 1; + } + ++ memsh_child[num_clients] = drv; + num_clients++; + + return 0; +diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c +index c5bfec093579..8ef479b9c211 100644 +--- a/drivers/soc/qcom/peripheral-loader.c ++++ b/drivers/soc/qcom/peripheral-loader.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2010-2020, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2010-2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -725,7 +725,9 @@ static int pil_init_mmap(struct pil_desc *desc, const struct pil_mdt *mdt, + if (ret) + return ret; + +- place_marker("M - Modem Image Start Loading"); ++ if (!strcmp(desc->name, "modem")) ++ place_marker("M - Modem Image Start Loading"); ++ + pil_info(desc, "loading from %pa to %pa\n", &priv->region_start, + &priv->region_end); + +@@ -1194,7 +1196,10 @@ int pil_boot(struct pil_desc *desc) + } + trace_pil_event("reset_done", desc); + pil_info(desc, "Brought out of reset\n"); +- place_marker("M - Modem out of reset"); ++ ++ if (!strcmp(desc->name, "modem")) ++ place_marker("M - Modem out of reset"); ++ + desc->modem_ssr = false; + err_auth_and_reset: + if (ret && desc->subsys_vmid > 0) { +diff --git a/drivers/soc/qcom/rpmh_master_stat.c b/drivers/soc/qcom/rpmh_master_stat.c +index a1c30bab8b65..e65aa72687b8 100644 +--- a/drivers/soc/qcom/rpmh_master_stat.c ++++ b/drivers/soc/qcom/rpmh_master_stat.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2017-2018, 2021, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -249,8 +249,11 @@ static int msm_rpmh_master_stats_probe(struct platform_device *pdev) + ret = of_property_read_u32(pdev->dev.of_node, + "qcom,use-alt-unit", + &use_alt_unit); +- if (ret) ++ if (ret) { + use_alt_unit = -1; ++ /* optional property should not fail probe */ ++ ret = 0; ++ } + + rpmh_unit_base = of_iomap(pdev->dev.of_node, 0); + if (!rpmh_unit_base) { +diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c +index f51fb2ea7200..4c5767c73b7a 100644 +--- a/drivers/soc/qcom/smp2p.c ++++ b/drivers/soc/qcom/smp2p.c +@@ -314,15 +314,16 @@ static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p, + static int smp2p_update_bits(void *data, u32 mask, u32 value) + { + struct smp2p_entry *entry = data; ++ unsigned long flags; + u32 orig; + u32 val; + +- spin_lock(&entry->lock); ++ spin_lock_irqsave(&entry->lock, flags); + val = orig = readl(entry->value); + val &= ~mask; + val |= value; + writel(val, entry->value); +- spin_unlock(&entry->lock); ++ spin_unlock_irqrestore(&entry->lock, flags); + + if (val != orig) + qcom_smp2p_kick(entry->smp2p); +diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c +index 5373f4c16b54..4403b89561fd 100644 +--- a/drivers/soc/tegra/fuse/speedo-tegra210.c ++++ b/drivers/soc/tegra/fuse/speedo-tegra210.c +@@ -105,7 +105,7 @@ static int get_process_id(int value, const u32 *speedos, unsigned int num) + unsigned int i; + + for (i = 0; i < num; i++) +- if (value < speedos[num]) ++ if (value < speedos[i]) + return i; + + return -EINVAL; +diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c +index 1a7b5caa127b..b86bea453732 100644 +--- a/drivers/soc/ti/knav_dma.c ++++ b/drivers/soc/ti/knav_dma.c +@@ -752,8 +752,9 @@ static int knav_dma_probe(struct platform_device *pdev) + pm_runtime_enable(kdev->dev); + ret = pm_runtime_get_sync(kdev->dev); + if (ret < 0) { ++ pm_runtime_put_noidle(kdev->dev); + dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret); +- return ret; ++ goto err_pm_disable; + } + + /* Initialise all packet dmas */ +@@ -767,13 +768,21 @@ static int knav_dma_probe(struct platform_device *pdev) + + if (list_empty(&kdev->list)) { + dev_err(dev, "no valid dma instance\n"); +- return -ENODEV; ++ ret = -ENODEV; ++ goto err_put_sync; + } + + debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL, + &knav_dma_debug_ops); + + return ret; ++ ++err_put_sync: ++ pm_runtime_put_sync(kdev->dev); ++err_pm_disable: ++ pm_runtime_disable(kdev->dev); ++ ++ return ret; + } + + static int knav_dma_remove(struct platform_device *pdev) +diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c +index b73e3534f67b..5248649b0b41 100644 +--- a/drivers/soc/ti/knav_qmss_queue.c ++++ b/drivers/soc/ti/knav_qmss_queue.c +@@ -1717,6 +1717,7 @@ static int knav_queue_probe(struct platform_device *pdev) + pm_runtime_enable(&pdev->dev); + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { ++ pm_runtime_put_noidle(&pdev->dev); + dev_err(dev, "Failed to enable QMSS\n"); + return ret; + } +@@ -1784,9 +1785,10 @@ static int knav_queue_probe(struct platform_device *pdev) + if (ret) + goto err; + +- regions = of_get_child_by_name(node, "descriptor-regions"); ++ regions = of_get_child_by_name(node, "descriptor-regions"); + if (!regions) { + dev_err(dev, "descriptor-regions not specified\n"); ++ ret = -ENODEV; + goto err; + } + ret = 
knav_queue_setup_regions(kdev, regions); +diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig +index fc96f62a4838..37c55881b916 100644 +--- a/drivers/spi/Kconfig ++++ b/drivers/spi/Kconfig +@@ -775,4 +775,7 @@ endif # SPI_MASTER + + # (slave support would go here) + ++config SPI_DYNAMIC ++ def_bool ACPI || OF_DYNAMIC || SPI_SLAVE ++ + endif # SPI +diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c +index 63231760facc..5453910d8abc 100644 +--- a/drivers/spi/spi-bcm-qspi.c ++++ b/drivers/spi/spi-bcm-qspi.c +@@ -698,7 +698,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots) + if (buf) + buf[tp.byte] = read_rxram_slot_u8(qspi, slot); + dev_dbg(&qspi->pdev->dev, "RD %02x\n", +- buf ? buf[tp.byte] : 0xff); ++ buf ? buf[tp.byte] : 0x0); + } else { + u16 *buf = tp.trans->rx_buf; + +@@ -706,7 +706,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots) + buf[tp.byte / 2] = read_rxram_slot_u16(qspi, + slot); + dev_dbg(&qspi->pdev->dev, "RD %04x\n", +- buf ? buf[tp.byte] : 0xffff); ++ buf ? buf[tp.byte / 2] : 0x0); + } + + update_qspi_trans_byte_count(qspi, &tp, +@@ -761,13 +761,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi) + while (!tstatus && slot < MSPI_NUM_CDRAM) { + if (tp.trans->bits_per_word <= 8) { + const u8 *buf = tp.trans->tx_buf; +- u8 val = buf ? buf[tp.byte] : 0xff; ++ u8 val = buf ? buf[tp.byte] : 0x00; + + write_txram_slot_u8(qspi, slot, val); + dev_dbg(&qspi->pdev->dev, "WR %02x\n", val); + } else { + const u16 *buf = tp.trans->tx_buf; +- u16 val = buf ? buf[tp.byte / 2] : 0xffff; ++ u16 val = buf ? buf[tp.byte / 2] : 0x0000; + + write_txram_slot_u16(qspi, slot, val); + dev_dbg(&qspi->pdev->dev, "WR %04x\n", val); +@@ -1185,7 +1185,7 @@ int bcm_qspi_probe(struct platform_device *pdev, + if (!of_match_node(bcm_qspi_of_match, dev->of_node)) + return -ENODEV; + +- master = spi_alloc_master(dev, sizeof(struct bcm_qspi)); ++ master = devm_spi_alloc_master(dev, sizeof(struct bcm_qspi)); + if (!master) { + dev_err(dev, "error allocating spi_master\n"); + return -ENOMEM; +@@ -1218,21 +1218,17 @@ int bcm_qspi_probe(struct platform_device *pdev, + + if (res) { + qspi->base[MSPI] = devm_ioremap_resource(dev, res); +- if (IS_ERR(qspi->base[MSPI])) { +- ret = PTR_ERR(qspi->base[MSPI]); +- goto qspi_resource_err; +- } ++ if (IS_ERR(qspi->base[MSPI])) ++ return PTR_ERR(qspi->base[MSPI]); + } else { +- goto qspi_resource_err; ++ return 0; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi"); + if (res) { + qspi->base[BSPI] = devm_ioremap_resource(dev, res); +- if (IS_ERR(qspi->base[BSPI])) { +- ret = PTR_ERR(qspi->base[BSPI]); +- goto qspi_resource_err; +- } ++ if (IS_ERR(qspi->base[BSPI])) ++ return PTR_ERR(qspi->base[BSPI]); + qspi->bspi_mode = true; + } else { + qspi->bspi_mode = false; +@@ -1243,18 +1239,14 @@ int bcm_qspi_probe(struct platform_device *pdev, + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg"); + if (res) { + qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res); +- if (IS_ERR(qspi->base[CHIP_SELECT])) { +- ret = PTR_ERR(qspi->base[CHIP_SELECT]); +- goto qspi_resource_err; +- } ++ if (IS_ERR(qspi->base[CHIP_SELECT])) ++ return PTR_ERR(qspi->base[CHIP_SELECT]); + } + + qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id), + GFP_KERNEL); +- if (!qspi->dev_ids) { +- ret = -ENOMEM; +- goto qspi_resource_err; +- } ++ if (!qspi->dev_ids) ++ return -ENOMEM; + + for (val = 0; val < num_irqs; val++) { + irq = -1; +@@ -1330,7 +1322,7 @@ int bcm_qspi_probe(struct 
platform_device *pdev, + qspi->xfer_mode.addrlen = -1; + qspi->xfer_mode.hp = -1; + +- ret = devm_spi_register_master(&pdev->dev, master); ++ ret = spi_register_master(master); + if (ret < 0) { + dev_err(dev, "can't register master\n"); + goto qspi_reg_err; +@@ -1343,8 +1335,6 @@ int bcm_qspi_probe(struct platform_device *pdev, + clk_disable_unprepare(qspi->clk); + qspi_probe_err: + kfree(qspi->dev_ids); +-qspi_resource_err: +- spi_master_put(master); + return ret; + } + /* probe function to be called by SoC specific platform driver probe */ +@@ -1355,10 +1345,10 @@ int bcm_qspi_remove(struct platform_device *pdev) + struct bcm_qspi *qspi = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); ++ spi_unregister_master(qspi->master); + bcm_qspi_hw_uninit(qspi); + clk_disable_unprepare(qspi->clk); + kfree(qspi->dev_ids); +- spi_unregister_master(qspi->master); + + return 0; + } +diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c +index eab27d41ba83..6824beae18e4 100644 +--- a/drivers/spi/spi-bcm2835.c ++++ b/drivers/spi/spi-bcm2835.c +@@ -737,7 +737,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev) + struct resource *res; + int err; + +- master = spi_alloc_master(&pdev->dev, sizeof(*bs)); ++ master = devm_spi_alloc_master(&pdev->dev, sizeof(*bs)); + if (!master) { + dev_err(&pdev->dev, "spi_alloc_master() failed\n"); + return -ENOMEM; +@@ -759,23 +759,20 @@ static int bcm2835_spi_probe(struct platform_device *pdev) + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + bs->regs = devm_ioremap_resource(&pdev->dev, res); +- if (IS_ERR(bs->regs)) { +- err = PTR_ERR(bs->regs); +- goto out_master_put; +- } ++ if (IS_ERR(bs->regs)) ++ return PTR_ERR(bs->regs); + + bs->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(bs->clk)) { + err = PTR_ERR(bs->clk); + dev_err(&pdev->dev, "could not get clk: %d\n", err); +- goto out_master_put; ++ return err; + } + + bs->irq = platform_get_irq(pdev, 0); + if (bs->irq <= 0) { + dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq); +- err = bs->irq ? bs->irq : -ENODEV; +- goto out_master_put; ++ return bs->irq ? 
bs->irq : -ENODEV; + } + + clk_prepare_enable(bs->clk); +@@ -790,21 +787,20 @@ static int bcm2835_spi_probe(struct platform_device *pdev) + dev_name(&pdev->dev), master); + if (err) { + dev_err(&pdev->dev, "could not request IRQ: %d\n", err); +- goto out_clk_disable; ++ goto out_dma_release; + } + +- err = devm_spi_register_master(&pdev->dev, master); ++ err = spi_register_master(master); + if (err) { + dev_err(&pdev->dev, "could not register SPI master: %d\n", err); +- goto out_clk_disable; ++ goto out_dma_release; + } + + return 0; + +-out_clk_disable: ++out_dma_release: ++ bcm2835_dma_release(master); + clk_disable_unprepare(bs->clk); +-out_master_put: +- spi_master_put(master); + return err; + } + +@@ -813,6 +809,8 @@ static int bcm2835_spi_remove(struct platform_device *pdev) + struct spi_master *master = platform_get_drvdata(pdev); + struct bcm2835_spi *bs = spi_master_get_devdata(master); + ++ spi_unregister_master(master); ++ + /* Clear FIFOs, and disable the HW block */ + bcm2835_wr(bs, BCM2835_SPI_CS, + BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); +diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c +index e075712c501e..88772efda830 100644 +--- a/drivers/spi/spi-bcm2835aux.c ++++ b/drivers/spi/spi-bcm2835aux.c +@@ -407,7 +407,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) + unsigned long clk_hz; + int err; + +- master = spi_alloc_master(&pdev->dev, sizeof(*bs)); ++ master = devm_spi_alloc_master(&pdev->dev, sizeof(*bs)); + if (!master) { + dev_err(&pdev->dev, "spi_alloc_master() failed\n"); + return -ENOMEM; +@@ -439,30 +439,27 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) + /* the main area */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + bs->regs = devm_ioremap_resource(&pdev->dev, res); +- if (IS_ERR(bs->regs)) { +- err = PTR_ERR(bs->regs); +- goto out_master_put; +- } ++ if (IS_ERR(bs->regs)) ++ return PTR_ERR(bs->regs); + + bs->clk = devm_clk_get(&pdev->dev, NULL); + if ((!bs->clk) || (IS_ERR(bs->clk))) { + err = PTR_ERR(bs->clk); + dev_err(&pdev->dev, "could not get clk: %d\n", err); +- goto out_master_put; ++ return err; + } + + bs->irq = platform_get_irq(pdev, 0); + if (bs->irq <= 0) { + dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq); +- err = bs->irq ? bs->irq : -ENODEV; +- goto out_master_put; ++ return bs->irq ? 
bs->irq : -ENODEV; + } + + /* this also enables the HW block */ + err = clk_prepare_enable(bs->clk); + if (err) { + dev_err(&pdev->dev, "could not prepare clock: %d\n", err); +- goto out_master_put; ++ return err; + } + + /* just checking if the clock returns a sane value */ +@@ -485,7 +482,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) + goto out_clk_disable; + } + +- err = devm_spi_register_master(&pdev->dev, master); ++ err = spi_register_master(master); + if (err) { + dev_err(&pdev->dev, "could not register SPI master: %d\n", err); + goto out_clk_disable; +@@ -495,8 +492,6 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) + + out_clk_disable: + clk_disable_unprepare(bs->clk); +-out_master_put: +- spi_master_put(master); + return err; + } + +@@ -505,6 +500,8 @@ static int bcm2835aux_spi_remove(struct platform_device *pdev) + struct spi_master *master = platform_get_drvdata(pdev); + struct bcm2835aux_spi *bs = spi_master_get_devdata(master); + ++ spi_unregister_master(master); ++ + bcm2835aux_spi_reset_hw(bs); + + /* disable the HW block by releasing the clock */ +diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c +index 1c57ce64abba..e383c6368915 100644 +--- a/drivers/spi/spi-cadence.c ++++ b/drivers/spi/spi-cadence.c +@@ -118,6 +118,7 @@ struct cdns_spi { + void __iomem *regs; + struct clk *ref_clk; + struct clk *pclk; ++ unsigned int clk_rate; + u32 speed_hz; + const u8 *txbuf; + u8 *rxbuf; +@@ -253,7 +254,7 @@ static void cdns_spi_config_clock_freq(struct spi_device *spi, + u32 ctrl_reg, baud_rate_val; + unsigned long frequency; + +- frequency = clk_get_rate(xspi->ref_clk); ++ frequency = xspi->clk_rate; + + ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); + +@@ -558,8 +559,9 @@ static int cdns_spi_probe(struct platform_device *pdev) + master->auto_runtime_pm = true; + master->mode_bits = SPI_CPOL | SPI_CPHA; + ++ xspi->clk_rate = clk_get_rate(xspi->ref_clk); + /* Set to default valid value */ +- master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4; ++ master->max_speed_hz = xspi->clk_rate / 4; + xspi->speed_hz = master->max_speed_hz; + + master->bits_per_word_mask = SPI_BPW_MASK(8); +diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c +index 1905d20c229f..62ff6130727b 100644 +--- a/drivers/spi/spi-davinci.c ++++ b/drivers/spi/spi-davinci.c +@@ -1099,13 +1099,13 @@ static int davinci_spi_remove(struct platform_device *pdev) + spi_bitbang_stop(&dspi->bitbang); + + clk_disable_unprepare(dspi->clk); +- spi_master_put(master); + + if (dspi->dma_rx) { + dma_release_channel(dspi->dma_rx); + dma_release_channel(dspi->dma_tx); + } + ++ spi_master_put(master); + return 0; + } + +diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c +index b62a99caacc0..a41adea48618 100644 +--- a/drivers/spi/spi-dln2.c ++++ b/drivers/spi/spi-dln2.c +@@ -783,7 +783,7 @@ static int dln2_spi_probe(struct platform_device *pdev) + + static int dln2_spi_remove(struct platform_device *pdev) + { +- struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); ++ struct spi_master *master = platform_get_drvdata(pdev); + struct dln2_spi *dln2 = spi_master_get_devdata(master); + + pm_runtime_disable(&pdev->dev); +diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c +index e31971f91475..c079ab36275f 100644 +--- a/drivers/spi/spi-dw-mid.c ++++ b/drivers/spi/spi-dw-mid.c +@@ -155,6 +155,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws, + if (!xfer->tx_buf) + return NULL; + ++ memset(&txconf, 0, 
sizeof(txconf)); + txconf.direction = DMA_MEM_TO_DEV; + txconf.dst_addr = dws->dma_addr; + txconf.dst_maxburst = 16; +@@ -201,6 +202,7 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, + if (!xfer->rx_buf) + return NULL; + ++ memset(&rxconf, 0, sizeof(rxconf)); + rxconf.direction = DMA_DEV_TO_MEM; + rxconf.src_addr = dws->dma_addr; + rxconf.src_maxburst = 16; +@@ -226,19 +228,23 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws, + + static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer) + { +- u16 dma_ctrl = 0; ++ u16 imr = 0, dma_ctrl = 0; + + dw_writel(dws, DW_SPI_DMARDLR, 0xf); + dw_writel(dws, DW_SPI_DMATDLR, 0x10); + +- if (xfer->tx_buf) ++ if (xfer->tx_buf) { + dma_ctrl |= SPI_DMA_TDMAE; +- if (xfer->rx_buf) ++ imr |= SPI_INT_TXOI; ++ } ++ if (xfer->rx_buf) { + dma_ctrl |= SPI_DMA_RDMAE; ++ imr |= SPI_INT_RXUI | SPI_INT_RXOI; ++ } + dw_writel(dws, DW_SPI_DMACR, dma_ctrl); + + /* Set the interrupt mask */ +- spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI); ++ spi_umask_intr(dws, imr); + + dws->transfer_handler = dma_transfer; + +@@ -268,7 +274,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer) + dma_async_issue_pending(dws->txchan); + } + +- return 0; ++ return 1; + } + + static void mid_spi_dma_stop(struct dw_spi *dws) +diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c +index babf0a337e96..91f44e3e1930 100644 +--- a/drivers/spi/spi-dw.c ++++ b/drivers/spi/spi-dw.c +@@ -384,11 +384,8 @@ static int dw_spi_transfer_one(struct spi_master *master, + + spi_enable_chip(dws, 1); + +- if (dws->dma_mapped) { +- ret = dws->dma_ops->dma_transfer(dws, transfer); +- if (ret < 0) +- return ret; +- } ++ if (dws->dma_mapped) ++ return dws->dma_ops->dma_transfer(dws, transfer); + + if (chip->poll_mode) + return poll_transfer(dws); +@@ -500,6 +497,8 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) + snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num); + spin_lock_init(&dws->buf_lock); + ++ spi_master_set_devdata(master, dws); ++ + ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dws->name, master); + if (ret < 0) { + dev_err(dev, "can not get IRQ\n"); +@@ -531,8 +530,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) + } + } + +- spi_master_set_devdata(master, dws); +- ret = devm_spi_register_master(dev, master); ++ ret = spi_register_master(master); + if (ret) { + dev_err(&master->dev, "problem registering spi master\n"); + goto err_dma_exit; +@@ -556,6 +554,8 @@ void dw_spi_remove_host(struct dw_spi *dws) + { + dw_spi_debugfs_remove(dws); + ++ spi_unregister_master(dws->master); ++ + if (dws->dma_ops && dws->dma_ops->dma_exit) + dws->dma_ops->dma_exit(dws); + +diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c +index db3b6e9151a8..672152b49d95 100644 +--- a/drivers/spi/spi-fsl-dspi.c ++++ b/drivers/spi/spi-fsl-dspi.c +@@ -70,7 +70,7 @@ + #define SPI_SR 0x2c + #define SPI_SR_EOQF 0x10000000 + #define SPI_SR_TCFQF 0x80000000 +-#define SPI_SR_CLEAR 0xdaad0000 ++#define SPI_SR_CLEAR 0x9aaf0000 + + #define SPI_RSER 0x30 + #define SPI_RSER_EOQFE 0x10000000 +diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c +index 2e65b70c7879..2a340234c85c 100644 +--- a/drivers/spi/spi-img-spfi.c ++++ b/drivers/spi/spi-img-spfi.c +@@ -771,8 +771,10 @@ static int img_spfi_resume(struct device *dev) + int ret; + + ret = pm_runtime_get_sync(dev); +- if (ret) ++ if (ret) { ++ pm_runtime_put_noidle(dev); + 
return ret; ++ } + spfi_reset(spfi); + pm_runtime_put(dev); + +diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c +index 50e620f4e8fe..cac38753d0cd 100644 +--- a/drivers/spi/spi-loopback-test.c ++++ b/drivers/spi/spi-loopback-test.c +@@ -74,7 +74,7 @@ static struct spi_test spi_tests[] = { + { + .description = "tx/rx-transfer - crossing PAGE_SIZE", + .fill_option = FILL_COUNT_8, +- .iterate_len = { ITERATE_MAX_LEN }, ++ .iterate_len = { ITERATE_LEN }, + .iterate_tx_align = ITERATE_ALIGN, + .iterate_rx_align = ITERATE_ALIGN, + .transfers = { +@@ -803,7 +803,7 @@ static int spi_test_run_iter(struct spi_device *spi, + test.transfers[i].len = len; + if (test.transfers[i].tx_buf) + test.transfers[i].tx_buf += tx_off; +- if (test.transfers[i].tx_buf) ++ if (test.transfers[i].rx_buf) + test.transfers[i].rx_buf += rx_off; + } + +diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c +index 899d7a8f0889..dd0bf25d4550 100644 +--- a/drivers/spi/spi-mt65xx.c ++++ b/drivers/spi/spi-mt65xx.c +@@ -337,14 +337,15 @@ static int mtk_spi_fifo_transfer(struct spi_master *master, + mtk_spi_prepare_transfer(master, xfer); + mtk_spi_setup_packet(master); + +- cnt = xfer->len / 4; +- iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt); +- +- remainder = xfer->len % 4; +- if (remainder > 0) { +- reg_val = 0; +- memcpy(®_val, xfer->tx_buf + (cnt * 4), remainder); +- writel(reg_val, mdata->base + SPI_TX_DATA_REG); ++ if (xfer->tx_buf) { ++ cnt = xfer->len / 4; ++ iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt); ++ remainder = xfer->len % 4; ++ if (remainder > 0) { ++ reg_val = 0; ++ memcpy(®_val, xfer->tx_buf + (cnt * 4), remainder); ++ writel(reg_val, mdata->base + SPI_TX_DATA_REG); ++ } + } + + mtk_spi_enable_transfer(master); +diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c +index 76a8425be227..2eeb0fe2eed2 100644 +--- a/drivers/spi/spi-omap-100k.c ++++ b/drivers/spi/spi-omap-100k.c +@@ -251,7 +251,7 @@ static int omap1_spi100k_setup_transfer(struct spi_device *spi, + else + word_len = spi->bits_per_word; + +- if (spi->bits_per_word > 32) ++ if (word_len > 32) + return -EINVAL; + cs->word_len = word_len; + +@@ -435,7 +435,7 @@ static int omap1_spi100k_probe(struct platform_device *pdev) + + static int omap1_spi100k_remove(struct platform_device *pdev) + { +- struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); ++ struct spi_master *master = platform_get_drvdata(pdev); + struct omap1_spi100k *spi100k = spi_master_get_devdata(master); + + pm_runtime_disable(&pdev->dev); +@@ -449,7 +449,7 @@ static int omap1_spi100k_remove(struct platform_device *pdev) + #ifdef CONFIG_PM + static int omap1_spi100k_runtime_suspend(struct device *dev) + { +- struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); ++ struct spi_master *master = dev_get_drvdata(dev); + struct omap1_spi100k *spi100k = spi_master_get_devdata(master); + + clk_disable_unprepare(spi100k->ick); +@@ -460,7 +460,7 @@ static int omap1_spi100k_runtime_suspend(struct device *dev) + + static int omap1_spi100k_runtime_resume(struct device *dev) + { +- struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); ++ struct spi_master *master = dev_get_drvdata(dev); + struct omap1_spi100k *spi100k = spi_master_get_devdata(master); + int ret; + +diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c +index 2f4df804c4d8..9a97ad973c41 100644 +--- a/drivers/spi/spi-pic32.c ++++ b/drivers/spi/spi-pic32.c +@@ -839,6 +839,7 @@ static int 
pic32_spi_probe(struct platform_device *pdev) + return 0; + + err_bailout: ++ pic32_spi_dma_unprep(pic32s); + clk_disable_unprepare(pic32s->clk); + err_master: + spi_master_put(master); +diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c +index 58d2d48e16a5..c37e35aeafa4 100644 +--- a/drivers/spi/spi-pxa2xx-pci.c ++++ b/drivers/spi/spi-pxa2xx-pci.c +@@ -21,7 +21,8 @@ enum { + PORT_BSW1, + PORT_BSW2, + PORT_CE4100, +- PORT_LPT, ++ PORT_LPT0, ++ PORT_LPT1, + }; + + struct pxa_spi_info { +@@ -48,8 +49,10 @@ static struct dw_dma_slave bsw1_rx_param = { .src_id = 7 }; + static struct dw_dma_slave bsw2_tx_param = { .dst_id = 8 }; + static struct dw_dma_slave bsw2_rx_param = { .src_id = 9 }; + +-static struct dw_dma_slave lpt_tx_param = { .dst_id = 0 }; +-static struct dw_dma_slave lpt_rx_param = { .src_id = 1 }; ++static struct dw_dma_slave lpt1_tx_param = { .dst_id = 0 }; ++static struct dw_dma_slave lpt1_rx_param = { .src_id = 1 }; ++static struct dw_dma_slave lpt0_tx_param = { .dst_id = 2 }; ++static struct dw_dma_slave lpt0_rx_param = { .src_id = 3 }; + + static bool lpss_dma_filter(struct dma_chan *chan, void *param) + { +@@ -158,12 +161,19 @@ static struct pxa_spi_info spi_info_configs[] = { + .num_chipselect = 1, + .max_clk_rate = 50000000, + }, +- [PORT_LPT] = { ++ [PORT_LPT0] = { + .type = LPSS_LPT_SSP, + .port_id = 0, + .setup = lpss_spi_setup, +- .tx_param = &lpt_tx_param, +- .rx_param = &lpt_rx_param, ++ .tx_param = &lpt0_tx_param, ++ .rx_param = &lpt0_rx_param, ++ }, ++ [PORT_LPT1] = { ++ .type = LPSS_LPT_SSP, ++ .port_id = 1, ++ .setup = lpss_spi_setup, ++ .tx_param = &lpt1_tx_param, ++ .rx_param = &lpt1_rx_param, + }, + }; + +@@ -251,8 +261,9 @@ static const struct pci_device_id pxa2xx_spi_pci_devices[] = { + { PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 }, + { PCI_VDEVICE(INTEL, 0x22ac), PORT_BSW2 }, + { PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 }, +- { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT }, +- { }, ++ { PCI_VDEVICE(INTEL, 0x9ce5), PORT_LPT0 }, ++ { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT1 }, ++ { } + }; + MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices); + +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c +index 2f84d7653afd..9bb6a574ab2f 100644 +--- a/drivers/spi/spi-pxa2xx.c ++++ b/drivers/spi/spi-pxa2xx.c +@@ -1606,7 +1606,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) + return -ENODEV; + } + +- master = spi_alloc_master(dev, sizeof(struct driver_data)); ++ master = devm_spi_alloc_master(dev, sizeof(*drv_data)); + if (!master) { + dev_err(&pdev->dev, "cannot alloc spi_master\n"); + pxa_ssp_free(ssp); +@@ -1774,7 +1774,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) + + /* Register with the SPI framework */ + platform_set_drvdata(pdev, drv_data); +- status = devm_spi_register_master(&pdev->dev, master); ++ status = spi_register_master(master); + if (status != 0) { + dev_err(&pdev->dev, "problem registering spi master\n"); + goto out_error_clock_enabled; +@@ -1788,7 +1788,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) + free_irq(ssp->irq, drv_data); + + out_error_master_alloc: +- spi_master_put(master); + pxa_ssp_free(ssp); + return status; + } +@@ -1804,6 +1803,8 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) + + pm_runtime_get_sync(&pdev->dev); + ++ spi_unregister_master(drv_data->master); ++ + /* Disable the SSP at the peripheral and SOC level */ + pxa2xx_spi_write(drv_data, SSCR0, 0); + clk_disable_unprepare(ssp->clk); +diff --git a/drivers/spi/spi-rb4xx.c b/drivers/spi/spi-rb4xx.c 
+index 3641d0e20135..1d7fd6dbaf87 100644 +--- a/drivers/spi/spi-rb4xx.c ++++ b/drivers/spi/spi-rb4xx.c +@@ -148,7 +148,7 @@ static int rb4xx_spi_probe(struct platform_device *pdev) + if (IS_ERR(spi_base)) + return PTR_ERR(spi_base); + +- master = spi_alloc_master(&pdev->dev, sizeof(*rbspi)); ++ master = devm_spi_alloc_master(&pdev->dev, sizeof(*rbspi)); + if (!master) + return -ENOMEM; + +diff --git a/drivers/spi/spi-s3c24xx-fiq.S b/drivers/spi/spi-s3c24xx-fiq.S +index 059f2dc1fda2..1565c792da07 100644 +--- a/drivers/spi/spi-s3c24xx-fiq.S ++++ b/drivers/spi/spi-s3c24xx-fiq.S +@@ -36,7 +36,6 @@ + @ and an offset to the irq acknowledgment word + + ENTRY(s3c24xx_spi_fiq_rx) +-s3c24xx_spi_fix_rx: + .word fiq_rx_end - fiq_rx_start + .word fiq_rx_irq_ack - fiq_rx_start + fiq_rx_start: +@@ -50,7 +49,7 @@ fiq_rx_start: + strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] + + subs fiq_rcount, fiq_rcount, #1 +- subnes pc, lr, #4 @@ return, still have work to do ++ subsne pc, lr, #4 @@ return, still have work to do + + @@ set IRQ controller so that next op will trigger IRQ + mov fiq_rtmp, #0 +@@ -62,7 +61,6 @@ fiq_rx_irq_ack: + fiq_rx_end: + + ENTRY(s3c24xx_spi_fiq_txrx) +-s3c24xx_spi_fiq_txrx: + .word fiq_txrx_end - fiq_txrx_start + .word fiq_txrx_irq_ack - fiq_txrx_start + fiq_txrx_start: +@@ -77,7 +75,7 @@ fiq_txrx_start: + strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] + + subs fiq_rcount, fiq_rcount, #1 +- subnes pc, lr, #4 @@ return, still have work to do ++ subsne pc, lr, #4 @@ return, still have work to do + + mov fiq_rtmp, #0 + str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ] +@@ -89,7 +87,6 @@ fiq_txrx_irq_ack: + fiq_txrx_end: + + ENTRY(s3c24xx_spi_fiq_tx) +-s3c24xx_spi_fix_tx: + .word fiq_tx_end - fiq_tx_start + .word fiq_tx_irq_ack - fiq_tx_start + fiq_tx_start: +@@ -102,7 +99,7 @@ fiq_tx_start: + strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] + + subs fiq_rcount, fiq_rcount, #1 +- subnes pc, lr, #4 @@ return, still have work to do ++ subsne pc, lr, #4 @@ return, still have work to do + + mov fiq_rtmp, #0 + str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ] +diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c +index f63714ffb62f..e4306a0f7f17 100644 +--- a/drivers/spi/spi-sc18is602.c ++++ b/drivers/spi/spi-sc18is602.c +@@ -247,13 +247,12 @@ static int sc18is602_probe(struct i2c_client *client, + struct sc18is602_platform_data *pdata = dev_get_platdata(dev); + struct sc18is602 *hw; + struct spi_master *master; +- int error; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C | + I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) + return -EINVAL; + +- master = spi_alloc_master(dev, sizeof(struct sc18is602)); ++ master = devm_spi_alloc_master(dev, sizeof(struct sc18is602)); + if (!master) + return -ENOMEM; + +@@ -304,15 +303,7 @@ static int sc18is602_probe(struct i2c_client *client, + master->min_speed_hz = hw->freq / 128; + master->max_speed_hz = hw->freq / 4; + +- error = devm_spi_register_master(dev, master); +- if (error) +- goto error_reg; +- +- return 0; +- +-error_reg: +- spi_master_put(master); +- return error; ++ return devm_spi_register_master(dev, master); + } + + static const struct i2c_device_id sc18is602_id[] = { +diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c +index 2bf53f0e27d9..e1fd29068777 100644 +--- a/drivers/spi/spi-sh.c ++++ b/drivers/spi/spi-sh.c +@@ -450,7 +450,7 @@ static int spi_sh_probe(struct platform_device *pdev) + return -ENODEV; + } + +- master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data)); ++ master = 
devm_spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data)); + if (master == NULL) { + dev_err(&pdev->dev, "spi_alloc_master error.\n"); + return -ENOMEM; +@@ -468,16 +468,14 @@ static int spi_sh_probe(struct platform_device *pdev) + break; + default: + dev_err(&pdev->dev, "No support width\n"); +- ret = -ENODEV; +- goto error1; ++ return -ENODEV; + } + ss->irq = irq; + ss->master = master; + ss->addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (ss->addr == NULL) { + dev_err(&pdev->dev, "ioremap error.\n"); +- ret = -ENOMEM; +- goto error1; ++ return -ENOMEM; + } + INIT_LIST_HEAD(&ss->queue); + spin_lock_init(&ss->lock); +@@ -487,7 +485,7 @@ static int spi_sh_probe(struct platform_device *pdev) + ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss); + if (ret < 0) { + dev_err(&pdev->dev, "request_irq error\n"); +- goto error1; ++ return ret; + } + + master->num_chipselect = 2; +@@ -506,9 +504,6 @@ static int spi_sh_probe(struct platform_device *pdev) + + error3: + free_irq(irq, ss); +- error1: +- spi_master_put(master); +- + return ret; + } + +diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c +index 710adbc2485f..05f7316d5a52 100644 +--- a/drivers/spi/spi-st-ssc4.c ++++ b/drivers/spi/spi-st-ssc4.c +@@ -379,13 +379,14 @@ static int spi_st_probe(struct platform_device *pdev) + ret = devm_spi_register_master(&pdev->dev, master); + if (ret) { + dev_err(&pdev->dev, "Failed to register master\n"); +- goto clk_disable; ++ goto rpm_disable; + } + + return 0; + +-clk_disable: ++rpm_disable: + pm_runtime_disable(&pdev->dev); ++clk_disable: + clk_disable_unprepare(spi_st->clk); + put_master: + spi_master_put(master); +diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c +index 7e7da97982aa..8c3f5a00fd9e 100644 +--- a/drivers/spi/spi-sun6i.c ++++ b/drivers/spi/spi-sun6i.c +@@ -163,7 +163,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master, + struct spi_transfer *tfr) + { + struct sun6i_spi *sspi = spi_master_get_devdata(master); +- unsigned int mclk_rate, div, timeout; ++ unsigned int mclk_rate, div, div_cdr1, div_cdr2, timeout; + unsigned int start, end, tx_time; + unsigned int tx_len = 0; + int ret = 0; +@@ -241,18 +241,20 @@ static int sun6i_spi_transfer_one(struct spi_master *master, + * First try CDR2, and if we can't reach the expected + * frequency, fall back to CDR1. + */ +- div = mclk_rate / (2 * tfr->speed_hz); +- if (div <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) { +- if (div > 0) +- div--; +- +- reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS; ++ div_cdr1 = DIV_ROUND_UP(mclk_rate, tfr->speed_hz); ++ div_cdr2 = DIV_ROUND_UP(div_cdr1, 2); ++ if (div_cdr2 <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) { ++ reg = SUN6I_CLK_CTL_CDR2(div_cdr2 - 1) | SUN6I_CLK_CTL_DRS; + } else { +- div = ilog2(mclk_rate) - ilog2(tfr->speed_hz); ++ div = min(SUN6I_CLK_CTL_CDR1_MASK, order_base_2(div_cdr1)); + reg = SUN6I_CLK_CTL_CDR1(div); + } + + sun6i_spi_write(sspi, SUN6I_CLK_CTL_REG, reg); ++ /* Finally enable the bus - doing so before might raise SCK to HIGH */ ++ reg = sun6i_spi_read(sspi, SUN6I_GBL_CTL_REG); ++ reg |= SUN6I_GBL_CTL_BUS_ENABLE; ++ sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG, reg); + + /* Setup the transfer now... 
*/ + if (sspi->tx_buf) +@@ -336,7 +338,7 @@ static int sun6i_spi_runtime_resume(struct device *dev) + } + + sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG, +- SUN6I_GBL_CTL_BUS_ENABLE | SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP); ++ SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP); + + return 0; + +diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c +index e37712bed0b2..d1ca8f619b82 100644 +--- a/drivers/spi/spi-tegra114.c ++++ b/drivers/spi/spi-tegra114.c +@@ -801,6 +801,7 @@ static int tegra_spi_setup(struct spi_device *spi) + + ret = pm_runtime_get_sync(tspi->dev); + if (ret < 0) { ++ pm_runtime_put_noidle(tspi->dev); + dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret); + return ret; + } +@@ -1214,6 +1215,7 @@ static int tegra_spi_resume(struct device *dev) + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { ++ pm_runtime_put_noidle(dev); + dev_err(dev, "pm runtime failed, e = %d\n", ret); + return ret; + } +diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c +index b6558bb6f9df..4b9541e1726a 100644 +--- a/drivers/spi/spi-tegra20-sflash.c ++++ b/drivers/spi/spi-tegra20-sflash.c +@@ -564,6 +564,7 @@ static int tegra_sflash_resume(struct device *dev) + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { ++ pm_runtime_put_noidle(dev); + dev_err(dev, "pm runtime failed, e = %d\n", ret); + return ret; + } +diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c +index cf2a329fd895..9f14560686b6 100644 +--- a/drivers/spi/spi-tegra20-slink.c ++++ b/drivers/spi/spi-tegra20-slink.c +@@ -761,6 +761,7 @@ static int tegra_slink_setup(struct spi_device *spi) + + ret = pm_runtime_get_sync(tspi->dev); + if (ret < 0) { ++ pm_runtime_put_noidle(tspi->dev); + dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret); + return ret; + } +@@ -1197,6 +1198,7 @@ static int tegra_slink_resume(struct device *dev) + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { ++ pm_runtime_put_noidle(dev); + dev_err(dev, "pm runtime failed, e = %d\n", ret); + return ret; + } +diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c +index 4cb72a8e4646..b0a5486936c0 100644 +--- a/drivers/spi/spi-ti-qspi.c ++++ b/drivers/spi/spi-ti-qspi.c +@@ -175,6 +175,7 @@ static int ti_qspi_setup(struct spi_device *spi) + + ret = pm_runtime_get_sync(qspi->dev); + if (ret < 0) { ++ pm_runtime_put_noidle(qspi->dev); + dev_err(qspi->dev, "pm_runtime_get_sync() failed\n"); + return ret; + } +diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c +index fe707440f8c3..9b24ebbba346 100644 +--- a/drivers/spi/spi-topcliff-pch.c ++++ b/drivers/spi/spi-topcliff-pch.c +@@ -585,8 +585,10 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw) + data->pkt_tx_buff = kzalloc(size, GFP_KERNEL); + if (data->pkt_tx_buff != NULL) { + data->pkt_rx_buff = kzalloc(size, GFP_KERNEL); +- if (!data->pkt_rx_buff) ++ if (!data->pkt_rx_buff) { + kfree(data->pkt_tx_buff); ++ data->pkt_tx_buff = NULL; ++ } + } + + if (!data->pkt_rx_buff) { +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c +index d74d341f9890..84e2296c45a2 100644 +--- a/drivers/spi/spi.c ++++ b/drivers/spi/spi.c +@@ -422,6 +422,12 @@ static LIST_HEAD(spi_master_list); + */ + static DEFINE_MUTEX(board_lock); + ++/* ++ * Prevents addition of devices with same chip select and ++ * addition of devices below an unregistering controller. 
++ */ ++static DEFINE_MUTEX(spi_add_lock); ++ + /** + * spi_alloc_device - Allocate a new SPI device + * @master: Controller to which device is connected +@@ -500,7 +506,6 @@ static int spi_dev_check(struct device *dev, void *data) + */ + int spi_add_device(struct spi_device *spi) + { +- static DEFINE_MUTEX(spi_add_lock); + struct spi_master *master = spi->master; + struct device *dev = master->dev.parent; + int status; +@@ -529,6 +534,13 @@ int spi_add_device(struct spi_device *spi) + goto done; + } + ++ /* Controller may unregister concurrently */ ++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC) && ++ !device_is_registered(&master->dev)) { ++ status = -ENODEV; ++ goto done; ++ } ++ + if (master->cs_gpios) + spi->cs_gpio = master->cs_gpios[spi->chip_select]; + +@@ -1827,6 +1839,47 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size) + } + EXPORT_SYMBOL_GPL(spi_alloc_master); + ++static void devm_spi_release_master(struct device *dev, void *master) ++{ ++ spi_master_put(*(struct spi_master **)master); ++} ++ ++/** ++ * devm_spi_alloc_master - resource-managed spi_alloc_master() ++ * @dev: physical device of SPI master ++ * @size: how much zeroed driver-private data to allocate ++ * Context: can sleep ++ * ++ * Allocate an SPI master and automatically release a reference on it ++ * when @dev is unbound from its driver. Drivers are thus relieved from ++ * having to call spi_master_put(). ++ * ++ * The arguments to this function are identical to spi_alloc_master(). ++ * ++ * Return: the SPI master structure on success, else NULL. ++ */ ++struct spi_master *devm_spi_alloc_master(struct device *dev, unsigned int size) ++{ ++ struct spi_master **ptr, *master; ++ ++ ptr = devres_alloc(devm_spi_release_master, sizeof(*ptr), ++ GFP_KERNEL); ++ if (!ptr) ++ return NULL; ++ ++ master = spi_alloc_master(dev, size); ++ if (master) { ++ master->devm_allocated = true; ++ *ptr = master; ++ devres_add(dev, ptr); ++ } else { ++ devres_free(ptr); ++ } ++ ++ return master; ++} ++EXPORT_SYMBOL_GPL(devm_spi_alloc_master); ++ + #ifdef CONFIG_OF + static int of_spi_register_master(struct spi_master *master) + { +@@ -2025,7 +2078,11 @@ static int __unregister(struct device *dev, void *null) + */ + void spi_unregister_master(struct spi_master *master) + { +- int dummy; ++ /* Prevent addition of new devices, unregister existing ones */ ++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) ++ mutex_lock(&spi_add_lock); ++ ++ device_for_each_child(&master->dev, NULL, __unregister); + + if (master->queued) { + if (spi_destroy_queue(master)) +@@ -2036,8 +2093,16 @@ void spi_unregister_master(struct spi_master *master) + list_del(&master->list); + mutex_unlock(&board_lock); + +- dummy = device_for_each_child(&master->dev, NULL, __unregister); +- device_unregister(&master->dev); ++ device_del(&master->dev); ++ ++ /* Release the last reference on the master if its driver ++ * has not yet been converted to devm_spi_alloc_master(). ++ */ ++ if (!master->devm_allocated) ++ put_device(&master->dev); ++ ++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) ++ mutex_unlock(&spi_add_lock); + } + EXPORT_SYMBOL_GPL(spi_unregister_master); + +diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c +index bada281fc202..b71fbb9817a5 100644 +--- a/drivers/spi/spi_qsd.c ++++ b/drivers/spi/spi_qsd.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2008-2018, 2020 The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -43,10 +43,22 @@ + #include + #include + #include ++#include + #include "spi_qsd.h" + + #define SPI_MAX_BYTES_PER_WORD (4) + ++#define spi_ipc(log_ctx, print, dev, x...) do { \ ++if (log_ctx) \ ++ ipc_log_string(log_ctx, x); \ ++if (print) { \ ++ if (dev) \ ++ dev_err((dev), x); \ ++ else \ ++ pr_err(x); \ ++} \ ++} while (0) ++ + static int msm_spi_pm_resume_runtime(struct device *device); + static int msm_spi_pm_suspend_runtime(struct device *device); + static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd); +@@ -954,6 +966,8 @@ static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id) + * processing of interrupt. + */ + mb(); ++ spi_ipc(dd->ipc_logs, false, dd->dev, "%s op_stat = 0x%x\n", __func__, ++ op); + if (op & SPI_OP_INPUT_SERVICE_FLAG) + ret |= msm_spi_input_irq(irq, dev_id); + +@@ -1097,6 +1111,8 @@ static irqreturn_t msm_spi_error_irq(int irq, void *dev_id) + msm_spi_ack_clk_err(dd); + /* Ensure clearing of QUP_ERROR_FLAGS was completed */ + mb(); ++ spi_ipc(dd->ipc_logs, false, dd->dev, "%s spi_err = 0x%x\n", ++ __func__, spi_err); + return IRQ_HANDLED; + } + +@@ -1414,6 +1430,11 @@ static int msm_spi_process_transfer(struct msm_spi *dd) + return ret; + } + } ++ ++ spi_ipc(dd->ipc_logs, false, dd->dev, ++ "%s(): bpw:%d speed:%d msg_len:%d timeout:%u xfer_mode:%d\n", ++ __func__, bpw, max_speed, dd->cur_msg_len, timeout, dd->tx_mode); ++ + msm_spi_set_qup_io_modes(dd); + msm_spi_set_spi_config(dd, bpw); + msm_spi_set_qup_config(dd, bpw); +@@ -1538,6 +1559,7 @@ static inline void msm_spi_set_cs(struct spi_device *spi, bool set_flag) + + pm_runtime_mark_last_busy(dd->dev); + pm_runtime_put_autosuspend(dd->dev); ++ spi_ipc(dd->ipc_logs, false, dd->dev, "%s(): End\n", __func__); + } + + static void reset_core(struct msm_spi *dd) +@@ -1602,13 +1624,13 @@ static int get_local_resources(struct msm_spi *dd) + dev_err(dd->dev, + "%s: error configuring GPIOs\n", + __func__); +- return ret; ++ goto ret_err; + } + } + + ret = msm_spi_request_gpios(dd); + if (ret) +- return ret; ++ goto ret_err; + + ret = clk_prepare_enable(dd->clk); + if (ret) +@@ -1624,6 +1646,8 @@ static int get_local_resources(struct msm_spi *dd) + clk_disable_unprepare(dd->clk); + clk0_err: + msm_spi_free_gpios(dd); ++ret_err: ++ spi_ipc(dd->ipc_logs, false, dd->dev, "%s: ret %d\n", __func__, ret); + return ret; + } + +@@ -1644,6 +1668,8 @@ static int msm_spi_transfer_one(struct spi_master *master, + + dd = spi_master_get_devdata(master); + ++ spi_ipc(dd->ipc_logs, false, dd->dev, "%s START\n", __func__); ++ + /* Check message parameters */ + if (xfer->speed_hz > dd->pdata->max_clock_speed || + (xfer->bits_per_word && +@@ -1714,44 +1740,109 @@ static int msm_spi_transfer_one(struct spi_master *master, + mutex_unlock(&dd->core_lock); + if (dd->suspended) + wake_up_interruptible(&dd->continue_suspend); ++ ++ spi_ipc(dd->ipc_logs, false, dd->dev, "%s ret = %d END\n", ++ __func__, status_error); + return status_error; + } + +-static int msm_spi_prepare_transfer_hardware(struct spi_master *master) ++static int msm_spi_pm_get_sync(struct device *dev) + { +- struct msm_spi *dd = spi_master_get_devdata(master); +- int resume_state = 0; +- +- resume_state = pm_runtime_get_sync(dd->dev); +- if (resume_state < 0) +- goto spi_finalize; ++ int ret; + + /* + * Counter-part of system-suspend when runtime-pm is not enabled. 
+ * This way, resume can be left empty and device will be put in + * active mode only if client requests anything on the bus + */ +- if (!pm_runtime_enabled(dd->dev)) +- resume_state = msm_spi_pm_resume_runtime(dd->dev); ++ if (!pm_runtime_enabled(dev)) { ++ dev_info(dev, "%s: pm_runtime not enabled\n", __func__); ++ ret = msm_spi_pm_resume_runtime(dev); ++ } else { ++ ret = pm_runtime_get_sync(dev); ++ } ++ ++ return ret; ++} ++ ++static int msm_spi_pm_put_sync(struct device *dev) ++{ ++ int ret = 0; ++ ++ if (!pm_runtime_enabled(dev)) { ++ dev_info(dev, "%s: pm_runtime not enabled\n", __func__); ++ ret = msm_spi_pm_suspend_runtime(dev); ++ } else { ++ pm_runtime_mark_last_busy(dev); ++ pm_runtime_put_autosuspend(dev); ++ } ++ ++ return ret; ++} ++ ++static int msm_spi_prepare_message(struct spi_master *master, ++ struct spi_message *spi_msg) ++{ ++ struct msm_spi *dd = spi_master_get_devdata(master); ++ int resume_state; ++ ++ resume_state = msm_spi_pm_get_sync(dd->dev); + if (resume_state < 0) +- goto spi_finalize; +- if (dd->suspended) { +- resume_state = -EBUSY; +- goto spi_finalize; ++ return resume_state; ++ ++ return 0; ++} ++ ++static int msm_spi_unprepare_message(struct spi_master *master, ++ struct spi_message *spi_msg) ++{ ++ struct msm_spi *dd = spi_master_get_devdata(master); ++ int ret; ++ ++ ret = msm_spi_pm_put_sync(dd->dev); ++ if (ret < 0) ++ return ret; ++ ++ return 0; ++} ++ ++static int msm_spi_prepare_transfer_hardware(struct spi_master *master) ++{ ++ struct msm_spi *dd = spi_master_get_devdata(master); ++ int resume_state; ++ ++ if (!dd->pdata->shared_ee) { ++ resume_state = msm_spi_pm_get_sync(dd->dev); ++ if (resume_state < 0) ++ goto spi_finalize; ++ ++ if (dd->suspended) { ++ resume_state = -EBUSY; ++ goto spi_finalize; ++ } + } ++ ++ spi_ipc(dd->ipc_logs, false, dd->dev, "%s() End\n", __func__); + return 0; + + spi_finalize: + spi_finalize_current_message(master); ++ spi_ipc(dd->ipc_logs, false, dd->dev, "%s(): resume_state = %d\n", ++ __func__, resume_state); + return resume_state; + } + + static int msm_spi_unprepare_transfer_hardware(struct spi_master *master) + { + struct msm_spi *dd = spi_master_get_devdata(master); ++ int ret; ++ ++ if (!dd->pdata->shared_ee) { ++ ret = msm_spi_pm_put_sync(dd->dev); ++ if (ret < 0) ++ return ret; ++ } + +- pm_runtime_mark_last_busy(dd->dev); +- pm_runtime_put_autosuspend(dd->dev); + return 0; + } + +@@ -1839,6 +1930,8 @@ static int msm_spi_setup(struct spi_device *spi) + pm_runtime_put_autosuspend(dd->dev); + + err_setup_exit: ++ spi_ipc(dd->ipc_logs, false, dd->dev, "%s: ret %d End\n", ++ __func__, rc); + return rc; + } + +@@ -2249,6 +2342,8 @@ static struct msm_spi_platform_data *msm_spi_dt_to_pdata( + &pdata->rt_priority, DT_OPT, DT_BOOL, 0}, + {"qcom,shared", + &pdata->is_shared, DT_OPT, DT_BOOL, 0}, ++ {"qcom,shared_ee", ++ &pdata->shared_ee, DT_OPT, DT_BOOL, 0}, + {NULL, NULL, 0, 0, 0}, + }; + +@@ -2574,6 +2669,12 @@ static int msm_spi_probe(struct platform_device *pdev) + goto err_probe_reqmem; + } + ++ /* This property is required for Dual EE use case of spi */ ++ if (dd->pdata->shared_ee) { ++ master->prepare_message = msm_spi_prepare_message; ++ master->unprepare_message = msm_spi_unprepare_message; ++ } ++ + pm_runtime_set_autosuspend_delay(&pdev->dev, MSEC_PER_SEC); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); +@@ -2589,6 +2690,12 @@ static int msm_spi_probe(struct platform_device *pdev) + goto err_attrs; + } + spi_debugfs_init(dd); ++ dd->ipc_logs = ipc_log_context_create(4, 
dev_name(dd->dev), 0); ++ if (!dd->ipc_logs) ++ dev_info(&pdev->dev, "%s: failed to create ipc log cntxt\n", ++ __func__); ++ ++ spi_ipc(dd->ipc_logs, false, dd->dev, "%s: success\n", __func__); + + return 0; + +@@ -2618,7 +2725,7 @@ static int msm_spi_pm_suspend_runtime(struct device *device) + goto suspend_exit; + + if (dd->suspended) +- return 0; ++ goto suspend_exit; + + /* + * Make sure nothing is added to the queue while we're +@@ -2645,6 +2752,7 @@ static int msm_spi_pm_suspend_runtime(struct device *device) + mutex_unlock(&dd->core_lock); + + suspend_exit: ++ spi_ipc(dd->ipc_logs, false, dd->dev, "%s(): End\n", __func__); + return 0; + } + +@@ -2663,11 +2771,15 @@ static int msm_spi_pm_resume_runtime(struct device *device) + goto resume_exit; + + if (!dd->suspended) +- return 0; ++ goto resume_exit; ++ + if (!dd->is_init_complete) { + ret = init_resources(pdev); +- if (ret != 0) ++ if (ret != 0) { ++ spi_ipc(dd->ipc_logs, false, dd->dev, ++ "%s: resume err with init_res %d\n", __func__, ret); + return ret; ++ } + + dd->is_init_complete = true; + } +@@ -2676,8 +2788,11 @@ static int msm_spi_pm_resume_runtime(struct device *device) + + if (!dd->pdata->is_shared) { + ret = get_local_resources(dd); +- if (ret) ++ if (ret) { ++ spi_ipc(dd->ipc_logs, false, dd->dev, ++ "%s: resume err with local_res %d\n", __func__, ret); + return ret; ++ } + } + if (!dd->pdata->is_shared && dd->use_dma) { + msm_spi_bam_pipe_connect(dd, &dd->bam.prod, +@@ -2688,23 +2803,26 @@ static int msm_spi_pm_resume_runtime(struct device *device) + dd->suspended = 0; + + resume_exit: ++ spi_ipc(dd->ipc_logs, false, dd->dev, "%s(): End\n", __func__); + return 0; + } + + #ifdef CONFIG_PM_SLEEP + static int msm_spi_suspend(struct device *device) + { ++ struct platform_device *pdev = to_platform_device(device); ++ struct spi_master *master = platform_get_drvdata(pdev); ++ struct msm_spi *dd; ++ ++ dev_dbg(device, "system suspend\n"); ++ if (!master) ++ goto suspend_exit; ++ dd = spi_master_get_devdata(master); ++ if (!dd) ++ goto suspend_exit; ++ + if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) { +- struct platform_device *pdev = to_platform_device(device); +- struct spi_master *master = platform_get_drvdata(pdev); +- struct msm_spi *dd; +- +- dev_dbg(device, "system suspend"); +- if (!master) +- goto suspend_exit; +- dd = spi_master_get_devdata(master); +- if (!dd) +- goto suspend_exit; ++ + msm_spi_pm_suspend_runtime(device); + + /* +@@ -2714,18 +2832,33 @@ static int msm_spi_suspend(struct device *device) + pm_runtime_set_suspended(device); + pm_runtime_enable(device); + } ++ + suspend_exit: ++ spi_ipc(dd->ipc_logs, false, dd->dev, "%s(): End\n", __func__); + return 0; + } + + static int msm_spi_resume(struct device *device) + { ++ struct platform_device *pdev = to_platform_device(device); ++ struct spi_master *master = platform_get_drvdata(pdev); ++ struct msm_spi *dd; ++ ++ dev_dbg(device, "system resume\n"); ++ if (!master) ++ goto resume_exit; ++ dd = spi_master_get_devdata(master); ++ if (!dd) ++ goto resume_exit; ++ + /* + * Rely on runtime-PM to call resume in case it is enabled + * Even if it's not enabled, rely on 1st client transaction to do + * clock ON and gpio configuration + */ +- dev_dbg(device, "system resume"); ++ ++resume_exit: ++ spi_ipc(dd->ipc_logs, false, dd->dev, "%s(): End\n", __func__); + return 0; + } + #else +@@ -2738,6 +2871,7 @@ static int msm_spi_remove(struct platform_device *pdev) + { + struct spi_master *master = platform_get_drvdata(pdev); + struct msm_spi *dd = 
spi_master_get_devdata(master); ++ int ret = 0; + + spi_debugfs_exit(dd); + sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp); +@@ -2746,6 +2880,8 @@ static int msm_spi_remove(struct platform_device *pdev) + dd->dma_teardown(dd); + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); ++ if (dd->ipc_logs) ++ ret = ipc_log_context_destroy(dd->ipc_logs); + clk_put(dd->clk); + clk_put(dd->pclk); + msm_spi_clk_path_teardown(dd); +@@ -2753,7 +2889,7 @@ static int msm_spi_remove(struct platform_device *pdev) + spi_unregister_master(master); + spi_master_put(master); + +- return 0; ++ return ret; + } + + static const struct of_device_id msm_spi_dt_match[] = { +diff --git a/drivers/spi/spi_qsd.h b/drivers/spi/spi_qsd.h +index b18b55e2a143..8da8a706e971 100644 +--- a/drivers/spi/spi_qsd.h ++++ b/drivers/spi/spi_qsd.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -304,6 +304,7 @@ struct msm_spi { + struct msm_bus_client_handle *bus_cl_hdl; + unsigned long mem_phys_addr; + size_t mem_size; ++ void *ipc_logs; /* ipc logs handler */ + int input_fifo_size; + int output_fifo_size; + u32 rx_bytes_remaining; +diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c +index 4f1b6aac93b1..bdc59949d340 100644 +--- a/drivers/spi/spidev.c ++++ b/drivers/spi/spidev.c +@@ -636,15 +636,20 @@ static int spidev_open(struct inode *inode, struct file *filp) + static int spidev_release(struct inode *inode, struct file *filp) + { + struct spidev_data *spidev; ++ int dofree; + + mutex_lock(&device_list_lock); + spidev = filp->private_data; + filp->private_data = NULL; + ++ spin_lock_irq(&spidev->spi_lock); ++ /* ... after we unbound from the underlying device? */ ++ dofree = (spidev->spi == NULL); ++ spin_unlock_irq(&spidev->spi_lock); ++ + /* last close? */ + spidev->users--; + if (!spidev->users) { +- int dofree; + + kfree(spidev->tx_buffer); + spidev->tx_buffer = NULL; +@@ -652,19 +657,14 @@ static int spidev_release(struct inode *inode, struct file *filp) + kfree(spidev->rx_buffer); + spidev->rx_buffer = NULL; + +- spin_lock_irq(&spidev->spi_lock); +- if (spidev->spi) +- spidev->speed_hz = spidev->spi->max_speed_hz; +- +- /* ... after we unbound from the underlying device? 
*/ +- dofree = (spidev->spi == NULL); +- spin_unlock_irq(&spidev->spi_lock); +- + if (dofree) + kfree(spidev); ++ else ++ spidev->speed_hz = spidev->spi->max_speed_hz; + } + #ifdef CONFIG_SPI_SLAVE +- spi_slave_abort(spidev->spi); ++ if (!dofree) ++ spi_slave_abort(spidev->spi); + #endif + mutex_unlock(&device_list_lock); + +@@ -810,13 +810,13 @@ static int spidev_remove(struct spi_device *spi) + { + struct spidev_data *spidev = spi_get_drvdata(spi); + ++ /* prevent new opens */ ++ mutex_lock(&device_list_lock); + /* make sure ops on existing fds can abort cleanly */ + spin_lock_irq(&spidev->spi_lock); + spidev->spi = NULL; + spin_unlock_irq(&spidev->spi_lock); + +- /* prevent new opens */ +- mutex_lock(&device_list_lock); + list_del(&spidev->device_entry); + device_destroy(spidev_class, spidev->devt); + clear_bit(MINOR(spidev->devt), minors); +diff --git a/drivers/ssb/sdio.c b/drivers/ssb/sdio.c +index 2278e43614bd..5e10514ef80c 100644 +--- a/drivers/ssb/sdio.c ++++ b/drivers/ssb/sdio.c +@@ -411,7 +411,6 @@ static void ssb_sdio_block_write(struct ssb_device *dev, const void *buffer, + sdio_claim_host(bus->host_sdio); + if (unlikely(ssb_sdio_switch_core(bus, dev))) { + error = -EIO; +- memset((void *)buffer, 0xff, count); + goto err_out; + } + offset |= bus->sdio_sbaddr & 0xffff; +diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c +index 7fec28cf3850..46decb1669c9 100644 +--- a/drivers/staging/android/ion/ion.c ++++ b/drivers/staging/android/ion/ion.c +@@ -1349,42 +1349,45 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf) + static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset) + { + struct ion_buffer *buffer = dmabuf->priv; ++ void *vaddr; ++ ++ if (!buffer->heap->ops->map_kernel) { ++ pr_err("%s: map kernel is not implemented by this heap.\n", ++ __func__); ++ return ERR_PTR(-ENOTTY); ++ } ++ mutex_lock(&buffer->lock); ++ vaddr = ion_buffer_kmap_get(buffer); ++ mutex_unlock(&buffer->lock); + +- return buffer->vaddr + offset * PAGE_SIZE; ++ if (IS_ERR(vaddr)) ++ return vaddr; ++ ++ return vaddr + offset * PAGE_SIZE; + } + + static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset, + void *ptr) + { ++ struct ion_buffer *buffer = dmabuf->priv; ++ ++ if (buffer->heap->ops->map_kernel) { ++ mutex_lock(&buffer->lock); ++ ion_buffer_kmap_put(buffer); ++ mutex_unlock(&buffer->lock); ++ } ++ + } + + static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) + { +- struct ion_buffer *buffer = dmabuf->priv; +- void *vaddr; +- +- if (!buffer->heap->ops->map_kernel) { +- pr_err("%s: map kernel is not implemented by this heap.\n", +- __func__); +- return -ENODEV; +- } +- +- mutex_lock(&buffer->lock); +- vaddr = ion_buffer_kmap_get(buffer); +- mutex_unlock(&buffer->lock); +- return PTR_ERR_OR_ZERO(vaddr); ++ return 0; + } + + static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction direction) + { +- struct ion_buffer *buffer = dmabuf->priv; +- +- mutex_lock(&buffer->lock); +- ion_buffer_kmap_put(buffer); +- mutex_unlock(&buffer->lock); +- + return 0; + } + +diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c +index 43d3f92cd418..b27fc68d03dc 100644 +--- a/drivers/staging/android/ion/ion_heap.c ++++ b/drivers/staging/android/ion/ion_heap.c +@@ -109,12 +109,12 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, + + static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t 
pgprot) + { +- void *addr = vm_map_ram(pages, num, -1, pgprot); ++ void *addr = vmap(pages, num, VM_MAP, pgprot); + + if (!addr) + return -ENOMEM; + memset(addr, 0, PAGE_SIZE * num); +- vm_unmap_ram(addr, num); ++ vunmap(addr); + + return 0; + } +diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c +index 954ed2c5d807..a9aa89cf8778 100644 +--- a/drivers/staging/android/vsoc.c ++++ b/drivers/staging/android/vsoc.c +@@ -269,7 +269,8 @@ static int do_create_fd_scoped_permission( + atomic_t *owner_ptr = NULL; + struct vsoc_device_region *managed_region_p; + +- if (copy_from_user(&np->permission, &arg->perm, sizeof(*np)) || ++ if (copy_from_user(&np->permission, ++ &arg->perm, sizeof(np->permission)) || + copy_from_user(&managed_fd, + &arg->managed_region_fd, sizeof(managed_fd))) { + return -EFAULT; +diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c +index ccd1a91290bf..9058ef473c33 100644 +--- a/drivers/staging/comedi/drivers/addi_apci_1032.c ++++ b/drivers/staging/comedi/drivers/addi_apci_1032.c +@@ -115,14 +115,22 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, + unsigned int *data) + { + struct apci1032_private *devpriv = dev->private; +- unsigned int shift, oldmask; ++ unsigned int shift, oldmask, himask, lomask; + + switch (data[0]) { + case INSN_CONFIG_DIGITAL_TRIG: + if (data[1] != 0) + return -EINVAL; + shift = data[3]; +- oldmask = (1U << shift) - 1; ++ if (shift < 32) { ++ oldmask = (1U << shift) - 1; ++ himask = data[4] << shift; ++ lomask = data[5] << shift; ++ } else { ++ oldmask = 0xffffffffu; ++ himask = 0; ++ lomask = 0; ++ } + switch (data[2]) { + case COMEDI_DIGITAL_TRIG_DISABLE: + devpriv->ctrl = 0; +@@ -145,8 +153,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, + devpriv->mode2 &= oldmask; + } + /* configure specified channels */ +- devpriv->mode1 |= data[4] << shift; +- devpriv->mode2 |= data[5] << shift; ++ devpriv->mode1 |= himask; ++ devpriv->mode2 |= lomask; + break; + case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: + if (devpriv->ctrl != (APCI1032_CTRL_INT_ENA | +@@ -163,8 +171,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, + devpriv->mode2 &= oldmask; + } + /* configure specified channels */ +- devpriv->mode1 |= data[4] << shift; +- devpriv->mode2 |= data[5] << shift; ++ devpriv->mode1 |= himask; ++ devpriv->mode2 |= lomask; + break; + default: + return -EINVAL; +@@ -261,6 +269,7 @@ static irqreturn_t apci1032_interrupt(int irq, void *d) + struct apci1032_private *devpriv = dev->private; + struct comedi_subdevice *s = dev->read_subdev; + unsigned int ctrl; ++ unsigned short val; + + /* check interrupt is from this device */ + if ((inl(devpriv->amcc_iobase + AMCC_OP_REG_INTCSR) & +@@ -276,7 +285,8 @@ static irqreturn_t apci1032_interrupt(int irq, void *d) + outl(ctrl & ~APCI1032_CTRL_INT_ENA, dev->iobase + APCI1032_CTRL_REG); + + s->state = inl(dev->iobase + APCI1032_STATUS_REG) & 0xffff; +- comedi_buf_write_samples(s, &s->state, 1); ++ val = s->state; ++ comedi_buf_write_samples(s, &val, 1); + comedi_handle_events(dev, s); + + /* enable the interrupt */ +diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c +index 63991c49ff23..1f25f565041c 100644 +--- a/drivers/staging/comedi/drivers/addi_apci_1500.c ++++ b/drivers/staging/comedi/drivers/addi_apci_1500.c +@@ -217,7 +217,7 @@ static irqreturn_t apci1500_interrupt(int irq, void *d) + struct comedi_device *dev = d; + struct 
apci1500_private *devpriv = dev->private; + struct comedi_subdevice *s = dev->read_subdev; +- unsigned int status = 0; ++ unsigned short status = 0; + unsigned int val; + + val = inl(devpriv->amcc + AMCC_OP_REG_INTCSR); +@@ -247,14 +247,14 @@ static irqreturn_t apci1500_interrupt(int irq, void *d) + * + * Mask Meaning + * ---------- ------------------------------------------ +- * 0x00000001 Event 1 has occurred +- * 0x00000010 Event 2 has occurred +- * 0x00000100 Counter/timer 1 has run down (not implemented) +- * 0x00001000 Counter/timer 2 has run down (not implemented) +- * 0x00010000 Counter 3 has run down (not implemented) +- * 0x00100000 Watchdog has run down (not implemented) +- * 0x01000000 Voltage error +- * 0x10000000 Short-circuit error ++ * 0b00000001 Event 1 has occurred ++ * 0b00000010 Event 2 has occurred ++ * 0b00000100 Counter/timer 1 has run down (not implemented) ++ * 0b00001000 Counter/timer 2 has run down (not implemented) ++ * 0b00010000 Counter 3 has run down (not implemented) ++ * 0b00100000 Watchdog has run down (not implemented) ++ * 0b01000000 Voltage error ++ * 0b10000000 Short-circuit error + */ + comedi_buf_write_samples(s, &status, 1); + comedi_handle_events(dev, s); +@@ -461,13 +461,14 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev, + struct apci1500_private *devpriv = dev->private; + unsigned int trig = data[1]; + unsigned int shift = data[3]; +- unsigned int hi_mask = data[4] << shift; +- unsigned int lo_mask = data[5] << shift; +- unsigned int chan_mask = hi_mask | lo_mask; +- unsigned int old_mask = (1 << shift) - 1; +- unsigned int pm = devpriv->pm[trig] & old_mask; +- unsigned int pt = devpriv->pt[trig] & old_mask; +- unsigned int pp = devpriv->pp[trig] & old_mask; ++ unsigned int hi_mask; ++ unsigned int lo_mask; ++ unsigned int chan_mask; ++ unsigned int old_mask; ++ unsigned int pm; ++ unsigned int pt; ++ unsigned int pp; ++ unsigned int invalid_chan; + + if (trig > 1) { + dev_dbg(dev->class_dev, +@@ -475,11 +476,28 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev, + return -EINVAL; + } + +- if (chan_mask > 0xffff) { ++ if (shift <= 16) { ++ hi_mask = data[4] << shift; ++ lo_mask = data[5] << shift; ++ old_mask = (1U << shift) - 1; ++ invalid_chan = (data[4] | data[5]) >> (16 - shift); ++ } else { ++ hi_mask = 0; ++ lo_mask = 0; ++ old_mask = 0xffff; ++ invalid_chan = data[4] | data[5]; ++ } ++ chan_mask = hi_mask | lo_mask; ++ ++ if (invalid_chan) { + dev_dbg(dev->class_dev, "invalid digital trigger channel\n"); + return -EINVAL; + } + ++ pm = devpriv->pm[trig] & old_mask; ++ pt = devpriv->pt[trig] & old_mask; ++ pp = devpriv->pp[trig] & old_mask; ++ + switch (data[2]) { + case COMEDI_DIGITAL_TRIG_DISABLE: + /* clear trigger configuration */ +diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c +index 9bfb79c2e5c8..1b4ba19d599e 100644 +--- a/drivers/staging/comedi/drivers/addi_apci_1564.c ++++ b/drivers/staging/comedi/drivers/addi_apci_1564.c +@@ -340,14 +340,22 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, + unsigned int *data) + { + struct apci1564_private *devpriv = dev->private; +- unsigned int shift, oldmask; ++ unsigned int shift, oldmask, himask, lomask; + + switch (data[0]) { + case INSN_CONFIG_DIGITAL_TRIG: + if (data[1] != 0) + return -EINVAL; + shift = data[3]; +- oldmask = (1U << shift) - 1; ++ if (shift < 32) { ++ oldmask = (1U << shift) - 1; ++ himask = data[4] << shift; ++ lomask = data[5] << shift; ++ } else { ++ oldmask = 
0xffffffffu; ++ himask = 0; ++ lomask = 0; ++ } + switch (data[2]) { + case COMEDI_DIGITAL_TRIG_DISABLE: + devpriv->ctrl = 0; +@@ -371,8 +379,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, + devpriv->mode2 &= oldmask; + } + /* configure specified channels */ +- devpriv->mode1 |= data[4] << shift; +- devpriv->mode2 |= data[5] << shift; ++ devpriv->mode1 |= himask; ++ devpriv->mode2 |= lomask; + break; + case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: + if (devpriv->ctrl != (APCI1564_DI_IRQ_ENA | +@@ -389,8 +397,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, + devpriv->mode2 &= oldmask; + } + /* configure specified channels */ +- devpriv->mode1 |= data[4] << shift; +- devpriv->mode2 |= data[5] << shift; ++ devpriv->mode1 |= himask; ++ devpriv->mode2 |= lomask; + break; + default: + return -EINVAL; +diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c +index 385e14269870..2b408c893ed6 100644 +--- a/drivers/staging/comedi/drivers/adv_pci1710.c ++++ b/drivers/staging/comedi/drivers/adv_pci1710.c +@@ -299,11 +299,11 @@ static int pci1710_ai_eoc(struct comedi_device *dev, + static int pci1710_ai_read_sample(struct comedi_device *dev, + struct comedi_subdevice *s, + unsigned int cur_chan, +- unsigned int *val) ++ unsigned short *val) + { + const struct boardtype *board = dev->board_ptr; + struct pci1710_private *devpriv = dev->private; +- unsigned int sample; ++ unsigned short sample; + unsigned int chan; + + sample = inw(dev->iobase + PCI171X_AD_DATA_REG); +@@ -344,7 +344,7 @@ static int pci1710_ai_insn_read(struct comedi_device *dev, + pci1710_ai_setup_chanlist(dev, s, &insn->chanspec, 1, 1); + + for (i = 0; i < insn->n; i++) { +- unsigned int val; ++ unsigned short val; + + /* start conversion */ + outw(0, dev->iobase + PCI171X_SOFTTRG_REG); +@@ -394,7 +394,7 @@ static void pci1710_handle_every_sample(struct comedi_device *dev, + { + struct comedi_cmd *cmd = &s->async->cmd; + unsigned int status; +- unsigned int val; ++ unsigned short val; + int ret; + + status = inw(dev->iobase + PCI171X_STATUS_REG); +@@ -454,7 +454,7 @@ static void pci1710_handle_fifo(struct comedi_device *dev, + } + + for (i = 0; i < devpriv->max_samples; i++) { +- unsigned int val; ++ unsigned short val; + int ret; + + ret = pci1710_ai_read_sample(dev, s, s->async->cur_chan, &val); +diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c +index 3cd008acb657..15b9cc8531f0 100644 +--- a/drivers/staging/comedi/drivers/cb_pcidas.c ++++ b/drivers/staging/comedi/drivers/cb_pcidas.c +@@ -1290,7 +1290,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev, + devpriv->amcc + AMCC_OP_REG_INTCSR); + + ret = request_irq(pcidev->irq, cb_pcidas_interrupt, IRQF_SHARED, +- dev->board_name, dev); ++ "cb_pcidas", dev); + if (ret) { + dev_dbg(dev->class_dev, "unable to allocate irq %d\n", + pcidev->irq); +@@ -1351,6 +1351,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev, + if (dev->irq && board->has_ao_fifo) { + dev->write_subdev = s; + s->subdev_flags |= SDF_CMD_WRITE; ++ s->len_chanlist = s->n_chan; + s->do_cmdtest = cb_pcidas_ao_cmdtest; + s->do_cmd = cb_pcidas_ao_cmd; + s->cancel = cb_pcidas_ao_cancel; +diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c +index cb9c2699277e..b202df1dcba0 100644 +--- a/drivers/staging/comedi/drivers/cb_pcidas64.c ++++ b/drivers/staging/comedi/drivers/cb_pcidas64.c +@@ -4034,7 +4034,7 @@ static int 
auto_attach(struct comedi_device *dev, + init_stc_registers(dev); + + retval = request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED, +- dev->board_name, dev); ++ "cb_pcidas64", dev); + if (retval) { + dev_dbg(dev->class_dev, "unable to allocate irq %u\n", + pcidev->irq); +diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c +index 0fdf5e02182f..c27dbd55564c 100644 +--- a/drivers/staging/comedi/drivers/das6402.c ++++ b/drivers/staging/comedi/drivers/das6402.c +@@ -195,7 +195,7 @@ static irqreturn_t das6402_interrupt(int irq, void *d) + if (status & DAS6402_STATUS_FFULL) { + async->events |= COMEDI_CB_OVERFLOW; + } else if (status & DAS6402_STATUS_FFNE) { +- unsigned int val; ++ unsigned short val; + + val = das6402_ai_read_sample(dev, s); + comedi_buf_write_samples(s, &val, 1); +diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c +index fd4cb4911671..3bdaedc6d864 100644 +--- a/drivers/staging/comedi/drivers/das800.c ++++ b/drivers/staging/comedi/drivers/das800.c +@@ -436,7 +436,7 @@ static irqreturn_t das800_interrupt(int irq, void *d) + struct comedi_cmd *cmd; + unsigned long irq_flags; + unsigned int status; +- unsigned int val; ++ unsigned short val; + bool fifo_empty; + bool fifo_overflow; + int i; +diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c +index b8606ded0623..a2e3240b66a8 100644 +--- a/drivers/staging/comedi/drivers/dmm32at.c ++++ b/drivers/staging/comedi/drivers/dmm32at.c +@@ -413,7 +413,7 @@ static irqreturn_t dmm32at_isr(int irq, void *d) + { + struct comedi_device *dev = d; + unsigned char intstat; +- unsigned int val; ++ unsigned short val; + int i; + + if (!dev->attached) { +diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c +index 15a53204a36a..4fe856128870 100644 +--- a/drivers/staging/comedi/drivers/me4000.c ++++ b/drivers/staging/comedi/drivers/me4000.c +@@ -933,7 +933,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id) + struct comedi_subdevice *s = dev->read_subdev; + int i; + int c = 0; +- unsigned int lval; ++ unsigned short lval; + + if (!dev->attached) + return IRQ_NONE; +diff --git a/drivers/staging/comedi/drivers/mf6x4.c b/drivers/staging/comedi/drivers/mf6x4.c +index fbdf181d8ccc..40aa24a9b2c3 100644 +--- a/drivers/staging/comedi/drivers/mf6x4.c ++++ b/drivers/staging/comedi/drivers/mf6x4.c +@@ -121,8 +121,9 @@ static int mf6x4_ai_eoc(struct comedi_device *dev, + struct mf6x4_private *devpriv = dev->private; + unsigned int status; + ++ /* EOLC goes low at end of conversion. 
*/ + status = ioread32(devpriv->gpioc_reg); +- if (status & MF6X4_GPIOC_EOLC) ++ if ((status & MF6X4_GPIOC_EOLC) == 0) + return 0; + return -EBUSY; + } +diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c +index 84c62e256094..6e411b634015 100644 +--- a/drivers/staging/comedi/drivers/ni_6527.c ++++ b/drivers/staging/comedi/drivers/ni_6527.c +@@ -341,7 +341,7 @@ static int ni6527_intr_insn_config(struct comedi_device *dev, + case COMEDI_DIGITAL_TRIG_ENABLE_EDGES: + /* check shift amount */ + shift = data[3]; +- if (shift >= s->n_chan) { ++ if (shift >= 32) { + mask = 0; + rising = 0; + falling = 0; +diff --git a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c +index 3774daa9d661..e1334733abe7 100644 +--- a/drivers/staging/comedi/drivers/pcl711.c ++++ b/drivers/staging/comedi/drivers/pcl711.c +@@ -193,7 +193,7 @@ static irqreturn_t pcl711_interrupt(int irq, void *d) + struct comedi_device *dev = d; + struct comedi_subdevice *s = dev->read_subdev; + struct comedi_cmd *cmd = &s->async->cmd; +- unsigned int data; ++ unsigned short data; + + if (!dev->attached) { + dev_err(dev->class_dev, "spurious interrupt\n"); +diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c +index 5aeed44dff70..f06241712308 100644 +--- a/drivers/staging/comedi/drivers/pcl818.c ++++ b/drivers/staging/comedi/drivers/pcl818.c +@@ -422,7 +422,7 @@ static int pcl818_ai_eoc(struct comedi_device *dev, + + static bool pcl818_ai_write_sample(struct comedi_device *dev, + struct comedi_subdevice *s, +- unsigned int chan, unsigned int val) ++ unsigned int chan, unsigned short val) + { + struct pcl818_private *devpriv = dev->private; + struct comedi_cmd *cmd = &s->async->cmd; +diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c +index 1800eb3ae017..cdf86284dd04 100644 +--- a/drivers/staging/comedi/drivers/vmk80xx.c ++++ b/drivers/staging/comedi/drivers/vmk80xx.c +@@ -676,6 +676,9 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev) + if (!devpriv->ep_rx || !devpriv->ep_tx) + return -ENODEV; + ++ if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx)) ++ return -EINVAL; ++ + return 0; + } + +diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c +index 1055649f034c..59266650e071 100644 +--- a/drivers/staging/emxx_udc/emxx_udc.c ++++ b/drivers/staging/emxx_udc/emxx_udc.c +@@ -2173,7 +2173,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc, + struct nbu2ss_ep *ep, + int status) + { +- struct nbu2ss_req *req; ++ struct nbu2ss_req *req, *n; + + /* Endpoint Disable */ + _nbu2ss_epn_exit(udc, ep); +@@ -2185,7 +2185,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc, + return 0; + + /* called with irqs blocked */ +- list_for_each_entry(req, &ep->queue, queue) { ++ list_for_each_entry_safe(req, n, &ep->queue, queue) { + _nbu2ss_ep_done(ep, req, status); + } + +diff --git a/drivers/staging/fsl-mc/bus/mc-io.c b/drivers/staging/fsl-mc/bus/mc-io.c +index 798c965fe203..3fa6774946fa 100644 +--- a/drivers/staging/fsl-mc/bus/mc-io.c ++++ b/drivers/staging/fsl-mc/bus/mc-io.c +@@ -167,7 +167,12 @@ int __must_check fsl_create_mc_io(struct device *dev, + */ + void fsl_destroy_mc_io(struct fsl_mc_io *mc_io) + { +- struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; ++ struct fsl_mc_device *dpmcp_dev; ++ ++ if (!mc_io) ++ return; ++ ++ dpmcp_dev = mc_io->dpmcp_dev; + + if (dpmcp_dev) + fsl_mc_io_unset_dpmcp(mc_io); +diff 
--git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c +index 49c718b91e55..16f6f35954fb 100644 +--- a/drivers/staging/fwserial/fwserial.c ++++ b/drivers/staging/fwserial/fwserial.c +@@ -2255,6 +2255,7 @@ static int fwserial_create(struct fw_unit *unit) + err = fw_core_add_address_handler(&port->rx_handler, + &fw_high_memory_region); + if (err) { ++ tty_port_destroy(&port->port); + kfree(port); + goto free_ports; + } +@@ -2337,6 +2338,7 @@ static int fwserial_create(struct fw_unit *unit) + + free_ports: + for (--i; i >= 0; --i) { ++ fw_core_remove_address_handler(&serial->ports[i]->rx_handler); + tty_port_destroy(&serial->ports[i]->port); + kfree(serial->ports[i]); + } +diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c +index e72dfa9699f3..454e47424ade 100644 +--- a/drivers/staging/gdm724x/gdm_lte.c ++++ b/drivers/staging/gdm724x/gdm_lte.c +@@ -624,10 +624,12 @@ static void gdm_lte_netif_rx(struct net_device *dev, char *buf, + * bytes (99,130,83,99 dec) + */ + } __packed; +- void *addr = buf + sizeof(struct iphdr) + +- sizeof(struct udphdr) + +- offsetof(struct dhcp_packet, chaddr); +- ether_addr_copy(nic->dest_mac_addr, addr); ++ int offset = sizeof(struct iphdr) + ++ sizeof(struct udphdr) + ++ offsetof(struct dhcp_packet, chaddr); ++ if (offset + ETH_ALEN > len) ++ return; ++ ether_addr_copy(nic->dest_mac_addr, buf + offset); + } + } + +@@ -689,6 +691,7 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len) + struct multi_sdu *multi_sdu = (struct multi_sdu *)buf; + struct sdu *sdu = NULL; + u8 *data = (u8 *)multi_sdu->data; ++ int copied; + u16 i = 0; + u16 num_packet; + u16 hci_len; +@@ -702,6 +705,12 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len) + multi_sdu->num_packet); + + for (i = 0; i < num_packet; i++) { ++ copied = data - multi_sdu->data; ++ if (len < copied + sizeof(*sdu)) { ++ pr_err("rx prevent buffer overflow"); ++ return; ++ } ++ + sdu = (struct sdu *)data; + + cmd_evt = gdm_dev16_to_cpu(phy_dev-> +@@ -715,7 +724,8 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len) + pr_err("rx sdu wrong hci %04x\n", cmd_evt); + return; + } +- if (hci_len < 12) { ++ if (hci_len < 12 || ++ len < copied + sizeof(*sdu) + (hci_len - 12)) { + pr_err("rx sdu invalid len %d\n", hci_len); + return; + } +diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c +index 8a0744b58a32..4c2d6c2d4fb4 100644 +--- a/drivers/staging/greybus/audio_codec.c ++++ b/drivers/staging/greybus/audio_codec.c +@@ -491,6 +491,7 @@ static int gbcodec_hw_params(struct snd_pcm_substream *substream, + if (ret) { + dev_err_ratelimited(dai->dev, "%d: Error during set_config\n", + ret); ++ gb_pm_runtime_put_noidle(bundle); + mutex_unlock(&codec->lock); + return ret; + } +@@ -562,6 +563,7 @@ static int gbcodec_prepare(struct snd_pcm_substream *substream, + break; + } + if (ret) { ++ gb_pm_runtime_put_noidle(bundle); + mutex_unlock(&codec->lock); + dev_err_ratelimited(dai->dev, "set_data_size failed:%d\n", + ret); +diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c +index b6251691a33d..2bc415a19ced 100644 +--- a/drivers/staging/greybus/audio_topology.c ++++ b/drivers/staging/greybus/audio_topology.c +@@ -462,6 +462,15 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol, + val = ucontrol->value.integer.value[0] & mask; + connect = !!val; + ++ ret = gb_pm_runtime_get_sync(bundle); ++ 
if (ret) ++ return ret; ++ ++ ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id, ++ GB_AUDIO_INVALID_INDEX, &gbvalue); ++ if (ret) ++ goto exit; ++ + /* update ucontrol */ + if (gbvalue.value.integer_value[0] != val) { + for (wi = 0; wi < wlist->num_widgets; wi++) { +@@ -475,25 +484,17 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol, + gbvalue.value.integer_value[0] = + ucontrol->value.integer.value[0]; + +- ret = gb_pm_runtime_get_sync(bundle); +- if (ret) +- return ret; +- + ret = gb_audio_gb_set_control(module->mgmt_connection, + data->ctl_id, + GB_AUDIO_INVALID_INDEX, &gbvalue); +- +- gb_pm_runtime_put_autosuspend(bundle); +- +- if (ret) { +- dev_err_ratelimited(codec->dev, +- "%d:Error in %s for %s\n", ret, +- __func__, kcontrol->id.name); +- return ret; +- } + } + +- return 0; ++exit: ++ gb_pm_runtime_put_autosuspend(bundle); ++ if (ret) ++ dev_err_ratelimited(codec_dev, "%d:Error in %s for %s\n", ret, ++ __func__, kcontrol->id.name); ++ return ret; + } + + #define SOC_DAPM_MIXER_GB(xname, kcount, data) \ +diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c +index 5649ef1e379d..82a1c2cf6687 100644 +--- a/drivers/staging/greybus/sdio.c ++++ b/drivers/staging/greybus/sdio.c +@@ -413,6 +413,7 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd) + struct gb_sdio_command_request request = {0}; + struct gb_sdio_command_response response; + struct mmc_data *data = host->mrq->data; ++ unsigned int timeout_ms; + u8 cmd_flags; + u8 cmd_type; + int i; +@@ -471,9 +472,12 @@ static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd) + request.data_blksz = cpu_to_le16(data->blksz); + } + +- ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND, +- &request, sizeof(request), &response, +- sizeof(response)); ++ timeout_ms = cmd->busy_timeout ? 
cmd->busy_timeout : ++ GB_OPERATION_TIMEOUT_DEFAULT; ++ ++ ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND, ++ &request, sizeof(request), &response, ++ sizeof(response), timeout_ms); + if (ret < 0) + goto out; + +diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c +index 9ef9cbfd8926..c35c9b766a00 100644 +--- a/drivers/staging/greybus/uart.c ++++ b/drivers/staging/greybus/uart.c +@@ -661,8 +661,6 @@ static int set_serial_info(struct gb_tty *gb_tty, + if ((close_delay != gb_tty->port.close_delay) || + (closing_wait != gb_tty->port.closing_wait)) + retval = -EPERM; +- else +- retval = -EOPNOTSUPP; + } else { + gb_tty->port.close_delay = close_delay; + gb_tty->port.closing_wait = closing_wait; +diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c +index 5771d4ee8ef1..1a71bca4e698 100644 +--- a/drivers/staging/iio/cdc/ad7746.c ++++ b/drivers/staging/iio/cdc/ad7746.c +@@ -714,7 +714,6 @@ static int ad7746_probe(struct i2c_client *client, + indio_dev->num_channels = ARRAY_SIZE(ad7746_channels); + else + indio_dev->num_channels = ARRAY_SIZE(ad7746_channels) - 2; +- indio_dev->num_channels = ARRAY_SIZE(ad7746_channels); + indio_dev->modes = INDIO_DIRECT_MODE; + + if (pdata) { +diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c +index 08f1583ee34e..fb3ec56d45b6 100644 +--- a/drivers/staging/iio/light/tsl2583.c ++++ b/drivers/staging/iio/light/tsl2583.c +@@ -382,6 +382,15 @@ static int taos_als_calibrate(struct iio_dev *indio_dev) + dev_err(&chip->client->dev, "taos_als_calibrate failed to get lux\n"); + return lux_val; + } ++ ++ /* Avoid division by zero of lux_value later on */ ++ if (lux_val == 0) { ++ dev_err(&chip->client->dev, ++ "%s: lux_val of 0 will produce out of range trim_value\n", ++ __func__); ++ return -ENODATA; ++ } ++ + gain_trim_val = (unsigned int)(((chip->taos_settings.als_cal_target) + * chip->taos_settings.als_gain_trim) / lux_val); + +diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c +index b2b4fa4c3834..498ea7754dcc 100644 +--- a/drivers/staging/ks7010/ks_wlan_net.c ++++ b/drivers/staging/ks7010/ks_wlan_net.c +@@ -1404,6 +1404,7 @@ static int ks_wlan_set_scan(struct net_device *dev, + struct ks_wlan_private *priv = + (struct ks_wlan_private *)netdev_priv(dev); + struct iw_scan_req *req = NULL; ++ int len; + DPRINTK(2, "\n"); + + if (priv->sleep_mode == SLP_SLEEP) { +@@ -1415,8 +1416,9 @@ static int ks_wlan_set_scan(struct net_device *dev, + if (wrqu->data.length == sizeof(struct iw_scan_req) + && wrqu->data.flags & IW_SCAN_THIS_ESSID) { + req = (struct iw_scan_req *)extra; +- priv->scan_ssid_len = req->essid_len; +- memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len); ++ len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE); ++ priv->scan_ssid_len = len; ++ memcpy(priv->scan_ssid, req->essid, len); + } else { + priv->scan_ssid_len = 0; + } +diff --git a/drivers/staging/media/cec/cec-api.c b/drivers/staging/media/cec/cec-api.c +index e274e2f22398..264bb7d1efcb 100644 +--- a/drivers/staging/media/cec/cec-api.c ++++ b/drivers/staging/media/cec/cec-api.c +@@ -141,7 +141,13 @@ static long cec_adap_g_log_addrs(struct cec_adapter *adap, + struct cec_log_addrs log_addrs; + + mutex_lock(&adap->lock); +- log_addrs = adap->log_addrs; ++ /* ++ * We use memcpy here instead of assignment since there is a ++ * hole at the end of struct cec_log_addrs that an assignment ++ * might ignore. 
So when we do copy_to_user() we could leak ++ * one byte of memory. ++ */ ++ memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs)); + if (!adap->is_configured) + memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID, + sizeof(log_addrs.log_addr)); +diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c +index c26c99fd4a24..1e10fe204d3b 100644 +--- a/drivers/staging/media/omap4iss/iss.c ++++ b/drivers/staging/media/omap4iss/iss.c +@@ -1244,8 +1244,10 @@ static int iss_probe(struct platform_device *pdev) + if (ret < 0) + goto error; + +- if (!omap4iss_get(iss)) ++ if (!omap4iss_get(iss)) { ++ ret = -EINVAL; + goto error; ++ } + + ret = iss_reset(iss); + if (ret < 0) +diff --git a/drivers/staging/media/s5p-cec/s5p_cec.c b/drivers/staging/media/s5p-cec/s5p_cec.c +index 58d756231136..bebd44d9bd51 100644 +--- a/drivers/staging/media/s5p-cec/s5p_cec.c ++++ b/drivers/staging/media/s5p-cec/s5p_cec.c +@@ -54,7 +54,7 @@ static int s5p_cec_adap_enable(struct cec_adapter *adap, bool enable) + } else { + s5p_cec_mask_tx_interrupts(cec); + s5p_cec_mask_rx_interrupts(cec); +- pm_runtime_disable(cec->dev); ++ pm_runtime_put(cec->dev); + } + + return 0; +diff --git a/drivers/staging/most/aim-sound/sound.c b/drivers/staging/most/aim-sound/sound.c +index e4198e5e064b..288c7bf12945 100644 +--- a/drivers/staging/most/aim-sound/sound.c ++++ b/drivers/staging/most/aim-sound/sound.c +@@ -92,6 +92,8 @@ static void swap_copy24(u8 *dest, const u8 *source, unsigned int bytes) + { + unsigned int i = 0; + ++ if (bytes < 2) ++ return; + while (i < bytes - 2) { + dest[i] = source[i + 2]; + dest[i + 1] = source[i + 1]; +diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c +index 691e4a51ace4..99f45012bc3f 100644 +--- a/drivers/staging/octeon/ethernet-mdio.c ++++ b/drivers/staging/octeon/ethernet-mdio.c +@@ -155,12 +155,6 @@ int cvm_oct_phy_setup_device(struct net_device *dev) + + phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0); + if (!phy_node && of_phy_is_fixed_link(priv->of_node)) { +- int rc; +- +- rc = of_phy_register_fixed_link(priv->of_node); +- if (rc) +- return rc; +- + phy_node = of_node_get(priv->of_node); + } + if (!phy_node) +diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c +index f0900d1c4d7b..47a195921f3c 100644 +--- a/drivers/staging/octeon/ethernet-rx.c ++++ b/drivers/staging/octeon/ethernet-rx.c +@@ -83,15 +83,17 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) + else + port = work->word1.cn38xx.ipprt; + +- if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) { ++ if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) + /* + * Ignore length errors on min size packets. Some + * equipment incorrectly pads packets to 64+4FCS + * instead of 60+4FCS. Note these packets still get + * counted as frame errors. + */ +- } else if (work->word2.snoip.err_code == 5 || +- work->word2.snoip.err_code == 7) { ++ return 0; ++ ++ if (work->word2.snoip.err_code == 5 || ++ work->word2.snoip.err_code == 7) { + /* + * We received a packet with either an alignment error + * or a FCS error. 
This may be signalling that we are +@@ -122,7 +124,10 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) + /* Port received 0xd5 preamble */ + work->packet_ptr.s.addr += i + 1; + work->word1.len -= i + 5; +- } else if ((*ptr & 0xf) == 0xd) { ++ return 0; ++ } ++ ++ if ((*ptr & 0xf) == 0xd) { + /* Port received 0xd preamble */ + work->packet_ptr.s.addr += i; + work->word1.len -= i + 4; +@@ -132,21 +137,20 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) + ((*(ptr + 1) & 0xf) << 4); + ptr++; + } +- } else { +- printk_ratelimited("Port %d unknown preamble, packet dropped\n", +- port); +- cvm_oct_free_work(work); +- return 1; ++ return 0; + } ++ ++ printk_ratelimited("Port %d unknown preamble, packet dropped\n", ++ port); ++ cvm_oct_free_work(work); ++ return 1; + } +- } else { +- printk_ratelimited("Port %d receive error code %d, packet dropped\n", +- port, work->word2.snoip.err_code); +- cvm_oct_free_work(work); +- return 1; + } + +- return 0; ++ printk_ratelimited("Port %d receive error code %d, packet dropped\n", ++ port, work->word2.snoip.err_code); ++ cvm_oct_free_work(work); ++ return 1; + } + + static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget) +diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c +index 12354440a334..7601b800de07 100644 +--- a/drivers/staging/octeon/ethernet.c ++++ b/drivers/staging/octeon/ethernet.c +@@ -16,6 +16,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -880,6 +881,14 @@ static int cvm_oct_probe(struct platform_device *pdev) + break; + } + ++ if (priv->of_node && of_phy_is_fixed_link(priv->of_node)) { ++ if (of_phy_register_fixed_link(priv->of_node)) { ++ netdev_err(dev, "Failed to register fixed link for interface %d, port %d\n", ++ interface, priv->port); ++ dev->netdev_ops = NULL; ++ } ++ } ++ + if (!dev->netdev_ops) { + free_netdev(dev); + } else if (register_netdev(dev) < 0) { +diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c +index 6513ace1fce6..7dd6b7f19bf6 100644 +--- a/drivers/staging/rtl8188eu/core/rtw_ap.c ++++ b/drivers/staging/rtl8188eu/core/rtw_ap.c +@@ -917,6 +917,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) + /* SSID */ + p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SSID_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_)); + if (p && ie_len > 0) { ++ ie_len = min_t(int, ie_len, sizeof(pbss_network->Ssid.Ssid)); + memset(&pbss_network->Ssid, 0, sizeof(struct ndis_802_11_ssid)); + memcpy(pbss_network->Ssid.Ssid, (p + 2), ie_len); + pbss_network->Ssid.SsidLength = ie_len; +@@ -935,6 +936,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) + /* get supported rates */ + p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_)); + if (p) { ++ ie_len = min_t(int, ie_len, NDIS_802_11_LENGTH_RATES_EX); + memcpy(supportRate, p + 2, ie_len); + supportRateNum = ie_len; + } +@@ -942,6 +944,8 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) + /* get ext_supported rates */ + p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_, &ie_len, pbss_network->IELength - _BEACON_IE_OFFSET_); + if (p) { ++ ie_len = min_t(int, ie_len, ++ NDIS_802_11_LENGTH_RATES_EX - supportRateNum); + memcpy(supportRate + supportRateNum, p + 2, ie_len); + supportRateNum += ie_len; + } +@@ -1057,6 +1061,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) + 
pht_cap->mcs.rx_mask[0] = 0xff; + pht_cap->mcs.rx_mask[1] = 0x0; + } ++ ie_len = min_t(int, ie_len, sizeof(pmlmepriv->htpriv.ht_cap)); + memcpy(&pmlmepriv->htpriv.ht_cap, p+2, ie_len); + } + +diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +index 50793c9df1b3..866f9a571813 100644 +--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c ++++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +@@ -1172,9 +1172,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a, + break; + } + sec_len = *(pos++); len -= 1; +- if (sec_len > 0 && sec_len <= len) { ++ if (sec_len > 0 && ++ sec_len <= len && ++ sec_len <= 32) { + ssid[ssid_index].SsidLength = sec_len; +- memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength); ++ memcpy(ssid[ssid_index].Ssid, pos, sec_len); + ssid_index++; + } + pos += sec_len; +diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c +index c14088075c59..3afdb326e224 100644 +--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c ++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c +@@ -49,6 +49,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { + {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */ + {USB_DEVICE(0x2C4E, 0x0102)}, /* MERCUSYS MW150US v2 */ + {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ ++ {USB_DEVICE(0x7392, 0xb811)}, /* Edimax EW-7811UN V2 */ + {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ + {} /* Terminating entry */ + }; +diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c +index 7413a100ca19..fb3291dde3ae 100644 +--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c ++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c +@@ -419,9 +419,10 @@ static int _rtl92e_wx_set_scan(struct net_device *dev, + struct iw_scan_req *req = (struct iw_scan_req *)b; + + if (req->essid_len) { +- ieee->current_network.ssid_len = req->essid_len; +- memcpy(ieee->current_network.ssid, req->essid, +- req->essid_len); ++ int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE); ++ ++ ieee->current_network.ssid_len = len; ++ memcpy(ieee->current_network.ssid, req->essid, len); + } + } + +diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h +index b895a537d3e4..e46da4e128e0 100644 +--- a/drivers/staging/rtl8192e/rtllib.h ++++ b/drivers/staging/rtl8192e/rtllib.h +@@ -1160,7 +1160,7 @@ struct rtllib_network { + bool bWithAironetIE; + bool bCkipSupported; + bool bCcxRmEnable; +- u16 CcxRmState[2]; ++ u8 CcxRmState[2]; + bool bMBssidValid; + u8 MBssidMask; + u8 MBssid[ETH_ALEN]; +diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c +index c743182b933e..247475aa522e 100644 +--- a/drivers/staging/rtl8192e/rtllib_rx.c ++++ b/drivers/staging/rtl8192e/rtllib_rx.c +@@ -1986,7 +1986,7 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee, + info_element->data[2] == 0x96 && + info_element->data[3] == 0x01) { + if (info_element->len == 6) { +- memcpy(network->CcxRmState, &info_element[4], 2); ++ memcpy(network->CcxRmState, &info_element->data[4], 2); + if (network->CcxRmState[0] != 0) + network->bCcxRmEnable = true; + else +diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c +index 6ec379056650..53b77c8ae328 100644 +--- a/drivers/staging/rtl8192u/r8192U_core.c ++++ b/drivers/staging/rtl8192u/r8192U_core.c +@@ -2522,7 +2522,7 @@ static int 
rtl8192_read_eeprom_info(struct net_device *dev) + ret = eprom_read(dev, (EEPROM_TxPwIndex_CCK >> 1)); + if (ret < 0) + return ret; +- priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff) >> 8; ++ priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff00) >> 8; + } else + priv->EEPROMTxPowerLevelCCK = 0x10; + RT_TRACE(COMP_EPROM, "CCK Tx Power Levl: 0x%02x\n", priv->EEPROMTxPowerLevelCCK); +@@ -3420,7 +3420,7 @@ static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum, + u32 *TotalRxDataNum) + { + u16 SlotIndex; +- u8 i; ++ u16 i; + + *TotalRxBcnNum = 0; + *TotalRxDataNum = 0; +diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c +index d2f2f246063f..65aec3c2f574 100644 +--- a/drivers/staging/rtl8192u/r8192U_wx.c ++++ b/drivers/staging/rtl8192u/r8192U_wx.c +@@ -333,8 +333,10 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a, + struct iw_scan_req *req = (struct iw_scan_req *)b; + + if (req->essid_len) { +- ieee->current_network.ssid_len = req->essid_len; +- memcpy(ieee->current_network.ssid, req->essid, req->essid_len); ++ int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE); ++ ++ ieee->current_network.ssid_len = len; ++ memcpy(ieee->current_network.ssid, req->essid, len); + } + } + +diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c +index b7ee5e63af33..166f5151e2b8 100644 +--- a/drivers/staging/rtl8712/rtl871x_cmd.c ++++ b/drivers/staging/rtl8712/rtl871x_cmd.c +@@ -239,8 +239,10 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter, + psurveyPara->ss_ssidlen = 0; + memset(psurveyPara->ss_ssid, 0, IW_ESSID_MAX_SIZE + 1); + if ((pssid != NULL) && (pssid->SsidLength)) { +- memcpy(psurveyPara->ss_ssid, pssid->Ssid, pssid->SsidLength); +- psurveyPara->ss_ssidlen = cpu_to_le32(pssid->SsidLength); ++ int len = min_t(int, pssid->SsidLength, IW_ESSID_MAX_SIZE); ++ ++ memcpy(psurveyPara->ss_ssid, pssid->Ssid, len); ++ psurveyPara->ss_ssidlen = cpu_to_le32(len); + } + set_fwstate(pmlmepriv, _FW_UNDER_SURVEY); + r8712_enqueue_cmd(pcmdpriv, ph2c); +diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c +index 2d26f9a30fcf..af190b3863c8 100644 +--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c ++++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c +@@ -932,7 +932,7 @@ static int r871x_wx_set_priv(struct net_device *dev, + struct iw_point *dwrq = (struct iw_point *)awrq; + + len = dwrq->length; +- ext = memdup_user(dwrq->pointer, len); ++ ext = strndup_user(dwrq->pointer, len); + if (IS_ERR(ext)) + return PTR_ERR(ext); + +diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c +index 86ace1449309..ee54711cf8e8 100644 +--- a/drivers/staging/sm750fb/sm750.c ++++ b/drivers/staging/sm750fb/sm750.c +@@ -897,6 +897,7 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index) + fix->visual = FB_VISUAL_PSEUDOCOLOR; + break; + case 16: ++ case 24: + case 32: + fix->visual = FB_VISUAL_TRUECOLOR; + break; +diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c +index 764656759fbf..1079b11fff4b 100644 +--- a/drivers/staging/speakup/speakup_dectlk.c ++++ b/drivers/staging/speakup/speakup_dectlk.c +@@ -47,7 +47,7 @@ static unsigned char get_index(void); + static int in_escape; + static int is_flushing; + +-static spinlock_t flush_lock; ++static DEFINE_SPINLOCK(flush_lock); + static DECLARE_WAIT_QUEUE_HEAD(flush); + + static struct var_t vars[] = { +diff --git 
a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c +index 52ad5c137d9f..9d4e3b0d366f 100644 +--- a/drivers/staging/wlan-ng/hfa384x_usb.c ++++ b/drivers/staging/wlan-ng/hfa384x_usb.c +@@ -523,13 +523,8 @@ static void hfa384x_usb_defer(struct work_struct *data) + ----------------------------------------------------------------*/ + void hfa384x_create(struct hfa384x *hw, struct usb_device *usb) + { +- memset(hw, 0, sizeof(struct hfa384x)); + hw->usb = usb; + +- /* set up the endpoints */ +- hw->endp_in = usb_rcvbulkpipe(usb, 1); +- hw->endp_out = usb_sndbulkpipe(usb, 2); +- + /* Set up the waitq */ + init_waitqueue_head(&hw->cmdq); + +diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c +index f7149bb23180..bf617620e17b 100644 +--- a/drivers/staging/wlan-ng/prism2usb.c ++++ b/drivers/staging/wlan-ng/prism2usb.c +@@ -60,11 +60,16 @@ static int prism2sta_probe_usb(struct usb_interface *interface, + const struct usb_device_id *id) + { + struct usb_device *dev; +- ++ struct usb_endpoint_descriptor *bulk_in, *bulk_out; ++ struct usb_host_interface *iface_desc = interface->cur_altsetting; + struct wlandevice *wlandev = NULL; + struct hfa384x *hw = NULL; + int result = 0; + ++ result = usb_find_common_endpoints(iface_desc, &bulk_in, &bulk_out, NULL, NULL); ++ if (result) ++ goto failed; ++ + dev = interface_to_usbdev(interface); + wlandev = create_wlan(); + if (!wlandev) { +@@ -81,6 +86,8 @@ static int prism2sta_probe_usb(struct usb_interface *interface, + } + + /* Initialize the hw data */ ++ hw->endp_in = usb_rcvbulkpipe(dev, bulk_in->bEndpointAddress); ++ hw->endp_out = usb_sndbulkpipe(dev, bulk_out->bEndpointAddress); + hfa384x_create(hw, dev); + hw->wlandev = wlandev; + +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index 2b8fbcd8dde2..eba21f06a9b9 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -493,8 +493,7 @@ void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) + bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD); + + spin_lock_bh(&conn->cmd_lock); +- if (!list_empty(&cmd->i_conn_node) && +- !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP)) ++ if (!list_empty(&cmd->i_conn_node)) + list_del_init(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + +@@ -1396,14 +1395,27 @@ static u32 iscsit_do_crypto_hash_sg( + sg = cmd->first_data_sg; + page_off = cmd->first_data_sg_off; + ++ if (data_length && page_off) { ++ struct scatterlist first_sg; ++ u32 len = min_t(u32, data_length, sg->length - page_off); ++ ++ sg_init_table(&first_sg, 1); ++ sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off); ++ ++ ahash_request_set_crypt(hash, &first_sg, NULL, len); ++ crypto_ahash_update(hash); ++ ++ data_length -= len; ++ sg = sg_next(sg); ++ } ++ + while (data_length) { +- u32 cur_len = min_t(u32, data_length, (sg->length - page_off)); ++ u32 cur_len = min_t(u32, data_length, sg->length); + + ahash_request_set_crypt(hash, sg, NULL, cur_len); + crypto_ahash_update(hash); + + data_length -= cur_len; +- page_off = 0; + /* iscsit_map_iovec has already checked for invalid sg pointers */ + sg = sg_next(sg); + } +@@ -4080,12 +4092,22 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) + spin_lock_bh(&conn->cmd_lock); + list_splice_init(&conn->conn_cmd_list, &tmp_list); + +- list_for_each_entry(cmd, &tmp_list, i_conn_node) { ++ list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) { + struct 
se_cmd *se_cmd = &cmd->se_cmd; + + if (se_cmd->se_tfo != NULL) { + spin_lock_irq(&se_cmd->t_state_lock); +- se_cmd->transport_state |= CMD_T_FABRIC_STOP; ++ if (se_cmd->transport_state & CMD_T_ABORTED) { ++ /* ++ * LIO's abort path owns the cleanup for this, ++ * so put it back on the list and let ++ * aborted_task handle it. ++ */ ++ list_move_tail(&cmd->i_conn_node, ++ &conn->conn_cmd_list); ++ } else { ++ se_cmd->transport_state |= CMD_T_FABRIC_STOP; ++ } + spin_unlock_irq(&se_cmd->t_state_lock); + } + } +diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c +index 985e600908e0..82a414432fd4 100644 +--- a/drivers/target/iscsi/iscsi_target_login.c ++++ b/drivers/target/iscsi/iscsi_target_login.c +@@ -1150,7 +1150,7 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t) + } + + void iscsi_target_login_sess_out(struct iscsi_conn *conn, +- struct iscsi_np *np, bool zero_tsih, bool new_sess) ++ bool zero_tsih, bool new_sess) + { + if (!new_sess) + goto old_sess_out; +@@ -1172,7 +1172,6 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn, + conn->sess = NULL; + + old_sess_out: +- iscsi_stop_login_thread_timer(np); + /* + * If login negotiation fails check if the Time2Retain timer + * needs to be restarted. +@@ -1432,8 +1431,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) + new_sess_out: + new_sess = true; + old_sess_out: ++ iscsi_stop_login_thread_timer(np); + tpg_np = conn->tpg_np; +- iscsi_target_login_sess_out(conn, np, zero_tsih, new_sess); ++ iscsi_target_login_sess_out(conn, zero_tsih, new_sess); + new_sess = false; + + if (tpg) { +diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h +index b597aa2c61a1..e9daabbb4f54 100644 +--- a/drivers/target/iscsi/iscsi_target_login.h ++++ b/drivers/target/iscsi/iscsi_target_login.h +@@ -14,8 +14,7 @@ extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); + extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); + extern int iscsit_start_kthreads(struct iscsi_conn *); + extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); +-extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, +- bool, bool); ++extern void iscsi_target_login_sess_out(struct iscsi_conn *, bool, bool); + extern int iscsi_target_login_thread(void *); + + #endif /*** ISCSI_TARGET_LOGIN_H ***/ +diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c +index e8efb4299a95..26b8828101ea 100644 +--- a/drivers/target/iscsi/iscsi_target_nego.c ++++ b/drivers/target/iscsi/iscsi_target_nego.c +@@ -548,12 +548,11 @@ static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned in + + static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) + { +- struct iscsi_np *np = login->np; + bool zero_tsih = login->zero_tsih; + + iscsi_remove_failed_auth_entry(conn); + iscsi_target_nego_release(conn); +- iscsi_target_login_sess_out(conn, np, zero_tsih, true); ++ iscsi_target_login_sess_out(conn, zero_tsih, true); + } + + static void iscsi_target_login_timeout(unsigned long data) +diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c +index ef1c8c158f66..089ba39f76a2 100644 +--- a/drivers/target/target_core_pscsi.c ++++ b/drivers/target/target_core_pscsi.c +@@ -629,8 +629,9 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg, + 
unsigned char *buf; + + buf = transport_kmap_data_sg(cmd); +- if (!buf) ++ if (!buf) { + ; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */ ++ } + + if (cdb[0] == MODE_SENSE_10) { + if (!(buf[3] & 0x80)) +@@ -951,6 +952,14 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, + + return 0; + fail: ++ if (bio) ++ bio_put(bio); ++ while (req->bio) { ++ bio = req->bio; ++ req->bio = bio->bi_next; ++ bio_put(bio); ++ } ++ req->biotail = NULL; + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + +diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c +index b3b1461ec60d..6a5a089fd13e 100644 +--- a/drivers/target/target_core_sbc.c ++++ b/drivers/target/target_core_sbc.c +@@ -37,7 +37,7 @@ + #include "target_core_alua.h" + + static sense_reason_t +-sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool); ++sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char, u32, bool); + static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd); + + static sense_reason_t +@@ -319,14 +319,14 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) + } + + static sense_reason_t +-sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops) ++sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops) + { + struct se_device *dev = cmd->se_dev; + sector_t end_lba = dev->transport->get_blocks(dev) + 1; + unsigned int sectors = sbc_get_write_same_sectors(cmd); + sense_reason_t ret; + +- if ((flags[0] & 0x04) || (flags[0] & 0x02)) { ++ if ((flags & 0x04) || (flags & 0x02)) { + pr_err("WRITE_SAME PBDATA and LBDATA" + " bits not supported for Block Discard" + " Emulation\n"); +@@ -348,7 +348,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o + } + + /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ +- if (flags[0] & 0x10) { ++ if (flags & 0x10) { + pr_warn("WRITE SAME with ANCHOR not supported\n"); + return TCM_INVALID_CDB_FIELD; + } +@@ -356,7 +356,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o + * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting + * translated into block discard requests within backend code. 
+ */ +- if (flags[0] & 0x08) { ++ if (flags & 0x08) { + if (!ops->execute_unmap) + return TCM_UNSUPPORTED_SCSI_OPCODE; + +@@ -371,7 +371,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o + if (!ops->execute_write_same) + return TCM_UNSUPPORTED_SCSI_OPCODE; + +- ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true); ++ ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true); + if (ret) + return ret; + +@@ -729,10 +729,9 @@ sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_ + } + + static sense_reason_t +-sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, ++sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect, + u32 sectors, bool is_write) + { +- u8 protect = cdb[1] >> 5; + int sp_ops = cmd->se_sess->sup_prot_ops; + int pi_prot_type = dev->dev_attrib.pi_prot_type; + bool fabric_prot = false; +@@ -780,7 +779,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, + /* Fallthrough */ + default: + pr_err("Unable to determine pi_prot_type for CDB: 0x%02x " +- "PROTECT: 0x%02x\n", cdb[0], protect); ++ "PROTECT: 0x%02x\n", cmd->t_task_cdb[0], protect); + return TCM_INVALID_CDB_FIELD; + } + +@@ -855,7 +854,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + +- ret = sbc_check_prot(dev, cmd, cdb, sectors, false); ++ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); + if (ret) + return ret; + +@@ -869,7 +868,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + +- ret = sbc_check_prot(dev, cmd, cdb, sectors, false); ++ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); + if (ret) + return ret; + +@@ -883,7 +882,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + +- ret = sbc_check_prot(dev, cmd, cdb, sectors, false); ++ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); + if (ret) + return ret; + +@@ -904,7 +903,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + +- ret = sbc_check_prot(dev, cmd, cdb, sectors, true); ++ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); + if (ret) + return ret; + +@@ -918,7 +917,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + +- ret = sbc_check_prot(dev, cmd, cdb, sectors, true); ++ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); + if (ret) + return ret; + +@@ -932,7 +931,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + +- ret = sbc_check_prot(dev, cmd, cdb, sectors, true); ++ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); + if (ret) + return ret; + +@@ -991,7 +990,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) + size = sbc_get_size(cmd, 1); + cmd->t_task_lba = get_unaligned_be64(&cdb[12]); + +- ret = sbc_setup_write_same(cmd, &cdb[10], ops); ++ ret = sbc_setup_write_same(cmd, cdb[10], ops); + if (ret) + return ret; + break; +@@ -1084,7 +1083,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) + size = sbc_get_size(cmd, 1); + cmd->t_task_lba = get_unaligned_be64(&cdb[2]); + +- ret = sbc_setup_write_same(cmd, &cdb[1], ops); ++ ret = 
sbc_setup_write_same(cmd, cdb[1], ops); + if (ret) + return ret; + break; +@@ -1102,7 +1101,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) + * Follow sbcr26 with WRITE_SAME (10) and check for the existence + * of byte 1 bit 3 UNMAP instead of original reserved field + */ +- ret = sbc_setup_write_same(cmd, &cdb[1], ops); ++ ret = sbc_setup_write_same(cmd, cdb[1], ops); + if (ret) + return ret; + break; +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index e738b4621cbb..6afb65387be6 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -1736,6 +1736,10 @@ void transport_generic_request_failure(struct se_cmd *cmd, + case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: + case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: + case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: ++ case TCM_TOO_MANY_TARGET_DESCS: ++ case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE: ++ case TCM_TOO_MANY_SEGMENT_DESCS: ++ case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE: + break; + case TCM_OUT_OF_RESOURCES: + sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +@@ -2775,9 +2779,7 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, + __releases(&cmd->t_state_lock) + __acquires(&cmd->t_state_lock) + { +- +- assert_spin_locked(&cmd->t_state_lock); +- WARN_ON_ONCE(!irqs_disabled()); ++ lockdep_assert_held(&cmd->t_state_lock); + + if (fabric_stop) + cmd->transport_state |= CMD_T_FABRIC_STOP; +@@ -2886,6 +2888,26 @@ static const struct sense_info sense_info_table[] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ + }, ++ [TCM_TOO_MANY_TARGET_DESCS] = { ++ .key = ILLEGAL_REQUEST, ++ .asc = 0x26, ++ .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */ ++ }, ++ [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = { ++ .key = ILLEGAL_REQUEST, ++ .asc = 0x26, ++ .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */ ++ }, ++ [TCM_TOO_MANY_SEGMENT_DESCS] = { ++ .key = ILLEGAL_REQUEST, ++ .asc = 0x26, ++ .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */ ++ }, ++ [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = { ++ .key = ILLEGAL_REQUEST, ++ .asc = 0x26, ++ .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */ ++ }, + [TCM_PARAMETER_LIST_LENGTH_ERROR] = { + .key = ILLEGAL_REQUEST, + .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ +diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c +index 1a83456a65a0..693fbb285840 100644 +--- a/drivers/target/target_core_user.c ++++ b/drivers/target/target_core_user.c +@@ -666,7 +666,14 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) + struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; + struct tcmu_cmd *cmd; + +- tcmu_flush_dcache_range(entry, sizeof(*entry)); ++ /* ++ * Flush max. up to end of cmd ring since current entry might ++ * be a padding that is shorter than sizeof(*entry) ++ */ ++ size_t ring_left = head_to_end(udev->cmdr_last_cleaned, ++ udev->cmdr_size); ++ tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ? 
++ ring_left : sizeof(*entry)); + + if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) { + UPDATE_HEAD(udev->cmdr_last_cleaned, +diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c +index 18848ba8d2ba..84e3bf1132fd 100644 +--- a/drivers/target/target_core_xcopy.c ++++ b/drivers/target/target_core_xcopy.c +@@ -52,64 +52,87 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) + return 0; + } + +-static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, +- bool src) ++/** ++ * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers ++ * ++ * @se_dev: device being considered for match ++ * @dev_wwn: XCOPY requested NAA dev_wwn ++ * @return: 1 on match, 0 on no-match ++ */ ++static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev, ++ const unsigned char *dev_wwn) + { +- struct se_device *se_dev; +- unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn; ++ unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; + int rc; + +- if (src) +- dev_wwn = &xop->dst_tid_wwn[0]; +- else +- dev_wwn = &xop->src_tid_wwn[0]; +- +- mutex_lock(&g_device_mutex); +- list_for_each_entry(se_dev, &g_device_list, g_dev_node) { +- +- if (!se_dev->dev_attrib.emulate_3pc) +- continue; ++ if (!se_dev->dev_attrib.emulate_3pc) { ++ pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev); ++ return 0; ++ } + +- memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); +- target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); ++ memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); ++ target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); + +- rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); +- if (rc != 0) +- continue; ++ rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); ++ if (rc != 0) { ++ pr_debug("XCOPY: skip non-matching: %*ph\n", ++ XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn); ++ return 0; ++ } ++ pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev); + +- if (src) { +- xop->dst_dev = se_dev; +- pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located" +- " se_dev\n", xop->dst_dev); +- } else { +- xop->src_dev = se_dev; +- pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located" +- " se_dev\n", xop->src_dev); +- } ++ return 1; ++} + +- rc = target_depend_item(&se_dev->dev_group.cg_item); +- if (rc != 0) { +- pr_err("configfs_depend_item attempt failed:" +- " %d for se_dev: %p\n", rc, se_dev); +- mutex_unlock(&g_device_mutex); +- return rc; ++static int target_xcopy_locate_se_dev_e4(struct se_session *sess, ++ const unsigned char *dev_wwn, ++ struct se_device **_found_dev, ++ struct percpu_ref **_found_lun_ref) ++{ ++ struct se_dev_entry *deve; ++ struct se_node_acl *nacl; ++ struct se_lun *this_lun = NULL; ++ struct se_device *found_dev = NULL; ++ ++ /* cmd with NULL sess indicates no associated $FABRIC_MOD */ ++ if (!sess) ++ goto err_out; ++ ++ pr_debug("XCOPY 0xe4: searching for: %*ph\n", ++ XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn); ++ ++ nacl = sess->se_node_acl; ++ rcu_read_lock(); ++ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) { ++ struct se_device *this_dev; ++ int rc; ++ ++ this_lun = rcu_dereference(deve->se_lun); ++ this_dev = rcu_dereference_raw(this_lun->lun_se_dev); ++ ++ rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn); ++ if (rc) { ++ if (percpu_ref_tryget_live(&this_lun->lun_ref)) ++ found_dev = this_dev; ++ break; + } +- +- pr_debug("Called configfs_depend_item for se_dev: %p" +- " se_dev->se_dev_group: %p\n", se_dev, +- 
&se_dev->dev_group); +- +- mutex_unlock(&g_device_mutex); +- return 0; + } +- mutex_unlock(&g_device_mutex); +- ++ rcu_read_unlock(); ++ if (found_dev == NULL) ++ goto err_out; ++ ++ pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n", ++ found_dev, &found_dev->dev_group); ++ *_found_dev = found_dev; ++ *_found_lun_ref = &this_lun->lun_ref; ++ return 0; ++err_out: + pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); + return -EINVAL; + } + + static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, +- unsigned char *p, bool src) ++ unsigned char *p, unsigned short cscd_index) + { + unsigned char *desc = p; + unsigned short ript; +@@ -154,7 +177,13 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op + return -EINVAL; + } + +- if (src) { ++ if (cscd_index != xop->stdi && cscd_index != xop->dtdi) { ++ pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor " ++ "dest\n", cscd_index); ++ return 0; ++ } ++ ++ if (cscd_index == xop->stdi) { + memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); + /* + * Determine if the source designator matches the local device +@@ -166,10 +195,15 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op + pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source" + " received xop\n", xop->src_dev); + } +- } else { ++ } ++ ++ if (cscd_index == xop->dtdi) { + memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); + /* +- * Determine if the destination designator matches the local device ++ * Determine if the destination designator matches the local ++ * device. If @cscd_index corresponds to both source (stdi) and ++ * destination (dtdi), or dtdi comes after stdi, then ++ * XCOL_DEST_RECV_OP wins. + */ + if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0], + XCOPY_NAA_IEEE_REGEX_LEN)) { +@@ -189,9 +223,9 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, + { + struct se_device *local_dev = se_cmd->se_dev; + unsigned char *desc = p; +- int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0; ++ int offset = tdll % XCOPY_TARGET_DESC_LEN, rc; ++ unsigned short cscd_index = 0; + unsigned short start = 0; +- bool src = true; + + *sense_ret = TCM_INVALID_PARAMETER_LIST; + +@@ -214,25 +248,19 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, + + while (start < tdll) { + /* +- * Check target descriptor identification with 0xE4 type with +- * use VPD 0x83 WWPN matching .. ++ * Check target descriptor identification with 0xE4 type, and ++ * compare the current index with the CSCD descriptor IDs in ++ * the segment descriptor. Use VPD 0x83 WWPN matching .. + */ + switch (desc[0]) { + case 0xe4: + rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop, +- &desc[0], src); ++ &desc[0], cscd_index); + if (rc != 0) + goto out; +- /* +- * Assume target descriptors are in source -> destination order.. 
+- */ +- if (src) +- src = false; +- else +- src = true; + start += XCOPY_TARGET_DESC_LEN; + desc += XCOPY_TARGET_DESC_LEN; +- ret++; ++ cscd_index++; + break; + default: + pr_err("XCOPY unsupported descriptor type code:" +@@ -241,10 +269,25 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, + } + } + +- if (xop->op_origin == XCOL_SOURCE_RECV_OP) +- rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true); +- else +- rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false); ++ switch (xop->op_origin) { ++ case XCOL_SOURCE_RECV_OP: ++ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess, ++ xop->dst_tid_wwn, ++ &xop->dst_dev, ++ &xop->remote_lun_ref); ++ break; ++ case XCOL_DEST_RECV_OP: ++ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess, ++ xop->src_tid_wwn, ++ &xop->src_dev, ++ &xop->remote_lun_ref); ++ break; ++ default: ++ pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - " ++ "stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi); ++ rc = -EINVAL; ++ break; ++ } + /* + * If a matching IEEE NAA 0x83 descriptor for the requested device + * is not located on this node, return COPY_ABORTED with ASQ/ASQC +@@ -261,7 +304,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, + pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n", + xop->dst_dev, &xop->dst_tid_wwn[0]); + +- return ret; ++ return cscd_index; + + out: + return -EINVAL; +@@ -305,17 +348,26 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op + + static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd, + struct xcopy_op *xop, unsigned char *p, +- unsigned int sdll) ++ unsigned int sdll, sense_reason_t *sense_ret) + { + unsigned char *desc = p; + unsigned int start = 0; + int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0; + ++ *sense_ret = TCM_INVALID_PARAMETER_LIST; ++ + if (offset != 0) { + pr_err("XCOPY segment descriptor list length is not" + " multiple of %d\n", XCOPY_SEGMENT_DESC_LEN); + return -EINVAL; + } ++ if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) { ++ pr_err("XCOPY supports %u segment descriptor(s), sdll: %u too" ++ " large..\n", RCR_OP_MAX_SG_DESC_COUNT, sdll); ++ /* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */ ++ *sense_ret = TCM_TOO_MANY_SEGMENT_DESCS; ++ return -EINVAL; ++ } + + while (start < sdll) { + /* +@@ -372,18 +424,12 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd) + + static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) + { +- struct se_device *remote_dev; +- + if (xop->op_origin == XCOL_SOURCE_RECV_OP) +- remote_dev = xop->dst_dev; ++ pr_debug("putting dst lun_ref for %p\n", xop->dst_dev); + else +- remote_dev = xop->src_dev; ++ pr_debug("putting src lun_ref for %p\n", xop->src_dev); + +- pr_debug("Calling configfs_undepend_item for" +- " remote_dev: %p remote_dev->dev_group: %p\n", +- remote_dev, &remote_dev->dev_group.cg_item); +- +- target_undepend_item(&remote_dev->dev_group.cg_item); ++ percpu_ref_put(xop->remote_lun_ref); + } + + static void xcopy_pt_release_cmd(struct se_cmd *se_cmd) +@@ -893,6 +939,20 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) + " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, + tdll, sdll, inline_dl); + ++ /* ++ * skip over the target descriptors until segment descriptors ++ * have been passed - CSCD ids are needed to determine src and dest. 
++ */ ++ seg_desc = &p[16] + tdll; ++ ++ rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, ++ sdll, &ret); ++ if (rc <= 0) ++ goto out; ++ ++ pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc, ++ rc * XCOPY_SEGMENT_DESC_LEN); ++ + rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret); + if (rc <= 0) + goto out; +@@ -910,18 +970,8 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) + + pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, + rc * XCOPY_TARGET_DESC_LEN); +- seg_desc = &p[16]; +- seg_desc += (rc * XCOPY_TARGET_DESC_LEN); +- +- rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll); +- if (rc <= 0) { +- xcopy_pt_undepend_remotedev(xop); +- goto out; +- } + transport_kunmap_data_sg(se_cmd); + +- pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc, +- rc * XCOPY_SEGMENT_DESC_LEN); + INIT_WORK(&xop->xop_work, target_xcopy_do_work); + queue_work(xcopy_wq, &xop->xop_work); + return TCM_NO_SENSE; +diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h +index 700a981c7b41..7db8d0c9223f 100644 +--- a/drivers/target/target_core_xcopy.h ++++ b/drivers/target/target_core_xcopy.h +@@ -19,6 +19,7 @@ struct xcopy_op { + struct se_device *dst_dev; + unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; + unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; ++ struct percpu_ref *remote_lun_ref; + + sector_t src_lba; + sector_t dst_lba; +diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c +index ed2d71c3337d..cdb52b1ba583 100644 +--- a/drivers/tee/tee_shm.c ++++ b/drivers/tee/tee_shm.c +@@ -360,9 +360,10 @@ int tee_shm_get_fd(struct tee_shm *shm) + if (!(shm->flags & TEE_SHM_DMA_BUF)) + return -EINVAL; + ++ get_dma_buf(shm->dmabuf); + fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC); +- if (fd >= 0) +- get_dma_buf(shm->dmabuf); ++ if (fd < 0) ++ dma_buf_put(shm->dmabuf); + return fd; + } + +diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c +index f2011f6324dc..ac9c6773d352 100644 +--- a/drivers/thermal/fair_share.c ++++ b/drivers/thermal/fair_share.c +@@ -93,6 +93,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip) + int total_instance = 0; + int cur_trip_level = get_trip_level(tz); + ++ mutex_lock(&tz->lock); ++ + list_for_each_entry(instance, &tz->thermal_instances, tz_node) { + if (instance->trip != trip) + continue; +@@ -121,6 +123,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip) + mutex_unlock(&instance->cdev->lock); + thermal_cdev_update(cdev); + } ++ ++ mutex_unlock(&tz->lock); + return 0; + } + +diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c +index 2d855a96cdd9..761d0559c268 100644 +--- a/drivers/thermal/hisi_thermal.c ++++ b/drivers/thermal/hisi_thermal.c +@@ -527,7 +527,7 @@ static void hisi_thermal_toggle_sensor(struct hisi_thermal_sensor *sensor, + static int hisi_thermal_probe(struct platform_device *pdev) + { + struct hisi_thermal_data *data; +- int const (*platform_probe)(struct hisi_thermal_data *); ++ int (*platform_probe)(struct hisi_thermal_data *); + struct device *dev = &pdev->dev; + int ret; + +diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c +index ea9558679634..34169c32d495 100644 +--- a/drivers/thermal/mtk_thermal.c ++++ b/drivers/thermal/mtk_thermal.c +@@ -348,8 +348,7 @@ static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank) + u32 raw; + + for (i = 0; i < 
conf->bank_data[bank->id].num_sensors; i++) { +- raw = readl(mt->thermal_base + +- conf->msr[conf->bank_data[bank->id].sensors[i]]); ++ raw = readl(mt->thermal_base + conf->msr[i]); + + temp = raw_to_mcelsius(mt, + conf->bank_data[bank->id].sensors[i], +@@ -486,8 +485,7 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num, + + for (i = 0; i < conf->bank_data[num].num_sensors; i++) + writel(conf->sensor_mux_values[conf->bank_data[num].sensors[i]], +- mt->thermal_base + +- conf->adcpnp[conf->bank_data[num].sensors[i]]); ++ mt->thermal_base + conf->adcpnp[i]); + + writel((1 << conf->bank_data[num].num_sensors) - 1, + mt->thermal_base + TEMP_MONCTL0); +diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c +index 5b6475a8804e..230f43efbf1e 100755 +--- a/drivers/thermal/thermal_core.c ++++ b/drivers/thermal/thermal_core.c +@@ -2327,7 +2327,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, + EXPORT_SYMBOL_GPL(thermal_zone_device_register); + + /** +- * thermal_device_unregister - removes the registered thermal zone device ++ * thermal_zone_device_unregister - removes the registered thermal zone device + * @tz: the thermal zone device to remove + */ + void thermal_zone_device_unregister(struct thermal_zone_device *tz) +diff --git a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c +index d255d33da9eb..02e71d461d5c 100644 +--- a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c ++++ b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c +@@ -49,20 +49,21 @@ static struct temp_sensor_data omap4430_mpu_temp_sensor_data = { + + /* + * Temperature values in milli degree celsius +- * ADC code values from 530 to 923 ++ * ADC code values from 13 to 107, see TRM ++ * "18.4.10.2.3 ADC Codes Versus Temperature". 
+ */ + static const int + omap4430_adc_to_temp[OMAP4430_ADC_END_VALUE - OMAP4430_ADC_START_VALUE + 1] = { +- -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, -22000, +- -20000, -18000, -17000, -15000, -13000, -12000, -10000, -8000, -6000, +- -5000, -3000, -1000, 0, 2000, 3000, 5000, 6000, 8000, 10000, 12000, +- 13000, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28000, 30000, +- 32000, 33000, 35000, 37000, 38000, 40000, 42000, 43000, 45000, 47000, +- 48000, 50000, 52000, 53000, 55000, 57000, 58000, 60000, 62000, 64000, +- 66000, 68000, 70000, 71000, 73000, 75000, 77000, 78000, 80000, 82000, +- 83000, 85000, 87000, 88000, 90000, 92000, 93000, 95000, 97000, 98000, +- 100000, 102000, 103000, 105000, 107000, 109000, 111000, 113000, 115000, +- 117000, 118000, 120000, 122000, 123000, ++ -40000, -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, ++ -22000, -20000, -18500, -17000, -15000, -13500, -12000, -10000, -8000, ++ -6500, -5000, -3500, -1500, 0, 2000, 3500, 5000, 6500, 8500, 10000, ++ 12000, 13500, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28500, ++ 30000, 32000, 33500, 35000, 37000, 38500, 40000, 42000, 43500, 45000, ++ 47000, 48500, 50000, 52000, 53500, 55000, 57000, 58500, 60000, 62000, ++ 64000, 66000, 68000, 70000, 71500, 73500, 75000, 77000, 78500, 80000, ++ 82000, 83500, 85000, 87000, 88500, 90000, 92000, 93500, 95000, 97000, ++ 98500, 100000, 102000, 103500, 105000, 107000, 109000, 111000, 113000, ++ 115000, 117000, 118500, 120000, 122000, 123500, 125000, + }; + + /* OMAP4430 data */ +diff --git a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h +index 6f2de3a3356d..86850082b24b 100644 +--- a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h ++++ b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h +@@ -67,9 +67,13 @@ + * and thresholds for OMAP4430. + */ + +-/* ADC conversion table limits */ +-#define OMAP4430_ADC_START_VALUE 0 +-#define OMAP4430_ADC_END_VALUE 127 ++/* ++ * ADC conversion table limits. Ignore values outside the TRM listed ++ * range to avoid bogus thermal shutdowns. See omap4430 TRM chapter ++ * "18.4.10.2.3 ADC Codes Versus Temperature". ++ */ ++#define OMAP4430_ADC_START_VALUE 13 ++#define OMAP4430_ADC_END_VALUE 107 + /* bandgap clock limits (no control on 4430) */ + #define OMAP4430_MAX_FREQ 32768 + #define OMAP4430_MIN_FREQ 32768 +diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c +index 3c4d7c2b4ade..de05196738da 100644 +--- a/drivers/tty/hvc/hvcs.c ++++ b/drivers/tty/hvc/hvcs.c +@@ -1232,13 +1232,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp) + + tty_wait_until_sent(tty, HVCS_CLOSE_WAIT); + +- /* +- * This line is important because it tells hvcs_open that this +- * device needs to be re-configured the next time hvcs_open is +- * called. +- */ +- tty->driver_data = NULL; +- + free_irq(irq, hvcsd); + return; + } else if (hvcsd->port.count < 0) { +@@ -1254,6 +1247,13 @@ static void hvcs_cleanup(struct tty_struct * tty) + { + struct hvcs_struct *hvcsd = tty->driver_data; + ++ /* ++ * This line is important because it tells hvcs_open that this ++ * device needs to be re-configured the next time hvcs_open is ++ * called. 
++ */ ++ tty->driver_data = NULL; ++ + tty_port_put(&hvcsd->port); + } + +diff --git a/drivers/tty/ipwireless/network.c b/drivers/tty/ipwireless/network.c +index c0dfb642383b..dc7f4eb18e0a 100644 +--- a/drivers/tty/ipwireless/network.c ++++ b/drivers/tty/ipwireless/network.c +@@ -116,7 +116,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel, + skb->len, + notify_packet_sent, + network); +- if (ret == -1) { ++ if (ret < 0) { + skb_pull(skb, 2); + return 0; + } +@@ -133,7 +133,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel, + notify_packet_sent, + network); + kfree(buf); +- if (ret == -1) ++ if (ret < 0) + return 0; + } + kfree_skb(skb); +diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c +index 2685d59d2724..4f9690442507 100644 +--- a/drivers/tty/ipwireless/tty.c ++++ b/drivers/tty/ipwireless/tty.c +@@ -217,7 +217,7 @@ static int ipw_write(struct tty_struct *linux_tty, + ret = ipwireless_send_packet(tty->hardware, IPW_CHANNEL_RAS, + buf, count, + ipw_write_packet_sent_callback, tty); +- if (ret == -1) { ++ if (ret < 0) { + mutex_unlock(&tty->ipw_tty_mutex); + return 0; + } +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c +index 9e9016e67843..1ab9bd433542 100644 +--- a/drivers/tty/n_gsm.c ++++ b/drivers/tty/n_gsm.c +@@ -681,11 +681,10 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len, + * FIXME: lock against link layer control transmissions + */ + +-static void gsm_data_kick(struct gsm_mux *gsm) ++static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci) + { + struct gsm_msg *msg, *nmsg; + int len; +- int skip_sof = 0; + + list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) { + if (gsm->constipated && msg->addr) +@@ -707,18 +706,23 @@ static void gsm_data_kick(struct gsm_mux *gsm) + print_hex_dump_bytes("gsm_data_kick: ", + DUMP_PREFIX_OFFSET, + gsm->txframe, len); +- +- if (gsm->output(gsm, gsm->txframe + skip_sof, +- len - skip_sof) < 0) ++ if (gsm->output(gsm, gsm->txframe, len) < 0) + break; + /* FIXME: Can eliminate one SOF in many more cases */ + gsm->tx_bytes -= msg->len; +- /* For a burst of frames skip the extra SOF within the +- burst */ +- skip_sof = 1; + + list_del(&msg->list); + kfree(msg); ++ ++ if (dlci) { ++ tty_port_tty_wakeup(&dlci->port); ++ } else { ++ int i = 0; ++ ++ for (i = 0; i < NUM_DLCI; i++) ++ if (gsm->dlci[i]) ++ tty_port_tty_wakeup(&gsm->dlci[i]->port); ++ } + } + } + +@@ -770,7 +774,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) + /* Add to the actual output queue */ + list_add_tail(&msg->list, &gsm->tx_list); + gsm->tx_bytes += msg->len; +- gsm_data_kick(gsm); ++ gsm_data_kick(gsm, dlci); + } + + /** +@@ -1231,7 +1235,7 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command, + gsm_control_reply(gsm, CMD_FCON, NULL, 0); + /* Kick the link in case it is idling */ + spin_lock_irqsave(&gsm->tx_lock, flags); +- gsm_data_kick(gsm); ++ gsm_data_kick(gsm, NULL); + spin_unlock_irqrestore(&gsm->tx_lock, flags); + break; + case CMD_FCOFF: +@@ -2414,7 +2418,7 @@ static void gsmld_write_wakeup(struct tty_struct *tty) + /* Queue poll */ + clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); + spin_lock_irqsave(&gsm->tx_lock, flags); +- gsm_data_kick(gsm); ++ gsm_data_kick(gsm, NULL); + if (gsm->tx_bytes < TX_THRESH_LO) { + gsm_dlci_data_sweep(gsm); + } +diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c +index d6fd0e802ef5..37c7cea8c34f 100644 +--- a/drivers/tty/nozomi.c ++++ b/drivers/tty/nozomi.c 
+@@ -1437,7 +1437,7 @@ static int nozomi_card_init(struct pci_dev *pdev, + NOZOMI_NAME, dc); + if (unlikely(ret)) { + dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq); +- goto err_free_kfifo; ++ goto err_free_all_kfifo; + } + + DBG1("base_addr: %p", dc->base_addr); +@@ -1475,12 +1475,15 @@ static int nozomi_card_init(struct pci_dev *pdev, + return 0; + + err_free_tty: +- for (i = 0; i < MAX_PORT; ++i) { ++ for (i--; i >= 0; i--) { + tty_unregister_device(ntty_driver, dc->index_start + i); + tty_port_destroy(&dc->port[i].port); + } ++ free_irq(pdev->irq, dc); ++err_free_all_kfifo: ++ i = MAX_PORT; + err_free_kfifo: +- for (i = 0; i < MAX_PORT; i++) ++ for (i--; i >= PORT_MDM; i--) + kfifo_free(&dc->port[i].fifo_ul); + err_free_sbuf: + kfree(dc->send_buf); +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c +index 171130a9ecc8..8a063a036bc0 100644 +--- a/drivers/tty/pty.c ++++ b/drivers/tty/pty.c +@@ -115,10 +115,10 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c) + spin_lock_irqsave(&to->port->lock, flags); + /* Stuff the data into the input queue of the other end */ + c = tty_insert_flip_string(to->port, buf, c); ++ spin_unlock_irqrestore(&to->port->lock, flags); + /* And shovel */ + if (c) + tty_flip_buffer_push(to->port); +- spin_unlock_irqrestore(&to->port->lock, flags); + } + return c; + } +diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c +index c4e9eba36023..8dbfd4ffd635 100644 +--- a/drivers/tty/serial/8250/8250_core.c ++++ b/drivers/tty/serial/8250/8250_core.c +@@ -529,6 +529,7 @@ static void __init serial8250_isa_init_ports(void) + */ + up->mcr_mask = ~ALPHA_KLUDGE_MCR; + up->mcr_force = ALPHA_KLUDGE_MCR; ++ serial8250_set_defaults(up); + } + + /* chain base port ops to support Remote Supervisor Adapter */ +@@ -552,7 +553,6 @@ static void __init serial8250_isa_init_ports(void) + port->membase = old_serial_port[i].iomem_base; + port->iotype = old_serial_port[i].io_type; + port->regshift = old_serial_port[i].iomem_reg_shift; +- serial8250_set_defaults(up); + + port->irqflags |= irqflag; + if (serial8250_isa_config != NULL) +@@ -1046,8 +1046,10 @@ int serial8250_register_8250_port(struct uart_8250_port *up) + + ret = uart_add_one_port(&serial8250_reg, + &uart->port); +- if (ret == 0) +- ret = uart->port.line; ++ if (ret) ++ goto err; ++ ++ ret = uart->port.line; + } else { + dev_info(uart->port.dev, + "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n", +@@ -1061,6 +1063,11 @@ int serial8250_register_8250_port(struct uart_8250_port *up) + mutex_unlock(&serial_mutex); + + return ret; ++ ++err: ++ uart->port.dev = NULL; ++ mutex_unlock(&serial_mutex); ++ return ret; + } + EXPORT_SYMBOL(serial8250_register_8250_port); + +diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c +index 91db9ca1c6c9..d7421f0f857b 100644 +--- a/drivers/tty/serial/8250/8250_mtk.c ++++ b/drivers/tty/serial/8250/8250_mtk.c +@@ -45,7 +45,20 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, + unsigned long flags; + unsigned int baud, quot; + +- serial8250_do_set_termios(port, termios, old); ++ /* ++ * Store the requested baud rate before calling the generic 8250 ++ * set_termios method. Standard 8250 port expects bauds to be ++ * no higher than (uartclk / 16) so the baud will be clamped if it ++ * gets out of that bound. 
Mediatek 8250 port supports speed ++ * higher than that, therefore we'll get original baud rate back ++ * after calling the generic set_termios method and recalculate ++ * the speed later in this method. ++ */ ++ baud = tty_termios_baud_rate(termios); ++ ++ serial8250_do_set_termios(port, termios, NULL); ++ ++ tty_termios_encode_baud_rate(termios, baud, baud); + + /* + * Mediatek UARTs use an extra highspeed register (UART_MTK_HIGHS) +@@ -85,6 +98,11 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, + */ + spin_lock_irqsave(&port->lock, flags); + ++ /* ++ * Update the per-port timeout. ++ */ ++ uart_update_timeout(port, termios->c_cflag, baud); ++ + /* set DLAB we have cval saved in up->lcr from the call to the core */ + serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB); + serial_dl_write(up, quot); +diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c +index a3adf21f9dce..c551407bee07 100644 +--- a/drivers/tty/serial/8250/8250_omap.c ++++ b/drivers/tty/serial/8250/8250_omap.c +@@ -161,11 +161,6 @@ static void omap_8250_mdr1_errataset(struct uart_8250_port *up, + struct omap8250_priv *priv) + { + u8 timeout = 255; +- u8 old_mdr1; +- +- old_mdr1 = serial_in(up, UART_OMAP_MDR1); +- if (old_mdr1 == priv->mdr1) +- return; + + serial_out(up, UART_OMAP_MDR1, priv->mdr1); + udelay(2); +@@ -773,7 +768,10 @@ static void __dma_rx_do_complete(struct uart_8250_port *p) + dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state); + + count = dma->rx_size - state.residue; +- ++ if (count < dma->rx_size) ++ dmaengine_terminate_async(dma->rxchan); ++ if (!count) ++ goto unlock; + ret = tty_insert_flip_string(tty_port, dma->rx_buf, count); + + p->port.icount.rx += ret; +@@ -811,7 +809,6 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p) + spin_unlock_irqrestore(&priv->rx_dma_lock, flags); + + __dma_rx_do_complete(p); +- dmaengine_terminate_all(dma->rxchan); + } + + static int omap_8250_rx_dma(struct uart_8250_port *p) +@@ -1194,11 +1191,11 @@ static int omap8250_probe(struct platform_device *pdev) + spin_lock_init(&priv->rx_dma_lock); + + device_init_wakeup(&pdev->dev, true); ++ pm_runtime_enable(&pdev->dev); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, -1); + + pm_runtime_irq_safe(&pdev->dev); +- pm_runtime_enable(&pdev->dev); + + pm_runtime_get_sync(&pdev->dev); + +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c +index 2c38b3a1d518..a9c46e10d204 100644 +--- a/drivers/tty/serial/8250/8250_pci.c ++++ b/drivers/tty/serial/8250/8250_pci.c +@@ -5709,6 +5709,17 @@ static struct pci_device_id serial_pci_tbl[] = { + 0, + 0, pbn_exar_XR17V358 }, + ++ /* ++ * Realtek RealManage ++ */ ++ { PCI_VENDOR_ID_REALTEK, 0x816a, ++ PCI_ANY_ID, PCI_ANY_ID, ++ 0, 0, pbn_b0_1_115200 }, ++ ++ { PCI_VENDOR_ID_REALTEK, 0x816b, ++ PCI_ANY_ID, PCI_ANY_ID, ++ 0, 0, pbn_b0_1_115200 }, ++ + /* Fintek PCI serial cards */ + { PCI_DEVICE(0x1c29, 0x1104), .driver_data = pbn_fintek_4 }, + { PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 }, +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c +index c7a7574172fa..611bc0556571 100644 +--- a/drivers/tty/serial/8250/8250_port.c ++++ b/drivers/tty/serial/8250/8250_port.c +@@ -277,7 +277,11 @@ static const struct serial8250_config uart_config[] = { + /* Uart divisor latch read */ + static int default_serial_dl_read(struct uart_8250_port *up) + { +- return serial_in(up, UART_DLL) | serial_in(up, 
UART_DLM) << 8; ++ /* Assign these in pieces to truncate any bits above 7. */ ++ unsigned char dll = serial_in(up, UART_DLL); ++ unsigned char dlm = serial_in(up, UART_DLM); ++ ++ return dll | dlm << 8; + } + + /* Uart divisor latch write */ +@@ -1262,9 +1266,11 @@ static void autoconfig(struct uart_8250_port *up) + serial_out(up, UART_LCR, 0); + + serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO); +- scratch = serial_in(up, UART_IIR) >> 6; + +- switch (scratch) { ++ /* Assign this as it is to truncate any bits above 7. */ ++ scratch = serial_in(up, UART_IIR); ++ ++ switch (scratch >> 6) { + case 0: + autoconfig_8250(up); + break; +@@ -1806,6 +1812,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) + unsigned char status; + unsigned long flags; + struct uart_8250_port *up = up_to_u8250p(port); ++ bool skip_rx = false; + + if (iir & UART_IIR_NO_INT) + return 0; +@@ -1814,7 +1821,20 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) + + status = serial_port_in(port, UART_LSR); + +- if (status & (UART_LSR_DR | UART_LSR_BI)) { ++ /* ++ * If port is stopped and there are no error conditions in the ++ * FIFO, then don't drain the FIFO, as this may lead to TTY buffer ++ * overflow. Not servicing, RX FIFO would trigger auto HW flow ++ * control when FIFO occupancy reaches preset threshold, thus ++ * halting RX. This only works when auto HW flow control is ++ * available. ++ */ ++ if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) && ++ (port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) && ++ !(port->read_status_mask & UART_LSR_DR)) ++ skip_rx = true; ++ ++ if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) { + if (!up->dma || handle_rx_dma(up, iir)) + status = serial8250_rx_chars(up, status); + } +@@ -2205,6 +2225,10 @@ int serial8250_do_startup(struct uart_port *port) + + if (port->irq) { + unsigned char iir1; ++ ++ if (port->irqflags & IRQF_SHARED) ++ disable_irq_nosync(port->irq); ++ + /* + * Test for UARTs that do not reassert THRE when the + * transmitter is idle and the interrupt has already +@@ -2214,8 +2238,6 @@ int serial8250_do_startup(struct uart_port *port) + * allow register changes to become visible. 
+ */ + spin_lock_irqsave(&port->lock, flags); +- if (up->port.irqflags & IRQF_SHARED) +- disable_irq_nosync(port->irq); + + wait_for_xmitr(up, UART_LSR_THRE); + serial_port_out_sync(port, UART_IER, UART_IER_THRI); +@@ -2227,9 +2249,10 @@ int serial8250_do_startup(struct uart_port *port) + iir = serial_port_in(port, UART_IIR); + serial_port_out(port, UART_IER, 0); + ++ spin_unlock_irqrestore(&port->lock, flags); ++ + if (port->irqflags & IRQF_SHARED) + enable_irq(port->irq); +- spin_unlock_irqrestore(&port->lock, flags); + + /* + * If the interrupt is not reasserted, or we otherwise +diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c +index 8106353ce7aa..3747991024d5 100644 +--- a/drivers/tty/serial/8250/serial_cs.c ++++ b/drivers/tty/serial/8250/serial_cs.c +@@ -305,6 +305,7 @@ static int serial_resume(struct pcmcia_device *link) + static int serial_probe(struct pcmcia_device *link) + { + struct serial_info *info; ++ int ret; + + dev_dbg(&link->dev, "serial_attach()\n"); + +@@ -319,7 +320,15 @@ static int serial_probe(struct pcmcia_device *link) + if (do_sound) + link->config_flags |= CONF_ENABLE_SPKR; + +- return serial_config(link); ++ ret = serial_config(link); ++ if (ret) ++ goto free_info; ++ ++ return 0; ++ ++free_info: ++ kfree(info); ++ return ret; + } + + static void serial_detach(struct pcmcia_device *link) +@@ -779,6 +788,7 @@ static const struct pcmcia_device_id serial_ids[] = { + PCMCIA_DEVICE_PROD_ID12("Multi-Tech", "MT2834LT", 0x5f73be51, 0x4cd7c09e), + PCMCIA_DEVICE_PROD_ID12("OEM ", "C288MX ", 0xb572d360, 0xd2385b7a), + PCMCIA_DEVICE_PROD_ID12("Option International", "V34bis GSM/PSTN Data/Fax Modem", 0x9d7cd6f5, 0x5cb8bf41), ++ PCMCIA_DEVICE_PROD_ID12("Option International", "GSM-Ready 56K/ISDN", 0x9d7cd6f5, 0xb23844aa), + PCMCIA_DEVICE_PROD_ID12("PCMCIA ", "C336MX ", 0x99bcafe9, 0xaa25bcab), + PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "PCMCIA Dual RS-232 Serial Port Card", 0xc4420b35, 0x92abc92f), + PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "Dual RS-232 Serial Port PC Card", 0xc4420b35, 0x031a380d), +@@ -806,7 +816,6 @@ static const struct pcmcia_device_id serial_ids[] = { + PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"), + PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"), + PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"), +- PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "cis/GLOBETROTTER.cis"), + PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL100 1.00.", 0x19ca78af, 0xf964f42b), + PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL100", 0x19ca78af, 0x71d98e83), + PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL232 1.00.", 0x19ca78af, 0x69fb7490), +diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig +index 0c5e9ca9d928..f09c327405ac 100644 +--- a/drivers/tty/serial/Kconfig ++++ b/drivers/tty/serial/Kconfig +@@ -9,6 +9,7 @@ menu "Serial drivers" + + config SERIAL_EARLYCON + bool ++ depends on SERIAL_CORE + help + Support for early consoles with the earlycon parameter. This enables + the console before standard serial driver is probed. 
The console is +diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c +index f6586a8681b9..6e27dee3876a 100644 +--- a/drivers/tty/serial/amba-pl011.c ++++ b/drivers/tty/serial/amba-pl011.c +@@ -2249,9 +2249,8 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) + clk_disable(uap->clk); + } + +-static void __init +-pl011_console_get_options(struct uart_amba_port *uap, int *baud, +- int *parity, int *bits) ++static void pl011_console_get_options(struct uart_amba_port *uap, int *baud, ++ int *parity, int *bits) + { + if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) { + unsigned int lcr_h, ibrd, fbrd; +@@ -2284,7 +2283,7 @@ pl011_console_get_options(struct uart_amba_port *uap, int *baud, + } + } + +-static int __init pl011_console_setup(struct console *co, char *options) ++static int pl011_console_setup(struct console *co, char *options) + { + struct uart_amba_port *uap; + int baud = 38400; +@@ -2352,8 +2351,8 @@ static int __init pl011_console_setup(struct console *co, char *options) + * + * Returns 0 if console matches; otherwise non-zero to use default matching + */ +-static int __init pl011_console_match(struct console *co, char *name, int idx, +- char *options) ++static int pl011_console_match(struct console *co, char *name, int idx, ++ char *options) + { + unsigned char iotype; + resource_size_t addr; +@@ -2524,6 +2523,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, + uap->port.fifosize = uap->fifosize; + uap->port.flags = UPF_BOOT_AUTOCONF; + uap->port.line = index; ++ spin_lock_init(&uap->port.lock); + + amba_ports[index] = uap; + +@@ -2532,7 +2532,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, + + static int pl011_register_port(struct uart_amba_port *uap) + { +- int ret; ++ int ret, i; + + /* Ensure interrupts from this UART are masked and cleared */ + pl011_write(0, uap, REG_IMSC); +@@ -2543,6 +2543,9 @@ static int pl011_register_port(struct uart_amba_port *uap) + if (ret < 0) { + dev_err(uap->port.dev, + "Failed to register AMBA-PL011 driver\n"); ++ for (i = 0; i < ARRAY_SIZE(amba_ports); i++) ++ if (amba_ports[i] == uap) ++ amba_ports[i] = NULL; + return ret; + } + } +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c +index 5b6093dc3ff2..a4c1797e30d7 100644 +--- a/drivers/tty/serial/fsl_lpuart.c ++++ b/drivers/tty/serial/fsl_lpuart.c +@@ -1766,6 +1766,9 @@ lpuart32_console_get_options(struct lpuart_port *sport, int *baud, + + bd = lpuart32_read(sport->port.membase + UARTBAUD); + bd &= UARTBAUD_SBR_MASK; ++ if (!bd) ++ return; ++ + sbr = bd; + uartclk = clk_get_rate(sport->clk); + /* +diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c +index 325c38c9b451..a60376496b1a 100644 +--- a/drivers/tty/serial/imx.c ++++ b/drivers/tty/serial/imx.c +@@ -1787,16 +1787,6 @@ imx_console_write(struct console *co, const char *s, unsigned int count) + unsigned int ucr1; + unsigned long flags = 0; + int locked = 1; +- int retval; +- +- retval = clk_enable(sport->clk_per); +- if (retval) +- return; +- retval = clk_enable(sport->clk_ipg); +- if (retval) { +- clk_disable(sport->clk_per); +- return; +- } + + if (sport->port.sysrq) + locked = 0; +@@ -1832,9 +1822,6 @@ imx_console_write(struct console *co, const char *s, unsigned int count) + + if (locked) + spin_unlock_irqrestore(&sport->port.lock, flags); +- +- clk_disable(sport->clk_ipg); +- clk_disable(sport->clk_per); + } + + /* +@@ -1935,15 +1922,14 @@ imx_console_setup(struct console *co, 
char *options) + + retval = uart_set_options(&sport->port, co, baud, parity, bits, flow); + +- clk_disable(sport->clk_ipg); + if (retval) { +- clk_unprepare(sport->clk_ipg); ++ clk_disable_unprepare(sport->clk_ipg); + goto error_console; + } + +- retval = clk_prepare(sport->clk_per); ++ retval = clk_prepare_enable(sport->clk_per); + if (retval) +- clk_unprepare(sport->clk_ipg); ++ clk_disable_unprepare(sport->clk_ipg); + + error_console: + return retval; +diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c +index 80ab672d61cc..febbacecb3ba 100644 +--- a/drivers/tty/serial/max310x.c ++++ b/drivers/tty/serial/max310x.c +@@ -1385,10 +1385,12 @@ static int __init max310x_uart_init(void) + return ret; + + #ifdef CONFIG_SPI_MASTER +- spi_register_driver(&max310x_spi_driver); ++ ret = spi_register_driver(&max310x_spi_driver); ++ if (ret) ++ uart_unregister_driver(&max310x_uart); + #endif + +- return 0; ++ return ret; + } + module_init(max310x_uart_init); + +diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c +index a2522c6d781b..c114757b726b 100644 +--- a/drivers/tty/serial/msm_serial_hs.c ++++ b/drivers/tty/serial/msm_serial_hs.c +@@ -3,7 +3,7 @@ + * MSM 7k High speed uart driver + * + * Copyright (c) 2008 Google Inc. +- * Copyright (c) 2007-2018, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2007-2018, 2021 The Linux Foundation. All rights reserved. + * Modified: Nick Pelly + * + * All source code in this file is licensed under the following license +@@ -1872,7 +1872,7 @@ static void msm_hs_sps_tx_callback(struct sps_event_notify *notify) + &addr, notify->data.transfer.iovec.size, + notify->data.transfer.iovec.flags); + +- del_timer(&msm_uport->tx.tx_timeout_timer); ++ del_timer_sync(&msm_uport->tx.tx_timeout_timer); + MSM_HS_DBG("%s(): Queue kthread work", __func__); + kthread_queue_work(&msm_uport->tx.kworker, &msm_uport->tx.kwork); + } +diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c +index 056f91b3a4ca..b7d1b1645c84 100644 +--- a/drivers/tty/serial/rp2.c ++++ b/drivers/tty/serial/rp2.c +@@ -198,7 +198,6 @@ struct rp2_card { + void __iomem *bar0; + void __iomem *bar1; + spinlock_t card_lock; +- struct completion fw_loaded; + }; + + #define RP_ID(prod) PCI_VDEVICE(RP, (prod)) +@@ -667,17 +666,10 @@ static void rp2_remove_ports(struct rp2_card *card) + card->initialized_ports = 0; + } + +-static void rp2_fw_cb(const struct firmware *fw, void *context) ++static int rp2_load_firmware(struct rp2_card *card, const struct firmware *fw) + { +- struct rp2_card *card = context; + resource_size_t phys_base; +- int i, rc = -ENOENT; +- +- if (!fw) { +- dev_err(&card->pdev->dev, "cannot find '%s' firmware image\n", +- RP2_FW_NAME); +- goto no_fw; +- } ++ int i, rc = 0; + + phys_base = pci_resource_start(card->pdev, 1); + +@@ -723,23 +715,13 @@ static void rp2_fw_cb(const struct firmware *fw, void *context) + card->initialized_ports++; + } + +- release_firmware(fw); +-no_fw: +- /* +- * rp2_fw_cb() is called from a workqueue long after rp2_probe() +- * has already returned success. So if something failed here, +- * we'll just leave the now-dormant device in place until somebody +- * unbinds it. 
+- */ +- if (rc) +- dev_warn(&card->pdev->dev, "driver initialization failed\n"); +- +- complete(&card->fw_loaded); ++ return rc; + } + + static int rp2_probe(struct pci_dev *pdev, + const struct pci_device_id *id) + { ++ const struct firmware *fw; + struct rp2_card *card; + struct rp2_uart_port *ports; + void __iomem * const *bars; +@@ -750,7 +732,6 @@ static int rp2_probe(struct pci_dev *pdev, + return -ENOMEM; + pci_set_drvdata(pdev, card); + spin_lock_init(&card->card_lock); +- init_completion(&card->fw_loaded); + + rc = pcim_enable_device(pdev); + if (rc) +@@ -783,21 +764,23 @@ static int rp2_probe(struct pci_dev *pdev, + return -ENOMEM; + card->ports = ports; + +- rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt, +- IRQF_SHARED, DRV_NAME, card); +- if (rc) ++ rc = request_firmware(&fw, RP2_FW_NAME, &pdev->dev); ++ if (rc < 0) { ++ dev_err(&pdev->dev, "cannot find '%s' firmware image\n", ++ RP2_FW_NAME); + return rc; ++ } + +- /* +- * Only catastrophic errors (e.g. ENOMEM) are reported here. +- * If the FW image is missing, we'll find out in rp2_fw_cb() +- * and print an error message. +- */ +- rc = request_firmware_nowait(THIS_MODULE, 1, RP2_FW_NAME, &pdev->dev, +- GFP_KERNEL, card, rp2_fw_cb); ++ rc = rp2_load_firmware(card, fw); ++ ++ release_firmware(fw); ++ if (rc < 0) ++ return rc; ++ ++ rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt, ++ IRQF_SHARED, DRV_NAME, card); + if (rc) + return rc; +- dev_dbg(&pdev->dev, "waiting for firmware blob...\n"); + + return 0; + } +@@ -806,7 +789,6 @@ static void rp2_remove(struct pci_dev *pdev) + { + struct rp2_card *card = pci_get_drvdata(pdev); + +- wait_for_completion(&card->fw_loaded); + rp2_remove_ports(card); + } + +diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c +index 01ff8ec78023..8c89697c5357 100644 +--- a/drivers/tty/serial/samsung.c ++++ b/drivers/tty/serial/samsung.c +@@ -1157,14 +1157,14 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport, + struct s3c24xx_uart_info *info = ourport->info; + struct clk *clk; + unsigned long rate; +- unsigned int cnt, baud, quot, clk_sel, best_quot = 0; ++ unsigned int cnt, baud, quot, best_quot = 0; + char clkname[MAX_CLK_NAME_LENGTH]; + int calc_deviation, deviation = (1 << 30) - 1; + +- clk_sel = (ourport->cfg->clk_sel) ? ourport->cfg->clk_sel : +- ourport->info->def_clk_sel; + for (cnt = 0; cnt < info->num_clks; cnt++) { +- if (!(clk_sel & (1 << cnt))) ++ /* Keep selected clock if provided */ ++ if (ourport->cfg->clk_sel && ++ !(ourport->cfg->clk_sel & (1 << cnt))) + continue; + + sprintf(clkname, "clk_uart_baud%d", cnt); +@@ -1725,9 +1725,11 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, + ourport->tx_irq = ret + 1; + } + +- ret = platform_get_irq(platdev, 1); +- if (ret > 0) +- ourport->tx_irq = ret; ++ if (!s3c24xx_serial_has_interrupt_mask(port)) { ++ ret = platform_get_irq(platdev, 1); ++ if (ret > 0) ++ ourport->tx_irq = ret; ++ } + /* + * DMA is currently supported only on DT platforms, if DMA properties + * are specified. 
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c +index 9ab13210f8ab..cb137b334bc5 100644 +--- a/drivers/tty/serial/serial_core.c ++++ b/drivers/tty/serial/serial_core.c +@@ -1418,6 +1418,10 @@ static void uart_set_ldisc(struct tty_struct *tty) + { + struct uart_state *state = tty->driver_data; + struct uart_port *uport; ++ struct tty_port *port = &state->port; ++ ++ if (!tty_port_initialized(port)) ++ return; + + mutex_lock(&state->port.mutex); + uport = uart_port_check(state); +diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c +index f80312eed4fd..ffb3fb1bda9e 100644 +--- a/drivers/tty/serial/serial_txx9.c ++++ b/drivers/tty/serial/serial_txx9.c +@@ -1287,6 +1287,9 @@ static int __init serial_txx9_init(void) + + #ifdef ENABLE_SERIAL_TXX9_PCI + ret = pci_register_driver(&serial_txx9_pci_driver); ++ if (ret) { ++ platform_driver_unregister(&serial_txx9_plat_driver); ++ } + #endif + if (ret == 0) + goto out; +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c +index ea35f5144237..b4f528d26bf1 100644 +--- a/drivers/tty/serial/sh-sci.c ++++ b/drivers/tty/serial/sh-sci.c +@@ -612,6 +612,14 @@ static void sci_stop_tx(struct uart_port *port) + ctrl &= ~SCSCR_TIE; + + serial_port_out(port, SCSCR, ctrl); ++ ++#ifdef CONFIG_SERIAL_SH_SCI_DMA ++ if (to_sci_port(port)->chan_tx && ++ !dma_submit_error(to_sci_port(port)->cookie_tx)) { ++ dmaengine_terminate_async(to_sci_port(port)->chan_tx); ++ to_sci_port(port)->cookie_tx = -EINVAL; ++ } ++#endif + } + + static void sci_start_rx(struct uart_port *port) +diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c +index ea8b591dd46f..f325019887b2 100644 +--- a/drivers/tty/serial/stm32-usart.c ++++ b/drivers/tty/serial/stm32-usart.c +@@ -476,8 +476,9 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios, + unsigned int baud; + u32 usartdiv, mantissa, fraction, oversampling; + tcflag_t cflag = termios->c_cflag; +- u32 cr1, cr2, cr3; ++ u32 cr1, cr2, cr3, isr; + unsigned long flags; ++ int ret; + + if (!stm32_port->hw_flow_control) + cflag &= ~CRTSCTS; +@@ -486,6 +487,15 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios, + + spin_lock_irqsave(&port->lock, flags); + ++ ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, ++ isr, ++ (isr & USART_SR_TC), ++ 10, 100000); ++ ++ /* Send the TC error message only when ISR_TC is not set. 
*/ ++ if (ret) ++ dev_err(port->dev, "Transmission is not complete\n"); ++ + /* Stop serial port and reset value */ + writel_relaxed(0, port->membase + ofs->cr1); + +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c +index 6267efa99bed..8ec7b0ceb136 100644 +--- a/drivers/tty/tty_io.c ++++ b/drivers/tty/tty_io.c +@@ -544,8 +544,8 @@ static void __proc_set_tty(struct tty_struct *tty) + put_pid(tty->session); + put_pid(tty->pgrp); + tty->pgrp = get_pid(task_pgrp(current)); +- spin_unlock_irqrestore(&tty->ctrl_lock, flags); + tty->session = get_pid(task_session(current)); ++ spin_unlock_irqrestore(&tty->ctrl_lock, flags); + if (current->signal->tty) { + tty_debug(tty, "current tty %s not NULL!!\n", + current->signal->tty->name); +@@ -935,21 +935,24 @@ void disassociate_ctty(int on_exit) + spin_lock_irq(¤t->sighand->siglock); + put_pid(current->signal->tty_old_pgrp); + current->signal->tty_old_pgrp = NULL; +- + tty = tty_kref_get(current->signal->tty); ++ spin_unlock_irq(¤t->sighand->siglock); ++ + if (tty) { + unsigned long flags; ++ ++ tty_lock(tty); + spin_lock_irqsave(&tty->ctrl_lock, flags); + put_pid(tty->session); + put_pid(tty->pgrp); + tty->session = NULL; + tty->pgrp = NULL; + spin_unlock_irqrestore(&tty->ctrl_lock, flags); ++ tty_unlock(tty); + tty_kref_put(tty); + } else + tty_debug_hangup(tty, "no current tty\n"); + +- spin_unlock_irq(¤t->sighand->siglock); + /* Now clear signal->tty under the lock */ + read_lock(&tasklist_lock); + session_clear_tty(task_session(current)); +@@ -2632,14 +2635,19 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t + return -ENOTTY; + if (retval) + return retval; +- if (!current->signal->tty || +- (current->signal->tty != real_tty) || +- (real_tty->session != task_session(current))) +- return -ENOTTY; ++ + if (get_user(pgrp_nr, p)) + return -EFAULT; + if (pgrp_nr < 0) + return -EINVAL; ++ ++ spin_lock_irq(&real_tty->ctrl_lock); ++ if (!current->signal->tty || ++ (current->signal->tty != real_tty) || ++ (real_tty->session != task_session(current))) { ++ retval = -ENOTTY; ++ goto out_unlock_ctrl; ++ } + rcu_read_lock(); + pgrp = find_vpid(pgrp_nr); + retval = -ESRCH; +@@ -2655,6 +2663,8 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t + spin_unlock_irq(&real_tty->ctrl_lock); + out_unlock: + rcu_read_unlock(); ++out_unlock_ctrl: ++ spin_unlock_irq(&real_tty->ctrl_lock); + return retval; + } + +@@ -2666,21 +2676,31 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t + * + * Obtain the session id of the tty. If there is no session + * return an error. +- * +- * Locking: none. Reference to current->signal->tty is safe. + */ + + static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) + { ++ unsigned long flags; ++ pid_t sid; ++ + /* + * (tty == real_tty) is a cheap way of + * testing if the tty is NOT a master pty. 
+ */ + if (tty == real_tty && current->signal->tty != real_tty) + return -ENOTTY; ++ ++ spin_lock_irqsave(&real_tty->ctrl_lock, flags); + if (!real_tty->session) +- return -ENOTTY; +- return put_user(pid_vnr(real_tty->session), p); ++ goto err; ++ sid = pid_vnr(real_tty->session); ++ spin_unlock_irqrestore(&real_tty->ctrl_lock, flags); ++ ++ return put_user(sid, p); ++ ++err: ++ spin_unlock_irqrestore(&real_tty->ctrl_lock, flags); ++ return -ENOTTY; + } + + /** +@@ -2777,14 +2797,14 @@ static int send_break(struct tty_struct *tty, unsigned int duration) + * @p: pointer to result + * + * Obtain the modem status bits from the tty driver if the feature +- * is supported. Return -EINVAL if it is not available. ++ * is supported. Return -ENOTTY if it is not available. + * + * Locking: none (up to the driver) + */ + + static int tty_tiocmget(struct tty_struct *tty, int __user *p) + { +- int retval = -EINVAL; ++ int retval = -ENOTTY; + + if (tty->ops->tiocmget) { + retval = tty->ops->tiocmget(tty); +@@ -2802,7 +2822,7 @@ static int tty_tiocmget(struct tty_struct *tty, int __user *p) + * @p: pointer to desired bits + * + * Set the modem status bits from the tty driver if the feature +- * is supported. Return -EINVAL if it is not available. ++ * is supported. Return -ENOTTY if it is not available. + * + * Locking: none (up to the driver) + */ +@@ -2814,7 +2834,7 @@ static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd, + unsigned int set, clear, val; + + if (tty->ops->tiocmset == NULL) +- return -EINVAL; ++ return -ENOTTY; + + retval = get_user(val, p); + if (retval) +@@ -3098,10 +3118,14 @@ void __do_SAK(struct tty_struct *tty) + struct task_struct *g, *p; + struct pid *session; + int i; ++ unsigned long flags; + + if (!tty) + return; +- session = tty->session; ++ ++ spin_lock_irqsave(&tty->ctrl_lock, flags); ++ session = get_pid(tty->session); ++ spin_unlock_irqrestore(&tty->ctrl_lock, flags); + + tty_ldisc_flush(tty); + +@@ -3133,6 +3157,7 @@ void __do_SAK(struct tty_struct *tty) + task_unlock(p); + } while_each_thread(g, p); + read_unlock(&tasklist_lock); ++ put_pid(session); + #endif + } + +diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c +index 9d7ab7b66a8a..3e668d7c4b57 100644 +--- a/drivers/tty/vt/consolemap.c ++++ b/drivers/tty/vt/consolemap.c +@@ -494,7 +494,7 @@ con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos) + + p2[unicode & 0x3f] = fontpos; + +- p->sum += (fontpos << 20) + unicode; ++ p->sum += (fontpos << 20U) + unicode; + + return 0; + } +diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c +index d9eba7938917..4bc6261ef3c1 100644 +--- a/drivers/tty/vt/keyboard.c ++++ b/drivers/tty/vt/keyboard.c +@@ -712,8 +712,13 @@ static void k_fn(struct vc_data *vc, unsigned char value, char up_flag) + return; + + if ((unsigned)value < ARRAY_SIZE(func_table)) { ++ unsigned long flags; ++ ++ spin_lock_irqsave(&func_buf_lock, flags); + if (func_table[value]) + puts_queue(vc, func_table[value]); ++ spin_unlock_irqrestore(&func_buf_lock, flags); ++ + } else + pr_err("k_fn called with value=%d\n", value); + } +@@ -1959,13 +1964,11 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, + #undef s + #undef v + +-/* FIXME: This one needs untangling and locking */ ++/* FIXME: This one needs untangling */ + int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) + { + struct kbsentry *kbs; +- char *p; + u_char *q; +- u_char __user *up; + int sz, fnw_sz; + int delta; + char *first_free, *fj, 
*fnw; +@@ -1991,23 +1994,19 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) + i = kbs->kb_func; + + switch (cmd) { +- case KDGKBSENT: +- sz = sizeof(kbs->kb_string) - 1; /* sz should have been +- a struct member */ +- up = user_kdgkb->kb_string; +- p = func_table[i]; +- if(p) +- for ( ; *p && sz; p++, sz--) +- if (put_user(*p, up++)) { +- ret = -EFAULT; +- goto reterr; +- } +- if (put_user('\0', up)) { +- ret = -EFAULT; +- goto reterr; +- } +- kfree(kbs); +- return ((p && *p) ? -EOVERFLOW : 0); ++ case KDGKBSENT: { ++ /* size should have been a struct member */ ++ ssize_t len = sizeof(user_kdgkb->kb_string); ++ ++ spin_lock_irqsave(&func_buf_lock, flags); ++ len = strlcpy(kbs->kb_string, func_table[i] ? : "", len); ++ spin_unlock_irqrestore(&func_buf_lock, flags); ++ ++ ret = copy_to_user(user_kdgkb->kb_string, kbs->kb_string, ++ len + 1) ? -EFAULT : 0; ++ ++ goto reterr; ++ } + case KDSKBSENT: + if (!perm) { + ret = -EPERM; +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c +index 29fb08c8a2fd..8c74d9ebfc50 100644 +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -765,10 +765,19 @@ static const struct tty_port_operations vc_port_ops = { + .destruct = vc_port_destruct, + }; + ++/* ++ * Change # of rows and columns (0 means unchanged/the size of fg_console) ++ * [this is to be used together with some user program ++ * like resize that changes the hardware videomode] ++ */ ++#define VC_MAXCOL (32767) ++#define VC_MAXROW (32767) ++ + int vc_allocate(unsigned int currcons) /* return 0 on success */ + { + struct vt_notifier_param param; + struct vc_data *vc; ++ int err; + + WARN_CONSOLE_UNLOCKED(); + +@@ -798,6 +807,11 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ + if (!*vc->vc_uni_pagedir_loc) + con_set_default_unimap(vc); + ++ err = -EINVAL; ++ if (vc->vc_cols > VC_MAXCOL || vc->vc_rows > VC_MAXROW || ++ vc->vc_screenbuf_size > KMALLOC_MAX_SIZE || !vc->vc_screenbuf_size) ++ goto err_free; ++ err = -ENOMEM; + vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL); + if (!vc->vc_screenbuf) + goto err_free; +@@ -815,7 +829,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ + err_free: + kfree(vc); + vc_cons[currcons].d = NULL; +- return -ENOMEM; ++ return err; + } + + static inline int resize_screen(struct vc_data *vc, int width, int height, +@@ -824,20 +838,12 @@ static inline int resize_screen(struct vc_data *vc, int width, int height, + /* Resizes the resolution of the display adapater */ + int err = 0; + +- if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_resize) ++ if (vc->vc_sw->con_resize) + err = vc->vc_sw->con_resize(vc, width, height, user); + + return err; + } + +-/* +- * Change # of rows and columns (0 means unchanged/the size of fg_console) +- * [this is to be used together with some user program +- * like resize that changes the hardware videomode] +- */ +-#define VC_RESIZE_MAXCOL (32767) +-#define VC_RESIZE_MAXROW (32767) +- + /** + * vc_do_resize - resizing method for the tty + * @tty: tty being resized +@@ -862,7 +868,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, + unsigned int old_rows, old_row_size; + unsigned int new_cols, new_rows, new_row_size, new_screen_size; + unsigned int user; +- unsigned short *newscreen; ++ unsigned short *oldscreen, *newscreen; + + WARN_CONSOLE_UNLOCKED(); + +@@ -872,7 +878,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, + user = vc->vc_resize_user; + vc->vc_resize_user = 0; + +- if (cols > 
VC_RESIZE_MAXCOL || lines > VC_RESIZE_MAXROW) ++ if (cols > VC_MAXCOL || lines > VC_MAXROW) + return -EINVAL; + + new_cols = (cols ? cols : vc->vc_cols); +@@ -883,7 +889,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, + if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) + return 0; + +- if (new_screen_size > (4 << 20)) ++ if (new_screen_size > (4 << 20) || !new_screen_size) + return -EINVAL; + newscreen = kzalloc(new_screen_size, GFP_USER); + if (!newscreen) +@@ -944,10 +950,11 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, + if (new_scr_end > new_origin) + scr_memsetw((void *)new_origin, vc->vc_video_erase_char, + new_scr_end - new_origin); +- kfree(vc->vc_screenbuf); ++ oldscreen = vc->vc_screenbuf; + vc->vc_screenbuf = newscreen; + vc->vc_screenbuf_size = new_screen_size; + set_origin(vc); ++ kfree(oldscreen); + + /* do part of a reset_terminal() */ + vc->vc_top = 0; +@@ -3033,6 +3040,7 @@ static int __init con_init(void) + INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); + tty_port_init(&vc->port); + visual_init(vc, currcons, 1); ++ /* Assuming vc->vc_{cols,rows,screenbuf_size} are sane here. */ + vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT); + vc_init(vc, vc->vc_rows, vc->vc_cols, + currcons || !vc->vc_sw->con_save_screen); +@@ -4227,27 +4235,6 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op) + return rc; + } + +-static int con_font_copy(struct vc_data *vc, struct console_font_op *op) +-{ +- int con = op->height; +- int rc; +- +- +- console_lock(); +- if (vc->vc_mode != KD_TEXT) +- rc = -EINVAL; +- else if (!vc->vc_sw->con_font_copy) +- rc = -ENOSYS; +- else if (con < 0 || !vc_cons_allocated(con)) +- rc = -ENOTTY; +- else if (con == vc->vc_num) /* nothing to do */ +- rc = 0; +- else +- rc = vc->vc_sw->con_font_copy(vc, con); +- console_unlock(); +- return rc; +-} +- + int con_font_op(struct vc_data *vc, struct console_font_op *op) + { + switch (op->op) { +@@ -4258,7 +4245,8 @@ int con_font_op(struct vc_data *vc, struct console_font_op *op) + case KD_FONT_OP_SET_DEFAULT: + return con_font_default(vc, op); + case KD_FONT_OP_COPY: +- return con_font_copy(vc, op); ++ /* was buggy and never really used */ ++ return -EINVAL; + } + return -ENOSYS; + } +diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c +index 4ed0d77e5918..e1c1627a3356 100644 +--- a/drivers/tty/vt/vt_ioctl.c ++++ b/drivers/tty/vt/vt_ioctl.c +@@ -243,7 +243,7 @@ int vt_waitactive(int n) + + + static inline int +-do_fontx_ioctl(int cmd, struct consolefontdesc __user *user_cfd, int perm, struct console_font_op *op) ++do_fontx_ioctl(struct vc_data *vc, int cmd, struct consolefontdesc __user *user_cfd, int perm, struct console_font_op *op) + { + struct consolefontdesc cfdarg; + int i; +@@ -261,15 +261,16 @@ do_fontx_ioctl(int cmd, struct consolefontdesc __user *user_cfd, int perm, struc + op->height = cfdarg.charheight; + op->charcount = cfdarg.charcount; + op->data = cfdarg.chardata; +- return con_font_op(vc_cons[fg_console].d, op); +- case GIO_FONTX: { ++ return con_font_op(vc, op); ++ ++ case GIO_FONTX: + op->op = KD_FONT_OP_GET; + op->flags = KD_FONT_FLAG_OLD; + op->width = 8; + op->height = cfdarg.charheight; + op->charcount = cfdarg.charcount; + op->data = cfdarg.chardata; +- i = con_font_op(vc_cons[fg_console].d, op); ++ i = con_font_op(vc, op); + if (i) + return i; + cfdarg.charheight = op->height; +@@ -277,7 +278,6 @@ do_fontx_ioctl(int cmd, struct consolefontdesc __user *user_cfd, int perm, struc + if 
(copy_to_user(user_cfd, &cfdarg, sizeof(struct consolefontdesc))) + return -EFAULT; + return 0; +- } + } + return -EINVAL; + } +@@ -487,16 +487,19 @@ int vt_ioctl(struct tty_struct *tty, + ret = -EINVAL; + goto out; + } +- /* FIXME: this needs the console lock extending */ +- if (vc->vc_mode == (unsigned char) arg) ++ console_lock(); ++ if (vc->vc_mode == (unsigned char) arg) { ++ console_unlock(); + break; ++ } + vc->vc_mode = (unsigned char) arg; +- if (console != fg_console) ++ if (console != fg_console) { ++ console_unlock(); + break; ++ } + /* + * explicitly blank/unblank the screen if switching modes + */ +- console_lock(); + if (arg == KD_TEXT) + do_unblank_screen(1); + else +@@ -896,12 +899,22 @@ int vt_ioctl(struct tty_struct *tty, + console_lock(); + vcp = vc_cons[i].d; + if (vcp) { ++ int ret; ++ int save_scan_lines = vcp->vc_scan_lines; ++ int save_cell_height = vcp->vc_cell_height; ++ + if (v.v_vlin) + vcp->vc_scan_lines = v.v_vlin; + if (v.v_clin) +- vcp->vc_font.height = v.v_clin; ++ vcp->vc_cell_height = v.v_clin; + vcp->vc_resize_user = 1; +- vc_resize(vcp, v.v_cols, v.v_rows); ++ ret = vc_resize(vcp, v.v_cols, v.v_rows); ++ if (ret) { ++ vcp->vc_scan_lines = save_scan_lines; ++ vcp->vc_cell_height = save_cell_height; ++ console_unlock(); ++ return ret; ++ } + } + console_unlock(); + } +@@ -917,7 +930,7 @@ int vt_ioctl(struct tty_struct *tty, + op.height = 0; + op.charcount = 256; + op.data = up; +- ret = con_font_op(vc_cons[fg_console].d, &op); ++ ret = con_font_op(vc, &op); + break; + } + +@@ -928,7 +941,7 @@ int vt_ioctl(struct tty_struct *tty, + op.height = 32; + op.charcount = 256; + op.data = up; +- ret = con_font_op(vc_cons[fg_console].d, &op); ++ ret = con_font_op(vc, &op); + break; + } + +@@ -945,7 +958,7 @@ int vt_ioctl(struct tty_struct *tty, + + case PIO_FONTX: + case GIO_FONTX: +- ret = do_fontx_ioctl(cmd, up, perm, &op); ++ ret = do_fontx_ioctl(vc, cmd, up, perm, &op); + break; + + case PIO_FONTRESET: +@@ -962,11 +975,11 @@ int vt_ioctl(struct tty_struct *tty, + { + op.op = KD_FONT_OP_SET_DEFAULT; + op.data = NULL; +- ret = con_font_op(vc_cons[fg_console].d, &op); ++ ret = con_font_op(vc, &op); + if (ret) + break; + console_lock(); +- con_set_default_unimap(vc_cons[fg_console].d); ++ con_set_default_unimap(vc); + console_unlock(); + break; + } +@@ -1093,8 +1106,9 @@ struct compat_consolefontdesc { + }; + + static inline int +-compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd, +- int perm, struct console_font_op *op) ++compat_fontx_ioctl(struct vc_data *vc, int cmd, ++ struct compat_consolefontdesc __user *user_cfd, ++ int perm, struct console_font_op *op) + { + struct compat_consolefontdesc cfdarg; + int i; +@@ -1112,7 +1126,8 @@ compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd, + op->height = cfdarg.charheight; + op->charcount = cfdarg.charcount; + op->data = compat_ptr(cfdarg.chardata); +- return con_font_op(vc_cons[fg_console].d, op); ++ return con_font_op(vc, op); ++ + case GIO_FONTX: + op->op = KD_FONT_OP_GET; + op->flags = KD_FONT_FLAG_OLD; +@@ -1120,7 +1135,7 @@ compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd, + op->height = cfdarg.charheight; + op->charcount = cfdarg.charcount; + op->data = compat_ptr(cfdarg.chardata); +- i = con_font_op(vc_cons[fg_console].d, op); ++ i = con_font_op(vc, op); + if (i) + return i; + cfdarg.charheight = op->height; +@@ -1215,7 +1230,7 @@ long vt_compat_ioctl(struct tty_struct *tty, + */ + case PIO_FONTX: + case GIO_FONTX: +- ret = 
compat_fontx_ioctl(cmd, up, perm, &op); ++ ret = compat_fontx_ioctl(vc, cmd, up, perm, &op); + break; + + case KDFONTOP: +diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c +index f598ecddc8a7..b58a504240c4 100644 +--- a/drivers/uio/uio_pdrv_genirq.c ++++ b/drivers/uio/uio_pdrv_genirq.c +@@ -148,7 +148,7 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev) + if (!uioinfo->irq) { + ret = platform_get_irq(pdev, 0); + uioinfo->irq = ret; +- if (ret == -ENXIO && pdev->dev.of_node) ++ if (ret == -ENXIO) + uioinfo->irq = UIO_IRQ_NONE; + else if (ret < 0) { + dev_err(&pdev->dev, "failed to get IRQ\n"); +diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c +index 7311ed61e99a..029c8bc54b7a 100644 +--- a/drivers/usb/c67x00/c67x00-sched.c ++++ b/drivers/usb/c67x00/c67x00-sched.c +@@ -500,7 +500,7 @@ c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status) + c67x00_release_urb(c67x00, urb); + usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb); + spin_unlock(&c67x00->lock); +- usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, urbp->status); ++ usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, status); + spin_lock(&c67x00->lock); + } + +diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c +index 099179457f60..553bdd0983f7 100644 +--- a/drivers/usb/chipidea/ci_hdrc_imx.c ++++ b/drivers/usb/chipidea/ci_hdrc_imx.c +@@ -63,7 +63,8 @@ static const struct ci_hdrc_imx_platform_flag imx6sx_usb_data = { + + static const struct ci_hdrc_imx_platform_flag imx6ul_usb_data = { + .flags = CI_HDRC_SUPPORTS_RUNTIME_PM | +- CI_HDRC_TURN_VBUS_EARLY_ON, ++ CI_HDRC_TURN_VBUS_EARLY_ON | ++ CI_HDRC_DISABLE_DEVICE_STREAMING, + }; + + static const struct ci_hdrc_imx_platform_flag imx7d_usb_data = { +@@ -132,9 +133,13 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev) + misc_pdev = of_find_device_by_node(args.np); + of_node_put(args.np); + +- if (!misc_pdev || !platform_get_drvdata(misc_pdev)) ++ if (!misc_pdev) + return ERR_PTR(-EPROBE_DEFER); + ++ if (!platform_get_drvdata(misc_pdev)) { ++ put_device(&misc_pdev->dev); ++ return ERR_PTR(-EPROBE_DEFER); ++ } + data->dev = &misc_pdev->dev; + + if (of_find_property(np, "disable-over-current", NULL)) +diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c +index e96e3a5808b3..6062a5d816a6 100644 +--- a/drivers/usb/chipidea/core.c ++++ b/drivers/usb/chipidea/core.c +@@ -1110,6 +1110,29 @@ static void ci_controller_suspend(struct ci_hdrc *ci) + enable_irq(ci->irq); + } + ++/* ++ * Handle the wakeup interrupt triggered by extcon connector ++ * We need to call ci_irq again for extcon since the first ++ * interrupt (wakeup int) only let the controller be out of ++ * low power mode, but not handle any interrupts. 
++ */ ++static void ci_extcon_wakeup_int(struct ci_hdrc *ci) ++{ ++ struct ci_hdrc_cable *cable_id, *cable_vbus; ++ u32 otgsc = hw_read_otgsc(ci, ~0); ++ ++ cable_id = &ci->platdata->id_extcon; ++ cable_vbus = &ci->platdata->vbus_extcon; ++ ++ if (!IS_ERR(cable_id->edev) && ci->is_otg && ++ (otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS)) ++ ci_irq(ci->irq, ci); ++ ++ if (!IS_ERR(cable_vbus->edev) && ci->is_otg && ++ (otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS)) ++ ci_irq(ci->irq, ci); ++} ++ + static int ci_controller_resume(struct device *dev) + { + struct ci_hdrc *ci = dev_get_drvdata(dev); +@@ -1136,6 +1159,7 @@ static int ci_controller_resume(struct device *dev) + enable_irq(ci->irq); + if (ci_otg_is_fsm_mode(ci)) + ci_otg_fsm_wakeup_by_srp(ci); ++ ci_extcon_wakeup_int(ci); + } + + return 0; +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 5b0bffba4aac..23df1549eb0d 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -335,8 +335,10 @@ static void acm_ctrl_irq(struct urb *urb) + acm->iocount.dsr++; + if (difference & ACM_CTRL_DCD) + acm->iocount.dcd++; +- if (newctrl & ACM_CTRL_BRK) ++ if (newctrl & ACM_CTRL_BRK) { + acm->iocount.brk++; ++ tty_insert_flip_char(&acm->port, 0, TTY_BREAK); ++ } + if (newctrl & ACM_CTRL_RI) + acm->iocount.rng++; + if (newctrl & ACM_CTRL_FRAMING) +@@ -541,7 +543,8 @@ static void acm_port_dtr_rts(struct tty_port *port, int raise) + + res = acm_set_control(acm, val); + if (res && (acm->ctrl_caps & USB_CDC_CAP_LINE)) +- dev_err(&acm->control->dev, "failed to set dtr/rts\n"); ++ /* This is broken in too many devices to spam the logs */ ++ dev_dbg(&acm->control->dev, "failed to set dtr/rts\n"); + } + + static int acm_port_activate(struct tty_port *port, struct tty_struct *tty) +@@ -867,8 +870,6 @@ static int set_serial_info(struct acm *acm, + if ((new_serial.close_delay != old_close_delay) || + (new_serial.closing_wait != old_closing_wait)) + retval = -EPERM; +- else +- retval = -EOPNOTSUPP; + } else { + acm->port.close_delay = close_delay; + acm->port.closing_wait = closing_wait; +@@ -1178,9 +1179,21 @@ static int acm_probe(struct usb_interface *intf, + } + } + } else { ++ int class = -1; ++ + data_intf_num = union_header->bSlaveInterface0; + control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0); + data_interface = usb_ifnum_to_if(usb_dev, data_intf_num); ++ ++ if (control_interface) ++ class = control_interface->cur_altsetting->desc.bInterfaceClass; ++ ++ if (class != USB_CLASS_COMM && class != USB_CLASS_CDC_DATA) { ++ dev_dbg(&intf->dev, "Broken union descriptor, assuming single interface\n"); ++ combined_interfaces = 1; ++ control_interface = data_interface = intf; ++ goto look_for_collapsed_interface; ++ } + } + + if (!control_interface || !data_interface) { +@@ -1445,6 +1458,11 @@ static int acm_probe(struct usb_interface *intf, + + return 0; + alloc_fail8: ++ if (!acm->combined_interfaces) { ++ /* Clear driver data so that disconnect() returns early. */ ++ usb_set_intfdata(data_interface, NULL); ++ usb_driver_release_interface(&acm_driver, data_interface); ++ } + if (acm->country_codes) { + device_remove_file(&acm->control->dev, + &dev_attr_wCountryCodes); +@@ -1627,6 +1645,8 @@ static int acm_reset_resume(struct usb_interface *intf) + + static const struct usb_device_id acm_ids[] = { + /* quirky and broken devices */ ++ { USB_DEVICE(0x0424, 0x274e), /* Microchip Technology, Inc. 
(formerly SMSC) */ ++ .driver_info = DISABLE_ECHO, }, /* DISABLE ECHO in termios flag */ + { USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */ + .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ + { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */ +@@ -1634,6 +1654,15 @@ static const struct usb_device_id acm_ids[] = { + { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ + }, ++ { USB_DEVICE(0x045b, 0x023c), /* Renesas USB Download mode */ ++ .driver_info = DISABLE_ECHO, /* Don't echo banner */ ++ }, ++ { USB_DEVICE(0x045b, 0x0248), /* Renesas USB Download mode */ ++ .driver_info = DISABLE_ECHO, /* Don't echo banner */ ++ }, ++ { USB_DEVICE(0x045b, 0x024D), /* Renesas USB Download mode */ ++ .driver_info = DISABLE_ECHO, /* Don't echo banner */ ++ }, + { USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */ + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ + }, +@@ -1826,6 +1855,10 @@ static const struct usb_device_id acm_ids[] = { + { USB_DEVICE(0x04d8, 0x0083), /* Bootloader mode */ + .driver_info = IGNORE_DEVICE, + }, ++ ++ { USB_DEVICE(0x04d8, 0xf58b), ++ .driver_info = IGNORE_DEVICE, ++ }, + #endif + + /*Samsung phone in firmware update mode */ +@@ -1838,6 +1871,17 @@ static const struct usb_device_id acm_ids[] = { + .driver_info = IGNORE_DEVICE, + }, + ++ /* Exclude ETAS ES58x */ ++ { USB_DEVICE(0x108c, 0x0159), /* ES581.4 */ ++ .driver_info = IGNORE_DEVICE, ++ }, ++ { USB_DEVICE(0x108c, 0x0168), /* ES582.1 */ ++ .driver_info = IGNORE_DEVICE, ++ }, ++ { USB_DEVICE(0x108c, 0x0169), /* ES584.1 */ ++ .driver_info = IGNORE_DEVICE, ++ }, ++ + { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */ + .driver_info = SEND_ZERO_PACKET, + }, +@@ -1845,6 +1889,16 @@ static const struct usb_device_id acm_ids[] = { + .driver_info = SEND_ZERO_PACKET, + }, + ++ /* Exclude Goodix Fingerprint Reader */ ++ { USB_DEVICE(0x27c6, 0x5395), ++ .driver_info = IGNORE_DEVICE, ++ }, ++ ++ /* Exclude Heimann Sensor GmbH USB appset demo */ ++ { USB_DEVICE(0x32a7, 0x0000), ++ .driver_info = IGNORE_DEVICE, ++ }, ++ + /* control interfaces without any protocol set */ + { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, + USB_CDC_PROTO_NONE) }, +diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c +index 09337a973335..f9d39c993f2f 100644 +--- a/drivers/usb/class/cdc-wdm.c ++++ b/drivers/usb/class/cdc-wdm.c +@@ -61,6 +61,9 @@ MODULE_DEVICE_TABLE (usb, wdm_ids); + + #define WDM_MAX 16 + ++/* we cannot wait forever at flush() */ ++#define WDM_FLUSH_TIMEOUT (30 * HZ) ++ + /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */ + #define WDM_DEFAULT_BUFSIZE 256 + +@@ -151,7 +154,7 @@ static void wdm_out_callback(struct urb *urb) + kfree(desc->outbuf); + desc->outbuf = NULL; + clear_bit(WDM_IN_USE, &desc->flags); +- wake_up(&desc->wait); ++ wake_up_all(&desc->wait); + } + + /* forward declaration */ +@@ -402,6 +405,9 @@ static ssize_t wdm_write + if (test_bit(WDM_RESETTING, &desc->flags)) + r = -EIO; + ++ if (test_bit(WDM_DISCONNECTING, &desc->flags)) ++ r = -ENODEV; ++ + if (r < 0) { + rv = r; + goto out_free_mem_pm; +@@ -433,6 +439,7 @@ static ssize_t wdm_write + if (rv < 0) { + desc->outbuf = NULL; + clear_bit(WDM_IN_USE, &desc->flags); ++ wake_up_all(&desc->wait); /* for wdm_wait_for_response() */ + dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv); + rv = usb_translate_errors(rv); + goto out_free_mem_pm; +@@ -593,28 +600,58 @@ static 
ssize_t wdm_read + return rv; + } + +-static int wdm_flush(struct file *file, fl_owner_t id) ++static int wdm_wait_for_response(struct file *file, long timeout) + { + struct wdm_device *desc = file->private_data; ++ long rv; /* Use long here because (int) MAX_SCHEDULE_TIMEOUT < 0. */ + +- wait_event(desc->wait, +- /* +- * needs both flags. We cannot do with one +- * because resetting it would cause a race +- * with write() yet we need to signal +- * a disconnect +- */ +- !test_bit(WDM_IN_USE, &desc->flags) || +- test_bit(WDM_DISCONNECTING, &desc->flags)); +- +- /* cannot dereference desc->intf if WDM_DISCONNECTING */ ++ /* ++ * Needs both flags. We cannot do with one because resetting it would ++ * cause a race with write() yet we need to signal a disconnect. ++ */ ++ rv = wait_event_interruptible_timeout(desc->wait, ++ !test_bit(WDM_IN_USE, &desc->flags) || ++ test_bit(WDM_DISCONNECTING, &desc->flags), ++ timeout); ++ ++ /* ++ * To report the correct error. This is best effort. ++ * We are inevitably racing with the hardware. ++ */ + if (test_bit(WDM_DISCONNECTING, &desc->flags)) + return -ENODEV; +- if (desc->werr < 0) +- dev_err(&desc->intf->dev, "Error in flush path: %d\n", +- desc->werr); ++ if (!rv) ++ return -EIO; ++ if (rv < 0) ++ return -EINTR; + +- return usb_translate_errors(desc->werr); ++ spin_lock_irq(&desc->iuspin); ++ rv = desc->werr; ++ desc->werr = 0; ++ spin_unlock_irq(&desc->iuspin); ++ ++ return usb_translate_errors(rv); ++ ++} ++ ++/* ++ * You need to send a signal when you react to malicious or defective hardware. ++ * Also, don't abort when fsync() returned -EINVAL, for older kernels which do ++ * not implement wdm_flush() will return -EINVAL. ++ */ ++static int wdm_fsync(struct file *file, loff_t start, loff_t end, int datasync) ++{ ++ return wdm_wait_for_response(file, MAX_SCHEDULE_TIMEOUT); ++} ++ ++/* ++ * Same with wdm_fsync(), except it uses finite timeout in order to react to ++ * malicious or defective hardware which ceased communication after close() was ++ * implicitly called due to process termination. 
++ */ ++static int wdm_flush(struct file *file, fl_owner_t id) ++{ ++ return wdm_wait_for_response(file, WDM_FLUSH_TIMEOUT); + } + + static unsigned int wdm_poll(struct file *file, struct poll_table_struct *wait) +@@ -739,6 +776,7 @@ static const struct file_operations wdm_fops = { + .owner = THIS_MODULE, + .read = wdm_read, + .write = wdm_write, ++ .fsync = wdm_fsync, + .open = wdm_open, + .flush = wdm_flush, + .release = wdm_release, +diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c +index 07c3c3449147..582099f4f449 100644 +--- a/drivers/usb/class/usblp.c ++++ b/drivers/usb/class/usblp.c +@@ -289,8 +289,25 @@ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i + #define usblp_reset(usblp)\ + usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0) + +-#define usblp_hp_channel_change_request(usblp, channel, buffer) \ +- usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, channel, buffer, 1) ++static int usblp_hp_channel_change_request(struct usblp *usblp, int channel, u8 *new_channel) ++{ ++ u8 *buf; ++ int ret; ++ ++ buf = kzalloc(1, GFP_KERNEL); ++ if (!buf) ++ return -ENOMEM; ++ ++ ret = usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, ++ USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, ++ channel, buf, 1); ++ if (ret == 0) ++ *new_channel = buf[0]; ++ ++ kfree(buf); ++ ++ return ret; ++} + + /* + * See the description for usblp_select_alts() below for the usage +@@ -481,7 +498,8 @@ static int usblp_release(struct inode *inode, struct file *file) + usb_autopm_put_interface(usblp->intf); + + if (!usblp->present) /* finish cleanup from disconnect */ +- usblp_cleanup(usblp); ++ usblp_cleanup(usblp); /* any URBs must be dead */ ++ + mutex_unlock(&usblp_mutex); + return 0; + } +@@ -839,6 +857,11 @@ static ssize_t usblp_read(struct file *file, char __user *buffer, size_t len, lo + if (rv < 0) + return rv; + ++ if (!usblp->present) { ++ count = -ENODEV; ++ goto done; ++ } ++ + if ((avail = usblp->rstatus) < 0) { + printk(KERN_ERR "usblp%d: error %d reading from printer\n", + usblp->minor, (int)avail); +@@ -1326,14 +1349,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol) + if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL) + return -EINVAL; + +- alts = usblp->protocol[protocol].alt_setting; +- if (alts < 0) +- return -EINVAL; +- r = usb_set_interface(usblp->dev, usblp->ifnum, alts); +- if (r < 0) { +- printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", +- alts, usblp->ifnum); +- return r; ++ /* Don't unnecessarily set the interface if there's a single alt. 
*/ ++ if (usblp->intf->num_altsetting > 1) { ++ alts = usblp->protocol[protocol].alt_setting; ++ if (alts < 0) ++ return -EINVAL; ++ r = usb_set_interface(usblp->dev, usblp->ifnum, alts); ++ if (r < 0) { ++ printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", ++ alts, usblp->ifnum); ++ return r; ++ } + } + + usblp->bidir = (usblp->protocol[protocol].epread != NULL); +@@ -1397,9 +1423,11 @@ static void usblp_disconnect(struct usb_interface *intf) + + usblp_unlink_urbs(usblp); + mutex_unlock(&usblp->mut); ++ usb_poison_anchored_urbs(&usblp->urbs); + + if (!usblp->used) + usblp_cleanup(usblp); ++ + mutex_unlock(&usblp_mutex); + } + +diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c +index a391b50fb32f..c310c6d4d150 100644 +--- a/drivers/usb/class/usbtmc.c ++++ b/drivers/usb/class/usbtmc.c +@@ -1342,16 +1342,10 @@ static void usbtmc_interrupt(struct urb *urb) + case -EOVERFLOW: + dev_err(dev, "overflow with length %d, actual length is %d\n", + data->iin_wMaxPacketSize, urb->actual_length); +- case -ECONNRESET: +- case -ENOENT: +- case -ESHUTDOWN: +- case -EILSEQ: +- case -ETIME: ++ default: + /* urb terminated, clean up */ + dev_dbg(dev, "urb terminated, status: %d\n", status); + return; +- default: +- dev_err(dev, "unknown status received: %d\n", status); + } + exit: + rv = usb_submit_urb(urb, GFP_ATOMIC); +diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c +index 2f537bbdda09..362cca983e6e 100644 +--- a/drivers/usb/common/usb-otg-fsm.c ++++ b/drivers/usb/common/usb-otg-fsm.c +@@ -199,7 +199,11 @@ static void otg_start_hnp_polling(struct otg_fsm *fsm) + if (!fsm->host_req_flag) + return; + +- INIT_DELAYED_WORK(&fsm->hnp_polling_work, otg_hnp_polling_work); ++ if (!fsm->hnp_work_inited) { ++ INIT_DELAYED_WORK(&fsm->hnp_polling_work, otg_hnp_polling_work); ++ fsm->hnp_work_inited = true; ++ } ++ + schedule_delayed_work(&fsm->hnp_polling_work, + msecs_to_jiffies(T_HOST_REQ_POLL)); + } +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c +index dcaf82dc87fd..c25ba4db8673 100644 +--- a/drivers/usb/core/config.c ++++ b/drivers/usb/core/config.c +@@ -251,6 +251,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, + struct usb_host_interface *ifp, int num_ep, + unsigned char *buffer, int size) + { ++ struct usb_device *udev = to_usb_device(ddev); + unsigned char *buffer0 = buffer; + struct usb_endpoint_descriptor *d; + struct usb_host_endpoint *endpoint; +@@ -292,6 +293,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, + goto skip_to_next_endpoint_or_interface_descriptor; + } + ++ /* Ignore blacklisted endpoints */ ++ if (udev->quirks & USB_QUIRK_ENDPOINT_BLACKLIST) { ++ if (usb_endpoint_is_blacklisted(udev, ifp, d)) { ++ dev_warn(ddev, "config %d interface %d altsetting %d has a blacklisted endpoint with address 0x%X, skipping\n", ++ cfgno, inum, asnum, ++ d->bEndpointAddress); ++ goto skip_to_next_endpoint_or_interface_descriptor; ++ } ++ } ++ + endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; + ++ifp->desc.bNumEndpoints; + +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c +index 059e71d71b66..781db58004f7 100644 +--- a/drivers/usb/core/devio.c ++++ b/drivers/usb/core/devio.c +@@ -477,11 +477,11 @@ static void snoop_urb(struct usb_device *udev, + + if (userurb) { /* Async */ + if (when == SUBMIT) +- dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, " ++ dev_info(&udev->dev, "userurb %px, ep%d %s-%s, " + "length %u\n", + userurb, ep, t, d, length); + else +- 
dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, " ++ dev_info(&udev->dev, "userurb %px, ep%d %s-%s, " + "actual_length %u status %d\n", + userurb, ep, t, d, length, + timeout_or_status); +@@ -1945,7 +1945,7 @@ static int proc_reapurb(struct usb_dev_state *ps, void __user *arg) + if (as) { + int retval; + +- snoop(&ps->dev->dev, "reap %pK\n", as->userurb); ++ snoop(&ps->dev->dev, "reap %px\n", as->userurb); + retval = processcompl(as, (void __user * __user *)arg); + free_async(as); + return retval; +@@ -1962,7 +1962,7 @@ static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg) + + as = async_getcompleted(ps); + if (as) { +- snoop(&ps->dev->dev, "reap %pK\n", as->userurb); ++ snoop(&ps->dev->dev, "reap %px\n", as->userurb); + retval = processcompl(as, (void __user * __user *)arg); + free_async(as); + } else { +@@ -2094,7 +2094,7 @@ static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg) + if (as) { + int retval; + +- snoop(&ps->dev->dev, "reap %pK\n", as->userurb); ++ snoop(&ps->dev->dev, "reap %px\n", as->userurb); + retval = processcompl_compat(as, (void __user * __user *)arg); + free_async(as); + return retval; +@@ -2111,7 +2111,7 @@ static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *ar + + as = async_getcompleted(ps); + if (as) { +- snoop(&ps->dev->dev, "reap %pK\n", as->userurb); ++ snoop(&ps->dev->dev, "reap %px\n", as->userurb); + retval = processcompl_compat(as, (void __user * __user *)arg); + free_async(as); + } else { +@@ -2540,7 +2540,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd, + #endif + + case USBDEVFS_DISCARDURB: +- snoop(&dev->dev, "%s: DISCARDURB %pK\n", __func__, p); ++ snoop(&dev->dev, "%s: DISCARDURB %px\n", __func__, p); + ret = proc_unlinkurb(ps, p); + break; + +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 865bd3b79012..90e62eec027c 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -3516,6 +3516,9 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) + usb_clear_port_feature(hub->hdev, port1, + USB_PORT_FEAT_C_SUSPEND); + } ++ ++ /* TRSMRCY = 10 msec */ ++ msleep(10); + } + + if (udev->persist_enabled) +@@ -3905,6 +3908,47 @@ static int usb_set_lpm_timeout(struct usb_device *udev, + return 0; + } + ++/* ++ * Don't allow device intiated U1/U2 if the system exit latency + one bus ++ * interval is greater than the minimum service interval of any active ++ * periodic endpoint. See USB 3.2 section 9.4.9 ++ */ ++static bool usb_device_may_initiate_lpm(struct usb_device *udev, ++ enum usb3_link_state state) ++{ ++ unsigned int sel; /* us */ ++ int i, j; ++ ++ if (state == USB3_LPM_U1) ++ sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); ++ else if (state == USB3_LPM_U2) ++ sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); ++ else ++ return false; ++ ++ for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { ++ struct usb_interface *intf; ++ struct usb_endpoint_descriptor *desc; ++ unsigned int interval; ++ ++ intf = udev->actconfig->interface[i]; ++ if (!intf) ++ continue; ++ ++ for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++) { ++ desc = &intf->cur_altsetting->endpoint[j].desc; ++ ++ if (usb_endpoint_xfer_int(desc) || ++ usb_endpoint_xfer_isoc(desc)) { ++ interval = (1 << (desc->bInterval - 1)) * 125; ++ if (sel + 125 > interval) ++ return false; ++ } ++ } ++ } ++ return true; ++} ++ + /* + * Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated + * U1/U2 entry. 
+@@ -3977,20 +4021,23 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, + * U1/U2_ENABLE + */ + if (udev->actconfig && +- usb_set_device_initiated_lpm(udev, state, true) == 0) { +- if (state == USB3_LPM_U1) +- udev->usb3_lpm_u1_enabled = 1; +- else if (state == USB3_LPM_U2) +- udev->usb3_lpm_u2_enabled = 1; +- } else { +- /* Don't request U1/U2 entry if the device +- * cannot transition to U1/U2. +- */ +- usb_set_lpm_timeout(udev, state, 0); +- hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); ++ usb_device_may_initiate_lpm(udev, state)) { ++ if (usb_set_device_initiated_lpm(udev, state, true)) { ++ /* ++ * Request to enable device initiated U1/U2 failed, ++ * better to turn off lpm in this case. ++ */ ++ usb_set_lpm_timeout(udev, state, 0); ++ hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); ++ return; ++ } + } +-} + ++ if (state == USB3_LPM_U1) ++ udev->usb3_lpm_u1_enabled = 1; ++ else if (state == USB3_LPM_U2) ++ udev->usb3_lpm_u2_enabled = 1; ++} + /* + * Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated + * U1/U2 entry. +diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h +index 34c1a7e22aae..be5075d41406 100644 +--- a/drivers/usb/core/hub.h ++++ b/drivers/usb/core/hub.h +@@ -151,8 +151,10 @@ static inline unsigned hub_power_on_good_delay(struct usb_hub *hub) + { + unsigned delay = hub->descriptor->bPwrOn2PwrGood * 2; + +- /* Wait at least 100 msec for power to become stable */ +- return max(delay, 100U); ++ if (!hub->hdev->parent) /* root hub */ ++ return delay; ++ else /* Wait at least 100 msec for power to become stable */ ++ return max(delay, 100U); + } + + static inline int hub_port_debounce_be_connected(struct usb_hub *hub, +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c +index bd4c86da860b..18879d6def8d 100644 +--- a/drivers/usb/core/message.c ++++ b/drivers/usb/core/message.c +@@ -1142,6 +1142,34 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, + } + } + ++/* ++ * usb_disable_device_endpoints -- Disable all endpoints for a device ++ * @dev: the device whose endpoints are being disabled ++ * @skip_ep0: 0 to disable endpoint 0, 1 to skip it. ++ */ ++static void usb_disable_device_endpoints(struct usb_device *dev, int skip_ep0) ++{ ++ struct usb_hcd *hcd = bus_to_hcd(dev->bus); ++ int i; ++ ++ if (hcd->driver->check_bandwidth) { ++ /* First pass: Cancel URBs, leave endpoint pointers intact. 
*/ ++ for (i = skip_ep0; i < 16; ++i) { ++ usb_disable_endpoint(dev, i, false); ++ usb_disable_endpoint(dev, i + USB_DIR_IN, false); ++ } ++ /* Remove endpoints from the host controller internal state */ ++ mutex_lock(hcd->bandwidth_mutex); ++ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); ++ mutex_unlock(hcd->bandwidth_mutex); ++ } ++ /* Second pass: remove endpoint pointers */ ++ for (i = skip_ep0; i < 16; ++i) { ++ usb_disable_endpoint(dev, i, true); ++ usb_disable_endpoint(dev, i + USB_DIR_IN, true); ++ } ++} ++ + /** + * usb_disable_device - Disable all the endpoints for a USB device + * @dev: the device whose endpoints are being disabled +@@ -1155,7 +1183,6 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, + void usb_disable_device(struct usb_device *dev, int skip_ep0) + { + int i; +- struct usb_hcd *hcd = bus_to_hcd(dev->bus); + + /* getting rid of interfaces will disconnect + * any drivers bound to them (a key side effect) +@@ -1201,22 +1228,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) + + dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__, + skip_ep0 ? "non-ep0" : "all"); +- if (hcd->driver->check_bandwidth) { +- /* First pass: Cancel URBs, leave endpoint pointers intact. */ +- for (i = skip_ep0; i < 16; ++i) { +- usb_disable_endpoint(dev, i, false); +- usb_disable_endpoint(dev, i + USB_DIR_IN, false); +- } +- /* Remove endpoints from the host controller internal state */ +- mutex_lock(hcd->bandwidth_mutex); +- usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); +- mutex_unlock(hcd->bandwidth_mutex); +- /* Second pass: remove endpoint pointers */ +- } +- for (i = skip_ep0; i < 16; ++i) { +- usb_disable_endpoint(dev, i, true); +- usb_disable_endpoint(dev, i + USB_DIR_IN, true); +- } ++ ++ usb_disable_device_endpoints(dev, skip_ep0); + } + + /** +@@ -1628,6 +1641,9 @@ EXPORT_SYMBOL(usb_set_interface_timeout); + * The caller must own the device lock. + * + * Return: Zero on success, else a negative error code. ++ * ++ * If this routine fails the device will probably be in an unusable state ++ * with endpoints disabled, and interfaces only partially enabled. 
+ */ + int usb_reset_configuration(struct usb_device *dev) + { +@@ -1643,10 +1659,7 @@ int usb_reset_configuration(struct usb_device *dev) + * calls during probe() are fine + */ + +- for (i = 1; i < 16; ++i) { +- usb_disable_endpoint(dev, i, true); +- usb_disable_endpoint(dev, i + USB_DIR_IN, true); +- } ++ usb_disable_device_endpoints(dev, 1); /* skip ep0*/ + + config = dev->actconfig; + retval = 0; +@@ -1659,34 +1672,10 @@ int usb_reset_configuration(struct usb_device *dev) + mutex_unlock(hcd->bandwidth_mutex); + return -ENOMEM; + } +- /* Make sure we have enough bandwidth for each alternate setting 0 */ +- for (i = 0; i < config->desc.bNumInterfaces; i++) { +- struct usb_interface *intf = config->interface[i]; +- struct usb_host_interface *alt; + +- alt = usb_altnum_to_altsetting(intf, 0); +- if (!alt) +- alt = &intf->altsetting[0]; +- if (alt != intf->cur_altsetting) +- retval = usb_hcd_alloc_bandwidth(dev, NULL, +- intf->cur_altsetting, alt); +- if (retval < 0) +- break; +- } +- /* If not, reinstate the old alternate settings */ ++ /* xHCI adds all endpoints in usb_hcd_alloc_bandwidth */ ++ retval = usb_hcd_alloc_bandwidth(dev, config, NULL, NULL); + if (retval < 0) { +-reset_old_alts: +- for (i--; i >= 0; i--) { +- struct usb_interface *intf = config->interface[i]; +- struct usb_host_interface *alt; +- +- alt = usb_altnum_to_altsetting(intf, 0); +- if (!alt) +- alt = &intf->altsetting[0]; +- if (alt != intf->cur_altsetting) +- usb_hcd_alloc_bandwidth(dev, NULL, +- alt, intf->cur_altsetting); +- } + usb_enable_lpm(dev); + mutex_unlock(hcd->bandwidth_mutex); + return retval; +@@ -1695,8 +1684,12 @@ int usb_reset_configuration(struct usb_device *dev) + USB_REQ_SET_CONFIGURATION, 0, + config->desc.bConfigurationValue, 0, + NULL, 0, USB_CTRL_SET_TIMEOUT); +- if (retval < 0) +- goto reset_old_alts; ++ if (retval < 0) { ++ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); ++ usb_enable_lpm(dev); ++ mutex_unlock(hcd->bandwidth_mutex); ++ return retval; ++ } + mutex_unlock(hcd->bandwidth_mutex); + + /* re-init hc/hcd interface/endpoint state */ +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index 27d05f0134de..2ca6ed207e26 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -73,11 +73,12 @@ static const struct usb_device_id usb_quirk_list[] = { + /* Logitech HD Webcam C270 */ + { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, + +- /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */ ++ /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */ + { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT }, ++ { USB_DEVICE(0x046d, 0x085c), .driver_info = USB_QUIRK_DELAY_INIT }, + + /* Logitech ConferenceCam CC3000e */ + { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, +@@ -188,12 +189,19 @@ static const struct usb_device_id usb_quirk_list[] = { + { USB_DEVICE(0x06a3, 0x0006), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + ++ /* Agfa SNAPSCAN 1212U */ ++ { USB_DEVICE(0x06bd, 0x0001), .driver_info = USB_QUIRK_RESET_RESUME }, ++ + /* Guillemot Webcam Hercules Dualpix Exchange (2nd ID) */ + { USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME }, + + /* Guillemot Webcam Hercules Dualpix Exchange*/ + { USB_DEVICE(0x06f8, 0x3005), .driver_info = 
USB_QUIRK_RESET_RESUME }, + ++ /* Guillemot Hercules DJ Console audio card (BZ 208357) */ ++ { USB_DEVICE(0x06f8, 0xb000), .driver_info = ++ USB_QUIRK_ENDPOINT_BLACKLIST }, ++ + /* Midiman M-Audio Keystation 88es */ + { USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME }, + +@@ -216,9 +224,15 @@ static const struct usb_device_id usb_quirk_list[] = { + { USB_DEVICE(0x0926, 0x3333), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + ++ /* Kingston DataTraveler 3.0 */ ++ { USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM }, ++ + /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ + { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, + ++ /* ELMO L-12F document camera */ ++ { USB_DEVICE(0x09a1, 0x0028), .driver_info = USB_QUIRK_DELAY_CTRL_MSG }, ++ + /* Broadcom BCM92035DGROM BT dongle */ + { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME }, + +@@ -231,10 +245,15 @@ static const struct usb_device_id usb_quirk_list[] = { + + /* Realtek hub in Dell WD19 (Type-C) */ + { USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM }, ++ { USB_DEVICE(0x0bda, 0x5487), .driver_info = USB_QUIRK_RESET_RESUME }, + + /* Generic RTL8153 based ethernet adapters */ + { USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM }, + ++ /* SONiX USB DEVICE Touchpad */ ++ { USB_DEVICE(0x0c45, 0x7056), .driver_info = ++ USB_QUIRK_IGNORE_REMOTE_WAKEUP }, ++ + /* Action Semiconductor flash disk */ + { USB_DEVICE(0x10d6, 0x2200), .driver_info = + USB_QUIRK_STRING_FETCH_255 }, +@@ -246,6 +265,9 @@ static const struct usb_device_id usb_quirk_list[] = { + { USB_DEVICE(0x1532, 0x0116), .driver_info = + USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, + ++ /* Lenovo ThinkPad USB-C Dock Gen2 Ethernet (RTL8153 GigE) */ ++ { USB_DEVICE(0x17ef, 0xa387), .driver_info = USB_QUIRK_NO_LPM }, ++ + /* BUILDWIN Photo Frame */ + { USB_DEVICE(0x1908, 0x1315), .driver_info = + USB_QUIRK_HONOR_BNUMINTERFACES }, +@@ -298,6 +320,8 @@ static const struct usb_device_id usb_quirk_list[] = { + + { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM }, + ++ { USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM }, ++ + /* DJI CineSSD */ + { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, + +@@ -334,6 +358,40 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = { + { } /* terminating entry must be last */ + }; + ++/* ++ * Entries for blacklisted endpoints that should be ignored when parsing ++ * configuration descriptors. ++ * ++ * Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST. 
++ */ ++static const struct usb_device_id usb_endpoint_blacklist[] = { ++ { USB_DEVICE_INTERFACE_NUMBER(0x06f8, 0xb000, 5), .driver_info = 0x01 }, ++ { USB_DEVICE_INTERFACE_NUMBER(0x06f8, 0xb000, 5), .driver_info = 0x81 }, ++ { } ++}; ++ ++bool usb_endpoint_is_blacklisted(struct usb_device *udev, ++ struct usb_host_interface *intf, ++ struct usb_endpoint_descriptor *epd) ++{ ++ const struct usb_device_id *id; ++ unsigned int address; ++ ++ for (id = usb_endpoint_blacklist; id->match_flags; ++id) { ++ if (!usb_match_device(udev, id)) ++ continue; ++ ++ if (!usb_match_one_id_intf(udev, intf, id)) ++ continue; ++ ++ address = id->driver_info; ++ if (address == epd->bEndpointAddress) ++ return true; ++ } ++ ++ return false; ++} ++ + static bool usb_match_any_interface(struct usb_device *udev, + const struct usb_device_id *id) + { +diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c +index 1a232b4ffe71..f6c6d99eb388 100644 +--- a/drivers/usb/core/sysfs.c ++++ b/drivers/usb/core/sysfs.c +@@ -848,7 +848,11 @@ read_descriptors(struct file *filp, struct kobject *kobj, + size_t srclen, n; + int cfgno; + void *src; ++ int retval; + ++ retval = usb_lock_device_interruptible(udev); ++ if (retval < 0) ++ return -EINTR; + /* The binary attribute begins with the device descriptor. + * Following that are the raw descriptor entries for all the + * configurations (config plus subsidiary descriptors). +@@ -873,6 +877,7 @@ read_descriptors(struct file *filp, struct kobject *kobj, + off -= srclen; + } + } ++ usb_unlock_device(udev); + return count - nleft; + } + +diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c +index 7b81bcfa19cb..6785ebc07804 100644 +--- a/drivers/usb/core/urb.c ++++ b/drivers/usb/core/urb.c +@@ -183,6 +183,31 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb); + + /*-------------------------------------------------------------------*/ + ++static const int pipetypes[4] = { ++ PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT ++}; ++ ++/** ++ * usb_urb_ep_type_check - sanity check of endpoint in the given urb ++ * @urb: urb to be checked ++ * ++ * This performs a light-weight sanity check for the endpoint in the ++ * given urb. It returns 0 if the urb contains a valid endpoint, otherwise ++ * a negative error code. 
++ */ ++int usb_urb_ep_type_check(const struct urb *urb) ++{ ++ const struct usb_host_endpoint *ep; ++ ++ ep = usb_pipe_endpoint(urb->dev, urb->pipe); ++ if (!ep) ++ return -EINVAL; ++ if (usb_pipetype(urb->pipe) != pipetypes[usb_endpoint_type(&ep->desc)]) ++ return -EINVAL; ++ return 0; ++} ++EXPORT_SYMBOL_GPL(usb_urb_ep_type_check); ++ + /** + * usb_submit_urb - issue an asynchronous transfer request for an endpoint + * @urb: pointer to the urb describing the request +@@ -322,9 +347,6 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb); + */ + int usb_submit_urb(struct urb *urb, gfp_t mem_flags) + { +- static int pipetypes[4] = { +- PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT +- }; + int xfertype, max; + struct usb_device *dev; + struct usb_host_endpoint *ep; +@@ -443,7 +465,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) + */ + + /* Check that the pipe's type matches the endpoint's type */ +- if (usb_pipetype(urb->pipe) != pipetypes[xfertype]) ++ if (usb_urb_ep_type_check(urb)) + dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n", + usb_pipetype(urb->pipe), pipetypes[xfertype]); + +@@ -743,11 +765,12 @@ void usb_block_urb(struct urb *urb) + EXPORT_SYMBOL_GPL(usb_block_urb); + + /** +- * usb_kill_anchored_urbs - cancel transfer requests en masse ++ * usb_kill_anchored_urbs - kill all URBs associated with an anchor + * @anchor: anchor the requests are bound to + * +- * this allows all outstanding URBs to be killed starting +- * from the back of the queue ++ * This kills all outstanding URBs starting from the back of the queue, ++ * with guarantee that no completer callbacks will take place from the ++ * anchor after this function returns. + * + * This routine should not be called by a driver after its disconnect + * method has returned. 
+@@ -755,20 +778,26 @@ EXPORT_SYMBOL_GPL(usb_block_urb); + void usb_kill_anchored_urbs(struct usb_anchor *anchor) + { + struct urb *victim; ++ int surely_empty; + +- spin_lock_irq(&anchor->lock); +- while (!list_empty(&anchor->urb_list)) { +- victim = list_entry(anchor->urb_list.prev, struct urb, +- anchor_list); +- /* we must make sure the URB isn't freed before we kill it*/ +- usb_get_urb(victim); +- spin_unlock_irq(&anchor->lock); +- /* this will unanchor the URB */ +- usb_kill_urb(victim); +- usb_put_urb(victim); ++ do { + spin_lock_irq(&anchor->lock); +- } +- spin_unlock_irq(&anchor->lock); ++ while (!list_empty(&anchor->urb_list)) { ++ victim = list_entry(anchor->urb_list.prev, ++ struct urb, anchor_list); ++ /* make sure the URB isn't freed before we kill it */ ++ usb_get_urb(victim); ++ spin_unlock_irq(&anchor->lock); ++ /* this will unanchor the URB */ ++ usb_kill_urb(victim); ++ usb_put_urb(victim); ++ spin_lock_irq(&anchor->lock); ++ } ++ surely_empty = usb_anchor_check_wakeup(anchor); ++ ++ spin_unlock_irq(&anchor->lock); ++ cpu_relax(); ++ } while (!surely_empty); + } + EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs); + +@@ -787,21 +816,27 @@ EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs); + void usb_poison_anchored_urbs(struct usb_anchor *anchor) + { + struct urb *victim; ++ int surely_empty; + +- spin_lock_irq(&anchor->lock); +- anchor->poisoned = 1; +- while (!list_empty(&anchor->urb_list)) { +- victim = list_entry(anchor->urb_list.prev, struct urb, +- anchor_list); +- /* we must make sure the URB isn't freed before we kill it*/ +- usb_get_urb(victim); +- spin_unlock_irq(&anchor->lock); +- /* this will unanchor the URB */ +- usb_poison_urb(victim); +- usb_put_urb(victim); ++ do { + spin_lock_irq(&anchor->lock); +- } +- spin_unlock_irq(&anchor->lock); ++ anchor->poisoned = 1; ++ while (!list_empty(&anchor->urb_list)) { ++ victim = list_entry(anchor->urb_list.prev, ++ struct urb, anchor_list); ++ /* make sure the URB isn't freed before we kill it */ ++ usb_get_urb(victim); ++ spin_unlock_irq(&anchor->lock); ++ /* this will unanchor the URB */ ++ usb_poison_urb(victim); ++ usb_put_urb(victim); ++ spin_lock_irq(&anchor->lock); ++ } ++ surely_empty = usb_anchor_check_wakeup(anchor); ++ ++ spin_unlock_irq(&anchor->lock); ++ cpu_relax(); ++ } while (!surely_empty); + } + EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs); + +@@ -941,14 +976,20 @@ void usb_scuttle_anchored_urbs(struct usb_anchor *anchor) + { + struct urb *victim; + unsigned long flags; ++ int surely_empty; ++ ++ do { ++ spin_lock_irqsave(&anchor->lock, flags); ++ while (!list_empty(&anchor->urb_list)) { ++ victim = list_entry(anchor->urb_list.prev, ++ struct urb, anchor_list); ++ __usb_unanchor_urb(victim, anchor); ++ } ++ surely_empty = usb_anchor_check_wakeup(anchor); + +- spin_lock_irqsave(&anchor->lock, flags); +- while (!list_empty(&anchor->urb_list)) { +- victim = list_entry(anchor->urb_list.prev, struct urb, +- anchor_list); +- __usb_unanchor_urb(victim, anchor); +- } +- spin_unlock_irqrestore(&anchor->lock, flags); ++ spin_unlock_irqrestore(&anchor->lock, flags); ++ cpu_relax(); ++ } while (!surely_empty); + } + + EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs); +diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c +index 793c849e70c4..8f9bdb638b67 100644 +--- a/drivers/usb/core/usb.c ++++ b/drivers/usb/core/usb.c +@@ -72,6 +72,89 @@ MODULE_PARM_DESC(autosuspend, "default autosuspend delay"); + #endif + + ++/** ++ * usb_find_common_endpoints() -- look up common endpoint descriptors ++ * @alt: alternate setting to search 
++ * @bulk_in: pointer to descriptor pointer, or NULL ++ * @bulk_out: pointer to descriptor pointer, or NULL ++ * @int_in: pointer to descriptor pointer, or NULL ++ * @int_out: pointer to descriptor pointer, or NULL ++ * ++ * Search the alternate setting's endpoint descriptors for the first bulk-in, ++ * bulk-out, interrupt-in and interrupt-out endpoints and return them in the ++ * provided pointers (unless they are NULL). ++ * ++ * If a requested endpoint is not found, the corresponding pointer is set to ++ * NULL. ++ * ++ * Return: Zero if all requested descriptors were found, or -ENXIO otherwise. ++ */ ++int usb_find_common_endpoints(struct usb_host_interface *alt, ++ struct usb_endpoint_descriptor **bulk_in, ++ struct usb_endpoint_descriptor **bulk_out, ++ struct usb_endpoint_descriptor **int_in, ++ struct usb_endpoint_descriptor **int_out) ++{ ++ struct usb_endpoint_descriptor *epd; ++ int i; ++ ++ if (bulk_in) ++ *bulk_in = NULL; ++ if (bulk_out) ++ *bulk_out = NULL; ++ if (int_in) ++ *int_in = NULL; ++ if (int_out) ++ *int_out = NULL; ++ ++ for (i = 0; i < alt->desc.bNumEndpoints; ++i) { ++ epd = &alt->endpoint[i].desc; ++ ++ switch (usb_endpoint_type(epd)) { ++ case USB_ENDPOINT_XFER_BULK: ++ if (usb_endpoint_dir_in(epd)) { ++ if (bulk_in && !*bulk_in) { ++ *bulk_in = epd; ++ break; ++ } ++ } else { ++ if (bulk_out && !*bulk_out) { ++ *bulk_out = epd; ++ break; ++ } ++ } ++ ++ continue; ++ case USB_ENDPOINT_XFER_INT: ++ if (usb_endpoint_dir_in(epd)) { ++ if (int_in && !*int_in) { ++ *int_in = epd; ++ break; ++ } ++ } else { ++ if (int_out && !*int_out) { ++ *int_out = epd; ++ break; ++ } ++ } ++ ++ continue; ++ default: ++ continue; ++ } ++ ++ if ((!bulk_in || *bulk_in) && ++ (!bulk_out || *bulk_out) && ++ (!int_in || *int_in) && ++ (!int_out || *int_out)) { ++ return 0; ++ } ++ } ++ ++ return -ENXIO; ++} ++EXPORT_SYMBOL_GPL(usb_find_common_endpoints); ++ + /** + * usb_find_alt_setting() - Given a configuration, find the alternate setting + * for the given interface. +diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h +index dde0e997799e..6447ea618a08 100644 +--- a/drivers/usb/core/usb.h ++++ b/drivers/usb/core/usb.h +@@ -31,6 +31,9 @@ extern void usb_deauthorize_interface(struct usb_interface *); + extern void usb_authorize_interface(struct usb_interface *); + extern void usb_detect_quirks(struct usb_device *udev); + extern void usb_detect_interface_quirks(struct usb_device *udev); ++extern bool usb_endpoint_is_blacklisted(struct usb_device *udev, ++ struct usb_host_interface *intf, ++ struct usb_endpoint_descriptor *epd); + extern int usb_remove_device(struct usb_device *udev); + + extern int usb_get_device_descriptor(struct usb_device *dev, +diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h +index 0f45a2f165e5..1bb8b1e22ed3 100644 +--- a/drivers/usb/dwc2/core.h ++++ b/drivers/usb/dwc2/core.h +@@ -164,6 +164,7 @@ struct dwc2_hsotg_req; + * @lock: State lock to protect contents of endpoint. + * @dir_in: Set to true if this endpoint is of the IN direction, which + * means that it is sending data to the Host. ++ * @map_dir: Set to the value of dir_in when the DMA buffer is mapped. + * @index: The index for the endpoint registers. + * @mc: Multi Count - number of transactions per microframe + * @interval - Interval for periodic endpoints, in frames or microframes. 
+@@ -207,6 +208,7 @@ struct dwc2_hsotg_ep { + unsigned short fifo_index; + + unsigned char dir_in; ++ unsigned char map_dir; + unsigned char index; + unsigned char mc; + u16 interval; +diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c +index d85c5c9f96c1..f046703f63f2 100644 +--- a/drivers/usb/dwc2/core_intr.c ++++ b/drivers/usb/dwc2/core_intr.c +@@ -365,10 +365,13 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg) + if (ret && (ret != -ENOTSUPP)) + dev_err(hsotg->dev, "exit hibernation failed\n"); + ++ /* Change to L0 state */ ++ hsotg->lx_state = DWC2_L0; + call_gadget(hsotg, resume); ++ } else { ++ /* Change to L0 state */ ++ hsotg->lx_state = DWC2_L0; + } +- /* Change to L0 state */ +- hsotg->lx_state = DWC2_L0; + } else { + if (hsotg->core_params->hibernation) + return; +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c +index 3ae27b6ed07c..e7ad3ae4ea6b 100644 +--- a/drivers/usb/dwc2/gadget.c ++++ b/drivers/usb/dwc2/gadget.c +@@ -308,7 +308,7 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg, + if (hs_req->req.length == 0) + return; + +- usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in); ++ usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir); + } + + /** +@@ -745,6 +745,7 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg, + if (hs_req->req.length == 0) + return 0; + ++ hs_ep->map_dir = hs_ep->dir_in; + ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in); + if (ret) + goto dma_error; +@@ -942,7 +943,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep, + static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, + u32 windex) + { +- struct dwc2_hsotg_ep *ep; + int dir = (windex & USB_DIR_IN) ? 1 : 0; + int idx = windex & 0x7F; + +@@ -952,12 +952,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, + if (idx > hsotg->num_of_eps) + return NULL; + +- ep = index_to_ep(hsotg, idx, dir); +- +- if (idx && ep->dir_in != dir) +- return NULL; +- +- return ep; ++ return index_to_ep(hsotg, idx, dir); + } + + /** +@@ -3947,12 +3942,6 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq) + epnum, 0); + } + +- ret = usb_add_gadget_udc(dev, &hsotg->gadget); +- if (ret) { +- dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, +- hsotg->ctrl_req); +- return ret; +- } + dwc2_hsotg_dump(hsotg); + + return 0; +diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c +index 120c8f716acf..f2c7cd22b73f 100644 +--- a/drivers/usb/dwc2/hcd.c ++++ b/drivers/usb/dwc2/hcd.c +@@ -1425,19 +1425,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg, + if (num_packets > max_hc_pkt_count) { + num_packets = max_hc_pkt_count; + chan->xfer_len = num_packets * chan->max_packet; ++ } else if (chan->ep_is_in) { ++ /* ++ * Always program an integral # of max packets ++ * for IN transfers. ++ * Note: This assumes that the input buffer is ++ * aligned and sized accordingly. 
++ */ ++ chan->xfer_len = num_packets * chan->max_packet; + } + } else { + /* Need 1 packet for transfer length of 0 */ + num_packets = 1; + } + +- if (chan->ep_is_in) +- /* +- * Always program an integral # of max packets for IN +- * transfers +- */ +- chan->xfer_len = num_packets * chan->max_packet; +- + if (chan->ep_type == USB_ENDPOINT_XFER_INT || + chan->ep_type == USB_ENDPOINT_XFER_ISOC) + /* +diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c +index 8066fa9ac97b..e39210bd9710 100644 +--- a/drivers/usb/dwc2/hcd_intr.c ++++ b/drivers/usb/dwc2/hcd_intr.c +@@ -488,7 +488,7 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg, + &short_read); + + if (urb->actual_length + xfer_length > urb->length) { +- dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__); ++ dev_dbg(hsotg->dev, "%s(): trimming xfer length\n", __func__); + xfer_length = urb->length - urb->actual_length; + } + +@@ -1921,6 +1921,18 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg, + qtd->error_count++; + dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, + qtd, DWC2_HC_XFER_XACT_ERR); ++ /* ++ * We can get here after a completed transaction ++ * (urb->actual_length >= urb->length) which was not reported ++ * as completed. If that is the case, and we do not abort ++ * the transfer, a transfer of size 0 will be enqueued ++ * subsequently. If urb->actual_length is not DMA-aligned, ++ * the buffer will then point to an unaligned address, and ++ * the resulting behavior is undefined. Bail out in that ++ * situation. ++ */ ++ if (qtd->urb->actual_length >= qtd->urb->length) ++ qtd->error_count = 3; + dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd); + dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR); + } +diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c +index 8e1728b39a49..f985315ebd3b 100644 +--- a/drivers/usb/dwc2/platform.c ++++ b/drivers/usb/dwc2/platform.c +@@ -507,7 +507,8 @@ static void dwc2_driver_shutdown(struct platform_device *dev) + { + struct dwc2_hsotg *hsotg = platform_get_drvdata(dev); + +- disable_irq(hsotg->irq); ++ dwc2_disable_global_interrupts(hsotg); ++ synchronize_irq(hsotg->irq); + } + + static const struct of_device_id dwc2_of_match_table[] = { +@@ -661,10 +662,23 @@ static int dwc2_driver_probe(struct platform_device *dev) + if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) + dwc2_lowlevel_hw_disable(hsotg); + ++#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ ++ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) ++ /* Postponed adding a new gadget to the udc class driver list */ ++ if (hsotg->gadget_enabled) { ++ retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget); ++ if (retval) { ++ hsotg->gadget.udc = NULL; ++ dwc2_hsotg_remove(hsotg); ++ goto error; ++ } ++ } ++#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */ + return 0; + + error: +- dwc2_lowlevel_hw_disable(hsotg); ++ if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) ++ dwc2_lowlevel_hw_disable(hsotg); + return retval; + } + +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c +index b35f83e59288..c54d7d187f6f 100755 +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -1412,8 +1412,8 @@ static int dwc3_remove(struct platform_device *pdev) + */ + res->start -= DWC3_GLOBALS_REGS_START; + +- dwc3_debugfs_exit(dwc); + dwc3_core_exit_mode(dwc); ++ dwc3_debugfs_exit(dwc); + + dwc3_core_exit(dwc); + dwc3_ulpi_exit(dwc); +diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h +index e3d40acd6ba7..77618496ecc2 100644 +--- 
a/drivers/usb/dwc3/core.h ++++ b/drivers/usb/dwc3/core.h +@@ -606,6 +606,7 @@ struct dwc3_ep_events { + * @desc: usb_endpoint_descriptor pointer + * @dwc: pointer to DWC controller + * @saved_state: ep state saved during hibernation ++ * @failedpkt_counter: counter for missed packets sent + * @flags: endpoint flags (wedged, stalled, ...) + * @number: endpoint number (1 - 15) + * @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK +@@ -638,6 +639,7 @@ struct dwc3_ep { + struct dwc3 *dwc; + + u32 saved_state; ++ u32 failedpkt_counter; + unsigned flags; + #define DWC3_EP_ENABLED (1 << 0) + #define DWC3_EP_STALL (1 << 1) +@@ -1446,5 +1448,5 @@ extern void dwc3_set_notifier( + unsigned int value)); + extern int dwc3_notify_event(struct dwc3 *dwc3, unsigned int event, + unsigned int value); +- ++void orderly_poweroff(bool force); + #endif /* __DRIVERS_USB_DWC3_CORE_H */ +diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c +index 4a28f48dd240..3570b0bd9b25 100644 +--- a/drivers/usb/dwc3/dwc3-msm.c ++++ b/drivers/usb/dwc3/dwc3-msm.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -323,6 +323,7 @@ struct dwc3_msm { + u32 num_gsi_event_buffers; + struct dwc3_event_buffer **gsi_ev_buff; + int pm_qos_latency; ++ bool perf_mode; + struct pm_qos_request pm_qos_req_dma; + struct delayed_work perf_vote_work; + struct delayed_work sdp_check; +@@ -359,6 +360,8 @@ static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event, + unsigned int value); + static int dwc3_restart_usb_host_mode(struct notifier_block *nb, + unsigned long event, void *ptr); ++static bool shutdown_when_disconnected; ++static bool shutdown_from_sysfs; + + /** + * +@@ -1407,6 +1410,7 @@ static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request) + reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); + reg |= DWC3_DALEPENA_EP(dep->number); + dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); ++ dep->trb_dequeue = 0; + } + + } +@@ -2034,8 +2038,7 @@ static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event, + switch (event) { + case DWC3_CONTROLLER_ERROR_EVENT: + dev_info(mdwc->dev, +- "DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n", +- dwc->irq_cnt); ++ "DWC3_CONTROLLER_ERROR_EVENT received\n"); + + dwc3_gadget_disable_irq(dwc); + +@@ -2982,10 +2985,16 @@ static void dwc3_resume_work(struct work_struct *w) + mdwc->typec_orientation = val.intval ? + ORIENTATION_CC2 : ORIENTATION_CC1; + #ifdef CONFIG_TUSB1064_XR_MISC +- tusb1064_usb_event(val.intval ? true : false); ++ /* ++ * TUSB1064 handles the muxing, hence SSPHY only ++ * needs to be configured to CC1 ++ */ ++ if (tusb1064_usb_event(val.intval ? 
true : false)) ++ mdwc->typec_orientation = ORIENTATION_CC1; + #endif + #ifdef CONFIG_VXR200_XR_MISC +- vxr7200_usb_event(true); ++ if (!shutdown_from_sysfs) ++ vxr7200_usb_event(true); + #endif + + } +@@ -3103,7 +3112,7 @@ static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data) + + if (mdwc->drd_state == DRD_STATE_PERIPHERAL_SUSPEND) { + dev_info(mdwc->dev, "USB Resume start\n"); +- place_marker("M - USB device resume started"); ++ update_marker("M - USB device resume started"); + } + + /* +@@ -3535,12 +3544,14 @@ static ssize_t mode_store(struct device *dev, struct device_attribute *attr, + if (sysfs_streq(buf, "peripheral")) { + mdwc->vbus_active = true; + mdwc->id_state = DWC3_ID_FLOAT; ++ shutdown_from_sysfs = false; + } else if (sysfs_streq(buf, "host")) { + mdwc->vbus_active = false; + mdwc->id_state = DWC3_ID_GROUND; + } else { + mdwc->vbus_active = false; + mdwc->id_state = DWC3_ID_FLOAT; ++ shutdown_from_sysfs = true; + } + + dwc3_ext_event_notify(mdwc); +@@ -3903,6 +3914,9 @@ static int dwc3_msm_probe(struct platform_device *pdev) + } + } + ++ shutdown_when_disconnected = of_property_read_bool(node, ++ "qcom,shutdown-enable"); ++ + /* Assumes dwc3 is the first DT child of dwc3-msm */ + dwc3_node = of_get_next_available_child(node, NULL); + if (!dwc3_node) { +@@ -4117,6 +4131,7 @@ static int dwc3_msm_remove(struct platform_device *pdev) + } + + cancel_delayed_work_sync(&mdwc->perf_vote_work); ++ msm_dwc3_perf_vote_update(mdwc, false); + cancel_delayed_work_sync(&mdwc->sm_work); + + if (mdwc->hs_phy) +@@ -4241,10 +4256,12 @@ static int dwc3_msm_host_notifier(struct notifier_block *nb, + + static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc, bool perf_mode) + { +- static bool curr_perf_mode; + int latency = mdwc->pm_qos_latency; ++ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3); + +- if ((curr_perf_mode == perf_mode) || !latency) ++ dwc->irq_cnt = 0; ++ ++ if ((mdwc->perf_mode == perf_mode) || !latency) + return; + + if (perf_mode) +@@ -4253,7 +4270,7 @@ static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc, bool perf_mode) + pm_qos_update_request(&mdwc->pm_qos_req_dma, + PM_QOS_DEFAULT_VALUE); + +- curr_perf_mode = perf_mode; ++ mdwc->perf_mode = perf_mode; + pr_debug("%s: latency updated to: %d\n", __func__, + perf_mode ? 
latency : PM_QOS_DEFAULT_VALUE); + } +@@ -4263,13 +4280,14 @@ static void msm_dwc3_perf_vote_work(struct work_struct *w) + struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, + perf_vote_work.work); + struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3); +- static unsigned long last_irq_cnt; + bool in_perf_mode = false; + +- if (dwc->irq_cnt - last_irq_cnt >= PM_QOS_THRESHOLD) ++ if (dwc->irq_cnt >= PM_QOS_THRESHOLD) + in_perf_mode = true; + +- last_irq_cnt = dwc->irq_cnt; ++ pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%lu\n", ++ __func__, in_perf_mode, dwc->irq_cnt); ++ + msm_dwc3_perf_vote_update(mdwc, in_perf_mode); + schedule_delayed_work(&mdwc->perf_vote_work, + msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC)); +@@ -4730,9 +4748,14 @@ static void dwc3_otg_sm_work(struct work_struct *w) + dwc3_msm_gadget_vbus_draw(mdwc, 0); + dev_dbg(mdwc->dev, "Cable disconnected\n"); + #ifdef CONFIG_VXR200_XR_MISC +- vxr7200_usb_event(false); ++ if (!shutdown_from_sysfs) ++ vxr7200_usb_event(false); + #endif +- ++ if (shutdown_when_disconnected && ++ !shutdown_from_sysfs) { ++ pr_err("ARGlass: USB discontd, powering off\n"); ++ orderly_poweroff(true); ++ } + } + break; + +@@ -4918,7 +4941,7 @@ static int dwc3_msm_pm_resume(struct device *dev) + if (atomic_read(&dwc->in_lpm) && + mdwc->drd_state == DRD_STATE_PERIPHERAL_SUSPEND) { + dev_info(mdwc->dev, "USB Resume start\n"); +- place_marker("M - USB device resume started"); ++ update_marker("M - USB device resume started"); + } + + /* kick in otg state machine */ +diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c +index 82e159c66b8c..de747b8c26ec 100644 +--- a/drivers/usb/dwc3/ep0.c ++++ b/drivers/usb/dwc3/ep0.c +@@ -370,6 +370,9 @@ static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le) + epnum |= 1; + + dep = dwc->eps[epnum]; ++ if (dep == NULL) ++ return NULL; ++ + if (dep->flags & DWC3_EP_ENABLED) + return dep; + +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index 8a02f84495cc..4edeb4be883f 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -36,6 +36,10 @@ + #include "core.h" + #include "gadget.h" + #include "io.h" ++#ifdef CONFIG_VXR200_XR_MISC ++#include "../../misc/vxr7200.h" ++#endif ++ + + static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc, bool remote_wakeup); + static int dwc3_gadget_wakeup_int(struct dwc3 *dwc); +@@ -229,6 +233,10 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc, struct dwc3_ep *dep) + || usb_endpoint_xfer_isoc(dep->endpoint.desc)) + mult = 3; + ++ if ((dep->endpoint.maxburst > 6) && ++ usb_endpoint_xfer_isoc(dep->endpoint.desc)) ++ mult = 6; ++ + tmp = ((max_packet + mdwidth) * mult) + mdwidth; + fifo_size = DIV_ROUND_UP(tmp, mdwidth); + dep->fifo_depth = fifo_size; +@@ -288,6 +296,17 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, + + trace_dwc3_gadget_giveback(req); + ++ if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { ++ if (list_empty(&dep->started_list)) { ++ dep->flags |= DWC3_EP_PENDING_REQUEST; ++ dbg_event(dep->number, "STARTEDLISTEMPTY", 0); ++ } ++ ++ dbg_log_string("%s(%d): Give back req %pK len(%d) actual(%d))", ++ dep->name, dep->number, &req->request, ++ req->request.length, req->request.actual); ++ } ++ + spin_unlock(&dwc->lock); + usb_gadget_giveback_request(&dep->endpoint, &req->request); + spin_lock(&dwc->lock); +@@ -630,8 +649,23 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, + params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); + + 
if (desc->bInterval) { +- params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); +- dep->interval = 1 << (desc->bInterval - 1); ++ u8 bInterval_m1; ++ ++ /* ++ * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it ++ * must be set to 0 when the controller operates in full-speed. ++ */ ++ bInterval_m1 = min_t(u8, desc->bInterval - 1, 13); ++ if (dwc->gadget.speed == USB_SPEED_FULL) ++ bInterval_m1 = 0; ++ ++ if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT && ++ dwc->gadget.speed == USB_SPEED_FULL) ++ dep->interval = desc->bInterval; ++ else ++ dep->interval = 1 << (desc->bInterval - 1); ++ ++ params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1); + } + + return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, ¶ms); +@@ -886,6 +920,8 @@ static int dwc3_gadget_ep_disable(struct usb_ep *ep) + spin_lock_irqsave(&dwc->lock, flags); + ret = __dwc3_gadget_ep_disable(dep); + dbg_event(dep->number, "DISABLE", ret); ++ dbg_event(dep->number, "DISABLEFAILPKT", dep->failedpkt_counter); ++ dep->failedpkt_counter = 0; + spin_unlock_irqrestore(&dwc->lock, flags); + + return ret; +@@ -1072,19 +1108,19 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index) + + static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep) + { +- struct dwc3_trb *tmp; + u8 trbs_left; + + /* +- * If enqueue & dequeue are equal than it is either full or empty. +- * +- * One way to know for sure is if the TRB right before us has HWO bit +- * set or not. If it has, then we're definitely full and can't fit any +- * more transfers in our ring. ++ * If the enqueue & dequeue are equal then the TRB ring is either full ++ * or empty. It's considered full when there are DWC3_TRB_NUM-1 of TRBs ++ * pending to be processed by the driver. + */ + if (dep->trb_enqueue == dep->trb_dequeue) { +- tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue); +- if (!tmp || tmp->ctrl & DWC3_TRB_CTRL_HWO) ++ /* ++ * If there is any request remained in the started_list at ++ * this point, that means there is no TRB available. ++ */ ++ if (!list_empty(&dep->started_list)) + return 0; + + return DWC3_TRB_NUM - 1; +@@ -1130,12 +1166,18 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, + { + unsigned int length; + dma_addr_t dma; ++ struct dwc3 *dwc = dep->dwc; + + dma = req->request.dma; + length = req->request.length; + + dwc3_prepare_one_trb(dep, req, dma, length, + false, 0); ++ ++ if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) ++ dbg_log_string("%s(%d): req queue %pK len(%d))", ++ dep->name, dep->number, ++ &req->request, req->request.length); + } + + /* +@@ -2611,6 +2653,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, + */ + dep->flags |= DWC3_EP_MISSED_ISOC; + dbg_event(dep->number, "MISSED ISOC", status); ++ dep->failedpkt_counter++; + } else { + dev_err(dwc->dev, "incomplete IN transfer %s\n", + dep->name); +@@ -3104,6 +3147,15 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) + + dwc->connected = true; + ++ /* ++ * Ideally, dwc3_reset_gadget() would trigger the function ++ * drivers to stop any active transfers through ep disable. ++ * However, for functions which defer ep disable, such as mass ++ * storage, we will need to rely on the call to stop active ++ * transfers here, and avoid allowing of request queuing. 
++ */ ++ dwc->connected = false; ++ + /* + * WORKAROUND: DWC3 revisions <1.88a have an issue which + * would cause a missing Disconnect Event if there's a +@@ -3546,6 +3598,9 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc, + case DWC3_DEVICE_EVENT_DISCONNECT: + dwc3_gadget_disconnect_interrupt(dwc); + dwc->dbg_gadget_events.disconnect++; ++#ifdef CONFIG_VXR200_XR_MISC ++ vxr7200_usb_event(false); ++#endif + break; + case DWC3_DEVICE_EVENT_RESET: + dwc3_gadget_reset_interrupt(dwc); +@@ -3554,6 +3609,9 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc, + case DWC3_DEVICE_EVENT_CONNECT_DONE: + dwc3_gadget_conndone_interrupt(dwc); + dwc->dbg_gadget_events.connect++; ++#ifdef CONFIG_VXR200_XR_MISC ++ vxr7200_usb_event(true); ++#endif + break; + case DWC3_DEVICE_EVENT_WAKEUP: + dwc3_gadget_wakeup_interrupt(dwc, false); +diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c +index bd86f84f3790..3862edf59f7d 100644 +--- a/drivers/usb/dwc3/ulpi.c ++++ b/drivers/usb/dwc3/ulpi.c +@@ -10,6 +10,8 @@ + * published by the Free Software Foundation. + */ + ++#include ++#include + #include + + #include "core.h" +@@ -20,12 +22,22 @@ + DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \ + DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a)) + +-static int dwc3_ulpi_busyloop(struct dwc3 *dwc) ++#define DWC3_ULPI_BASE_DELAY DIV_ROUND_UP(NSEC_PER_SEC, 60000000L) ++ ++static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read) + { +- unsigned count = 1000; ++ unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY; ++ unsigned int count = 1000; + u32 reg; + ++ if (addr >= ULPI_EXT_VENDOR_SPECIFIC) ++ ns += DWC3_ULPI_BASE_DELAY; ++ ++ if (read) ++ ns += DWC3_ULPI_BASE_DELAY; ++ + while (count--) { ++ ndelay(ns); + reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0)); + if (!(reg & DWC3_GUSB2PHYACC_BUSY)) + return 0; +@@ -44,7 +56,7 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr) + reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr); + dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg); + +- ret = dwc3_ulpi_busyloop(dwc); ++ ret = dwc3_ulpi_busyloop(dwc, addr, true); + if (ret) + return ret; + +@@ -62,7 +74,7 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val) + reg |= DWC3_GUSB2PHYACC_WRITE | val; + dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg); + +- return dwc3_ulpi_busyloop(dwc); ++ return dwc3_ulpi_busyloop(dwc, addr, false); + } + + static const struct ulpi_ops dwc3_ulpi_ops = { +diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig +index d29c5837c643..dd34f47d23a2 100644 +--- a/drivers/usb/gadget/Kconfig ++++ b/drivers/usb/gadget/Kconfig +@@ -297,6 +297,7 @@ config USB_CONFIGFS_NCM + depends on NET + select USB_U_ETHER + select USB_F_NCM ++ select CRC32 + help + NCM is an advanced protocol for Ethernet encapsulation, allows + grouping of several ethernet frames into one USB transfer and +@@ -362,6 +363,7 @@ config USB_CONFIGFS_EEM + depends on NET + select USB_U_ETHER + select USB_F_EEM ++ select CRC32 + help + CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM + and therefore can be supported by more hardware. 
Technically ECM and +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c +index f7367ba4b2aa..ab5283eb6fbf 100755 +--- a/drivers/usb/gadget/composite.c ++++ b/drivers/usb/gadget/composite.c +@@ -108,40 +108,43 @@ function_descriptors(struct usb_function *f, + } + + /** +- * next_ep_desc() - advance to the next EP descriptor ++ * next_desc() - advance to the next desc_type descriptor + * @t: currect pointer within descriptor array ++ * @desc_type: descriptor type + * +- * Return: next EP descriptor or NULL ++ * Return: next desc_type descriptor or NULL + * +- * Iterate over @t until either EP descriptor found or ++ * Iterate over @t until either desc_type descriptor found or + * NULL (that indicates end of list) encountered + */ + static struct usb_descriptor_header** +-next_ep_desc(struct usb_descriptor_header **t) ++next_desc(struct usb_descriptor_header **t, u8 desc_type) + { + for (; *t; t++) { +- if ((*t)->bDescriptorType == USB_DT_ENDPOINT) ++ if ((*t)->bDescriptorType == desc_type) + return t; + } + return NULL; + } + + /* +- * for_each_ep_desc()- iterate over endpoint descriptors in the +- * descriptors list +- * @start: pointer within descriptor array. +- * @ep_desc: endpoint descriptor to use as the loop cursor ++ * for_each_desc() - iterate over desc_type descriptors in the ++ * descriptors list ++ * @start: pointer within descriptor array. ++ * @iter_desc: desc_type descriptor to use as the loop cursor ++ * @desc_type: wanted descriptr type + */ +-#define for_each_ep_desc(start, ep_desc) \ +- for (ep_desc = next_ep_desc(start); \ +- ep_desc; ep_desc = next_ep_desc(ep_desc+1)) ++#define for_each_desc(start, iter_desc, desc_type) \ ++ for (iter_desc = next_desc(start, desc_type); \ ++ iter_desc; iter_desc = next_desc(iter_desc + 1, desc_type)) + + /** +- * config_ep_by_speed() - configures the given endpoint ++ * config_ep_by_speed_and_alt() - configures the given endpoint + * according to gadget speed. 
+ * @g: pointer to the gadget + * @f: usb function + * @_ep: the endpoint to configure ++ * @alt: alternate setting number + * + * Return: error code, 0 on success + * +@@ -154,11 +157,13 @@ next_ep_desc(struct usb_descriptor_header **t) + * Note: the supplied function should hold all the descriptors + * for supported speeds + */ +-int config_ep_by_speed(struct usb_gadget *g, +- struct usb_function *f, +- struct usb_ep *_ep) ++int config_ep_by_speed_and_alt(struct usb_gadget *g, ++ struct usb_function *f, ++ struct usb_ep *_ep, ++ u8 alt) + { + struct usb_endpoint_descriptor *chosen_desc = NULL; ++ struct usb_interface_descriptor *int_desc = NULL; + struct usb_descriptor_header **speed_desc = NULL; + + struct usb_ss_ep_comp_descriptor *comp_desc = NULL; +@@ -194,8 +199,21 @@ int config_ep_by_speed(struct usb_gadget *g, + default: + speed_desc = f->fs_descriptors; + } ++ ++ /* find correct alternate setting descriptor */ ++ for_each_desc(speed_desc, d_spd, USB_DT_INTERFACE) { ++ int_desc = (struct usb_interface_descriptor *)*d_spd; ++ ++ if (int_desc->bAlternateSetting == alt) { ++ speed_desc = d_spd; ++ goto intf_found; ++ } ++ } ++ return -EIO; ++ ++intf_found: + /* find descriptors */ +- for_each_ep_desc(speed_desc, d_spd) { ++ for_each_desc(speed_desc, d_spd, USB_DT_ENDPOINT) { + chosen_desc = (struct usb_endpoint_descriptor *)*d_spd; + if (chosen_desc->bEndpointAddress == _ep->address) + goto ep_found; +@@ -248,6 +266,32 @@ int config_ep_by_speed(struct usb_gadget *g, + } + return 0; + } ++EXPORT_SYMBOL_GPL(config_ep_by_speed_and_alt); ++ ++/** ++ * config_ep_by_speed() - configures the given endpoint ++ * according to gadget speed. ++ * @g: pointer to the gadget ++ * @f: usb function ++ * @_ep: the endpoint to configure ++ * ++ * Return: error code, 0 on success ++ * ++ * This function chooses the right descriptors for a given ++ * endpoint according to gadget speed and saves it in the ++ * endpoint desc field. If the endpoint already has a descriptor ++ * assigned to it - overwrites it with currently corresponding ++ * descriptor. The endpoint maxpacket field is updated according ++ * to the chosen descriptor. ++ * Note: the supplied function should hold all the descriptors ++ * for supported speeds ++ */ ++int config_ep_by_speed(struct usb_gadget *g, ++ struct usb_function *f, ++ struct usb_ep *_ep) ++{ ++ return config_ep_by_speed_and_alt(g, f, _ep, 0); ++} + EXPORT_SYMBOL_GPL(config_ep_by_speed); + + /** +@@ -1179,7 +1223,7 @@ static void collect_langs(struct usb_gadget_strings **sp, __le16 *buf) + while (*sp) { + s = *sp; + language = cpu_to_le16(s->language); +- for (tmp = buf; *tmp && tmp < &buf[126]; tmp++) { ++ for (tmp = buf; *tmp && tmp < &buf[USB_MAX_STRING_LEN]; tmp++) { + if (*tmp == language) + goto repeat; + } +@@ -1254,7 +1298,7 @@ static int get_string(struct usb_composite_dev *cdev, + collect_langs(sp, s->wData); + } + +- for (len = 0; len <= 126 && s->wData[len]; len++) ++ for (len = 0; len <= USB_MAX_STRING_LEN && s->wData[len]; len++) + continue; + if (!len) + return -EINVAL; +@@ -1807,8 +1851,10 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) + value = min(w_length, (u16) value); + break; + case USB_DT_STRING: ++ spin_lock(&cdev->lock); + value = get_string(cdev, req->buf, + w_index, w_value & 0xff); ++ spin_unlock(&cdev->lock); + if (value >= 0) + value = min(w_length, (u16) value); + break; +@@ -2503,7 +2549,7 @@ void composite_resume(struct usb_gadget *gadget) + * suspend/resume callbacks? 
+ */ + INFO(cdev, "USB Resume end\n"); +- place_marker("M - USB device is resumed"); ++ update_marker("M - USB device is resumed"); + if (cdev->driver->resume) + cdev->driver->resume(cdev); + +diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c +index 9b9d31eb6037..2695a4db3829 100644 +--- a/drivers/usb/gadget/config.c ++++ b/drivers/usb/gadget/config.c +@@ -168,6 +168,14 @@ int usb_assign_descriptors(struct usb_function *f, + { + struct usb_gadget *g = f->config->cdev->gadget; + ++ /* super-speed-plus descriptor falls back to super-speed one, ++ * if such a descriptor was provided, thus avoiding a NULL ++ * pointer dereference if a 5gbps capable gadget is used with ++ * a 10gbps capable config (device port + cable + host port) ++ */ ++ if (!ssp) ++ ssp = ss; ++ + if (fs) { + f->fs_descriptors = usb_copy_descriptors(fs); + if (!f->fs_descriptors) +diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c +index f0e867e787d9..9aafc08a0724 100755 +--- a/drivers/usb/gadget/configfs.c ++++ b/drivers/usb/gadget/configfs.c +@@ -146,8 +146,7 @@ struct gadget_config_name { + struct list_head list; + }; + +-#define MAX_USB_STRING_LEN 126 +-#define MAX_USB_STRING_WITH_NULL_LEN (MAX_USB_STRING_LEN+1) ++#define USB_MAX_STRING_WITH_NULL_LEN (USB_MAX_STRING_LEN+1) + + static int usb_string_copy(const char *s, char **s_copy) + { +@@ -155,17 +154,17 @@ static int usb_string_copy(const char *s, char **s_copy) + char *str; + char *copy = *s_copy; + ret = strlen(s); +- if (ret > MAX_USB_STRING_LEN) ++ if (ret > USB_MAX_STRING_LEN) + return -EOVERFLOW; + + if (copy) { + str = copy; + } else { +- str = kmalloc(MAX_USB_STRING_WITH_NULL_LEN, GFP_KERNEL); ++ str = kmalloc(USB_MAX_STRING_WITH_NULL_LEN, GFP_KERNEL); + if (!str) + return -ENOMEM; + } +- strlcpy(str, s, MAX_USB_STRING_WITH_NULL_LEN); ++ strcpy(str, s); + if (str[ret - 1] == '\n') + str[ret - 1] = '\0'; + *s_copy = str; +@@ -277,9 +276,16 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item, + + static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page) + { +- char *udc_name = to_gadget_info(item)->composite.gadget_driver.udc_name; ++ struct gadget_info *gi = to_gadget_info(item); ++ char *udc_name; ++ int ret; ++ ++ mutex_lock(&gi->lock); ++ udc_name = gi->composite.gadget_driver.udc_name; ++ ret = sprintf(page, "%s\n", udc_name ?: ""); ++ mutex_unlock(&gi->lock); + +- return sprintf(page, "%s\n", udc_name ?: ""); ++ return ret; + } + + static int unregister_gadget(struct gadget_info *gi) +@@ -1706,7 +1712,7 @@ static const struct usb_gadget_driver configfs_driver_template = { + .suspend = configfs_composite_suspend, + .resume = configfs_composite_resume, + +- .max_speed = USB_SPEED_SUPER, ++ .max_speed = USB_SPEED_SUPER_PLUS, + .driver = { + .owner = THIS_MODULE, + .name = "configfs-gadget", +@@ -1834,7 +1840,7 @@ static struct config_group *gadgets_make( + gi->composite.unbind = configfs_do_nothing; + gi->composite.suspend = NULL; + gi->composite.resume = NULL; +- gi->composite.max_speed = USB_SPEED_SUPER; ++ gi->composite.max_speed = USB_SPEED_SUPER_PLUS; + + spin_lock_init(&gi->spinlock); + mutex_init(&gi->lock); +diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c +index bf3743882c25..d4414149fc80 100644 +--- a/drivers/usb/gadget/function/f_accessory.c ++++ b/drivers/usb/gadget/function/f_accessory.c +@@ -27,6 +27,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -73,6 +74,7 @@ struct acc_dev { + 
struct usb_function function; + struct usb_composite_dev *cdev; + spinlock_t lock; ++ struct acc_dev_ref *ref; + + struct usb_ep *ep_in; + struct usb_ep *ep_out; +@@ -80,13 +82,13 @@ struct acc_dev { + /* online indicates state of function_set_alt & function_unbind + * set to 1 when we connect + */ +- int online:1; ++ int online; + + /* disconnected indicates state of open & release + * Set to 1 when we disconnect. + * Not cleared until our file is closed. + */ +- int disconnected:1; ++ int disconnected; + + /* strings sent by the host */ + char manufacturer[ACC_STRING_SIZE]; +@@ -243,14 +245,48 @@ static struct usb_gadget_strings *acc_strings[] = { + NULL, + }; + +-/* temporary variable used between acc_open() and acc_gadget_bind() */ +-static struct acc_dev *_acc_dev; ++struct acc_dev_ref { ++ struct kref kref; ++ struct acc_dev *acc_dev; ++}; ++ ++static struct acc_dev_ref _acc_dev_ref = { ++ .kref = KREF_INIT(0), ++}; + + struct acc_instance { + struct usb_function_instance func_inst; + const char *name; + }; + ++static struct acc_dev *get_acc_dev(void) ++{ ++ struct acc_dev_ref *ref = &_acc_dev_ref; ++ ++ return kref_get_unless_zero(&ref->kref) ? ref->acc_dev : NULL; ++} ++ ++static void __put_acc_dev(struct kref *kref) ++{ ++ struct acc_dev_ref *ref = container_of(kref, struct acc_dev_ref, kref); ++ struct acc_dev *dev = ref->acc_dev; ++ ++ /* Cancel any async work */ ++ cancel_delayed_work_sync(&dev->start_work); ++ cancel_work_sync(&dev->hid_work); ++ ++ ref->acc_dev = NULL; ++ kfree(dev); ++} ++ ++static void put_acc_dev(struct acc_dev *dev) ++{ ++ struct acc_dev_ref *ref = dev->ref; ++ ++ WARN_ON(ref->acc_dev != dev); ++ kref_put(&ref->kref, __put_acc_dev); ++} ++ + static inline struct acc_dev *func_to_dev(struct usb_function *f) + { + return container_of(f, struct acc_dev, function); +@@ -316,7 +352,10 @@ static void acc_set_disconnected(struct acc_dev *dev) + + static void acc_complete_in(struct usb_ep *ep, struct usb_request *req) + { +- struct acc_dev *dev = _acc_dev; ++ struct acc_dev *dev = get_acc_dev(); ++ ++ if (!dev) ++ return; + + if (req->status == -ESHUTDOWN) { + pr_debug("acc_complete_in set disconnected"); +@@ -326,11 +365,15 @@ static void acc_complete_in(struct usb_ep *ep, struct usb_request *req) + req_put(dev, &dev->tx_idle, req); + + wake_up(&dev->write_wq); ++ put_acc_dev(dev); + } + + static void acc_complete_out(struct usb_ep *ep, struct usb_request *req) + { +- struct acc_dev *dev = _acc_dev; ++ struct acc_dev *dev = get_acc_dev(); ++ ++ if (!dev) ++ return; + + dev->rx_done = 1; + if (req->status == -ESHUTDOWN) { +@@ -339,6 +382,7 @@ static void acc_complete_out(struct usb_ep *ep, struct usb_request *req) + } + + wake_up(&dev->read_wq); ++ put_acc_dev(dev); + } + + static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req) +@@ -617,7 +661,9 @@ static ssize_t acc_read(struct file *fp, char __user *buf, + { + struct acc_dev *dev = fp->private_data; + struct usb_request *req; +- ssize_t r = count, xfer, len; ++ ssize_t r = count; ++ ssize_t data_length; ++ unsigned xfer; + int ret = 0; + + pr_debug("acc_read(%zu)\n", count); +@@ -638,7 +684,14 @@ static ssize_t acc_read(struct file *fp, char __user *buf, + goto done; + } + +- len = ALIGN(count, dev->ep_out->maxpacket); ++ /* ++ * Calculate the data length by considering termination character. ++ * Then compansite the difference of rounding up to ++ * integer multiple of maxpacket size. 
++ */ ++ data_length = count; ++ data_length += dev->ep_out->maxpacket - 1; ++ data_length -= data_length % dev->ep_out->maxpacket; + + if (dev->rx_done) { + // last req cancelled. try to get it. +@@ -649,7 +702,7 @@ static ssize_t acc_read(struct file *fp, char __user *buf, + requeue_req: + /* queue a request */ + req = dev->rx_req[0]; +- req->length = len; ++ req->length = data_length; + dev->rx_done = 0; + ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL); + if (ret < 0) { +@@ -803,24 +856,36 @@ static long acc_ioctl(struct file *fp, unsigned code, unsigned long value) + + static int acc_open(struct inode *ip, struct file *fp) + { +- printk(KERN_INFO "acc_open\n"); +- if (atomic_xchg(&_acc_dev->open_excl, 1)) ++ struct acc_dev *dev = get_acc_dev(); ++ ++ if (!dev) ++ return -ENODEV; ++ ++ if (atomic_xchg(&dev->open_excl, 1)) { ++ put_acc_dev(dev); + return -EBUSY; ++ } + +- _acc_dev->disconnected = 0; +- fp->private_data = _acc_dev; ++ dev->disconnected = 0; ++ fp->private_data = dev; + return 0; + } + + static int acc_release(struct inode *ip, struct file *fp) + { +- printk(KERN_INFO "acc_release\n"); ++ struct acc_dev *dev = fp->private_data; ++ ++ if (!dev) ++ return -ENOENT; + +- WARN_ON(!atomic_xchg(&_acc_dev->open_excl, 0)); + /* indicate that we are disconnected + * still could be online so don't touch online flag + */ +- _acc_dev->disconnected = 1; ++ dev->disconnected = 1; ++ ++ fp->private_data = NULL; ++ WARN_ON(!atomic_xchg(&dev->open_excl, 0)); ++ put_acc_dev(dev); + return 0; + } + +@@ -876,7 +941,7 @@ static void acc_complete_setup_noop(struct usb_ep *ep, struct usb_request *req) + int acc_ctrlrequest(struct usb_composite_dev *cdev, + const struct usb_ctrlrequest *ctrl) + { +- struct acc_dev *dev = _acc_dev; ++ struct acc_dev *dev = get_acc_dev(); + int value = -EOPNOTSUPP; + struct acc_hid_dev *hid; + int offset; +@@ -987,6 +1052,7 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev, + "%02x.%02x v%04x i%04x l%u\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); ++ put_acc_dev(dev); + return value; + } + EXPORT_SYMBOL_GPL(acc_ctrlrequest); +@@ -1065,10 +1131,6 @@ kill_all_hid_devices(struct acc_dev *dev) + struct list_head *entry, *temp; + unsigned long flags; + +- /* do nothing if usb accessory device doesn't exist */ +- if (!dev) +- return; +- + spin_lock_irqsave(&dev->lock, flags); + list_for_each_safe(entry, temp, &dev->hid_list) { + hid = list_entry(entry, struct acc_hid_dev, list); +@@ -1153,12 +1215,15 @@ static void acc_hid_delete(struct acc_hid_dev *hid) + + static void acc_hid_work(struct work_struct *data) + { +- struct acc_dev *dev = _acc_dev; ++ struct acc_dev *dev = get_acc_dev(); + struct list_head *entry, *temp; + struct acc_hid_dev *hid; + struct list_head new_list, dead_list; + unsigned long flags; + ++ if (!dev) ++ return; ++ + INIT_LIST_HEAD(&new_list); + + spin_lock_irqsave(&dev->lock, flags); +@@ -1204,6 +1269,8 @@ static void acc_hid_work(struct work_struct *data) + hid_destroy_device(hid->hid); + acc_hid_delete(hid); + } ++ ++ put_acc_dev(dev); + } + + static int acc_function_set_alt(struct usb_function *f, +@@ -1260,9 +1327,13 @@ static void acc_function_disable(struct usb_function *f) + + static int acc_setup(void) + { ++ struct acc_dev_ref *ref = &_acc_dev_ref; + struct acc_dev *dev; + int ret; + ++ if (kref_read(&ref->kref)) ++ return -EBUSY; ++ + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; +@@ -1278,16 +1349,22 @@ static int acc_setup(void) + INIT_DELAYED_WORK(&dev->start_work, 
acc_start_work); + INIT_WORK(&dev->hid_work, acc_hid_work); + ++ dev->ref = ref; ++ if (cmpxchg_relaxed(&ref->acc_dev, NULL, dev)) { ++ ret = -EBUSY; ++ goto err_free_dev; ++ } ++ + ret = misc_register(&acc_device); + if (ret) +- goto err; +- +- /* _acc_dev must be set before calling usb_gadget_register_driver */ +- _acc_dev = dev; ++ goto err_zap_ptr; + ++ kref_init(&ref->kref); + return 0; + +-err: ++err_zap_ptr: ++ ref->acc_dev = NULL; ++err_free_dev: + kfree(dev); + pr_err("USB accessory gadget driver failed to initialize\n"); + return ret; +@@ -1295,16 +1372,24 @@ static int acc_setup(void) + + void acc_disconnect(void) + { ++ struct acc_dev *dev = get_acc_dev(); ++ ++ if (!dev) ++ return; ++ + /* unregister all HID devices if USB is disconnected */ +- kill_all_hid_devices(_acc_dev); ++ kill_all_hid_devices(dev); ++ put_acc_dev(dev); + } + EXPORT_SYMBOL_GPL(acc_disconnect); + + static void acc_cleanup(void) + { ++ struct acc_dev *dev = get_acc_dev(); ++ + misc_deregister(&acc_device); +- kfree(_acc_dev); +- _acc_dev = NULL; ++ put_acc_dev(dev); ++ put_acc_dev(dev); /* Pairs with kref_init() in acc_setup() */ + } + static struct acc_instance *to_acc_instance(struct config_item *item) + { +@@ -1364,7 +1449,6 @@ static void acc_free_inst(struct usb_function_instance *fi) + static struct usb_function_instance *acc_alloc_inst(void) + { + struct acc_instance *fi_acc; +- struct acc_dev *dev; + int err; + + fi_acc = kzalloc(sizeof(*fi_acc), GFP_KERNEL); +@@ -1376,19 +1460,19 @@ static struct usb_function_instance *acc_alloc_inst(void) + err = acc_setup(); + if (err) { + kfree(fi_acc); +- pr_err("Error setting ACCESSORY\n"); + return ERR_PTR(err); + } + + config_group_init_type_name(&fi_acc->func_inst.group, + "", &acc_func_type); +- dev = _acc_dev; + return &fi_acc->func_inst; + } + + static void acc_free(struct usb_function *f) + { +-/*NO-OP: no function specific resource allocation in mtp_alloc*/ ++ struct acc_dev *dev = func_to_dev(f); ++ ++ put_acc_dev(dev); + } + + int acc_ctrlrequest_configfs(struct usb_function *f, +@@ -1401,9 +1485,7 @@ int acc_ctrlrequest_configfs(struct usb_function *f, + + static struct usb_function *acc_alloc(struct usb_function_instance *fi) + { +- struct acc_dev *dev = _acc_dev; +- +- pr_info("acc_alloc\n"); ++ struct acc_dev *dev = get_acc_dev(); + + dev->function.name = "accessory"; + dev->function.strings = acc_strings, +diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c +index e9d991800d6d..f2f8b9ddb08f 100644 +--- a/drivers/usb/gadget/function/f_acm.c ++++ b/drivers/usb/gadget/function/f_acm.c +@@ -687,7 +687,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) + acm_ss_out_desc.bEndpointAddress = acm_fs_out_desc.bEndpointAddress; + + status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function, +- acm_ss_function, NULL); ++ acm_ss_function, acm_ss_function); + if (status) + goto fail; + +diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c +index 36ace639a817..88e75d94b7cb 100644 +--- a/drivers/usb/gadget/function/f_cdev.c ++++ b/drivers/usb/gadget/function/f_cdev.c +@@ -59,6 +59,7 @@ + #define BRIDGE_TX_QUEUE_SIZE 8 + #define BRIDGE_TX_BUF_SIZE 2048 + #define BRIDGE_RX_BUF_SIZE_STANDALONE (50 * 1024) ++#define BRIDGE_RX_QUEUE_SIZE_STANDALONE (20) + + #define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */ + #define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */ +@@ -353,6 +354,7 @@ static struct usb_gadget_strings *usb_cser_strings[] = { + + static bool 
standalone_mode; + static unsigned int bridge_rx_buf_size = BRIDGE_RX_BUF_SIZE; ++static unsigned int bridge_rx_queue_size = BRIDGE_RX_QUEUE_SIZE; + + static inline struct f_cdev *func_to_port(struct usb_function *f) + { +@@ -1053,7 +1055,7 @@ static void usb_cser_start_io(struct f_cdev *port) + + ret = usb_cser_alloc_requests(port->port_usb.out, + &port->read_pool, +- BRIDGE_RX_QUEUE_SIZE, bridge_rx_buf_size, 0, ++ bridge_rx_queue_size, bridge_rx_buf_size, 0, + usb_cser_read_complete); + if (ret) { + pr_err("unable to allocate out requests\n"); +@@ -2077,11 +2079,13 @@ static int __init f_cdev_init(void) + __func__, cmdline); + standalone_mode = false; + bridge_rx_buf_size = BRIDGE_RX_BUF_SIZE; ++ bridge_rx_queue_size = BRIDGE_RX_QUEUE_SIZE; + } else { + pr_debug("%s standalone mode cmdline:\n", + __func__); + standalone_mode = true; + bridge_rx_buf_size = BRIDGE_RX_BUF_SIZE_STANDALONE; ++ bridge_rx_queue_size = BRIDGE_RX_QUEUE_SIZE_STANDALONE; + } + return 0; + } +diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c +index 8c380aca6208..5722d46b8005 100644 +--- a/drivers/usb/gadget/function/f_ecm.c ++++ b/drivers/usb/gadget/function/f_ecm.c +@@ -793,7 +793,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f) + fs_ecm_notify_desc.bEndpointAddress; + + status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function, +- ecm_ss_function, NULL); ++ ecm_ss_function, ecm_ss_function); + if (status) + goto fail; + +diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c +index 007ec6e4a5d4..b8fcc202e0a3 100644 +--- a/drivers/usb/gadget/function/f_eem.c ++++ b/drivers/usb/gadget/function/f_eem.c +@@ -34,6 +34,11 @@ struct f_eem { + u8 ctrl_id; + }; + ++struct in_context { ++ struct sk_buff *skb; ++ struct usb_ep *ep; ++}; ++ + static inline struct f_eem *func_to_eem(struct usb_function *f) + { + return container_of(f, struct f_eem, port.func); +@@ -309,7 +314,7 @@ static int eem_bind(struct usb_configuration *c, struct usb_function *f) + eem_ss_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress; + + status = usb_assign_descriptors(f, eem_fs_function, eem_hs_function, +- eem_ss_function, NULL); ++ eem_ss_function, eem_ss_function); + if (status) + goto fail; + +@@ -327,9 +332,12 @@ static int eem_bind(struct usb_configuration *c, struct usb_function *f) + + static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req) + { +- struct sk_buff *skb = (struct sk_buff *)req->context; ++ struct in_context *ctx = req->context; + +- dev_kfree_skb_any(skb); ++ dev_kfree_skb_any(ctx->skb); ++ kfree(req->buf); ++ usb_ep_free_request(ctx->ep, req); ++ kfree(ctx); + } + + /* +@@ -417,7 +425,9 @@ static int eem_unwrap(struct gether *port, + * b15: bmType (0 == data, 1 == command) + */ + if (header & BIT(15)) { +- struct usb_request *req = cdev->req; ++ struct usb_request *req; ++ struct in_context *ctx; ++ struct usb_ep *ep; + u16 bmEEMCmd; + + /* EEM command packet format: +@@ -446,11 +456,36 @@ static int eem_unwrap(struct gether *port, + skb_trim(skb2, len); + put_unaligned_le16(BIT(15) | BIT(11) | len, + skb_push(skb2, 2)); ++ ++ ep = port->in_ep; ++ req = usb_ep_alloc_request(ep, GFP_ATOMIC); ++ if (!req) { ++ dev_kfree_skb_any(skb2); ++ goto next; ++ } ++ ++ req->buf = kmalloc(skb2->len, GFP_KERNEL); ++ if (!req->buf) { ++ usb_ep_free_request(ep, req); ++ dev_kfree_skb_any(skb2); ++ goto next; ++ } ++ ++ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); ++ if (!ctx) { ++ kfree(req->buf); ++ 
usb_ep_free_request(ep, req); ++ dev_kfree_skb_any(skb2); ++ goto next; ++ } ++ ctx->skb = skb2; ++ ctx->ep = ep; ++ + skb_copy_bits(skb2, 0, req->buf, skb2->len); + req->length = skb2->len; + req->complete = eem_cmd_complete; + req->zero = 1; +- req->context = skb2; ++ req->context = ctx; + if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC)) + DBG(cdev, "echo response queue fail\n"); + break; +@@ -502,7 +537,7 @@ static int eem_unwrap(struct gether *port, + skb2 = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!skb2)) { + DBG(cdev, "unable to unframe EEM packet\n"); +- continue; ++ goto next; + } + skb_trim(skb2, len - ETH_FCS_LEN); + +@@ -513,7 +548,7 @@ static int eem_unwrap(struct gether *port, + if (unlikely(!skb3)) { + DBG(cdev, "unable to realign EEM packet\n"); + dev_kfree_skb_any(skb2); +- continue; ++ goto next; + } + dev_kfree_skb_any(skb2); + skb_queue_tail(list, skb3); +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c +index 2bcad8b0fdb1..161a524a6233 100755 +--- a/drivers/usb/gadget/function/f_fs.c ++++ b/drivers/usb/gadget/function/f_fs.c +@@ -1472,10 +1472,11 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code, + case FUNCTIONFS_ENDPOINT_DESC: + { + int desc_idx; +- struct usb_endpoint_descriptor *desc; ++ struct usb_endpoint_descriptor desc1, *desc; + + switch (epfile->ffs->gadget->speed) { + case USB_SPEED_SUPER: ++ case USB_SPEED_SUPER_PLUS: + desc_idx = 2; + break; + case USB_SPEED_HIGH: +@@ -1484,10 +1485,12 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code, + default: + desc_idx = 0; + } ++ + desc = epfile->ep->descs[desc_idx]; ++ memcpy(&desc1, desc, desc->bLength); + + spin_unlock_irq(&epfile->ffs->eps_lock); +- ret = copy_to_user((void *)value, desc, sizeof(*desc)); ++ ret = copy_to_user((void *)value, &desc1, desc1.bLength); + if (ret) + ret = -EFAULT; + return ret; +@@ -2973,6 +2976,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs, + + do { /* lang_count > 0 so we can use do-while */ + unsigned needed = needed_count; ++ u32 str_per_lang = str_count; + + if (unlikely(len < 3)) + goto error_free; +@@ -3008,7 +3012,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs, + + data += length + 1; + len -= length + 1; +- } while (--str_count); ++ } while (--str_per_lang); + + s->id = 0; /* terminator */ + s->s = NULL; +@@ -3526,7 +3530,8 @@ static int _ffs_func_bind(struct usb_configuration *c, + } + + if (likely(super)) { +- func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs); ++ func->function.ss_descriptors = func->function.ssp_descriptors = ++ vla_ptr(vlabuf, d, ss_descs); + ss_len = ffs_do_descs(ffs->ss_descs_count, + vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len, + d_raw_descs__sz - fs_len - hs_len, +@@ -4192,6 +4197,7 @@ static void ffs_func_unbind(struct usb_configuration *c, + func->function.fs_descriptors = NULL; + func->function.hs_descriptors = NULL; + func->function.ss_descriptors = NULL; ++ func->function.ssp_descriptors = NULL; + func->interfaces_nums = NULL; + + ffs_event_add(ffs, FUNCTIONFS_UNBIND); +diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c +index 9db175e63ad7..e1a0c23e679c 100644 +--- a/drivers/usb/gadget/function/f_gsi.c ++++ b/drivers/usb/gadget/function/f_gsi.c +@@ -3025,6 +3025,7 @@ static int gsi_update_function_bind_params(struct f_gsi *gsi, + if (gsi->c_port.notify_req) { + kfree(gsi->c_port.notify_req->buf); + usb_ep_free_request(gsi->c_port.notify, gsi->c_port.notify_req); ++ gsi->c_port.notify_req = NULL; + } + /* 
we might as well release our claims on endpoints */ + if (gsi->c_port.notify) +@@ -3534,6 +3535,7 @@ static void gsi_unbind(struct usb_configuration *c, struct usb_function *f) + if (gsi->c_port.notify) { + kfree(gsi->c_port.notify_req->buf); + usb_ep_free_request(gsi->c_port.notify, gsi->c_port.notify_req); ++ gsi->c_port.notify_req = NULL; + } + } + +diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c +index 8e83649f77ce..dd415b1589b5 100644 +--- a/drivers/usb/gadget/function/f_hid.c ++++ b/drivers/usb/gadget/function/f_hid.c +@@ -91,7 +91,7 @@ static struct usb_interface_descriptor hidg_interface_desc = { + static struct hid_descriptor hidg_desc = { + .bLength = sizeof hidg_desc, + .bDescriptorType = HID_DT_HID, +- .bcdHID = 0x0101, ++ .bcdHID = cpu_to_le16(0x0101), + .bCountryCode = 0x00, + .bNumDescriptors = 0x1, + /*.desc[0].bDescriptorType = DYNAMIC */ +@@ -932,7 +932,7 @@ static void hidg_free_inst(struct usb_function_instance *f) + mutex_lock(&hidg_ida_lock); + + hidg_put_minor(opts->minor); +- if (idr_is_empty(&hidg_ida.idr)) ++ if (ida_is_empty(&hidg_ida)) + ghid_cleanup(); + + mutex_unlock(&hidg_ida_lock); +@@ -958,7 +958,7 @@ static struct usb_function_instance *hidg_alloc_inst(void) + + mutex_lock(&hidg_ida_lock); + +- if (idr_is_empty(&hidg_ida.idr)) { ++ if (ida_is_empty(&hidg_ida)) { + status = ghid_setup(NULL, HIDG_MINORS); + if (status) { + ret = ERR_PTR(status); +@@ -971,7 +971,7 @@ static struct usb_function_instance *hidg_alloc_inst(void) + if (opts->minor < 0) { + ret = ERR_PTR(opts->minor); + kfree(opts); +- if (idr_is_empty(&hidg_ida.idr)) ++ if (ida_is_empty(&hidg_ida)) + ghid_cleanup(); + goto unlock; + } +diff --git a/drivers/usb/gadget/function/f_ipc.c b/drivers/usb/gadget/function/f_ipc.c +index bd8c726f6c5a..8f1c09e70055 100644 +--- a/drivers/usb/gadget/function/f_ipc.c ++++ b/drivers/usb/gadget/function/f_ipc.c +@@ -25,8 +25,8 @@ + + #define MAX_INST_NAME_LEN 40 + +-#define IPC_BRIDGE_MAX_READ_SZ (8 * 1024) +-#define IPC_BRIDGE_MAX_WRITE_SZ (8 * 1024) ++#define IPC_BRIDGE_MAX_READ_SZ (24 * 1024) ++#define IPC_BRIDGE_MAX_WRITE_SZ (24 * 1024) + + #define IPC_WRITE_WAIT_TIMEOUT 10000 + +@@ -296,7 +296,7 @@ static int ipc_write(struct platform_device *pdev, char *buf, + /* Notify the GPIO driver to wakeup the host and reintialize the + * completion structure. 
+ */ +- } else if (!ipc_dev->online) { ++ } else if (ipc_dev->connected && !ipc_dev->online) { + sb_notifier_call_chain(EVT_WAKE_UP, NULL); + reinit_completion(&ipc_dev->write_done); + goto retry_write_done; +@@ -513,6 +513,7 @@ static int ipc_bind(struct usb_configuration *c, struct usb_function *f) + if (!ctxt->in_req) + goto fail; + ++ ctxt->in_req->zero = true; + ctxt->in_req->complete = ipc_in_complete; + ctxt->out_req = usb_ep_alloc_request(ctxt->out, GFP_KERNEL); + if (!ctxt->out_req) +diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c +index e70093835e14..8c1810d65598 100644 +--- a/drivers/usb/gadget/function/f_loopback.c ++++ b/drivers/usb/gadget/function/f_loopback.c +@@ -211,7 +211,7 @@ static int loopback_bind(struct usb_configuration *c, struct usb_function *f) + ss_loop_sink_desc.bEndpointAddress = fs_loop_sink_desc.bEndpointAddress; + + ret = usb_assign_descriptors(f, fs_loopback_descs, hs_loopback_descs, +- ss_loopback_descs, NULL); ++ ss_loopback_descs, ss_loopback_descs); + if (ret) + return ret; + +diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c +index 4b4ce0e0dac1..8ba1cc715209 100644 +--- a/drivers/usb/gadget/function/f_midi.c ++++ b/drivers/usb/gadget/function/f_midi.c +@@ -1066,6 +1066,12 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f) + f->hs_descriptors = usb_copy_descriptors(midi_function); + if (!f->hs_descriptors) + goto fail_f_midi; ++ ++ if (gadget_is_superspeed_plus(c->cdev->gadget)) { ++ f->ssp_descriptors = usb_copy_descriptors(midi_function); ++ if (!f->ssp_descriptors) ++ goto fail_f_midi; ++ } + } + + if (gadget_is_superspeed(c->cdev->gadget)) { +@@ -1389,7 +1395,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) + midi->id = kstrdup(opts->id, GFP_KERNEL); + if (opts->id && !midi->id) { + status = -ENOMEM; +- goto setup_fail; ++ goto midi_free; + } + midi->in_ports = opts->in_ports; + midi->out_ports = opts->out_ports; +@@ -1400,7 +1406,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) + + status = kfifo_alloc(&midi->in_req_fifo, midi->qlen, GFP_KERNEL); + if (status) +- goto setup_fail; ++ goto midi_free; + + spin_lock_init(&midi->transmit_lock); + +@@ -1417,9 +1423,13 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) + fi->f = &midi->func; + return &midi->func; + ++midi_free: ++ if (midi) ++ kfree(midi->id); ++ kfree(midi); + setup_fail: + mutex_unlock(&opts->lock); +- kfree(midi); ++ + return ERR_PTR(status); + } + +diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c +index cbbf26cac4df..ea62a114632b 100644 +--- a/drivers/usb/gadget/function/f_ncm.c ++++ b/drivers/usb/gadget/function/f_ncm.c +@@ -91,8 +91,10 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f) + /* peak (theoretical) bulk transfer rate in bits-per-second */ + static inline unsigned ncm_bitrate(struct usb_gadget *g) + { +- if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) +- return 13 * 1024 * 8 * 1000 * 8; ++ if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS) ++ return 4250000000U; ++ else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) ++ return 3750000000U; + else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) + return 13 * 512 * 8 * 1000 * 8; + else +@@ -585,7 +587,7 @@ static void ncm_do_notify(struct f_ncm *ncm) + data[0] = cpu_to_le32(ncm_bitrate(cdev->gadget)); + data[1] = data[0]; 
+ +- DBG(cdev, "notify speed %d\n", ncm_bitrate(cdev->gadget)); ++ DBG(cdev, "notify speed %u\n", ncm_bitrate(cdev->gadget)); + ncm->notify_state = NCM_NOTIFY_CONNECT; + break; + } +@@ -1208,9 +1210,11 @@ static int ncm_unwrap_ntb(struct gether *port, + int ndp_index; + unsigned dg_len, dg_len2; + unsigned ndp_len; ++ unsigned block_len; + struct sk_buff *skb2; + int ret = -EINVAL; +- unsigned max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize); ++ unsigned ntb_max = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize); ++ unsigned frame_max = le16_to_cpu(ecm_desc.wMaxSegmentSize); + const struct ndp_parser_opts *opts = ncm->parser_opts; + unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0; + int dgram_counter; +@@ -1232,8 +1236,9 @@ static int ncm_unwrap_ntb(struct gether *port, + } + tmp++; /* skip wSequence */ + ++ block_len = get_ncm(&tmp, opts->block_length); + /* (d)wBlockLength */ +- if (get_ncm(&tmp, opts->block_length) > max_size) { ++ if (block_len > ntb_max) { + INFO(port->func.config->cdev, "OUT size exceeded\n"); + goto err; + } +@@ -1242,15 +1247,23 @@ static int ncm_unwrap_ntb(struct gether *port, + + /* Run through all the NDP's in the NTB */ + do { +- /* NCM 3.2 */ +- if (((ndp_index % 4) != 0) && +- (ndp_index < opts->nth_size)) { ++ /* ++ * NCM 3.2 ++ * dwNdpIndex ++ */ ++ if (((ndp_index % 4) != 0) || ++ (ndp_index < opts->nth_size) || ++ (ndp_index > (block_len - ++ opts->ndp_size))) { + INFO(port->func.config->cdev, "Bad index: %#X\n", + ndp_index); + goto err; + } + +- /* walk through NDP */ ++ /* ++ * walk through NDP ++ * dwSignature ++ */ + tmp = (void *)(skb->data + ndp_index); + if (get_unaligned_le32(tmp) != ncm->ndp_sign) { + INFO(port->func.config->cdev, "Wrong NDP SIGN\n"); +@@ -1261,14 +1274,15 @@ static int ncm_unwrap_ntb(struct gether *port, + ndp_len = get_unaligned_le16(tmp++); + /* + * NCM 3.3.1 ++ * wLength + * entry is 2 items + * item size is 16/32 bits, opts->dgram_item_len * 2 bytes + * minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry + * Each entry is a dgram index and a dgram length. + */ + if ((ndp_len < opts->ndp_size +- + 2 * 2 * (opts->dgram_item_len * 2)) +- || (ndp_len % opts->ndplen_align != 0)) { ++ + 2 * 2 * (opts->dgram_item_len * 2)) || ++ (ndp_len % opts->ndplen_align != 0)) { + INFO(port->func.config->cdev, "Bad NDP length: %#X\n", + ndp_len); + goto err; +@@ -1285,8 +1299,21 @@ static int ncm_unwrap_ntb(struct gether *port, + + do { + index = index2; ++ /* wDatagramIndex[0] */ ++ if ((index < opts->nth_size) || ++ (index > block_len - opts->dpe_size)) { ++ INFO(port->func.config->cdev, ++ "Bad index: %#X\n", index); ++ goto err; ++ } ++ + dg_len = dg_len2; +- if (dg_len < 14 + crc_len) { /* ethernet hdr + crc */ ++ /* ++ * wDatagramLength[0] ++ * ethernet hdr + crc or larger than max frame size ++ */ ++ if ((dg_len < 14 + crc_len) || ++ (dg_len > frame_max)) { + INFO(port->func.config->cdev, + "Bad dgram length: %#X\n", dg_len); + goto err; +@@ -1310,6 +1337,13 @@ static int ncm_unwrap_ntb(struct gether *port, + index2 = get_ncm(&tmp, opts->dgram_item_len); + dg_len2 = get_ncm(&tmp, opts->dgram_item_len); + ++ /* wDatagramIndex[1] */ ++ if (index2 > block_len - opts->dpe_size) { ++ INFO(port->func.config->cdev, ++ "Bad index: %#X\n", index2); ++ goto err; ++ } ++ + /* + * Copy the data into a new skb. 
+ * This ensures the truesize is correct +@@ -1326,7 +1360,6 @@ static int ncm_unwrap_ntb(struct gether *port, + ndp_len -= 2 * (opts->dgram_item_len * 2); + + dgram_counter++; +- + if (index2 == 0 || dg_len2 == 0) + break; + } while (ndp_len > 2 * (opts->dgram_item_len * 2)); +@@ -1515,7 +1548,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) + fs_ncm_notify_desc.bEndpointAddress; + + status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function, +- ncm_ss_function, NULL); ++ ncm_ss_function, ncm_ss_function); + if (status) + goto fail; + +diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c +index 0de36cda6e41..c347fe13d45d 100644 +--- a/drivers/usb/gadget/function/f_printer.c ++++ b/drivers/usb/gadget/function/f_printer.c +@@ -35,6 +35,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -69,7 +70,7 @@ struct printer_dev { + struct usb_gadget *gadget; + s8 interface; + struct usb_ep *in_ep, *out_ep; +- ++ struct kref kref; + struct list_head rx_reqs; /* List of free RX structs */ + struct list_head rx_reqs_active; /* List of Active RX xfers */ + struct list_head rx_buffers; /* List of completed xfers */ +@@ -223,6 +224,13 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget, + + /*-------------------------------------------------------------------------*/ + ++static void printer_dev_free(struct kref *kref) ++{ ++ struct printer_dev *dev = container_of(kref, struct printer_dev, kref); ++ ++ kfree(dev); ++} ++ + static struct usb_request * + printer_req_alloc(struct usb_ep *ep, unsigned len, gfp_t gfp_flags) + { +@@ -353,6 +361,7 @@ printer_open(struct inode *inode, struct file *fd) + + spin_unlock_irqrestore(&dev->lock, flags); + ++ kref_get(&dev->kref); + DBG(dev, "printer_open returned %x\n", ret); + return ret; + } +@@ -370,6 +379,7 @@ printer_close(struct inode *inode, struct file *fd) + dev->printer_status &= ~PRINTER_SELECTED; + spin_unlock_irqrestore(&dev->lock, flags); + ++ kref_put(&dev->kref, printer_dev_free); + DBG(dev, "printer_close\n"); + + return 0; +@@ -1047,7 +1057,8 @@ static int printer_func_bind(struct usb_configuration *c, + ss_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress; + + ret = usb_assign_descriptors(f, fs_printer_function, +- hs_printer_function, ss_printer_function, NULL); ++ hs_printer_function, ss_printer_function, ++ ss_printer_function); + if (ret) + return ret; + +@@ -1110,6 +1121,7 @@ static int printer_func_bind(struct usb_configuration *c, + printer_req_free(dev->in_ep, req); + } + ++ usb_free_all_descriptors(f); + return ret; + + } +@@ -1265,7 +1277,7 @@ static void gprinter_free_inst(struct usb_function_instance *f) + mutex_lock(&printer_ida_lock); + + gprinter_put_minor(opts->minor); +- if (idr_is_empty(&printer_ida.idr)) ++ if (ida_is_empty(&printer_ida)) + gprinter_cleanup(); + + mutex_unlock(&printer_ida_lock); +@@ -1289,7 +1301,7 @@ static struct usb_function_instance *gprinter_alloc_inst(void) + + mutex_lock(&printer_ida_lock); + +- if (idr_is_empty(&printer_ida.idr)) { ++ if (ida_is_empty(&printer_ida)) { + status = gprinter_setup(PRINTER_MINORS); + if (status) { + ret = ERR_PTR(status); +@@ -1302,7 +1314,7 @@ static struct usb_function_instance *gprinter_alloc_inst(void) + if (opts->minor < 0) { + ret = ERR_PTR(opts->minor); + kfree(opts); +- if (idr_is_empty(&printer_ida.idr)) ++ if (ida_is_empty(&printer_ida)) + gprinter_cleanup(); + goto unlock; + } +@@ -1320,7 +1332,8 @@ static void 
gprinter_free(struct usb_function *f) + struct f_printer_opts *opts; + + opts = container_of(f->fi, struct f_printer_opts, func_inst); +- kfree(dev); ++ ++ kref_put(&dev->kref, printer_dev_free); + mutex_lock(&opts->lock); + --opts->refcnt; + mutex_unlock(&opts->lock); +@@ -1389,6 +1402,7 @@ static struct usb_function *gprinter_alloc(struct usb_function_instance *fi) + return ERR_PTR(-ENOMEM); + } + ++ kref_init(&dev->kref); + ++opts->refcnt; + dev->minor = opts->minor; + dev->pnp_string = opts->pnp_string; +diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c +index 4842fb5ef90f..529c667dea9e 100644 +--- a/drivers/usb/gadget/function/f_rndis.c ++++ b/drivers/usb/gadget/function/f_rndis.c +@@ -104,8 +104,10 @@ static inline struct f_rndis *func_to_rndis(struct usb_function *f) + /* peak (theoretical) bulk transfer rate in bits-per-second */ + static unsigned int bitrate(struct usb_gadget *g) + { ++ if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS) ++ return 4250000000U; + if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) +- return 13 * 1024 * 8 * 1000 * 8; ++ return 3750000000U; + else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) + return 13 * 512 * 8 * 1000 * 8; + else +@@ -897,7 +899,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) + ss_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress; + + status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function, +- eth_ss_function, NULL); ++ eth_ss_function, eth_ss_function); + if (status) + goto fail; + +diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c +index 405becf941f6..89d15954d337 100644 +--- a/drivers/usb/gadget/function/f_serial.c ++++ b/drivers/usb/gadget/function/f_serial.c +@@ -665,7 +665,7 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f) + } + + status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function, +- gser_ss_function, NULL); ++ gser_ss_function, gser_ss_function); + if (status) + goto fail; + dev_dbg(&cdev->gadget->dev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n", +diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c +index 6e9d958004a0..1c5745f7abea 100644 +--- a/drivers/usb/gadget/function/f_sourcesink.c ++++ b/drivers/usb/gadget/function/f_sourcesink.c +@@ -435,7 +435,8 @@ sourcesink_bind(struct usb_configuration *c, struct usb_function *f) + ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; + + ret = usb_assign_descriptors(f, fs_source_sink_descs, +- hs_source_sink_descs, ss_source_sink_descs, NULL); ++ hs_source_sink_descs, ss_source_sink_descs, ++ ss_source_sink_descs); + if (ret) + return ret; + +diff --git a/drivers/usb/gadget/function/f_subset.c b/drivers/usb/gadget/function/f_subset.c +index 434b983f3b4c..055bd0706cdd 100644 +--- a/drivers/usb/gadget/function/f_subset.c ++++ b/drivers/usb/gadget/function/f_subset.c +@@ -362,7 +362,7 @@ geth_bind(struct usb_configuration *c, struct usb_function *f) + fs_subset_out_desc.bEndpointAddress; + + status = usb_assign_descriptors(f, fs_eth_function, hs_eth_function, +- ss_eth_function, NULL); ++ ss_eth_function, ss_eth_function); + if (status) + goto fail; + +diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c +index d2351139342f..821f470c6a2f 100644 +--- a/drivers/usb/gadget/function/f_tcm.c ++++ b/drivers/usb/gadget/function/f_tcm.c +@@ -751,12 +751,13 @@ static int 
uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream) + goto err_sts; + + return 0; ++ + err_sts: +- usb_ep_free_request(fu->ep_status, stream->req_status); +- stream->req_status = NULL; +-err_out: + usb_ep_free_request(fu->ep_out, stream->req_out); + stream->req_out = NULL; ++err_out: ++ usb_ep_free_request(fu->ep_in, stream->req_in); ++ stream->req_in = NULL; + out: + return -ENOMEM; + } +@@ -2070,7 +2071,8 @@ static int tcm_bind(struct usb_configuration *c, struct usb_function *f) + uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress; + + ret = usb_assign_descriptors(f, uasp_fs_function_desc, +- uasp_hs_function_desc, uasp_ss_function_desc, NULL); ++ uasp_hs_function_desc, uasp_ss_function_desc, ++ uasp_ss_function_desc); + if (ret) + goto ep_fail; + +diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c +index f2ac0cbc29a4..be9616964a17 100644 +--- a/drivers/usb/gadget/function/f_uac1.c ++++ b/drivers/usb/gadget/function/f_uac1.c +@@ -336,7 +336,9 @@ static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req) + + /* Copy buffer is full, add it to the play_queue */ + if (audio_buf_size - copy_buf->actual < req->actual) { ++ spin_lock_irq(&audio->lock); + list_add_tail(©_buf->list, &audio->play_queue); ++ spin_unlock_irq(&audio->lock); + schedule_work(&audio->playback_work); + copy_buf = f_audio_buffer_alloc(audio_buf_size); + if (IS_ERR(copy_buf)) +diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c +index e887eaf9a2b0..7974f1c2a86a 100644 +--- a/drivers/usb/gadget/function/f_uac2.c ++++ b/drivers/usb/gadget/function/f_uac2.c +@@ -23,7 +23,7 @@ + #include "u_uac2.h" + + /* Keep everyone on toes */ +-#define USB_XFERS 8 ++#define USB_XFERS 32 + + /* + * The driver implements a simple UAC_2 topology. +@@ -775,7 +775,7 @@ static struct usb_endpoint_descriptor fs_epout_desc = { + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, +- .wMaxPacketSize = cpu_to_le16(1023), ++ /* .wMaxPacketSize = DYNAMIC */ + .bInterval = 1, + }; + +@@ -784,7 +784,7 @@ static struct usb_endpoint_descriptor hs_epout_desc = { + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, +- .wMaxPacketSize = cpu_to_le16(1024), ++ /* .wMaxPacketSize = DYNAMIC */ + .bInterval = 4, + }; + +@@ -859,7 +859,7 @@ static struct usb_endpoint_descriptor fs_epin_desc = { + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, +- .wMaxPacketSize = cpu_to_le16(1023), ++ /* .wMaxPacketSize = DYNAMIC */ + .bInterval = 1, + }; + +@@ -868,7 +868,7 @@ static struct usb_endpoint_descriptor hs_epin_desc = { + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, +- .wMaxPacketSize = cpu_to_le16(1024), ++ /* .wMaxPacketSize = DYNAMIC */ + .bInterval = 4, + }; + +@@ -1082,6 +1082,7 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, + { + int chmask, srate, ssize; + u16 max_packet_size; ++ u8 bInterval; + + if (is_playback) { + chmask = uac2_opts->p_chmask; +@@ -1095,6 +1096,26 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, + + max_packet_size = num_channels(chmask) * ssize * + DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))); ++ ++ /* max_packet_size can vary based on sample rate, no. of channels & ++ * sample size. 
If max_packet_size > ep_desc->wMaxPacketSize(i.e 1024), ++ * reduce bInterval accordingly & recalculate max_packet_size. Else ++ * required data rate may not fit per second. ++ */ ++ if (factor == 8000) { ++ if (max_packet_size > ep_desc->wMaxPacketSize) { ++ bInterval = ep_desc->bInterval - ++ (max_packet_size/ep_desc->wMaxPacketSize); ++ if ((bInterval < 1) || (bInterval > ep_desc->bInterval)) ++ ep_desc->bInterval = 1; ++ else ++ ep_desc->bInterval = bInterval; ++ ++ max_packet_size = num_channels(chmask) * ssize * ++ DIV_ROUND_UP(srate, ++ factor / (1 << (ep_desc->bInterval - 1))); ++ } ++ } + ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_packet_size, + le16_to_cpu(ep_desc->wMaxPacketSize))); + } +@@ -1202,6 +1223,10 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) + uac2->p_prm.uac2 = uac2; + uac2->c_prm.uac2 = uac2; + ++ hs_epin_desc.bInterval = 4; ++ hs_epout_desc.bInterval = 4; ++ hs_epin_desc.wMaxPacketSize = cpu_to_le16(1024); ++ hs_epout_desc.wMaxPacketSize = cpu_to_le16(1024); + /* Calculate wMaxPacketSize according to audio bandwidth */ + set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true); + set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false); +diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c +index fbc942d7a484..104a0e78cf9b 100644 +--- a/drivers/usb/gadget/function/f_uvc.c ++++ b/drivers/usb/gadget/function/f_uvc.c +@@ -625,7 +625,12 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f) + + uvc_hs_streaming_ep.wMaxPacketSize = + cpu_to_le16(max_packet_size | ((max_packet_mult - 1) << 11)); +- uvc_hs_streaming_ep.bInterval = opts->streaming_interval; ++ ++ /* A high-bandwidth endpoint must specify a bInterval value of 1 */ ++ if (max_packet_mult > 1) ++ uvc_hs_streaming_ep.bInterval = 1; ++ else ++ uvc_hs_streaming_ep.bInterval = opts->streaming_interval; + + uvc_ss_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size); + uvc_ss_streaming_ep.bInterval = opts->streaming_interval; +diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c +index a0ad21f6994d..403633498d7d 100644 +--- a/drivers/usb/gadget/function/u_ether.c ++++ b/drivers/usb/gadget/function/u_ether.c +@@ -137,7 +137,7 @@ MODULE_PARM_DESC(tx_qmult, "Additional queue length multiplier for tx"); + static inline int qlen(struct usb_gadget *gadget, unsigned qmult) + { + if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH || +- gadget->speed == USB_SPEED_SUPER)) ++ gadget->speed >= USB_SPEED_SUPER)) + return qmult * DEFAULT_QLEN; + else + return DEFAULT_QLEN; +diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c +index 91fd78212488..bce3732bdd1d 100644 +--- a/drivers/usb/gadget/function/uvc_video.c ++++ b/drivers/usb/gadget/function/uvc_video.c +@@ -137,7 +137,8 @@ static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req) + if (ret < 0) { + printk(KERN_INFO "Failed to queue request (%d).\n", ret); + /* Isochronous endpoints can't be halted. 
*/ +- if (usb_endpoint_xfer_bulk(video->ep->desc)) ++ if ((ret != -ESHUTDOWN) && ++ usb_endpoint_xfer_bulk(video->ep->desc)) + usb_ep_set_halt(video->ep); + } + +diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c +index c39de65a448b..270bb88c51cb 100644 +--- a/drivers/usb/gadget/legacy/acm_ms.c ++++ b/drivers/usb/gadget/legacy/acm_ms.c +@@ -207,8 +207,10 @@ static int acm_ms_bind(struct usb_composite_dev *cdev) + struct usb_descriptor_header *usb_desc; + + usb_desc = usb_otg_descriptor_alloc(gadget); +- if (!usb_desc) ++ if (!usb_desc) { ++ status = -ENOMEM; + goto fail_string_ids; ++ } + usb_otg_descriptor_init(gadget, usb_desc); + otg_desc[0] = usb_desc; + otg_desc[1] = NULL; +diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c +index 25a2c2e48592..3396e7193dba 100644 +--- a/drivers/usb/gadget/legacy/ether.c ++++ b/drivers/usb/gadget/legacy/ether.c +@@ -407,8 +407,10 @@ static int eth_bind(struct usb_composite_dev *cdev) + struct usb_descriptor_header *usb_desc; + + usb_desc = usb_otg_descriptor_alloc(gadget); +- if (!usb_desc) ++ if (!usb_desc) { ++ status = -ENOMEM; + goto fail1; ++ } + usb_otg_descriptor_init(gadget, usb_desc); + otg_desc[0] = usb_desc; + otg_desc[1] = NULL; +diff --git a/drivers/usb/gadget/legacy/hid.c b/drivers/usb/gadget/legacy/hid.c +index a71a884f79fc..cccbb948821b 100644 +--- a/drivers/usb/gadget/legacy/hid.c ++++ b/drivers/usb/gadget/legacy/hid.c +@@ -175,8 +175,10 @@ static int hid_bind(struct usb_composite_dev *cdev) + struct usb_descriptor_header *usb_desc; + + usb_desc = usb_otg_descriptor_alloc(gadget); +- if (!usb_desc) ++ if (!usb_desc) { ++ status = -ENOMEM; + goto put; ++ } + usb_otg_descriptor_init(gadget, usb_desc); + otg_desc[0] = usb_desc; + otg_desc[1] = NULL; +diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c +index cb02e9ecd8e7..de0f3c2c9f6f 100644 +--- a/drivers/usb/gadget/legacy/inode.c ++++ b/drivers/usb/gadget/legacy/inode.c +@@ -2045,6 +2045,9 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent) + return 0; + + Enomem: ++ kfree(CHIP); ++ CHIP = NULL; ++ + return -ENOMEM; + } + +diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h +index 2f03334c6874..d150a6795f48 100644 +--- a/drivers/usb/gadget/u_f.h ++++ b/drivers/usb/gadget/u_f.h +@@ -17,6 +17,7 @@ + #define __U_F_H__ + + #include ++#include + + /* Variable Length Array Macros **********************************************/ + #define vla_group(groupname) size_t groupname##__next = 0 +@@ -24,21 +25,36 @@ + + #define vla_item(groupname, type, name, n) \ + size_t groupname##_##name##__offset = ({ \ +- size_t align_mask = __alignof__(type) - 1; \ +- size_t offset = (groupname##__next + align_mask) & ~align_mask;\ +- size_t size = (n) * sizeof(type); \ +- groupname##__next = offset + size; \ ++ size_t offset = 0; \ ++ if (groupname##__next != SIZE_MAX) { \ ++ size_t align_mask = __alignof__(type) - 1; \ ++ size_t size = array_size(n, sizeof(type)); \ ++ offset = (groupname##__next + align_mask) & \ ++ ~align_mask; \ ++ if (check_add_overflow(offset, size, \ ++ &groupname##__next)) { \ ++ groupname##__next = SIZE_MAX; \ ++ offset = 0; \ ++ } \ ++ } \ + offset; \ + }) + + #define vla_item_with_sz(groupname, type, name, n) \ +- size_t groupname##_##name##__sz = (n) * sizeof(type); \ +- size_t groupname##_##name##__offset = ({ \ +- size_t align_mask = __alignof__(type) - 1; \ +- size_t offset = (groupname##__next + align_mask) & ~align_mask;\ +- size_t size = 
groupname##_##name##__sz; \ +- groupname##__next = offset + size; \ +- offset; \ ++ size_t groupname##_##name##__sz = array_size(n, sizeof(type)); \ ++ size_t groupname##_##name##__offset = ({ \ ++ size_t offset = 0; \ ++ if (groupname##__next != SIZE_MAX) { \ ++ size_t align_mask = __alignof__(type) - 1; \ ++ offset = (groupname##__next + align_mask) & \ ++ ~align_mask; \ ++ if (check_add_overflow(offset, groupname##_##name##__sz,\ ++ &groupname##__next)) { \ ++ groupname##__next = SIZE_MAX; \ ++ offset = 0; \ ++ } \ ++ } \ ++ offset; \ + }) + + #define vla_ptr(ptr, groupname, name) \ +diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c +index 57dd3bad9539..ccf1e9fe5ebd 100644 +--- a/drivers/usb/gadget/udc/atmel_usba_udc.c ++++ b/drivers/usb/gadget/udc/atmel_usba_udc.c +@@ -843,7 +843,7 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) + u32 status; + + DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n", +- ep->ep.name, req); ++ ep->ep.name, _req); + + spin_lock_irqsave(&udc->lock, flags); + +diff --git a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig +index 0d7b8c9f72fd..778df4badf88 100644 +--- a/drivers/usb/gadget/udc/bdc/Kconfig ++++ b/drivers/usb/gadget/udc/bdc/Kconfig +@@ -14,7 +14,7 @@ if USB_BDC_UDC + comment "Platform Support" + config USB_BDC_PCI + tristate "BDC support for PCIe based platforms" +- depends on PCI ++ depends on PCI && BROKEN + default USB_BDC_UDC + help + Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform. +diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c +index e9bd8d4abca0..f09a74d79c9e 100644 +--- a/drivers/usb/gadget/udc/bdc/bdc_core.c ++++ b/drivers/usb/gadget/udc/bdc/bdc_core.c +@@ -286,6 +286,7 @@ static void bdc_mem_init(struct bdc *bdc, bool reinit) + * in that case reinit is passed as 1 + */ + if (reinit) { ++ int i; + /* Enable interrupts */ + temp = bdc_readl(bdc->regs, BDC_BDCSC); + temp |= BDC_GIE; +@@ -295,6 +296,9 @@ static void bdc_mem_init(struct bdc *bdc, bool reinit) + /* Initialize SRR to 0 */ + memset(bdc->srr.sr_bds, 0, + NUM_SR_ENTRIES * sizeof(struct bdc_bd)); ++ /* clear ep flags to avoid post disconnect stops/deconfigs */ ++ for (i = 1; i < bdc->num_eps; ++i) ++ bdc->bdc_ep_array[i]->flags = 0; + } else { + /* One time initiaization only */ + /* Enable status report function pointers */ +diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c +index 303735c7990c..8b1b48fa4ebf 100644 +--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c ++++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c +@@ -621,7 +621,6 @@ int bdc_ep_enable(struct bdc_ep *ep) + } + bdc_dbg_bd_list(bdc, ep); + /* only for ep0: config ep is called for ep0 from connect event */ +- ep->flags |= BDC_EP_ENABLED; + if (ep->ep_num == 1) + return ret; + +@@ -765,10 +764,13 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req) + __func__, ep->name, start_bdi, end_bdi); + dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n", + ep, (void *)ep->usb_ep.desc); +- /* Stop the ep to see where the HW is ? */ +- ret = bdc_stop_ep(bdc, ep->ep_num); +- /* if there is an issue with stopping ep, then no need to go further */ +- if (ret) ++ /* if still connected, stop the ep to see where the HW is ? 
*/ ++ if (!(bdc_readl(bdc->regs, BDC_USPC) & BDC_PST_MASK)) { ++ ret = bdc_stop_ep(bdc, ep->ep_num); ++ /* if there is an issue, then no need to go further */ ++ if (ret) ++ return 0; ++ } else + return 0; + + /* +@@ -1917,7 +1919,9 @@ static int bdc_gadget_ep_disable(struct usb_ep *_ep) + __func__, ep->name, ep->flags); + + if (!(ep->flags & BDC_EP_ENABLED)) { +- dev_warn(bdc->dev, "%s is already disabled\n", ep->name); ++ if (bdc->gadget.speed != USB_SPEED_UNKNOWN) ++ dev_warn(bdc->dev, "%s is already disabled\n", ++ ep->name); + return 0; + } + spin_lock_irqsave(&bdc->lock, flags); +diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c +index 2dd30d0c177a..58e95bf663ae 100644 +--- a/drivers/usb/gadget/udc/core.c ++++ b/drivers/usb/gadget/udc/core.c +@@ -1479,10 +1479,13 @@ static ssize_t usb_udc_softconn_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t n) + { + struct usb_udc *udc = container_of(dev, struct usb_udc, dev); ++ ssize_t ret; + ++ mutex_lock(&udc_lock); + if (!udc->driver) { + dev_err(dev, "soft-connect without a gadget driver\n"); +- return -EOPNOTSUPP; ++ ret = -EOPNOTSUPP; ++ goto out; + } + + if (sysfs_streq(buf, "connect")) { +@@ -1494,10 +1497,14 @@ static ssize_t usb_udc_softconn_store(struct device *dev, + usb_gadget_udc_stop(udc); + } else { + dev_err(dev, "unsupported command '%s'\n", buf); +- return -EINVAL; ++ ret = -EINVAL; ++ goto out; + } + +- return n; ++ ret = n; ++out: ++ mutex_unlock(&udc_lock); ++ return ret; + } + static DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store); + +diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c +index 2f7023a289c9..8b16f0200d59 100644 +--- a/drivers/usb/gadget/udc/dummy_hcd.c ++++ b/drivers/usb/gadget/udc/dummy_hcd.c +@@ -918,6 +918,21 @@ static int dummy_pullup(struct usb_gadget *_gadget, int value) + spin_lock_irqsave(&dum->lock, flags); + dum->pullup = (value != 0); + set_link_state(dum_hcd); ++ if (value == 0) { ++ /* ++ * Emulate synchronize_irq(): wait for callbacks to finish. ++ * This seems to be the best place to emulate the call to ++ * synchronize_irq() that's in usb_gadget_remove_driver(). ++ * Doing it in dummy_udc_stop() would be too late since it ++ * is called after the unbind callback and unbind shouldn't ++ * be invoked until all the other callbacks are finished. 
++ */ ++ while (dum->callback_usage > 0) { ++ spin_unlock_irqrestore(&dum->lock, flags); ++ usleep_range(1000, 2000); ++ spin_lock_irqsave(&dum->lock, flags); ++ } ++ } + spin_unlock_irqrestore(&dum->lock, flags); + + usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd)); +@@ -998,14 +1013,6 @@ static int dummy_udc_stop(struct usb_gadget *g) + spin_lock_irq(&dum->lock); + dum->ints_enabled = 0; + stop_activity(dum); +- +- /* emulate synchronize_irq(): wait for callbacks to finish */ +- while (dum->callback_usage > 0) { +- spin_unlock_irq(&dum->lock); +- usleep_range(1000, 2000); +- spin_lock_irq(&dum->lock); +- } +- + dum->driver = NULL; + spin_unlock_irq(&dum->lock); + +@@ -2736,7 +2743,7 @@ static int __init init(void) + { + int retval = -ENOMEM; + int i; +- struct dummy *dum[MAX_NUM_UDC]; ++ struct dummy *dum[MAX_NUM_UDC] = {}; + + if (usb_disabled()) + return -ENODEV; +diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c +index 76e991557116..9e102ba9cf66 100644 +--- a/drivers/usb/gadget/udc/fotg210-udc.c ++++ b/drivers/usb/gadget/udc/fotg210-udc.c +@@ -340,15 +340,16 @@ static void fotg210_start_dma(struct fotg210_ep *ep, + } else { + buffer = req->req.buf + req->req.actual; + length = ioread32(ep->fotg210->reg + +- FOTG210_FIBCR(ep->epnum - 1)); +- length &= FIBCR_BCFX; ++ FOTG210_FIBCR(ep->epnum - 1)) & FIBCR_BCFX; ++ if (length > req->req.length - req->req.actual) ++ length = req->req.length - req->req.actual; + } + } else { + buffer = req->req.buf + req->req.actual; + if (req->req.length - req->req.actual > ep->ep.maxpacket) + length = ep->ep.maxpacket; + else +- length = req->req.length; ++ length = req->req.length - req->req.actual; + } + + d = dma_map_single(NULL, buffer, length, +@@ -385,8 +386,7 @@ static void fotg210_ep0_queue(struct fotg210_ep *ep, + } + if (ep->dir_in) { /* if IN */ + fotg210_start_dma(ep, req); +- if ((req->req.length == req->req.actual) || +- (req->req.actual < ep->ep.maxpacket)) ++ if (req->req.length == req->req.actual) + fotg210_done(ep, req, 0); + } else { /* OUT */ + u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR0); +@@ -827,7 +827,7 @@ static void fotg210_ep0in(struct fotg210_udc *fotg210) + if (req->req.length) + fotg210_start_dma(ep, req); + +- if ((req->req.length - req->req.actual) < ep->ep.maxpacket) ++ if (req->req.actual == req->req.length) + fotg210_done(ep, req, 0); + } else { + fotg210_set_cxdone(fotg210); +@@ -856,12 +856,16 @@ static void fotg210_out_fifo_handler(struct fotg210_ep *ep) + { + struct fotg210_request *req = list_entry(ep->queue.next, + struct fotg210_request, queue); ++ int disgr1 = ioread32(ep->fotg210->reg + FOTG210_DISGR1); + + fotg210_start_dma(ep, req); + +- /* finish out transfer */ ++ /* Complete the request when it's full or a short packet arrived. ++ * Like other drivers, short_not_ok isn't handled. 
++ */ ++ + if (req->req.length == req->req.actual || +- req->req.actual < ep->ep.maxpacket) ++ (disgr1 & DISGR1_SPK_INT(ep->epnum - 1))) + fotg210_done(ep, req, 0); + } + +@@ -1034,6 +1038,12 @@ static void fotg210_init(struct fotg210_udc *fotg210) + value &= ~DMCR_GLINT_EN; + iowrite32(value, fotg210->reg + FOTG210_DMCR); + ++ /* enable only grp2 irqs we handle */ ++ iowrite32(~(DISGR2_DMA_ERROR | DISGR2_RX0BYTE_INT | DISGR2_TX0BYTE_INT ++ | DISGR2_ISO_SEQ_ABORT_INT | DISGR2_ISO_SEQ_ERR_INT ++ | DISGR2_RESM_INT | DISGR2_SUSP_INT | DISGR2_USBRST_INT), ++ fotg210->reg + FOTG210_DMISGR2); ++ + /* disable all fifo interrupt */ + iowrite32(~(u32)0, fotg210->reg + FOTG210_DMISGR1); + +diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c +index 5107987bd353..d363224dce6f 100644 +--- a/drivers/usb/gadget/udc/goku_udc.c ++++ b/drivers/usb/gadget/udc/goku_udc.c +@@ -1772,6 +1772,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id) + goto err; + } + ++ pci_set_drvdata(pdev, dev); + spin_lock_init(&dev->lock); + dev->pdev = pdev; + dev->gadget.ops = &goku_ops; +@@ -1805,7 +1806,6 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id) + } + dev->regs = (struct goku_udc_regs __iomem *) base; + +- pci_set_drvdata(pdev, dev); + INFO(dev, "%s\n", driver_desc); + INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr()); + INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base); +diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c +index 9e246d2e55ca..f2b165182b4b 100644 +--- a/drivers/usb/gadget/udc/gr_udc.c ++++ b/drivers/usb/gadget/udc/gr_udc.c +@@ -2000,9 +2000,12 @@ static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit) + + if (num == 0) { + _req = gr_alloc_request(&ep->ep, GFP_ATOMIC); ++ if (!_req) ++ return -ENOMEM; ++ + buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC); +- if (!_req || !buf) { +- /* possible _req freed by gr_probe via gr_remove */ ++ if (!buf) { ++ gr_free_request(&ep->ep, _req); + return -ENOMEM; + } + +diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c +index ac2aa04ca657..710793161795 100644 +--- a/drivers/usb/gadget/udc/lpc32xx_udc.c ++++ b/drivers/usb/gadget/udc/lpc32xx_udc.c +@@ -1615,17 +1615,17 @@ static int lpc32xx_ep_enable(struct usb_ep *_ep, + const struct usb_endpoint_descriptor *desc) + { + struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); +- struct lpc32xx_udc *udc = ep->udc; ++ struct lpc32xx_udc *udc; + u16 maxpacket; + u32 tmp; + unsigned long flags; + + /* Verify EP data */ + if ((!_ep) || (!ep) || (!desc) || +- (desc->bDescriptorType != USB_DT_ENDPOINT)) { +- dev_dbg(udc->dev, "bad ep or descriptor\n"); ++ (desc->bDescriptorType != USB_DT_ENDPOINT)) + return -EINVAL; +- } ++ ++ udc = ep->udc; + maxpacket = usb_endpoint_maxp(desc); + if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) { + dev_dbg(udc->dev, "bad ep descriptor's packet size\n"); +@@ -1873,7 +1873,7 @@ static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) + static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value) + { + struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); +- struct lpc32xx_udc *udc = ep->udc; ++ struct lpc32xx_udc *udc; + unsigned long flags; + + if ((!ep) || (ep->hwep_num <= 1)) +@@ -1883,6 +1883,7 @@ static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value) + if (ep->is_in) + return -EAGAIN; + ++ udc = ep->udc; + spin_lock_irqsave(&udc->lock, flags); 
+ + if (value == 1) { +diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c +index 6e977dc22570..1be409644a48 100644 +--- a/drivers/usb/gadget/udc/m66592-udc.c ++++ b/drivers/usb/gadget/udc/m66592-udc.c +@@ -1672,7 +1672,7 @@ static int m66592_probe(struct platform_device *pdev) + + err_add_udc: + m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req); +- ++ m66592->ep0_req = NULL; + clean_up3: + if (m66592->pdata->on_chip) { + clk_disable(m66592->clk); +diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c +index ce73b3552269..8700db903382 100644 +--- a/drivers/usb/gadget/udc/mv_udc_core.c ++++ b/drivers/usb/gadget/udc/mv_udc_core.c +@@ -2317,7 +2317,8 @@ static int mv_udc_probe(struct platform_device *pdev) + return 0; + + err_create_workqueue: +- destroy_workqueue(udc->qwork); ++ if (udc->qwork) ++ destroy_workqueue(udc->qwork); + err_destroy_dma: + dma_pool_destroy(udc->dtd_pool); + err_free_dma: +diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c +index dfaed8e8cc52..c8c45264e94c 100644 +--- a/drivers/usb/gadget/udc/net2280.c ++++ b/drivers/usb/gadget/udc/net2280.c +@@ -3785,8 +3785,10 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id) + return 0; + + done: +- if (dev) ++ if (dev) { + net2280_remove(pdev); ++ kfree(dev); ++ } + return retval; + } + +diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c +index 8a365aad66fe..18ce19162804 100644 +--- a/drivers/usb/gadget/udc/pch_udc.c ++++ b/drivers/usb/gadget/udc/pch_udc.c +@@ -604,18 +604,22 @@ static void pch_udc_reconnect(struct pch_udc_dev *dev) + static inline void pch_udc_vbus_session(struct pch_udc_dev *dev, + int is_active) + { ++ unsigned long iflags; ++ ++ spin_lock_irqsave(&dev->lock, iflags); + if (is_active) { + pch_udc_reconnect(dev); + dev->vbus_session = 1; + } else { + if (dev->driver && dev->driver->disconnect) { +- spin_lock(&dev->lock); ++ spin_unlock_irqrestore(&dev->lock, iflags); + dev->driver->disconnect(&dev->gadget); +- spin_unlock(&dev->lock); ++ spin_lock_irqsave(&dev->lock, iflags); + } + pch_udc_set_disconnect(dev); + dev->vbus_session = 0; + } ++ spin_unlock_irqrestore(&dev->lock, iflags); + } + + /** +@@ -1172,20 +1176,25 @@ static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value) + static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on) + { + struct pch_udc_dev *dev; ++ unsigned long iflags; + + if (!gadget) + return -EINVAL; ++ + dev = container_of(gadget, struct pch_udc_dev, gadget); ++ ++ spin_lock_irqsave(&dev->lock, iflags); + if (is_on) { + pch_udc_reconnect(dev); + } else { + if (dev->driver && dev->driver->disconnect) { +- spin_lock(&dev->lock); ++ spin_unlock_irqrestore(&dev->lock, iflags); + dev->driver->disconnect(&dev->gadget); +- spin_unlock(&dev->lock); ++ spin_lock_irqsave(&dev->lock, iflags); + } + pch_udc_set_disconnect(dev); + } ++ spin_unlock_irqrestore(&dev->lock, iflags); + + return 0; + } +@@ -1777,7 +1786,7 @@ static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep, + } + /* prevent from using desc. 
- set HOST BUSY */ + dma_desc->status |= PCH_UDC_BS_HST_BSY; +- dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID); ++ dma_desc->dataptr = lower_32_bits(DMA_ADDR_INVALID); + req->td_data = dma_desc; + req->td_data_last = dma_desc; + req->chain_len = 1; +@@ -2320,6 +2329,21 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num) + pch_udc_set_dma(dev, DMA_DIR_RX); + } + ++static int pch_udc_gadget_setup(struct pch_udc_dev *dev) ++ __must_hold(&dev->lock) ++{ ++ int rc; ++ ++ /* In some cases we can get an interrupt before driver gets setup */ ++ if (!dev->driver) ++ return -ESHUTDOWN; ++ ++ spin_unlock(&dev->lock); ++ rc = dev->driver->setup(&dev->gadget, &dev->setup_data); ++ spin_lock(&dev->lock); ++ return rc; ++} ++ + /** + * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts + * @dev: Reference to the device structure +@@ -2391,15 +2415,12 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev) + dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep; + else /* OUT */ + dev->gadget.ep0 = &ep->ep; +- spin_lock(&dev->lock); + /* If Mass storage Reset */ + if ((dev->setup_data.bRequestType == 0x21) && + (dev->setup_data.bRequest == 0xFF)) + dev->prot_stall = 0; + /* call gadget with setup data received */ +- setup_supported = dev->driver->setup(&dev->gadget, +- &dev->setup_data); +- spin_unlock(&dev->lock); ++ setup_supported = pch_udc_gadget_setup(dev); + + if (dev->setup_data.bRequestType & USB_DIR_IN) { + ep->td_data->status = (ep->td_data->status & +@@ -2647,9 +2668,7 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev) + dev->ep[i].halted = 0; + } + dev->stall = 0; +- spin_unlock(&dev->lock); +- dev->driver->setup(&dev->gadget, &dev->setup_data); +- spin_lock(&dev->lock); ++ pch_udc_gadget_setup(dev); + } + + /** +@@ -2684,9 +2703,7 @@ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev) + dev->stall = 0; + + /* call gadget zero with setup data received */ +- spin_unlock(&dev->lock); +- dev->driver->setup(&dev->gadget, &dev->setup_data); +- spin_lock(&dev->lock); ++ pch_udc_gadget_setup(dev); + } + + /** +@@ -2960,7 +2977,7 @@ static int init_dma_pools(struct pch_udc_dev *dev) + dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf, + UDC_EP0OUT_BUFF_SIZE * 4, + DMA_FROM_DEVICE); +- return 0; ++ return dma_mapping_error(&dev->pdev->dev, dev->dma_addr); + } + + static int pch_udc_start(struct usb_gadget *g, +diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c +index 230e3248f386..80503c3604ca 100644 +--- a/drivers/usb/gadget/udc/r8a66597-udc.c ++++ b/drivers/usb/gadget/udc/r8a66597-udc.c +@@ -1855,6 +1855,8 @@ static int r8a66597_probe(struct platform_device *pdev) + return PTR_ERR(reg); + + ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); ++ if (!ires) ++ return -EINVAL; + irq = ires->start; + irq_trigger = ires->flags & IRQF_TRIGGER_MASK; + +diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c +index eb3571ee59e3..08153a48704b 100644 +--- a/drivers/usb/gadget/udc/s3c2410_udc.c ++++ b/drivers/usb/gadget/udc/s3c2410_udc.c +@@ -269,10 +269,6 @@ static void s3c2410_udc_done(struct s3c2410_ep *ep, + static void s3c2410_udc_nuke(struct s3c2410_udc *udc, + struct s3c2410_ep *ep, int status) + { +- /* Sanity check */ +- if (&ep->queue == NULL) +- return; +- + while (!list_empty(&ep->queue)) { + struct s3c2410_request *req; + req = list_entry(ep->queue.next, struct s3c2410_request, +diff --git a/drivers/usb/gadget/usbstring.c 
b/drivers/usb/gadget/usbstring.c +index 73a4dfba0edb..0173a9969b9a 100644 +--- a/drivers/usb/gadget/usbstring.c ++++ b/drivers/usb/gadget/usbstring.c +@@ -59,9 +59,9 @@ usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf) + return -EINVAL; + + /* string descriptors have length, tag, then UTF16-LE text */ +- len = min ((size_t) 126, strlen (s->s)); ++ len = min((size_t)USB_MAX_STRING_LEN, strlen(s->s)); + len = utf8s_to_utf16s(s->s, len, UTF16_LITTLE_ENDIAN, +- (wchar_t *) &buf[2], 126); ++ (wchar_t *) &buf[2], USB_MAX_STRING_LEN); + if (len < 0) + return -EINVAL; + buf [0] = (len + 1) * 2; +diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c +index 7a603f66a9bc..44b7c3e780f6 100644 +--- a/drivers/usb/host/ehci-exynos.c ++++ b/drivers/usb/host/ehci-exynos.c +@@ -199,9 +199,8 @@ static int exynos_ehci_probe(struct platform_device *pdev) + hcd->rsrc_len = resource_size(res); + + irq = platform_get_irq(pdev, 0); +- if (!irq) { +- dev_err(&pdev->dev, "Failed to get IRQ\n"); +- err = -ENODEV; ++ if (irq < 0) { ++ err = irq; + goto fail_io; + } + +diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c +index e79ac2a45e59..c149733f2745 100644 +--- a/drivers/usb/host/ehci-hcd.c ++++ b/drivers/usb/host/ehci-hcd.c +@@ -35,6 +35,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -589,6 +590,7 @@ static int ehci_run (struct usb_hcd *hcd) + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + u32 temp; + u32 hcc_params; ++ int rc; + + hcd->uses_new_polling = 1; + +@@ -644,9 +646,20 @@ static int ehci_run (struct usb_hcd *hcd) + down_write(&ehci_cf_port_reset_rwsem); + ehci->rh_state = EHCI_RH_RUNNING; + ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag); ++ ++ /* Wait until HC become operational */ + ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ + msleep(5); ++ rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000); ++ + up_write(&ehci_cf_port_reset_rwsem); ++ ++ if (rc) { ++ ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n", ++ ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc); ++ return rc; ++ } ++ + ehci->last_periodic_enable = ktime_get_real(); + + temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); +diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c +index 37acb0e63fb2..1bac045c94f3 100644 +--- a/drivers/usb/host/ehci-hub.c ++++ b/drivers/usb/host/ehci-hub.c +@@ -27,7 +27,6 @@ + */ + + /*-------------------------------------------------------------------------*/ +-#include + + #define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E) + +@@ -351,6 +350,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) + + unlink_empty_async_suspended(ehci); + ++ /* Some Synopsys controllers mistakenly leave IAA turned on */ ++ ehci_writel(ehci, STS_IAA, &ehci->regs->status); ++ + /* Any IAA cycle that started before the suspend is now invalid */ + end_iaa_cycle(ehci); + ehci_handle_start_intr_unlinks(ehci); +diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c +index 849806a75f1c..b29610899c9f 100644 +--- a/drivers/usb/host/ehci-mv.c ++++ b/drivers/usb/host/ehci-mv.c +@@ -196,12 +196,10 @@ static int mv_ehci_probe(struct platform_device *pdev) + hcd->rsrc_len = resource_size(r); + hcd->regs = ehci_mv->op_regs; + +- hcd->irq = platform_get_irq(pdev, 0); +- if (!hcd->irq) { +- dev_err(&pdev->dev, "Cannot get irq."); +- retval = -ENODEV; ++ retval = platform_get_irq(pdev, 0); ++ if (retval < 0) + goto 
err_disable_clk; +- } ++ hcd->irq = retval; + + ehci = hcd_to_ehci(hcd); + ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs; +diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c +index c7a9b31eeaef..637079a35003 100644 +--- a/drivers/usb/host/ehci-mxc.c ++++ b/drivers/usb/host/ehci-mxc.c +@@ -63,6 +63,8 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev) + } + + irq = platform_get_irq(pdev, 0); ++ if (irq < 0) ++ return irq; + + hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, dev_name(dev)); + if (!hcd) +diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c +index 94ea9fff13e6..9227a9ddac60 100644 +--- a/drivers/usb/host/ehci-omap.c ++++ b/drivers/usb/host/ehci-omap.c +@@ -237,6 +237,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) + + err_pm_runtime: + pm_runtime_put_sync(dev); ++ pm_runtime_disable(dev); + + err_phy: + for (i = 0; i < omap->nports; i++) { +diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c +index 3b3649d88c5f..a31015a95fd6 100644 +--- a/drivers/usb/host/ehci-pci.c ++++ b/drivers/usb/host/ehci-pci.c +@@ -229,6 +229,13 @@ static int ehci_pci_setup(struct usb_hcd *hcd) + ehci_info(ehci, "applying MosChip frame-index workaround\n"); + ehci->frame_index_bug = 1; + break; ++ case PCI_VENDOR_ID_HUAWEI: ++ /* Synopsys HC bug */ ++ if (pdev->device == 0xa239) { ++ ehci_info(ehci, "applying Synopsys HC workaround\n"); ++ ehci->has_synopsys_hc_bug = 1; ++ } ++ break; + } + + /* optional debug port, normally in the first BAR */ +@@ -305,6 +312,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd) + if (pdev->vendor == PCI_VENDOR_ID_STMICRO + && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST) + ; /* ConneXT has no sbrn register */ ++ else if (pdev->vendor == PCI_VENDOR_ID_HUAWEI ++ && pdev->device == 0xa239) ++ ; /* HUAWEI Kunpeng920 USB EHCI has no sbrn register */ + else + pci_read_config_byte(pdev, 0x60, &ehci->sbrn); + +diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c +index 72853020a542..15249e664b89 100644 +--- a/drivers/usb/host/fotg210-hcd.c ++++ b/drivers/usb/host/fotg210-hcd.c +@@ -5599,7 +5599,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev) + struct usb_hcd *hcd; + struct resource *res; + int irq; +- int retval = -ENODEV; ++ int retval; + struct fotg210_hcd *fotg210; + + if (usb_disabled()) +@@ -5619,7 +5619,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev) + hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev, + dev_name(dev)); + if (!hcd) { +- dev_err(dev, "failed to create hcd with err %d\n", retval); ++ dev_err(dev, "failed to create hcd\n"); + retval = -ENOMEM; + goto fail_create_hcd; + } +diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c +index f07ccb25bc24..6f006cbf1f83 100644 +--- a/drivers/usb/host/fsl-mph-dr-of.c ++++ b/drivers/usb/host/fsl-mph-dr-of.c +@@ -98,10 +98,13 @@ static struct platform_device *fsl_usb2_device_register( + + pdev->dev.coherent_dma_mask = ofdev->dev.coherent_dma_mask; + +- if (!pdev->dev.dma_mask) ++ if (!pdev->dev.dma_mask) { + pdev->dev.dma_mask = &ofdev->dev.coherent_dma_mask; +- else +- dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); ++ } else { ++ retval = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); ++ if (retval) ++ goto error; ++ } + + retval = platform_device_add_data(pdev, pdata, sizeof(*pdata)); + if (retval) +diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c +index 369869a29ebd..1654a1bc6414 100644 +--- 
a/drivers/usb/host/max3421-hcd.c ++++ b/drivers/usb/host/max3421-hcd.c +@@ -149,8 +149,6 @@ struct max3421_hcd { + */ + struct urb *curr_urb; + enum scheduling_pass sched_pass; +- struct usb_device *loaded_dev; /* dev that's loaded into the chip */ +- int loaded_epnum; /* epnum whose toggles are loaded */ + int urb_done; /* > 0 -> no errors, < 0: errno */ + size_t curr_len; + u8 hien; +@@ -488,39 +486,17 @@ max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev) + * Caller must NOT hold HCD spinlock. + */ + static void +-max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum, +- int force_toggles) ++max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum) + { +- struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); +- int old_epnum, same_ep, rcvtog, sndtog; +- struct usb_device *old_dev; ++ int rcvtog, sndtog; + u8 hctl; + +- old_dev = max3421_hcd->loaded_dev; +- old_epnum = max3421_hcd->loaded_epnum; +- +- same_ep = (dev == old_dev && epnum == old_epnum); +- if (same_ep && !force_toggles) +- return; +- +- if (old_dev && !same_ep) { +- /* save the old end-points toggles: */ +- u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); +- +- rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1; +- sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1; +- +- /* no locking: HCD (i.e., we) own toggles, don't we? */ +- usb_settoggle(old_dev, old_epnum, 0, rcvtog); +- usb_settoggle(old_dev, old_epnum, 1, sndtog); +- } + /* setup new endpoint's toggle bits: */ + rcvtog = usb_gettoggle(dev, epnum, 0); + sndtog = usb_gettoggle(dev, epnum, 1); + hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) | + BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT)); + +- max3421_hcd->loaded_epnum = epnum; + spi_wr8(hcd, MAX3421_REG_HCTL, hctl); + + /* +@@ -528,7 +504,6 @@ max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum, + * address-assignment so it's best to just always load the + * address whenever the end-point changed/was forced. + */ +- max3421_hcd->loaded_dev = dev; + spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum); + } + +@@ -663,7 +638,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd) + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + struct urb *urb, *curr_urb = NULL; + struct max3421_ep *max3421_ep; +- int epnum, force_toggles = 0; ++ int epnum; + struct usb_host_endpoint *ep; + struct list_head *pos; + unsigned long flags; +@@ -773,7 +748,6 @@ max3421_select_and_start_urb(struct usb_hcd *hcd) + usb_settoggle(urb->dev, epnum, 0, 1); + usb_settoggle(urb->dev, epnum, 1, 1); + max3421_ep->pkt_state = PKT_STATE_SETUP; +- force_toggles = 1; + } else + max3421_ep->pkt_state = PKT_STATE_TRANSFER; + } +@@ -781,7 +755,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd) + spin_unlock_irqrestore(&max3421_hcd->lock, flags); + + max3421_ep->last_active = max3421_hcd->frame_number; +- max3421_set_address(hcd, urb->dev, epnum, force_toggles); ++ max3421_set_address(hcd, urb->dev, epnum); + max3421_set_speed(hcd, urb->dev); + max3421_next_transfer(hcd, 0); + return 1; +@@ -1376,6 +1350,16 @@ max3421_urb_done(struct usb_hcd *hcd) + status = 0; + urb = max3421_hcd->curr_urb; + if (urb) { ++ /* save the old end-points toggles: */ ++ u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); ++ int rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1; ++ int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1; ++ int epnum = usb_endpoint_num(&urb->ep->desc); ++ ++ /* no locking: HCD (i.e., we) own toggles, don't we? 
*/ ++ usb_settoggle(urb->dev, epnum, 0, rcvtog); ++ usb_settoggle(urb->dev, epnum, 1, sndtog); ++ + max3421_hcd->curr_urb = NULL; + spin_lock_irqsave(&max3421_hcd->lock, flags); + usb_hcd_unlink_urb_from_ep(hcd, urb); +diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c +index 6865b919403f..2ed062a2e93b 100644 +--- a/drivers/usb/host/ohci-exynos.c ++++ b/drivers/usb/host/ohci-exynos.c +@@ -166,9 +166,8 @@ static int exynos_ohci_probe(struct platform_device *pdev) + hcd->rsrc_len = resource_size(res); + + irq = platform_get_irq(pdev, 0); +- if (!irq) { +- dev_err(&pdev->dev, "Failed to get IRQ\n"); +- err = -ENODEV; ++ if (irq < 0) { ++ err = irq; + goto fail_io; + } + +diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c +index 76b893faae04..e6e356de5a3f 100644 +--- a/drivers/usb/host/ohci-hcd.c ++++ b/drivers/usb/host/ohci-hcd.c +@@ -100,7 +100,7 @@ static void io_watchdog_func(unsigned long _ohci); + + + /* Some boards misreport power switching/overcurrent */ +-static bool distrust_firmware = true; ++static bool distrust_firmware; + module_param (distrust_firmware, bool, 0); + MODULE_PARM_DESC (distrust_firmware, + "true to distrust firmware power/overcurrent setup"); +@@ -665,20 +665,24 @@ static int ohci_run (struct ohci_hcd *ohci) + + /* handle root hub init quirks ... */ + val = roothub_a (ohci); +- val &= ~(RH_A_PSM | RH_A_OCPM); ++ /* Configure for per-port over-current protection by default */ ++ val &= ~RH_A_NOCP; ++ val |= RH_A_OCPM; + if (ohci->flags & OHCI_QUIRK_SUPERIO) { +- /* NSC 87560 and maybe others */ ++ /* NSC 87560 and maybe others. ++ * Ganged power switching, no over-current protection. ++ */ + val |= RH_A_NOCP; +- val &= ~(RH_A_POTPGT | RH_A_NPS); +- ohci_writel (ohci, val, &ohci->regs->roothub.a); ++ val &= ~(RH_A_POTPGT | RH_A_NPS | RH_A_PSM | RH_A_OCPM); + } else if ((ohci->flags & OHCI_QUIRK_AMD756) || + (ohci->flags & OHCI_QUIRK_HUB_POWER)) { + /* hub power always on; required for AMD-756 and some +- * Mac platforms. ganged overcurrent reporting, if any. ++ * Mac platforms. + */ + val |= RH_A_NPS; +- ohci_writel (ohci, val, &ohci->regs->roothub.a); + } ++ ohci_writel(ohci, val, &ohci->regs->roothub.a); ++ + ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status); + ohci_writel (ohci, (val & RH_A_NPS) ? 
0 : RH_B_PPCM, + &ohci->regs->roothub.b); +diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c +index a8b8d8b8d9f3..a960d2bb8dd1 100644 +--- a/drivers/usb/host/ohci-sm501.c ++++ b/drivers/usb/host/ohci-sm501.c +@@ -196,6 +196,7 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev) + struct resource *mem; + + usb_remove_hcd(hcd); ++ iounmap(hcd->regs); + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); + usb_put_hcd(hcd); + dma_release_declared_memory(&pdev->dev); +diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c +index 4e4d601af35c..2f48da0c0bb3 100644 +--- a/drivers/usb/host/oxu210hp-hcd.c ++++ b/drivers/usb/host/oxu210hp-hcd.c +@@ -3734,8 +3734,10 @@ static struct usb_hcd *oxu_create(struct platform_device *pdev, + oxu->is_otg = otg; + + ret = usb_add_hcd(hcd, irq, IRQF_SHARED); +- if (ret < 0) ++ if (ret < 0) { ++ usb_put_hcd(hcd); + return ERR_PTR(ret); ++ } + + device_wakeup_enable(hcd->self.controller); + return hcd; +diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c +index fd2a11473be7..455c59fe32fa 100644 +--- a/drivers/usb/host/sl811-hcd.c ++++ b/drivers/usb/host/sl811-hcd.c +@@ -1286,11 +1286,10 @@ sl811h_hub_control( + goto error; + put_unaligned_le32(sl811->port1, buf); + +-#ifndef VERBOSE +- if (*(u16*)(buf+2)) /* only if wPortChange is interesting */ +-#endif +- dev_dbg(hcd->self.controller, "GetPortStatus %08x\n", +- sl811->port1); ++ if (__is_defined(VERBOSE) || ++ *(u16*)(buf+2)) /* only if wPortChange is interesting */ ++ dev_dbg(hcd->self.controller, "GetPortStatus %08x\n", ++ sl811->port1); + break; + case SetPortFeature: + if (wIndex != 1 || wLength != 0) +diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h +index e0244fb3903d..1e9b2ed83da2 100644 +--- a/drivers/usb/host/xhci-ext-caps.h ++++ b/drivers/usb/host/xhci-ext-caps.h +@@ -19,8 +19,9 @@ + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +-/* Up to 16 ms to halt an HC */ +-#define XHCI_MAX_HALT_USEC (16*1000) ++ ++/* HC should halt within 16 ms, but use 32 ms as some hosts take longer */ ++#define XHCI_MAX_HALT_USEC (32 * 1000) + /* HC not running - set to 1 when run/stop bit is cleared. */ + #define XHCI_STS_HALT (1<<0) + +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c +index d3d65f037ea1..51a84f86ca49 100644 +--- a/drivers/usb/host/xhci-hub.c ++++ b/drivers/usb/host/xhci-hub.c +@@ -627,15 +627,6 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, + { + u32 pls = status_reg & PORT_PLS_MASK; + +- /* resume state is a xHCI internal state. +- * Do not report it to usb core, instead, pretend to be U3, +- * thus usb core knows it's not ready for transfer +- */ +- if (pls == XDEV_RESUME) { +- *status |= USB_SS_PORT_LS_U3; +- return; +- } +- + /* When the CAS bit is set then warm reset + * should be performed on port + */ +@@ -657,6 +648,16 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, + */ + pls |= USB_PORT_STAT_CONNECTION; + } else { ++ /* ++ * Resume state is an xHCI internal state. Do not report it to ++ * usb core, instead, pretend to be U3, thus usb core knows ++ * it's not ready for transfer. 
++ */ ++ if (pls == XDEV_RESUME) { ++ *status |= USB_SS_PORT_LS_U3; ++ return; ++ } ++ + /* + * If CAS bit isn't set but the Port is already at + * Compliance Mode, fake a connection so the USB core +@@ -1506,11 +1507,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) + * Inform the usbcore about resume-in-progress by returning + * a non-zero value even if there are no status changes. + */ ++ spin_lock_irqsave(&xhci->lock, flags); ++ + status = bus_state->resuming_ports; + + mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC; + +- spin_lock_irqsave(&xhci->lock, flags); + /* For each port, did anything change? If so, set that bit in buf. */ + for (i = 0; i < max_ports; i++) { + temp = readl(port_array[i]); +@@ -1638,6 +1640,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd) + hcd->state = HC_STATE_SUSPENDED; + bus_state->next_statechange = jiffies + msecs_to_jiffies(10); + spin_unlock_irqrestore(&xhci->lock, flags); ++ ++ if (bus_state->bus_suspended) ++ usleep_range(5000, 10000); ++ + return 0; + } + +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index 59170b85e5d1..999b09173188 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -2268,6 +2268,15 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, + + if (major_revision == 0x03) { + rhub = &xhci->usb3_rhub; ++ /* ++ * Some hosts incorrectly use sub-minor version for minor ++ * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01 ++ * for bcdUSB 0x310). Since there is no USB release with sub ++ * minor version 0x301 to 0x309, we can assume that they are ++ * incorrect and fix it here. ++ */ ++ if (minor_revision > 0x00 && minor_revision < 0x10) ++ minor_revision <<= 4; + } else if (major_revision <= 0x02) { + rhub = &xhci->usb2_rhub; + } else { +diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c +index 144674913c78..3929ac8c6a73 100644 +--- a/drivers/usb/host/xhci-mtk-sch.c ++++ b/drivers/usb/host/xhci-mtk-sch.c +@@ -284,6 +284,10 @@ static bool need_bw_sch(struct usb_host_endpoint *ep, + if (is_fs_or_ls(speed) && !has_tt) + return false; + ++ /* skip endpoint with zero maxpkt */ ++ if (usb_endpoint_maxp(&ep->desc) == 0) ++ return false; ++ + return true; + } + +diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c +index c10875834a5a..0fe26152bae5 100644 +--- a/drivers/usb/host/xhci-mtk.c ++++ b/drivers/usb/host/xhci-mtk.c +@@ -470,6 +470,13 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci) + xhci->quirks |= XHCI_SPURIOUS_SUCCESS; + if (mtk->lpm_support) + xhci->quirks |= XHCI_LPM_SUPPORT; ++ ++ /* ++ * MTK xHCI 0.96: PSA is 1 by default even if doesn't support stream, ++ * and it's 3 when support it. 
++ */ ++ if (xhci->hci_version < 0x100 && HCC_MAX_PSA(xhci->hcc_params) == 4) ++ xhci->quirks |= XHCI_BROKEN_STREAMS; + } + + /* called during probe() after chip reset completes */ +@@ -636,7 +643,8 @@ static int xhci_mtk_probe(struct platform_device *pdev) + if (ret) + goto put_usb3_hcd; + +- if (HCC_MAX_PSA(xhci->hcc_params) >= 4) ++ if (HCC_MAX_PSA(xhci->hcc_params) >= 4 && ++ !(xhci->quirks & XHCI_BROKEN_STREAMS)) + xhci->shared_hcd->can_do_streams = 1; + + ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED); +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index adf21ac76e34..1b063e491f60 100644 +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -215,6 +215,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && + pdev->device == 0x1142) + xhci->quirks |= XHCI_TRUST_TX_LENGTH; ++ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && ++ pdev->device == 0x2142) ++ xhci->quirks |= XHCI_NO_64BIT_SUPPORT; + + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c +old mode 100644 +new mode 100755 +index d6176d5f2940..17a115e96f71 +--- a/drivers/usb/host/xhci-plat.c ++++ b/drivers/usb/host/xhci-plat.c +@@ -502,10 +502,34 @@ static int xhci_plat_runtime_resume(struct device *dev) + return ret; + } + ++static int xhci_plat_suspend(struct device *dev) ++{ ++ struct usb_hcd *hcd = dev_get_drvdata(dev); ++ struct xhci_hcd *xhci = hcd_to_xhci(hcd); ++ ++ if (!xhci || hcd_to_bus(hcd)->skip_resume) ++ return 0; ++ ++ return xhci_suspend(xhci, true); ++} ++ ++static int xhci_plat_resume(struct device *dev) ++{ ++ struct usb_hcd *hcd = dev_get_drvdata(dev); ++ struct xhci_hcd *xhci = hcd_to_xhci(hcd); ++ ++ if (!xhci || hcd_to_bus(hcd)->skip_resume) ++ return 0; ++ ++ return xhci_resume(xhci, 0); ++} ++ + static const struct dev_pm_ops xhci_plat_pm_ops = { + .freeze = xhci_plat_pm_freeze, + .restore = xhci_plat_pm_restore, + .thaw = xhci_plat_pm_restore, ++ .suspend = xhci_plat_suspend, ++ .resume = xhci_plat_resume, + SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend, xhci_plat_runtime_resume, + xhci_plat_runtime_idle) + }; +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index f27aae512510..3d3b72cf304a 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -682,11 +682,16 @@ void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring, + dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, + DMA_FROM_DEVICE); + /* for in tranfers we need to copy the data from bounce to sg */ +- len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, +- seg->bounce_len, seg->bounce_offs); +- if (len != seg->bounce_len) +- xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", +- len, seg->bounce_len); ++ if (urb->num_sgs) { ++ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, ++ seg->bounce_len, seg->bounce_offs); ++ if (len != seg->bounce_len) ++ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", ++ len, seg->bounce_len); ++ } else { ++ memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf, ++ seg->bounce_len); ++ } + seg->bounce_len = 0; + seg->bounce_offs = 0; + } +@@ -2841,6 +2846,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, + trb->field[0] = cpu_to_le32(field1); + trb->field[1] = cpu_to_le32(field2); + trb->field[2] = 
cpu_to_le32(field3); ++ /* make sure TRB is fully written before giving it to the controller */ ++ wmb(); + trb->field[3] = cpu_to_le32(field4); + inc_enq(xhci, ring, more_trbs_coming); + } +@@ -3184,12 +3191,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, + + /* create a max max_pkt sized bounce buffer pointed to by last trb */ + if (usb_urb_dir_out(urb)) { +- len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, +- seg->bounce_buf, new_buff_len, enqd_len); +- if (len != new_buff_len) +- xhci_warn(xhci, +- "WARN Wrong bounce buffer write length: %zu != %d\n", +- len, new_buff_len); ++ if (urb->num_sgs) { ++ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, ++ seg->bounce_buf, new_buff_len, enqd_len); ++ if (len != new_buff_len) ++ xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n", ++ len, new_buff_len); ++ } else { ++ memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len); ++ } ++ + seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, + max_pkt, DMA_TO_DEVICE); + } else { +diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c +index 97d57a94776a..040c43097d55 100644 +--- a/drivers/usb/host/xhci-tegra.c ++++ b/drivers/usb/host/xhci-tegra.c +@@ -579,6 +579,13 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra, + enable); + if (err < 0) + break; ++ ++ /* ++ * wait 500us for LFPS detector to be disabled before ++ * sending ACK ++ */ ++ if (!enable) ++ usleep_range(500, 1000); + } + + if (err < 0) { +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index a4fead9772e5..ba677acb1d48 100755 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -1079,6 +1079,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) + struct usb_hcd *secondary_hcd; + int retval = 0; + bool comp_timer_running = false; ++ bool pending_portevent = false; + + if (!hcd->state || !xhci->suspended) + return 0; +@@ -1212,13 +1213,22 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) + + done: + if (retval == 0) { +- /* Resume root hubs only when have pending events. */ +- if (xhci_pending_portevent(xhci)) { ++ /* ++ * Resume roothubs only if there are pending events. ++ * USB 3 devices resend U3 LFPS wake after a 100ms delay if ++ * the first wake signalling failed, give it that chance. ++ */ ++ pending_portevent = xhci_pending_portevent(xhci); ++ if (!pending_portevent) { ++ msleep(120); ++ pending_portevent = xhci_pending_portevent(xhci); ++ } ++ ++ if (pending_portevent) { + usb_hcd_resume_root_hub(xhci->shared_hcd); + usb_hcd_resume_root_hub(hcd); + } + } +- + /* + * If system is subject to the Quirk, Compliance Mode Timer needs to + * be re-initialized Always after a system resume. Ports are subject +@@ -1357,7 +1367,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, + * we need to issue an evaluate context command and wait on it. + */ + static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, +- unsigned int ep_index, struct urb *urb) ++ unsigned int ep_index, struct urb *urb, gfp_t mem_flags) + { + struct xhci_container_ctx *out_ctx; + struct xhci_input_control_ctx *ctrl_ctx; +@@ -1388,7 +1398,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, + * changes max packet sizes. 
+ */ + +- command = xhci_alloc_command(xhci, false, true, GFP_KERNEL); ++ command = xhci_alloc_command(xhci, false, true, mem_flags); + if (!command) + return -ENOMEM; + +@@ -1405,6 +1415,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, + xhci->devs[slot_id]->out_ctx, ep_index); + + ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); ++ ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */ + ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK); + ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size)); + +@@ -1494,7 +1505,7 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) + */ + if (urb->dev->speed == USB_SPEED_FULL) { + ret = xhci_check_maxpacket(xhci, slot_id, +- ep_index, urb); ++ ep_index, urb, mem_flags); + if (ret < 0) { + xhci_urb_free_priv(urb_priv); + urb->hcpriv = NULL; +@@ -1506,7 +1517,9 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) + * atomic context to this function, which may allocate memory. + */ + spin_lock_irqsave(&xhci->lock, flags); +- if (xhci->xhc_state & XHCI_STATE_DYING) ++ if (xhci->xhc_state & XHCI_STATE_DYING || ++ xhci_check_args(hcd, urb->dev, urb->ep, ++ true, true, __func__) <= 0) + goto dying; + ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, + slot_id, ep_index); +@@ -1515,7 +1528,9 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) + spin_unlock_irqrestore(&xhci->lock, flags); + } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { + spin_lock_irqsave(&xhci->lock, flags); +- if (xhci->xhc_state & XHCI_STATE_DYING) ++ if (xhci->xhc_state & XHCI_STATE_DYING || ++ xhci_check_args(hcd, urb->dev, urb->ep, ++ true, true, __func__) <= 0) + goto dying; + if (xhci->devs[slot_id]->eps[ep_index].ep_state & + EP_GETTING_STREAMS) { +@@ -1537,7 +1552,9 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) + spin_unlock_irqrestore(&xhci->lock, flags); + } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { + spin_lock_irqsave(&xhci->lock, flags); +- if (xhci->xhc_state & XHCI_STATE_DYING) ++ if (xhci->xhc_state & XHCI_STATE_DYING || ++ xhci_check_args(hcd, urb->dev, urb->ep, ++ true, true, __func__) <= 0) + goto dying; + ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, + slot_id, ep_index); +@@ -1546,7 +1563,9 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) + spin_unlock_irqrestore(&xhci->lock, flags); + } else { + spin_lock_irqsave(&xhci->lock, flags); +- if (xhci->xhc_state & XHCI_STATE_DYING) ++ if (xhci->xhc_state & XHCI_STATE_DYING || ++ xhci_check_args(hcd, urb->dev, urb->ep, ++ true, true, __func__) <= 0) + goto dying; + ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, + slot_id, ep_index); +@@ -4308,6 +4327,9 @@ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, + mutex_lock(hcd->bandwidth_mutex); + xhci_change_max_exit_latency(xhci, udev, 0); + mutex_unlock(hcd->bandwidth_mutex); ++ readl_poll_timeout(port_array[port_num], pm_val, ++ (pm_val & PORT_PLS_MASK) == XDEV_U0, ++ 100, 10000); + return 0; + } + } +@@ -4469,19 +4491,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, + { + unsigned long long timeout_ns; + ++ if (xhci->quirks & XHCI_INTEL_HOST) ++ timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); ++ else ++ timeout_ns = udev->u1_params.sel; ++ + /* Prevent U1 if service interval is shorter than U1 exit latency */ + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { +- if (xhci_service_interval_to_ns(desc) <= 
udev->u1_params.mel) { ++ if (xhci_service_interval_to_ns(desc) <= timeout_ns) { + dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n"); + return USB3_LPM_DISABLED; + } + } + +- if (xhci->quirks & XHCI_INTEL_HOST) +- timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); +- else +- timeout_ns = udev->u1_params.sel; +- + /* The U1 timeout is encoded in 1us intervals. + * Don't return a timeout of zero, because that's USB3_LPM_DISABLED. + */ +@@ -4533,19 +4555,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, + { + unsigned long long timeout_ns; + ++ if (xhci->quirks & XHCI_INTEL_HOST) ++ timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); ++ else ++ timeout_ns = udev->u2_params.sel; ++ + /* Prevent U2 if service interval is shorter than U2 exit latency */ + if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { +- if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) { ++ if (xhci_service_interval_to_ns(desc) <= timeout_ns) { + dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n"); + return USB3_LPM_DISABLED; + } + } + +- if (xhci->quirks & XHCI_INTEL_HOST) +- timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); +- else +- timeout_ns = udev->u2_params.sel; +- + /* The U2 timeout is encoded in 256us intervals */ + timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); + /* If the necessary timeout value is bigger than what we can set in the +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index 6c0cfaa0c6d4..cd402a196758 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -709,7 +709,7 @@ struct xhci_ep_ctx { + * 4 - TRB error + * 5-7 - reserved + */ +-#define EP_STATE_MASK (0xf) ++#define EP_STATE_MASK (0x7) + #define EP_STATE_DISABLED 0 + #define EP_STATE_RUNNING 1 + #define EP_STATE_HALTED 2 +diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c +index 7fb0590187d4..a1bc516be154 100644 +--- a/drivers/usb/misc/adutux.c ++++ b/drivers/usb/misc/adutux.c +@@ -210,6 +210,7 @@ static void adu_interrupt_out_callback(struct urb *urb) + + if (status != 0) { + if ((status != -ENOENT) && ++ (status != -ESHUTDOWN) && + (status != -ECONNRESET)) { + dev_dbg(&dev->udev->dev, + "%s :nonzero status received: %d\n", __func__, +diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c +index baa57bc29192..1db97bbb7463 100644 +--- a/drivers/usb/misc/lvstest.c ++++ b/drivers/usb/misc/lvstest.c +@@ -440,7 +440,7 @@ static int lvs_rh_probe(struct usb_interface *intf, + USB_DT_SS_HUB_SIZE, USB_CTRL_GET_TIMEOUT); + if (ret < (USB_DT_HUB_NONVAR_SIZE + 2)) { + dev_err(&hdev->dev, "wrong root hub descriptor read %d\n", ret); +- return ret; ++ return ret < 0 ? 
ret : -EINVAL; + } + + /* submit urb to poll interrupt endpoint */ +diff --git a/drivers/usb/misc/sisusbvga/Kconfig b/drivers/usb/misc/sisusbvga/Kconfig +index 36bc28c884ad..47dabccafef4 100644 +--- a/drivers/usb/misc/sisusbvga/Kconfig ++++ b/drivers/usb/misc/sisusbvga/Kconfig +@@ -15,7 +15,7 @@ config USB_SISUSBVGA + + config USB_SISUSBVGA_CON + bool "Text console and mode switching support" if USB_SISUSBVGA +- depends on VT ++ depends on VT && BROKEN + select FONT_8x16 + ---help--- + Say Y here if you want a VGA text console via the USB dongle or +diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c +index 895e8c0288cf..30b3bdc4e676 100644 +--- a/drivers/usb/misc/sisusbvga/sisusb.c ++++ b/drivers/usb/misc/sisusbvga/sisusb.c +@@ -762,7 +762,7 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr, + u8 swap8, fromkern = kernbuffer ? 1 : 0; + u16 swap16; + u32 swap32, flag = (length >> 28) & 1; +- char buf[4]; ++ u8 buf[4]; + + /* if neither kernbuffer not userbuffer are given, assume + * data in obuf +diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c +index 9795457723d8..ad71840db899 100644 +--- a/drivers/usb/misc/trancevibrator.c ++++ b/drivers/usb/misc/trancevibrator.c +@@ -74,9 +74,9 @@ static ssize_t set_speed(struct device *dev, struct device_attribute *attr, + /* Set speed */ + retval = usb_control_msg(tv->udev, usb_sndctrlpipe(tv->udev, 0), + 0x01, /* vendor request: set speed */ +- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER, ++ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, + tv->speed, /* speed value */ +- 0, NULL, 0, USB_CTRL_GET_TIMEOUT); ++ 0, NULL, 0, USB_CTRL_SET_TIMEOUT); + if (retval) { + tv->speed = old; + dev_dbg(&tv->udev->dev, "retval = %d\n", retval); +diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c +index e31f72b3a22c..54b8e8645e0b 100644 +--- a/drivers/usb/misc/usbtest.c ++++ b/drivers/usb/misc/usbtest.c +@@ -2771,6 +2771,7 @@ static void usbtest_disconnect(struct usb_interface *intf) + + usb_set_intfdata(intf, NULL); + dev_dbg(&intf->dev, "disconnect\n"); ++ kfree(dev->buf); + kfree(dev); + } + +diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c +index e77465a30ac6..0ec6d76d1563 100644 +--- a/drivers/usb/misc/uss720.c ++++ b/drivers/usb/misc/uss720.c +@@ -750,6 +750,7 @@ static int uss720_probe(struct usb_interface *intf, + parport_announce_port(pp); + + usb_set_intfdata(intf, pp); ++ usb_put_dev(usbdev); + return 0; + + probe_abort: +diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c +index 2350502f9054..16cab1b09ad8 100644 +--- a/drivers/usb/misc/yurex.c ++++ b/drivers/usb/misc/yurex.c +@@ -502,11 +502,14 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, + prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE); + dev_dbg(&dev->interface->dev, "%s - submit %c\n", __func__, + dev->cntl_buffer[0]); +- retval = usb_submit_urb(dev->cntl_urb, GFP_KERNEL); ++ retval = usb_submit_urb(dev->cntl_urb, GFP_ATOMIC); + if (retval >= 0) + timeout = schedule_timeout(YUREX_WRITE_TIMEOUT); + finish_wait(&dev->waitq, &wait); + ++ /* make sure URB is idle after timeout or (spurious) CMD_ACK */ ++ usb_kill_urb(dev->cntl_urb); ++ + mutex_unlock(&dev->io_mutex); + + if (retval < 0) { +diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c +index c232229162e0..fcc709c043f0 100644 +--- a/drivers/usb/musb/musb_core.c ++++ b/drivers/usb/musb/musb_core.c +@@ -2097,32 +2097,35 @@ int 
musb_queue_resume_work(struct musb *musb, + { + struct musb_pending_work *w; + unsigned long flags; ++ bool is_suspended; + int error; + + if (WARN_ON(!callback)) + return -EINVAL; + +- if (pm_runtime_active(musb->controller)) +- return callback(musb, data); ++ spin_lock_irqsave(&musb->list_lock, flags); ++ is_suspended = musb->is_runtime_suspended; ++ ++ if (is_suspended) { ++ w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC); ++ if (!w) { ++ error = -ENOMEM; ++ goto out_unlock; ++ } + +- w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC); +- if (!w) +- return -ENOMEM; ++ w->callback = callback; ++ w->data = data; + +- w->callback = callback; +- w->data = data; +- spin_lock_irqsave(&musb->list_lock, flags); +- if (musb->is_runtime_suspended) { + list_add_tail(&w->node, &musb->pending_list); + error = 0; +- } else { +- dev_err(musb->controller, "could not add resume work %p\n", +- callback); +- devm_kfree(musb->controller, w); +- error = -EINPROGRESS; + } ++ ++out_unlock: + spin_unlock_irqrestore(&musb->list_lock, flags); + ++ if (!is_suspended) ++ error = callback(musb, data); ++ + return error; + } + EXPORT_SYMBOL_GPL(musb_queue_resume_work); +diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c +index 241c22a48d63..9c9ba887622e 100644 +--- a/drivers/usb/pd/policy_engine.c ++++ b/drivers/usb/pd/policy_engine.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2016-2020, Linux Foundation. All rights reserved. ++/* Copyright (c) 2016-2021, Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -404,6 +404,11 @@ struct rx_msg { + #define SXR_SEND_SVDM(pd, cmd, num, tx_vdos, pos) usbpd_send_svdm(pd, 0xFF01, \ + cmd, SVDM_CMD_TYPE_RESP_ACK, \ + num, tx_vdos, pos) ++ ++#define SXR_SEND_ATTEN(pd, cmd, num, tx_vdos, pos) usbpd_send_svdm(pd, 0xFF01, \ ++ cmd, SVDM_CMD_TYPE_INITIATOR, \ ++ num, tx_vdos, pos) ++ + struct usbpd { + struct device dev; + struct workqueue_struct *wq; +@@ -499,6 +504,7 @@ struct usbpd { + bool send_get_battery_status; + u32 battery_sts_dobj; + bool is_sxr_dp_sink; ++ u8 dp_hpd_status_db; + }; + + static LIST_HEAD(_usbpd); /* useful for debugging */ +@@ -3985,6 +3991,50 @@ static ssize_t get_battery_status_show(struct device *dev, + } + static DEVICE_ATTR_RW(get_battery_status); + ++static ssize_t dp_hpd_status_store(struct device *dev, ++ struct device_attribute *attr, const char *buf, size_t size) ++{ ++ struct usbpd *pd = dev_get_drvdata(dev); ++ int val; ++ u32 tx_vdos[1]; ++ ++ if (sscanf(buf, "%d\n", &val) != 1) { ++ usbpd_dbg(&pd->dev, "Error: size is not 1\n"); ++ return -EINVAL; ++ } ++ ++ if (!sxr_dp_mode) { ++ usbpd_dbg(&pd->dev, "Error: Device is not in DP mode\n"); ++ pd->dp_hpd_status_db = 0; ++ return size; ++ } ++ pd->dp_hpd_status_db = val; ++ ++ if (val) { ++ usbpd_dbg(&pd->dev, "making hpd high\n"); ++ tx_vdos[0] = XR1_PIN_E_VDO; // Pin assign E ++ } else { ++ usbpd_dbg(&pd->dev, "making hpd low\n"); ++ tx_vdos[0] = XR1_DEFAULT_VDO; // Pin assign E ++ } ++ SXR_SEND_ATTEN(pd, USBPD_SVDM_ATTENTION, 0x1, tx_vdos, 0x1); ++ ++ return size; ++} ++ ++static ssize_t dp_hpd_status_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct usbpd *pd = dev_get_drvdata(dev); ++ ++ if (pd->dp_hpd_status_db == -EINVAL) ++ return -EINVAL; ++ ++ return snprintf(buf, PAGE_SIZE, "%s\n", ++ ((pd->dp_hpd_status_db >= 1) ? 
"on" : "off")); ++} ++static DEVICE_ATTR_RW(dp_hpd_status); ++ + static struct attribute *usbpd_attrs[] = { + &dev_attr_contract.attr, + &dev_attr_initial_pr.attr, +@@ -4009,6 +4059,7 @@ static struct attribute *usbpd_attrs[] = { + &dev_attr_get_pps_status.attr, + &dev_attr_get_battery_cap.attr, + &dev_attr_get_battery_status.attr, ++ &dev_attr_dp_hpd_status.attr, + NULL, + }; + ATTRIBUTE_GROUPS(usbpd); +diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c +index f6a1ae8abb21..aafecac9ba26 100644 +--- a/drivers/usb/renesas_usbhs/fifo.c ++++ b/drivers/usb/renesas_usbhs/fifo.c +@@ -115,6 +115,8 @@ static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo, + #define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1) + #define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0) + static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map); ++static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable); ++static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable); + struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) + { + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); +@@ -138,8 +140,15 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) + dmaengine_terminate_all(chan); + usbhsf_fifo_clear(pipe, fifo); + usbhsf_dma_unmap(pkt); ++ } else { ++ if (usbhs_pipe_is_dir_in(pipe)) ++ usbhsf_rx_irq_ctrl(pipe, 0); ++ else ++ usbhsf_tx_irq_ctrl(pipe, 0); + } + ++ usbhs_pipe_running(pipe, 0); ++ + __usbhsf_pkt_del(pkt); + } + +diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c +index 8db4ca7d5d45..e8b0a9c34d72 100644 +--- a/drivers/usb/renesas_usbhs/pipe.c ++++ b/drivers/usb/renesas_usbhs/pipe.c +@@ -746,6 +746,8 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv, + + void usbhs_pipe_free(struct usbhs_pipe *pipe) + { ++ usbhsp_pipe_select(pipe); ++ usbhsp_pipe_cfg_set(pipe, 0xFFFF, 0); + usbhsp_put_pipe(pipe); + } + +diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c +index 0291dc7cd284..c6ff79360302 100644 +--- a/drivers/usb/serial/ch341.c ++++ b/drivers/usb/serial/ch341.c +@@ -70,9 +70,12 @@ + + + static const struct usb_device_id id_table[] = { +- { USB_DEVICE(0x4348, 0x5523) }, +- { USB_DEVICE(0x1a86, 0x7523) }, ++ { USB_DEVICE(0x1a86, 0x5512) }, + { USB_DEVICE(0x1a86, 0x5523) }, ++ { USB_DEVICE(0x1a86, 0x7522) }, ++ { USB_DEVICE(0x1a86, 0x7523) }, ++ { USB_DEVICE(0x4348, 0x5523) }, ++ { USB_DEVICE(0x9986, 0x7523) }, + { }, + }; + MODULE_DEVICE_TABLE(usb, id_table); +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 71a951f9dbe8..45be4c72401b 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -58,6 +58,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ + { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ + { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ ++ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */ + { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */ + { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ + { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ +@@ -142,6 +143,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */ + { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ + { 
USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ ++ { USB_DEVICE(0x10C4, 0x88D8) }, /* Acuity Brands nLight Air Adapter */ + { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */ + { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */ + { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ +@@ -151,6 +153,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */ + { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */ + { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ ++ { USB_DEVICE(0x10C4, 0x8A5B) }, /* CEL EM3588 ZigBee USB Stick */ + { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ + { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ + { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ +@@ -198,6 +201,9 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ + { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ + { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ ++ { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 M.2 Key E serial interface */ ++ { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 Display serial interface */ ++ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */ + { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ +@@ -255,6 +261,8 @@ static struct usb_serial_driver cp210x_device = { + .break_ctl = cp210x_break_ctl, + .set_termios = cp210x_set_termios, + .tx_empty = cp210x_tx_empty, ++ .throttle = usb_serial_generic_throttle, ++ .unthrottle = usb_serial_generic_unthrottle, + .tiocmget = cp210x_tiocmget, + .tiocmset = cp210x_tiocmset, + .port_probe = cp210x_port_probe, +@@ -765,6 +773,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port, + u32 baud; + u16 bits = 0; + u32 ctl_hs; ++ u32 flow_repl; + + cp210x_read_u32_reg(port, CP210X_GET_BAUDRATE, &baud); + +@@ -865,6 +874,22 @@ static void cp210x_get_termios_port(struct usb_serial_port *port, + ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake); + if (ctl_hs & CP210X_SERIAL_CTS_HANDSHAKE) { + dev_dbg(dev, "%s - flow control = CRTSCTS\n", __func__); ++ /* ++ * When the port is closed, the CP210x hardware disables ++ * auto-RTS and RTS is deasserted but it leaves auto-CTS when ++ * in hardware flow control mode. When re-opening the port, if ++ * auto-CTS is enabled on the cp210x, then auto-RTS must be ++ * re-enabled in the driver. 
++ */ ++ flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace); ++ flow_repl &= ~CP210X_SERIAL_RTS_MASK; ++ flow_repl |= CP210X_SERIAL_RTS_SHIFT(CP210X_SERIAL_RTS_FLOW_CTL); ++ flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl); ++ cp210x_write_reg_block(port, ++ CP210X_SET_FLOW, ++ &flow_ctl, ++ sizeof(flow_ctl)); ++ + cflag |= CRTSCTS; + } else { + dev_dbg(dev, "%s - flow control = NONE\n", __func__); +diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c +index 80260b08398b..b22a7b487a9f 100644 +--- a/drivers/usb/serial/cyberjack.c ++++ b/drivers/usb/serial/cyberjack.c +@@ -367,11 +367,12 @@ static void cyberjack_write_bulk_callback(struct urb *urb) + struct cyberjack_private *priv = usb_get_serial_port_data(port); + struct device *dev = &port->dev; + int status = urb->status; ++ bool resubmitted = false; + +- set_bit(0, &port->write_urbs_free); + if (status) { + dev_dbg(dev, "%s - nonzero write bulk status received: %d\n", + __func__, status); ++ set_bit(0, &port->write_urbs_free); + return; + } + +@@ -404,6 +405,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb) + goto exit; + } + ++ resubmitted = true; ++ + dev_dbg(dev, "%s - priv->wrsent=%d\n", __func__, priv->wrsent); + dev_dbg(dev, "%s - priv->wrfilled=%d\n", __func__, priv->wrfilled); + +@@ -420,6 +423,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb) + + exit: + spin_unlock(&priv->lock); ++ if (!resubmitted) ++ set_bit(0, &port->write_urbs_free); + usb_serial_port_softint(port); + } + +diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c +index 2c915be1db4c..5cdd6ad4d290 100644 +--- a/drivers/usb/serial/cypress_m8.c ++++ b/drivers/usb/serial/cypress_m8.c +@@ -63,6 +63,7 @@ static const struct usb_device_id id_table_earthmate[] = { + + static const struct usb_device_id id_table_cyphidcomrs232[] = { + { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, ++ { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) }, + { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, + { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, + { } /* Terminating entry */ +@@ -77,6 +78,7 @@ static const struct usb_device_id id_table_combined[] = { + { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, + { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, + { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, ++ { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) }, + { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, + { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, + { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, +diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h +index 119d2e17077b..6d9820bffc20 100644 +--- a/drivers/usb/serial/cypress_m8.h ++++ b/drivers/usb/serial/cypress_m8.h +@@ -24,6 +24,9 @@ + #define VENDOR_ID_CYPRESS 0x04b4 + #define PRODUCT_ID_CYPHIDCOM 0x5500 + ++/* Simply Automated HID->COM UPB PIM (using Cypress PID 0x5500) */ ++#define VENDOR_ID_SAI 0x17dd ++ + /* FRWD Dongle - a GPS sports watch */ + #define VENDOR_ID_FRWD 0x6737 + #define PRODUCT_ID_CYPHIDCOM_FRWD 0x0001 +diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c +index 7ab3235febfc..4aa6fe834091 100644 +--- a/drivers/usb/serial/digi_acceleport.c ++++ b/drivers/usb/serial/digi_acceleport.c +@@ -23,7 +23,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -201,14 +200,12 @@ struct digi_port { + int dp_throttle_restart; + wait_queue_head_t dp_flush_wait; + 
wait_queue_head_t dp_close_wait; /* wait queue for close */ +- struct work_struct dp_wakeup_work; + struct usb_serial_port *dp_port; + }; + + + /* Local Function Declarations */ + +-static void digi_wakeup_write_lock(struct work_struct *work); + static int digi_write_oob_command(struct usb_serial_port *port, + unsigned char *buf, int count, int interruptible); + static int digi_write_inb_command(struct usb_serial_port *port, +@@ -355,26 +352,6 @@ __releases(lock) + return timeout; + } + +- +-/* +- * Digi Wakeup Write +- * +- * Wake up port, line discipline, and tty processes sleeping +- * on writes. +- */ +- +-static void digi_wakeup_write_lock(struct work_struct *work) +-{ +- struct digi_port *priv = +- container_of(work, struct digi_port, dp_wakeup_work); +- struct usb_serial_port *port = priv->dp_port; +- unsigned long flags; +- +- spin_lock_irqsave(&priv->dp_port_lock, flags); +- tty_port_tty_wakeup(&port->port); +- spin_unlock_irqrestore(&priv->dp_port_lock, flags); +-} +- + /* + * Digi Write OOB Command + * +@@ -985,6 +962,7 @@ static void digi_write_bulk_callback(struct urb *urb) + struct digi_serial *serial_priv; + int ret = 0; + int status = urb->status; ++ bool wakeup; + + /* port and serial sanity check */ + if (port == NULL || (priv = usb_get_serial_port_data(port)) == NULL) { +@@ -1011,6 +989,7 @@ static void digi_write_bulk_callback(struct urb *urb) + } + + /* try to send any buffered data on this port */ ++ wakeup = true; + spin_lock(&priv->dp_port_lock); + priv->dp_write_urb_in_use = 0; + if (priv->dp_out_buf_len > 0) { +@@ -1026,19 +1005,18 @@ static void digi_write_bulk_callback(struct urb *urb) + if (ret == 0) { + priv->dp_write_urb_in_use = 1; + priv->dp_out_buf_len = 0; ++ wakeup = false; + } + } +- /* wake up processes sleeping on writes immediately */ +- tty_port_tty_wakeup(&port->port); +- /* also queue up a wakeup at scheduler time, in case we */ +- /* lost the race in write_chan(). 
*/ +- schedule_work(&priv->dp_wakeup_work); +- + spin_unlock(&priv->dp_port_lock); ++ + if (ret && ret != -EPERM) + dev_err_console(port, + "%s: usb_submit_urb failed, ret=%d, port=%d\n", + __func__, ret, priv->dp_port_num); ++ ++ if (wakeup) ++ tty_port_tty_wakeup(&port->port); + } + + static int digi_write_room(struct tty_struct *tty) +@@ -1238,7 +1216,6 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num) + init_waitqueue_head(&priv->dp_transmit_idle_wait); + init_waitqueue_head(&priv->dp_flush_wait); + init_waitqueue_head(&priv->dp_close_wait); +- INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock); + priv->dp_port = port; + + init_waitqueue_head(&port->write_wait); +@@ -1524,13 +1501,14 @@ static int digi_read_oob_callback(struct urb *urb) + rts = C_CRTSCTS(tty); + + if (tty && opcode == DIGI_CMD_READ_INPUT_SIGNALS) { ++ bool wakeup = false; ++ + spin_lock(&priv->dp_port_lock); + /* convert from digi flags to termiox flags */ + if (val & DIGI_READ_INPUT_SIGNALS_CTS) { + priv->dp_modem_signals |= TIOCM_CTS; +- /* port must be open to use tty struct */ + if (rts) +- tty_port_tty_wakeup(&port->port); ++ wakeup = true; + } else { + priv->dp_modem_signals &= ~TIOCM_CTS; + /* port must be open to use tty struct */ +@@ -1549,6 +1527,9 @@ static int digi_read_oob_callback(struct urb *urb) + priv->dp_modem_signals &= ~TIOCM_CD; + + spin_unlock(&priv->dp_port_lock); ++ ++ if (wakeup) ++ tty_port_tty_wakeup(&port->port); + } else if (opcode == DIGI_CMD_TRANSMIT_IDLE) { + spin_lock(&priv->dp_port_lock); + priv->dp_transmit_idle = 1; +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index a7cb0968259e..fe6b32c2ff1c 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -214,6 +214,7 @@ static const struct usb_device_id id_table_combined[] = { + { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) }, + { USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) }, ++ { USB_DEVICE(FTDI_VID, FTDI_AUTO_M3_OP_COM_V2_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) }, +@@ -606,6 +607,7 @@ static const struct usb_device_id id_table_combined[] = { + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) }, ++ { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONMX_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) }, +@@ -708,6 +710,7 @@ static const struct usb_device_id id_table_combined[] = { + { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) }, ++ { USB_DEVICE(XSENS_VID, XSENS_MTIUSBCONVERTER_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, + { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, +@@ -1028,9 +1031,17 @@ static const struct usb_device_id id_table_combined[] = { + /* Sienna devices */ + { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) }, + { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) }, ++ /* IDS GmbH devices */ ++ { USB_DEVICE(IDS_VID, IDS_SI31A_PID) }, ++ { USB_DEVICE(IDS_VID, IDS_CM31A_PID) }, + /* U-Blox devices */ + { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) }, + { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) 
}, ++ /* FreeCalypso USB adapters */ ++ { USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_BUF_PID), ++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, ++ { USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID), ++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { } /* Terminating entry */ + }; + +@@ -2051,12 +2062,11 @@ static int ftdi_prepare_write_buffer(struct usb_serial_port *port, + #define FTDI_RS_ERR_MASK (FTDI_RS_BI | FTDI_RS_PE | FTDI_RS_FE | FTDI_RS_OE) + + static int ftdi_process_packet(struct usb_serial_port *port, +- struct ftdi_private *priv, char *packet, int len) ++ struct ftdi_private *priv, unsigned char *buf, int len) + { ++ unsigned char status; + int i; +- char status; + char flag; +- char *ch; + + if (len < 2) { + dev_dbg(&port->dev, "malformed packet\n"); +@@ -2066,7 +2076,7 @@ static int ftdi_process_packet(struct usb_serial_port *port, + /* Compare new line status to the old one, signal if different/ + N.B. packet may be processed more than once, but differences + are only processed once. */ +- status = packet[0] & FTDI_STATUS_B0_MASK; ++ status = buf[0] & FTDI_STATUS_B0_MASK; + if (status != priv->prev_status) { + char diff_status = status ^ priv->prev_status; + +@@ -2092,13 +2102,12 @@ static int ftdi_process_packet(struct usb_serial_port *port, + } + + /* save if the transmitter is empty or not */ +- if (packet[1] & FTDI_RS_TEMT) ++ if (buf[1] & FTDI_RS_TEMT) + priv->transmit_empty = 1; + else + priv->transmit_empty = 0; + +- len -= 2; +- if (!len) ++ if (len == 2) + return 0; /* status only */ + + /* +@@ -2106,40 +2115,41 @@ static int ftdi_process_packet(struct usb_serial_port *port, + * data payload to avoid over-reporting. + */ + flag = TTY_NORMAL; +- if (packet[1] & FTDI_RS_ERR_MASK) { ++ if (buf[1] & FTDI_RS_ERR_MASK) { + /* Break takes precedence over parity, which takes precedence + * over framing errors */ +- if (packet[1] & FTDI_RS_BI) { ++ if (buf[1] & FTDI_RS_BI) { + flag = TTY_BREAK; + port->icount.brk++; + usb_serial_handle_break(port); +- } else if (packet[1] & FTDI_RS_PE) { ++ } else if (buf[1] & FTDI_RS_PE) { + flag = TTY_PARITY; + port->icount.parity++; +- } else if (packet[1] & FTDI_RS_FE) { ++ } else if (buf[1] & FTDI_RS_FE) { + flag = TTY_FRAME; + port->icount.frame++; + } + /* Overrun is special, not associated with a char */ +- if (packet[1] & FTDI_RS_OE) { ++ if (buf[1] & FTDI_RS_OE) { + port->icount.overrun++; + tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); + } + } + +- port->icount.rx += len; +- ch = packet + 2; ++ port->icount.rx += len - 2; + + if (port->port.console && port->sysrq) { +- for (i = 0; i < len; i++, ch++) { +- if (!usb_serial_handle_sysrq_char(port, *ch)) +- tty_insert_flip_char(&port->port, *ch, flag); ++ for (i = 2; i < len; i++) { ++ if (usb_serial_handle_sysrq_char(port, buf[i])) ++ continue; ++ tty_insert_flip_char(&port->port, buf[i], flag); + } + } else { +- tty_insert_flip_string_fixed_flag(&port->port, ch, flag, len); ++ tty_insert_flip_string_fixed_flag(&port->port, buf + 2, flag, ++ len - 2); + } + +- return len; ++ return len - 2; + } + + static void ftdi_process_read_urb(struct urb *urb) +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index 32a40ab9a385..3b7cea8df446 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -38,6 +38,13 @@ + + #define FTDI_LUMEL_PD12_PID 0x6002 + ++/* ++ * Custom USB adapters made by Falconia Partners LLC ++ * for FreeCalypso project, ID codes allocated to Falconia by FTDI. 
++ */ ++#define FTDI_FALCONIA_JTAG_BUF_PID 0x7150 ++#define FTDI_FALCONIA_JTAG_UNBUF_PID 0x7151 ++ + /* Sienna Serial Interface by Secyourit GmbH */ + #define FTDI_SIENNA_PID 0x8348 + +@@ -151,6 +158,9 @@ + /* Vardaan Enterprises Serial Interface VEUSB422R3 */ + #define FTDI_VARDAAN_PID 0xF070 + ++/* Auto-M3 Ltd. - OP-COM USB V2 - OBD interface Adapter */ ++#define FTDI_AUTO_M3_OP_COM_V2_PID 0x4f50 ++ + /* + * Xsens Technologies BV products (http://www.xsens.com). + */ +@@ -159,6 +169,7 @@ + #define XSENS_AWINDA_DONGLE_PID 0x0102 + #define XSENS_MTW_PID 0x0200 /* Xsens MTw */ + #define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */ ++#define XSENS_MTIUSBCONVERTER_PID 0x0301 /* MTi USB converter */ + #define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */ + + /* Xsens devices using FTDI VID */ +@@ -572,6 +583,7 @@ + #define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ + #define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */ + #define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */ ++#define FTDI_NT_ORIONMX_PID 0x7c93 /* OrionMX */ + + /* + * Synapse Wireless product ids (FTDI_VID) +@@ -1558,6 +1570,13 @@ + #define UNJO_VID 0x22B7 + #define UNJO_ISODEBUG_V1_PID 0x150D + ++/* ++ * IDS GmbH ++ */ ++#define IDS_VID 0x2CAF ++#define IDS_SI31A_PID 0x13A2 ++#define IDS_CM31A_PID 0x13A3 ++ + /* + * U-Blox products (http://www.u-blox.com). + */ +diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c +index 77555242e259..ceece1f52c8a 100644 +--- a/drivers/usb/serial/io_edgeport.c ++++ b/drivers/usb/serial/io_edgeport.c +@@ -2959,26 +2959,32 @@ static int edge_startup(struct usb_serial *serial) + response = -ENODEV; + } + +- usb_free_urb(edge_serial->interrupt_read_urb); +- kfree(edge_serial->interrupt_in_buffer); +- +- usb_free_urb(edge_serial->read_urb); +- kfree(edge_serial->bulk_in_buffer); +- +- kfree(edge_serial); +- +- return response; ++ goto error; + } + + /* start interrupt read for this edgeport this interrupt will + * continue as long as the edgeport is connected */ + response = usb_submit_urb(edge_serial->interrupt_read_urb, + GFP_KERNEL); +- if (response) ++ if (response) { + dev_err(ddev, "%s - Error %d submitting control urb\n", + __func__, response); ++ ++ goto error; ++ } + } + return response; ++ ++error: ++ usb_free_urb(edge_serial->interrupt_read_urb); ++ kfree(edge_serial->interrupt_in_buffer); ++ ++ usb_free_urb(edge_serial->read_urb); ++ kfree(edge_serial->bulk_in_buffer); ++ ++ kfree(edge_serial); ++ ++ return response; + } + + +diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c +index d57fb5199218..f3c0ad138c3e 100644 +--- a/drivers/usb/serial/iuu_phoenix.c ++++ b/drivers/usb/serial/iuu_phoenix.c +@@ -369,10 +369,11 @@ static void iuu_led_activity_on(struct urb *urb) + struct usb_serial_port *port = urb->context; + int result; + char *buf_ptr = port->write_urb->transfer_buffer; +- *buf_ptr++ = IUU_SET_LED; ++ + if (xmas) { +- get_random_bytes(buf_ptr, 6); +- *(buf_ptr+7) = 1; ++ buf_ptr[0] = IUU_SET_LED; ++ get_random_bytes(buf_ptr + 1, 6); ++ buf_ptr[7] = 1; + } else { + iuu_rgbf_fill_buffer(buf_ptr, 255, 255, 0, 0, 0, 0, 255); + } +@@ -390,13 +391,14 @@ static void iuu_led_activity_off(struct urb *urb) + struct usb_serial_port *port = urb->context; + int result; + char *buf_ptr = port->write_urb->transfer_buffer; ++ + if (xmas) { + iuu_rxcmd(urb); + return; +- } else { +- *buf_ptr++ = IUU_SET_LED; +- iuu_rgbf_fill_buffer(buf_ptr, 0, 
0, 255, 255, 0, 0, 255); + } ++ ++ iuu_rgbf_fill_buffer(buf_ptr, 0, 0, 255, 255, 0, 0, 255); ++ + usb_fill_bulk_urb(port->write_urb, port->serial->dev, + usb_sndbulkpipe(port->serial->dev, + port->bulk_out_endpointAddress), +@@ -551,23 +553,29 @@ static int iuu_uart_flush(struct usb_serial_port *port) + struct device *dev = &port->dev; + int i; + int status; +- u8 rxcmd = IUU_UART_RX; ++ u8 *rxcmd; + struct iuu_private *priv = usb_get_serial_port_data(port); + + if (iuu_led(port, 0xF000, 0, 0, 0xFF) < 0) + return -EIO; + ++ rxcmd = kmalloc(1, GFP_KERNEL); ++ if (!rxcmd) ++ return -ENOMEM; ++ ++ rxcmd[0] = IUU_UART_RX; ++ + for (i = 0; i < 2; i++) { +- status = bulk_immediate(port, &rxcmd, 1); ++ status = bulk_immediate(port, rxcmd, 1); + if (status != IUU_OPERATION_OK) { + dev_dbg(dev, "%s - uart_flush_write error\n", __func__); +- return status; ++ goto out_free; + } + + status = read_immediate(port, &priv->len, 1); + if (status != IUU_OPERATION_OK) { + dev_dbg(dev, "%s - uart_flush_read error\n", __func__); +- return status; ++ goto out_free; + } + + if (priv->len > 0) { +@@ -575,12 +583,16 @@ static int iuu_uart_flush(struct usb_serial_port *port) + status = read_immediate(port, priv->buf, priv->len); + if (status != IUU_OPERATION_OK) { + dev_dbg(dev, "%s - uart_flush_read error\n", __func__); +- return status; ++ goto out_free; + } + } + } + dev_dbg(dev, "%s - uart_flush_read OK!\n", __func__); + iuu_led(port, 0, 0xF000, 0, 0xFF); ++ ++out_free: ++ kfree(rxcmd); ++ + return status; + } + +@@ -717,14 +729,16 @@ static int iuu_uart_write(struct tty_struct *tty, struct usb_serial_port *port, + struct iuu_private *priv = usb_get_serial_port_data(port); + unsigned long flags; + +- if (count > 256) +- return -ENOMEM; +- + spin_lock_irqsave(&priv->lock, flags); + ++ count = min(count, 256 - priv->writelen); ++ if (count == 0) ++ goto out; ++ + /* fill the buffer */ + memcpy(priv->writebuf + priv->writelen, buf, count); + priv->writelen += count; ++out: + spin_unlock_irqrestore(&priv->lock, flags); + + return count; +diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c +index d17f7872f95a..1899190d4f5d 100644 +--- a/drivers/usb/serial/keyspan_pda.c ++++ b/drivers/usb/serial/keyspan_pda.c +@@ -44,11 +44,12 @@ + #define DRIVER_AUTHOR "Brian Warner " + #define DRIVER_DESC "USB Keyspan PDA Converter driver" + ++#define KEYSPAN_TX_THRESHOLD 16 ++ + struct keyspan_pda_private { + int tx_room; + int tx_throttled; +- struct work_struct wakeup_work; +- struct work_struct unthrottle_work; ++ struct work_struct unthrottle_work; + struct usb_serial *serial; + struct usb_serial_port *port; + }; +@@ -101,15 +102,6 @@ static const struct usb_device_id id_table_fake_xircom[] = { + }; + #endif + +-static void keyspan_pda_wakeup_write(struct work_struct *work) +-{ +- struct keyspan_pda_private *priv = +- container_of(work, struct keyspan_pda_private, wakeup_work); +- struct usb_serial_port *port = priv->port; +- +- tty_port_tty_wakeup(&port->port); +-} +- + static void keyspan_pda_request_unthrottle(struct work_struct *work) + { + struct keyspan_pda_private *priv = +@@ -124,7 +116,7 @@ static void keyspan_pda_request_unthrottle(struct work_struct *work) + 7, /* request_unthrottle */ + USB_TYPE_VENDOR | USB_RECIP_INTERFACE + | USB_DIR_OUT, +- 16, /* value: threshold */ ++ KEYSPAN_TX_THRESHOLD, + 0, /* index */ + NULL, + 0, +@@ -143,6 +135,8 @@ static void keyspan_pda_rx_interrupt(struct urb *urb) + int retval; + int status = urb->status; + struct keyspan_pda_private *priv; ++ unsigned 
long flags; ++ + priv = usb_get_serial_port_data(port); + + switch (status) { +@@ -176,18 +170,21 @@ static void keyspan_pda_rx_interrupt(struct urb *urb) + break; + case 1: + /* status interrupt */ +- if (len < 3) { ++ if (len < 2) { + dev_warn(&port->dev, "short interrupt message received\n"); + break; + } +- dev_dbg(&port->dev, "rx int, d1=%d, d2=%d\n", data[1], data[2]); ++ dev_dbg(&port->dev, "rx int, d1=%d\n", data[1]); + switch (data[1]) { + case 1: /* modemline change */ + break; + case 2: /* tx unthrottle interrupt */ ++ spin_lock_irqsave(&port->lock, flags); + priv->tx_throttled = 0; ++ priv->tx_room = max(priv->tx_room, KEYSPAN_TX_THRESHOLD); ++ spin_unlock_irqrestore(&port->lock, flags); + /* queue up a wakeup at scheduler time */ +- schedule_work(&priv->wakeup_work); ++ usb_serial_port_softint(port); + break; + default: + break; +@@ -447,6 +444,7 @@ static int keyspan_pda_write(struct tty_struct *tty, + int request_unthrottle = 0; + int rc = 0; + struct keyspan_pda_private *priv; ++ unsigned long flags; + + priv = usb_get_serial_port_data(port); + /* guess how much room is left in the device's ring buffer, and if we +@@ -466,13 +464,13 @@ static int keyspan_pda_write(struct tty_struct *tty, + the TX urb is in-flight (wait until it completes) + the device is full (wait until it says there is room) + */ +- spin_lock_bh(&port->lock); ++ spin_lock_irqsave(&port->lock, flags); + if (!test_bit(0, &port->write_urbs_free) || priv->tx_throttled) { +- spin_unlock_bh(&port->lock); ++ spin_unlock_irqrestore(&port->lock, flags); + return 0; + } + clear_bit(0, &port->write_urbs_free); +- spin_unlock_bh(&port->lock); ++ spin_unlock_irqrestore(&port->lock, flags); + + /* At this point the URB is in our control, nobody else can submit it + again (the only sudden transition was the one from EINPROGRESS to +@@ -518,7 +516,8 @@ static int keyspan_pda_write(struct tty_struct *tty, + goto exit; + } + } +- if (count > priv->tx_room) { ++ ++ if (count >= priv->tx_room) { + /* we're about to completely fill the Tx buffer, so + we'll be throttled afterwards. */ + count = priv->tx_room; +@@ -551,7 +550,7 @@ static int keyspan_pda_write(struct tty_struct *tty, + + rc = count; + exit: +- if (rc < 0) ++ if (rc <= 0) + set_bit(0, &port->write_urbs_free); + return rc; + } +@@ -560,27 +559,28 @@ static int keyspan_pda_write(struct tty_struct *tty, + static void keyspan_pda_write_bulk_callback(struct urb *urb) + { + struct usb_serial_port *port = urb->context; +- struct keyspan_pda_private *priv; + + set_bit(0, &port->write_urbs_free); +- priv = usb_get_serial_port_data(port); + + /* queue up a wakeup at scheduler time */ +- schedule_work(&priv->wakeup_work); ++ usb_serial_port_softint(port); + } + + + static int keyspan_pda_write_room(struct tty_struct *tty) + { + struct usb_serial_port *port = tty->driver_data; +- struct keyspan_pda_private *priv; +- priv = usb_get_serial_port_data(port); +- /* used by n_tty.c for processing of tabs and such. Giving it our +- conservative guess is probably good enough, but needs testing by +- running a console through the device. 
*/ +- return priv->tx_room; +-} ++ struct keyspan_pda_private *priv = usb_get_serial_port_data(port); ++ unsigned long flags; ++ int room = 0; + ++ spin_lock_irqsave(&port->lock, flags); ++ if (test_bit(0, &port->write_urbs_free) && !priv->tx_throttled) ++ room = priv->tx_room; ++ spin_unlock_irqrestore(&port->lock, flags); ++ ++ return room; ++} + + static int keyspan_pda_chars_in_buffer(struct tty_struct *tty) + { +@@ -660,8 +660,12 @@ static int keyspan_pda_open(struct tty_struct *tty, + } + static void keyspan_pda_close(struct usb_serial_port *port) + { ++ struct keyspan_pda_private *priv = usb_get_serial_port_data(port); ++ + usb_kill_urb(port->write_urb); + usb_kill_urb(port->interrupt_in_urb); ++ ++ cancel_work_sync(&priv->unthrottle_work); + } + + +@@ -732,7 +736,6 @@ static int keyspan_pda_port_probe(struct usb_serial_port *port) + if (!priv) + return -ENOMEM; + +- INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write); + INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle); + priv->serial = port->serial; + priv->port = port; +diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c +index 6cb45757818f..64f5765df5b6 100644 +--- a/drivers/usb/serial/kl5kusb105.c ++++ b/drivers/usb/serial/kl5kusb105.c +@@ -293,12 +293,12 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) + priv->cfg.unknown2 = cfg->unknown2; + spin_unlock_irqrestore(&priv->lock, flags); + ++ kfree(cfg); ++ + /* READ_ON and urb submission */ + rc = usb_serial_generic_open(tty, port); +- if (rc) { +- retval = rc; +- goto err_free_cfg; +- } ++ if (rc) ++ return rc; + + rc = usb_control_msg(port->serial->dev, + usb_sndctrlpipe(port->serial->dev, 0), +@@ -341,8 +341,6 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) + KLSI_TIMEOUT); + err_generic_close: + usb_serial_generic_close(port); +-err_free_cfg: +- kfree(cfg); + + return retval; + } +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c +index 14b45f3e6388..b16e37f43247 100644 +--- a/drivers/usb/serial/mos7720.c ++++ b/drivers/usb/serial/mos7720.c +@@ -640,6 +640,8 @@ static void parport_mos7715_restore_state(struct parport *pp, + spin_unlock(&release_lock); + return; + } ++ mos_parport->shadowDCR = s->u.pc.ctr; ++ mos_parport->shadowECR = s->u.pc.ecr; + write_parport_reg_nonblock(mos_parport, MOS7720_DCR, + mos_parport->shadowDCR); + write_parport_reg_nonblock(mos_parport, MOS7720_ECR, +@@ -1237,8 +1239,10 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port, + if (urb->transfer_buffer == NULL) { + urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, + GFP_ATOMIC); +- if (!urb->transfer_buffer) ++ if (!urb->transfer_buffer) { ++ bytes_sent = -ENOMEM; + goto exit; ++ } + } + transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE); + +diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c +index 0c92252c9316..31ca5d925b36 100644 +--- a/drivers/usb/serial/mos7840.c ++++ b/drivers/usb/serial/mos7840.c +@@ -1362,8 +1362,10 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port, + if (urb->transfer_buffer == NULL) { + urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, + GFP_ATOMIC); +- if (!urb->transfer_buffer) ++ if (!urb->transfer_buffer) { ++ bytes_sent = -ENOMEM; + goto exit; ++ } + } + transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE); + +diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c +index 76564b3bebb9..cc0bf59bd08d 100644 +--- 
a/drivers/usb/serial/omninet.c ++++ b/drivers/usb/serial/omninet.c +@@ -27,6 +27,7 @@ + + #define ZYXEL_VENDOR_ID 0x0586 + #define ZYXEL_OMNINET_ID 0x1000 ++#define ZYXEL_OMNI_56K_PLUS_ID 0x1500 + /* This one seems to be a re-branded ZyXEL device */ + #define BT_IGNITIONPRO_ID 0x2000 + +@@ -44,6 +45,7 @@ static int omninet_port_remove(struct usb_serial_port *port); + + static const struct usb_device_id id_table[] = { + { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) }, ++ { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNI_56K_PLUS_ID) }, + { USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) }, + { } /* Terminating entry */ + }; +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 326e7109b8f8..02ded56bcbc6 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -241,6 +241,7 @@ static void option_instat_callback(struct urb *urb); + #define QUECTEL_PRODUCT_UC15 0x9090 + /* These u-blox products use Qualcomm's vendor ID */ + #define UBLOX_PRODUCT_R410M 0x90b2 ++#define UBLOX_PRODUCT_R6XX 0x90fa + /* These Yuga products use Qualcomm's vendor ID */ + #define YUGA_PRODUCT_CLM920_NC5 0x9625 + +@@ -248,6 +249,7 @@ static void option_instat_callback(struct urb *urb); + /* These Quectel products use Quectel's vendor ID */ + #define QUECTEL_PRODUCT_EC21 0x0121 + #define QUECTEL_PRODUCT_EC25 0x0125 ++#define QUECTEL_PRODUCT_EG95 0x0195 + #define QUECTEL_PRODUCT_BG96 0x0296 + #define QUECTEL_PRODUCT_EP06 0x0306 + +@@ -418,11 +420,14 @@ static void option_instat_callback(struct urb *urb); + #define CINTERION_PRODUCT_PH8 0x0053 + #define CINTERION_PRODUCT_AHXX 0x0055 + #define CINTERION_PRODUCT_PLXX 0x0060 ++#define CINTERION_PRODUCT_EXS82 0x006c + #define CINTERION_PRODUCT_PH8_2RMNET 0x0082 + #define CINTERION_PRODUCT_PH8_AUDIO 0x0083 + #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084 + #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085 + #define CINTERION_PRODUCT_CLS8 0x00b0 ++#define CINTERION_PRODUCT_MV31_MBIM 0x00b3 ++#define CINTERION_PRODUCT_MV31_RMNET 0x00b7 + + /* Olivetti products */ + #define OLIVETTI_VENDOR_ID 0x0b3c +@@ -528,6 +533,7 @@ static void option_instat_callback(struct urb *urb); + /* Cellient products */ + #define CELLIENT_VENDOR_ID 0x2692 + #define CELLIENT_PRODUCT_MEN200 0x9005 ++#define CELLIENT_PRODUCT_MPL200 0x9025 + + /* Hyundai Petatel Inc. 
products */ + #define PETATEL_VENDOR_ID 0x1ff4 +@@ -560,6 +566,9 @@ static void option_instat_callback(struct urb *urb); + + /* Device flags */ + ++/* Highest interface number which can be used with NCTRL() and RSVD() */ ++#define FLAG_IFNUM_MAX 7 ++ + /* Interface does not support modem-control requests */ + #define NCTRL(ifnum) ((BIT(ifnum) & 0xff) << 8) + +@@ -1090,11 +1099,15 @@ static const struct usb_device_id option_ids[] = { + /* u-blox products using Qualcomm vendor ID */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M), + .driver_info = RSVD(1) | RSVD(3) }, ++ { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX), ++ .driver_info = RSVD(3) }, + /* Quectel products using Quectel vendor ID */ + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), + .driver_info = RSVD(4) }, + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), + .driver_info = RSVD(4) }, ++ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95), ++ .driver_info = RSVD(4) }, + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), + .driver_info = RSVD(4) }, + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06), +@@ -1168,6 +1181,12 @@ static const struct usb_device_id option_ids[] = { + .driver_info = NCTRL(2) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff), /* Telit FN980 (ECM) */ + .driver_info = NCTRL(0) | RSVD(1) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1054, 0xff), /* Telit FT980-KS */ ++ .driver_info = NCTRL(2) | RSVD(3) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1055, 0xff), /* Telit FN980 (PCIe) */ ++ .driver_info = NCTRL(0) | RSVD(1) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff), /* Telit FD980 */ ++ .driver_info = NCTRL(2) | RSVD(3) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), + .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), +@@ -1180,6 +1199,8 @@ static const struct usb_device_id option_ids[] = { + .driver_info = NCTRL(0) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1203, 0xff), /* Telit LE910Cx (RNDIS) */ ++ .driver_info = NCTRL(2) | RSVD(3) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), +@@ -1194,6 +1215,10 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214), + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1230, 0xff), /* Telit LE910Cx (rmnet) */ ++ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff), /* Telit LE910Cx (RNDIS) */ ++ .driver_info = NCTRL(2) | RSVD(3) }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x1260), + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x1261), +@@ -1202,6 +1227,10 @@ static const struct usb_device_id option_ids[] = { + .driver_info = NCTRL(0) | RSVD(1) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */ + .driver_info = NCTRL(0) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff), /* Telit LE910-S1 (RNDIS) */ ++ .driver_info = NCTRL(2) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */ ++ 
.driver_info = NCTRL(2) }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ + .driver_info = NCTRL(0) | ZLP }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ +@@ -1531,7 +1560,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE(ZTE_VENDOR_ID, 0x1275), /* ZTE P685M */ ++ .driver_info = RSVD(3) | RSVD(4) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) }, +@@ -1805,6 +1835,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */ + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */ + .driver_info = RSVD(7) }, ++ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9205, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT+ECM mode */ ++ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9206, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT-only mode */ + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), + .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), +@@ -1869,12 +1901,17 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) }, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff), + .driver_info = RSVD(0) | RSVD(4) }, ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EXS82, 0xff) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff), ++ .driver_info = RSVD(3)}, ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff), ++ .driver_info = RSVD(0)}, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), + .driver_info = RSVD(4) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), +@@ -1962,6 +1999,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, + { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, ++ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200), ++ .driver_info = RSVD(1) | RSVD(4) }, + { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) }, + { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) }, + { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 
0x00) }, /* TP-Link LTE Module */ +@@ -2013,12 +2052,19 @@ static const struct usb_device_id option_ids[] = { + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */ + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, +- { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */ ++ { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */ + .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, + { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */ + .driver_info = RSVD(4) | RSVD(5) }, + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ + .driver_info = RSVD(6) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */ ++ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */ ++ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */ ++ { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */ ++ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ ++ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ ++ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */ + { } /* Terminating entry */ + }; + MODULE_DEVICE_TABLE(usb, option_ids); +@@ -2062,6 +2108,14 @@ static struct usb_serial_driver * const serial_drivers[] = { + + module_usb_serial_driver(serial_drivers, option_ids); + ++static bool iface_is_reserved(unsigned long device_flags, u8 ifnum) ++{ ++ if (ifnum > FLAG_IFNUM_MAX) ++ return false; ++ ++ return device_flags & RSVD(ifnum); ++} ++ + static int option_probe(struct usb_serial *serial, + const struct usb_device_id *id) + { +@@ -2079,7 +2133,7 @@ static int option_probe(struct usb_serial *serial, + * the same class/subclass/protocol as the serial interfaces. Look at + * the Windows driver .INF files for reserved interface numbers. 
+ */ +- if (device_flags & RSVD(iface_desc->bInterfaceNumber)) ++ if (iface_is_reserved(device_flags, iface_desc->bInterfaceNumber)) + return -ENODEV; + /* + * Don't bind network interface on Samsung GT-B3730, it is handled by +@@ -2096,6 +2150,14 @@ static int option_probe(struct usb_serial *serial, + return 0; + } + ++static bool iface_no_modem_control(unsigned long device_flags, u8 ifnum) ++{ ++ if (ifnum > FLAG_IFNUM_MAX) ++ return false; ++ ++ return device_flags & NCTRL(ifnum); ++} ++ + static int option_attach(struct usb_serial *serial) + { + struct usb_interface_descriptor *iface_desc; +@@ -2111,7 +2173,7 @@ static int option_attach(struct usb_serial *serial) + + iface_desc = &serial->interface->cur_altsetting->desc; + +- if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber))) ++ if (!iface_no_modem_control(device_flags, iface_desc->bInterfaceNumber)) + data->use_send_setup = 1; + + if (device_flags & ZLP) +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c +index 4fcded2971d1..3dd0bbb36dd2 100644 +--- a/drivers/usb/serial/pl2303.c ++++ b/drivers/usb/serial/pl2303.c +@@ -89,6 +89,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LD381_PRODUCT_ID) }, ++ { USB_DEVICE(HP_VENDOR_ID, HP_LD381GC_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) }, +@@ -101,6 +102,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, + { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, + { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, ++ { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530GC_PRODUCT_ID) }, + { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, + { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) }, + { } /* Terminating entry */ +diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h +index 54d2fb974a41..62b8cd673aa1 100644 +--- a/drivers/usb/serial/pl2303.h ++++ b/drivers/usb/serial/pl2303.h +@@ -125,6 +125,7 @@ + + /* Hewlett-Packard POS Pole Displays */ + #define HP_VENDOR_ID 0x03f0 ++#define HP_LD381GC_PRODUCT_ID 0x0183 + #define HP_LM920_PRODUCT_ID 0x026b + #define HP_TD620_PRODUCT_ID 0x0956 + #define HP_LD960_PRODUCT_ID 0x0b39 +@@ -155,6 +156,7 @@ + /* ADLINK ND-6530 RS232,RS485 and RS422 adapter */ + #define ADLINK_VENDOR_ID 0x0b63 + #define ADLINK_ND6530_PRODUCT_ID 0x6530 ++#define ADLINK_ND6530GC_PRODUCT_ID 0x653a + + /* SMART USB Serial Adapter */ + #define SMART_VENDOR_ID 0x0b8c +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c +index c59e6d4a8a61..11fb4d78e2db 100644 +--- a/drivers/usb/serial/qcserial.c ++++ b/drivers/usb/serial/qcserial.c +@@ -159,6 +159,7 @@ static const struct usb_device_id id_table[] = { + {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */ + {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */ + {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */ ++ {DEVICE_SWI(0x1199, 0x9062)}, /* Sierra Wireless EM7305 QDL */ + {DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */ + {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */ + {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ +diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c +index 19952ccd028a..ba69acaa7a30 100644 +--- a/drivers/usb/serial/quatech2.c ++++ 
b/drivers/usb/serial/quatech2.c +@@ -419,7 +419,7 @@ static void qt2_close(struct usb_serial_port *port) + + /* flush the port transmit buffer */ + i = usb_control_msg(serial->dev, +- usb_rcvctrlpipe(serial->dev, 0), ++ usb_sndctrlpipe(serial->dev, 0), + QT2_FLUSH_DEVICE, 0x40, 1, + port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT); + +@@ -429,7 +429,7 @@ static void qt2_close(struct usb_serial_port *port) + + /* flush the port receive buffer */ + i = usb_control_msg(serial->dev, +- usb_rcvctrlpipe(serial->dev, 0), ++ usb_sndctrlpipe(serial->dev, 0), + QT2_FLUSH_DEVICE, 0x40, 0, + port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT); + +@@ -701,7 +701,7 @@ static int qt2_attach(struct usb_serial *serial) + int status; + + /* power on unit */ +- status = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), ++ status = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), + 0xc2, 0x40, 0x8000, 0, NULL, 0, + QT2_USB_TIMEOUT); + if (status < 0) { +diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c +index a7e41723c34c..720bcf29d4f6 100644 +--- a/drivers/usb/serial/ti_usb_3410_5052.c ++++ b/drivers/usb/serial/ti_usb_3410_5052.c +@@ -41,6 +41,7 @@ + /* Vendor and product ids */ + #define TI_VENDOR_ID 0x0451 + #define IBM_VENDOR_ID 0x04b3 ++#define STARTECH_VENDOR_ID 0x14b0 + #define TI_3410_PRODUCT_ID 0x3410 + #define IBM_4543_PRODUCT_ID 0x4543 + #define IBM_454B_PRODUCT_ID 0x454b +@@ -378,6 +379,7 @@ static const struct usb_device_id ti_id_table_3410[] = { + { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) }, + { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) }, + { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) }, ++ { USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) }, + { } /* terminator */ + }; + +@@ -416,6 +418,7 @@ static const struct usb_device_id ti_id_table_combined[] = { + { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) }, + { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) }, + { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) }, ++ { USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) }, + { } /* terminator */ + }; + +diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c +index bb7556952a18..8378e92f7c04 100644 +--- a/drivers/usb/storage/uas.c ++++ b/drivers/usb/storage/uas.c +@@ -670,8 +670,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, + if (devinfo->resetting) { + cmnd->result = DID_ERROR << 16; + cmnd->scsi_done(cmnd); +- spin_unlock_irqrestore(&devinfo->lock, flags); +- return 0; ++ goto zombie; + } + + /* Find a free uas-tag */ +@@ -706,6 +705,16 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, + cmdinfo->state &= ~(SUBMIT_DATA_IN_URB | SUBMIT_DATA_OUT_URB); + + err = uas_submit_urbs(cmnd, devinfo); ++ /* ++ * in case of fatal errors the SCSI layer is peculiar ++ * a command that has finished is a success for the purpose ++ * of queueing, no matter how fatal the error ++ */ ++ if (err == -ENODEV) { ++ cmnd->result = DID_ERROR << 16; ++ cmnd->scsi_done(cmnd); ++ goto zombie; ++ } + if (err) { + /* If we did nothing, give up now */ + if (cmdinfo->state & SUBMIT_STATUS_URB) { +@@ -716,6 +725,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, + } + + devinfo->cmnd[idx] = cmnd; ++zombie: + spin_unlock_irqrestore(&devinfo->lock, flags); + return 0; + } +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h +index 46079468df42..4a94effb64f7 100644 +--- a/drivers/usb/storage/unusual_devs.h ++++ b/drivers/usb/storage/unusual_devs.h +@@ -2347,7 
+2347,7 @@ UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114, + "JMicron", + "USB to ATA/ATAPI Bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, +- US_FL_BROKEN_FUA ), ++ US_FL_BROKEN_FUA | US_FL_IGNORE_UAS ), + + /* Reported by Andrey Rahmatullin */ + UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100, +diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h +index 4df15faa66d7..cdff7dc63f9c 100644 +--- a/drivers/usb/storage/unusual_uas.h ++++ b/drivers/usb/storage/unusual_uas.h +@@ -41,6 +41,13 @@ + * and don't forget to CC: the USB development list + */ + ++/* Reported-by: Till Dörges */ ++UNUSUAL_DEV(0x054c, 0x087d, 0x0000, 0x9999, ++ "Sony", ++ "PSZ-HA*", ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, ++ US_FL_NO_REPORT_OPCODES), ++ + /* Reported-by: Julian Groß */ + UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999, + "LaCie", +@@ -48,6 +55,13 @@ UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999, + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_REPORT_OPCODES), + ++/* Reported-by: Julian Sikorski */ ++UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999, ++ "LaCie", ++ "Rugged USB3-FW", ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, ++ US_FL_IGNORE_UAS), ++ + /* + * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI + * commands in UAS mode. Observed with the 1.28 firmware; are there others? +@@ -156,6 +170,20 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA), + ++/* Reported-by: Thinh Nguyen */ ++UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999, ++ "PNY", ++ "Pro Elite SSD", ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, ++ US_FL_NO_ATA_1X), ++ ++/* Reported-by: Thinh Nguyen */ ++UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999, ++ "PNY", ++ "Pro Elite SSD", ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL, ++ US_FL_NO_ATA_1X), ++ + /* Reported-by: Hans de Goede */ + UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, + "VIA", +diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c +index a5a6a114219a..cec5805feb25 100644 +--- a/drivers/usb/usbip/stub_dev.c ++++ b/drivers/usb/usbip/stub_dev.c +@@ -60,6 +60,8 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr, + int sockfd = 0; + struct socket *socket; + int rv; ++ struct task_struct *tcp_rx = NULL; ++ struct task_struct *tcp_tx = NULL; + + if (!sdev) { + dev_err(dev, "sdev is null\n"); +@@ -75,6 +77,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr, + + dev_info(dev, "stub up\n"); + ++ mutex_lock(&sdev->ud.sysfs_lock); + spin_lock_irq(&sdev->ud.lock); + + if (sdev->ud.status != SDEV_ST_AVAILABLE) { +@@ -83,23 +86,49 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr, + } + + socket = sockfd_lookup(sockfd, &err); +- if (!socket) ++ if (!socket) { ++ dev_err(dev, "failed to lookup sock"); + goto err; ++ } + +- sdev->ud.tcp_socket = socket; +- sdev->ud.sockfd = sockfd; ++ if (socket->type != SOCK_STREAM) { ++ dev_err(dev, "Expecting SOCK_STREAM - found %d", ++ socket->type); ++ goto sock_err; ++ } + ++ /* unlock and create threads and get tasks */ + spin_unlock_irq(&sdev->ud.lock); ++ tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx"); ++ if (IS_ERR(tcp_rx)) { ++ sockfd_put(socket); ++ goto unlock_mutex; ++ } ++ tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx"); ++ if (IS_ERR(tcp_tx)) { ++ kthread_stop(tcp_rx); ++ sockfd_put(socket); ++ goto unlock_mutex; ++ } + +- sdev->ud.tcp_rx = kthread_get_run(stub_rx_loop, &sdev->ud, +- "stub_rx"); +- sdev->ud.tcp_tx = kthread_get_run(stub_tx_loop, &sdev->ud, 
+- "stub_tx"); ++ /* get task structs now */ ++ get_task_struct(tcp_rx); ++ get_task_struct(tcp_tx); + ++ /* lock and update sdev->ud state */ + spin_lock_irq(&sdev->ud.lock); ++ sdev->ud.tcp_socket = socket; ++ sdev->ud.sockfd = sockfd; ++ sdev->ud.tcp_rx = tcp_rx; ++ sdev->ud.tcp_tx = tcp_tx; + sdev->ud.status = SDEV_ST_USED; + spin_unlock_irq(&sdev->ud.lock); + ++ wake_up_process(sdev->ud.tcp_rx); ++ wake_up_process(sdev->ud.tcp_tx); ++ ++ mutex_unlock(&sdev->ud.sysfs_lock); ++ + } else { + dev_info(dev, "stub down\n"); + +@@ -110,12 +139,17 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr, + spin_unlock_irq(&sdev->ud.lock); + + usbip_event_add(&sdev->ud, SDEV_EVENT_DOWN); ++ mutex_unlock(&sdev->ud.sysfs_lock); + } + + return count; + ++sock_err: ++ sockfd_put(socket); + err: + spin_unlock_irq(&sdev->ud.lock); ++unlock_mutex: ++ mutex_unlock(&sdev->ud.sysfs_lock); + return -EINVAL; + } + static DEVICE_ATTR(usbip_sockfd, S_IWUSR, NULL, store_sockfd); +@@ -281,6 +315,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev) + sdev->ud.side = USBIP_STUB; + sdev->ud.status = SDEV_ST_AVAILABLE; + spin_lock_init(&sdev->ud.lock); ++ mutex_init(&sdev->ud.sysfs_lock); + sdev->ud.tcp_socket = NULL; + sdev->ud.sockfd = -1; + +diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h +index 0b199a2664c0..3d47c681aea2 100644 +--- a/drivers/usb/usbip/usbip_common.h ++++ b/drivers/usb/usbip/usbip_common.h +@@ -278,6 +278,9 @@ struct usbip_device { + /* lock for status */ + spinlock_t lock; + ++ /* mutex for synchronizing sysfs store paths */ ++ struct mutex sysfs_lock; ++ + int sockfd; + struct socket *tcp_socket; + +diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c +index f8f7f3803a99..01eaae1f265b 100644 +--- a/drivers/usb/usbip/usbip_event.c ++++ b/drivers/usb/usbip/usbip_event.c +@@ -84,6 +84,7 @@ static void event_handler(struct work_struct *work) + while ((ud = get_event()) != NULL) { + usbip_dbg_eh("pending event %lx\n", ud->event); + ++ mutex_lock(&ud->sysfs_lock); + /* + * NOTE: shutdown must come first. + * Shutdown the device. 
+@@ -104,6 +105,7 @@ static void event_handler(struct work_struct *work) + ud->eh_ops.unusable(ud); + unset_event(ud, USBIP_EH_UNUSABLE); + } ++ mutex_unlock(&ud->sysfs_lock); + + wake_up(&ud->eh_waitq); + } +diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c +index 8bda6455dfcb..fb7b03029b8e 100644 +--- a/drivers/usb/usbip/vhci_hcd.c ++++ b/drivers/usb/usbip/vhci_hcd.c +@@ -907,6 +907,7 @@ static void vhci_device_init(struct vhci_device *vdev) + vdev->ud.side = USBIP_VHCI; + vdev->ud.status = VDEV_ST_NULL; + spin_lock_init(&vdev->ud.lock); ++ mutex_init(&vdev->ud.sysfs_lock); + + INIT_LIST_HEAD(&vdev->priv_rx); + INIT_LIST_HEAD(&vdev->priv_tx); +diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c +index e8a008de8dbc..3496b402aa1b 100644 +--- a/drivers/usb/usbip/vhci_sysfs.c ++++ b/drivers/usb/usbip/vhci_sysfs.c +@@ -161,6 +161,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci, __u32 rhport) + + usbip_dbg_vhci_sysfs("enter\n"); + ++ mutex_lock(&vdev->ud.sysfs_lock); ++ + /* lock */ + spin_lock_irqsave(&vhci->lock, flags); + spin_lock(&vdev->ud.lock); +@@ -171,6 +173,7 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci, __u32 rhport) + /* unlock */ + spin_unlock(&vdev->ud.lock); + spin_unlock_irqrestore(&vhci->lock, flags); ++ mutex_unlock(&vdev->ud.sysfs_lock); + + return -EINVAL; + } +@@ -181,6 +184,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci, __u32 rhport) + + usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN); + ++ mutex_unlock(&vdev->ud.sysfs_lock); ++ + return 0; + } + +@@ -278,6 +283,8 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr, + struct vhci_device *vdev; + int err; + unsigned long flags; ++ struct task_struct *tcp_rx = NULL; ++ struct task_struct *tcp_tx = NULL; + + /* + * @rhport: port number of vhci_hcd +@@ -307,14 +314,43 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr, + vhci = hcd_to_vhci(hcd); + vdev = &vhci->vdev[rhport]; + ++ mutex_lock(&vdev->ud.sysfs_lock); ++ + /* Extract socket from fd. 
*/ + socket = sockfd_lookup(sockfd, &err); +- if (!socket) +- return -EINVAL; ++ if (!socket) { ++ dev_err(dev, "failed to lookup sock"); ++ err = -EINVAL; ++ goto unlock_mutex; ++ } ++ if (socket->type != SOCK_STREAM) { ++ dev_err(dev, "Expecting SOCK_STREAM - found %d", ++ socket->type); ++ sockfd_put(socket); ++ err = -EINVAL; ++ goto unlock_mutex; ++ } ++ ++ /* create threads before locking */ ++ tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx"); ++ if (IS_ERR(tcp_rx)) { ++ sockfd_put(socket); ++ err = -EINVAL; ++ goto unlock_mutex; ++ } ++ tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx"); ++ if (IS_ERR(tcp_tx)) { ++ kthread_stop(tcp_rx); ++ sockfd_put(socket); ++ err = -EINVAL; ++ goto unlock_mutex; ++ } + +- /* now need lock until setting vdev status as used */ ++ /* get task structs now */ ++ get_task_struct(tcp_rx); ++ get_task_struct(tcp_tx); + +- /* begin a lock */ ++ /* now begin lock until setting vdev status set */ + spin_lock_irqsave(&vhci->lock, flags); + spin_lock(&vdev->ud.lock); + +@@ -324,9 +360,12 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr, + spin_unlock_irqrestore(&vhci->lock, flags); + + sockfd_put(socket); ++ kthread_stop_put(tcp_rx); ++ kthread_stop_put(tcp_tx); + + dev_err(dev, "port %d already used\n", rhport); +- return -EINVAL; ++ err = -EINVAL; ++ goto unlock_mutex; + } + + dev_info(dev, "pdev(%u) rhport(%u) sockfd(%d)\n", +@@ -338,18 +377,28 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr, + vdev->speed = speed; + vdev->ud.sockfd = sockfd; + vdev->ud.tcp_socket = socket; ++ vdev->ud.tcp_rx = tcp_rx; ++ vdev->ud.tcp_tx = tcp_tx; + vdev->ud.status = VDEV_ST_NOTASSIGNED; + + spin_unlock(&vdev->ud.lock); + spin_unlock_irqrestore(&vhci->lock, flags); + /* end the lock */ + +- vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx"); +- vdev->ud.tcp_tx = kthread_get_run(vhci_tx_loop, &vdev->ud, "vhci_tx"); ++ wake_up_process(vdev->ud.tcp_rx); ++ wake_up_process(vdev->ud.tcp_tx); + + rh_port_connect(vdev, speed); + ++ dev_info(dev, "Device attached\n"); ++ ++ mutex_unlock(&vdev->ud.sysfs_lock); ++ + return count; ++ ++unlock_mutex: ++ mutex_unlock(&vdev->ud.sysfs_lock); ++ return err; + } + static DEVICE_ATTR(attach, S_IWUSR, NULL, store_attach); + +diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c +index 7091848df6c8..d61b22bb1d8b 100644 +--- a/drivers/usb/usbip/vudc_dev.c ++++ b/drivers/usb/usbip/vudc_dev.c +@@ -582,6 +582,7 @@ static int init_vudc_hw(struct vudc *udc) + init_waitqueue_head(&udc->tx_waitq); + + spin_lock_init(&ud->lock); ++ mutex_init(&ud->sysfs_lock); + ud->status = SDEV_ST_AVAILABLE; + ud->side = USBIP_VUDC; + +diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c +index 7efa374a4970..0a4482ced945 100644 +--- a/drivers/usb/usbip/vudc_sysfs.c ++++ b/drivers/usb/usbip/vudc_sysfs.c +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + #include + + #include "usbip_common.h" +@@ -102,8 +103,9 @@ static ssize_t dev_desc_read(struct file *file, struct kobject *kobj, + } + static BIN_ATTR_RO(dev_desc, sizeof(struct usb_device_descriptor)); + +-static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr, +- const char *in, size_t count) ++static ssize_t store_sockfd(struct device *dev, ++ struct device_attribute *attr, ++ const char *in, size_t count) + { + struct vudc *udc = (struct vudc *) dev_get_drvdata(dev); + int rv; +@@ -112,6 +114,8 @@ static ssize_t store_sockfd(struct 
device *dev, struct device_attribute *attr, + struct socket *socket; + unsigned long flags; + int ret; ++ struct task_struct *tcp_rx = NULL; ++ struct task_struct *tcp_tx = NULL; + + rv = kstrtoint(in, 0, &sockfd); + if (rv != 0) +@@ -121,6 +125,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr, + dev_err(dev, "no device"); + return -ENODEV; + } ++ mutex_lock(&udc->ud.sysfs_lock); + spin_lock_irqsave(&udc->lock, flags); + /* Don't export what we don't have */ + if (!udc->driver || !udc->pullup) { +@@ -150,24 +155,56 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr, + goto unlock_ud; + } + +- udc->ud.tcp_socket = socket; ++ if (socket->type != SOCK_STREAM) { ++ dev_err(dev, "Expecting SOCK_STREAM - found %d", ++ socket->type); ++ ret = -EINVAL; ++ goto sock_err; ++ } + ++ /* unlock and create threads and get tasks */ + spin_unlock_irq(&udc->ud.lock); + spin_unlock_irqrestore(&udc->lock, flags); + +- udc->ud.tcp_rx = kthread_get_run(&v_rx_loop, +- &udc->ud, "vudc_rx"); +- udc->ud.tcp_tx = kthread_get_run(&v_tx_loop, +- &udc->ud, "vudc_tx"); ++ tcp_rx = kthread_create(&v_rx_loop, &udc->ud, "vudc_rx"); ++ if (IS_ERR(tcp_rx)) { ++ sockfd_put(socket); ++ return -EINVAL; ++ } ++ tcp_tx = kthread_create(&v_tx_loop, &udc->ud, "vudc_tx"); ++ if (IS_ERR(tcp_tx)) { ++ kthread_stop(tcp_rx); ++ sockfd_put(socket); ++ return -EINVAL; ++ } ++ ++ /* get task structs now */ ++ get_task_struct(tcp_rx); ++ get_task_struct(tcp_tx); + ++ /* lock and update udc->ud state */ + spin_lock_irqsave(&udc->lock, flags); + spin_lock_irq(&udc->ud.lock); ++ ++ udc->ud.tcp_socket = socket; ++ udc->ud.tcp_rx = tcp_rx; ++ udc->ud.tcp_tx = tcp_tx; + udc->ud.status = SDEV_ST_USED; ++ + spin_unlock_irq(&udc->ud.lock); + + do_gettimeofday(&udc->start_time); + v_start_timer(udc); + udc->connected = 1; ++ ++ spin_unlock_irqrestore(&udc->lock, flags); ++ ++ wake_up_process(udc->ud.tcp_rx); ++ wake_up_process(udc->ud.tcp_tx); ++ ++ mutex_unlock(&udc->ud.sysfs_lock); ++ return count; ++ + } else { + if (!udc->connected) { + dev_err(dev, "Device not connected"); +@@ -186,13 +223,17 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr, + } + + spin_unlock_irqrestore(&udc->lock, flags); ++ mutex_unlock(&udc->ud.sysfs_lock); + + return count; + ++sock_err: ++ sockfd_put(socket); + unlock_ud: + spin_unlock_irq(&udc->ud.lock); + unlock: + spin_unlock_irqrestore(&udc->lock, flags); ++ mutex_unlock(&udc->ud.sysfs_lock); + + return ret; + } +diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig +index 24ee2605b9f0..0da884bfc7a8 100644 +--- a/drivers/vfio/pci/Kconfig ++++ b/drivers/vfio/pci/Kconfig +@@ -1,6 +1,7 @@ + config VFIO_PCI + tristate "VFIO support for PCI devices" + depends on VFIO && PCI && EVENTFD ++ depends on MMU + select VFIO_VIRQFD + select IRQ_BYPASS_MANAGER + help +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c +index c94167d87178..62fa5340c965 100644 +--- a/drivers/vfio/pci/vfio_pci.c ++++ b/drivers/vfio/pci/vfio_pci.c +@@ -29,6 +29,7 @@ + #include + #include + #include ++#include + + #include "vfio_pci_private.h" + +@@ -117,8 +118,6 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev) + int bar; + struct vfio_pci_dummy_resource *dummy_res; + +- INIT_LIST_HEAD(&vdev->dummy_resources_list); +- + for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) { + res = vdev->pdev->resource + bar; + +@@ -181,6 +180,7 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev) + + 
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev); + static void vfio_pci_disable(struct vfio_pci_device *vdev); ++static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data); + + /* + * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND +@@ -390,6 +390,19 @@ static void vfio_pci_release(void *device_data) + if (!(--vdev->refcnt)) { + vfio_spapr_pci_eeh_release(vdev->pdev); + vfio_pci_disable(vdev); ++ mutex_lock(&vdev->igate); ++ if (vdev->err_trigger) { ++ eventfd_ctx_put(vdev->err_trigger); ++ vdev->err_trigger = NULL; ++ } ++ mutex_unlock(&vdev->igate); ++ ++ mutex_lock(&vdev->igate); ++ if (vdev->req_trigger) { ++ eventfd_ctx_put(vdev->req_trigger); ++ vdev->req_trigger = NULL; ++ } ++ mutex_unlock(&vdev->igate); + } + + mutex_unlock(&driver_lock); +@@ -656,6 +669,12 @@ int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, + return 0; + } + ++struct vfio_devices { ++ struct vfio_device **devices; ++ int cur_index; ++ int max_index; ++}; ++ + static long vfio_pci_ioctl(void *device_data, + unsigned int cmd, unsigned long arg) + { +@@ -729,7 +748,7 @@ static long vfio_pci_ioctl(void *device_data, + { + void __iomem *io; + size_t size; +- u16 orig_cmd; ++ u16 cmd; + + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.flags = 0; +@@ -749,10 +768,7 @@ static long vfio_pci_ioctl(void *device_data, + * Is it really there? Enable memory decode for + * implicit access in pci_map_rom(). + */ +- pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd); +- pci_write_config_word(pdev, PCI_COMMAND, +- orig_cmd | PCI_COMMAND_MEMORY); +- ++ cmd = vfio_pci_memory_lock_and_enable(vdev); + io = pci_map_rom(pdev, &size); + if (io) { + info.flags = VFIO_REGION_INFO_FLAG_READ; +@@ -760,8 +776,8 @@ static long vfio_pci_ioctl(void *device_data, + } else { + info.size = 0; + } ++ vfio_pci_memory_unlock_and_restore(vdev, cmd); + +- pci_write_config_word(pdev, PCI_COMMAND, orig_cmd); + break; + } + case VFIO_PCI_VGA_REGION_INDEX: +@@ -909,8 +925,16 @@ static long vfio_pci_ioctl(void *device_data, + return ret; + + } else if (cmd == VFIO_DEVICE_RESET) { +- return vdev->reset_works ? +- pci_try_reset_function(vdev->pdev) : -EINVAL; ++ int ret; ++ ++ if (!vdev->reset_works) ++ return -EINVAL; ++ ++ vfio_pci_zap_and_down_write_memory_lock(vdev); ++ ret = pci_try_reset_function(vdev->pdev); ++ up_write(&vdev->memory_lock); ++ ++ return ret; + + } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) { + struct vfio_pci_hot_reset_info hdr; +@@ -990,8 +1014,9 @@ static long vfio_pci_ioctl(void *device_data, + int32_t *group_fds; + struct vfio_pci_group_entry *groups; + struct vfio_pci_group_info info; ++ struct vfio_devices devs = { .cur_index = 0 }; + bool slot = false; +- int i, count = 0, ret = 0; ++ int i, group_idx, mem_idx = 0, count = 0, ret = 0; + + minsz = offsetofend(struct vfio_pci_hot_reset, count); + +@@ -1043,9 +1068,9 @@ static long vfio_pci_ioctl(void *device_data, + * user interface and store the group and iommu ID. This + * ensures the group is held across the reset. 
+ */ +- for (i = 0; i < hdr.count; i++) { ++ for (group_idx = 0; group_idx < hdr.count; group_idx++) { + struct vfio_group *group; +- struct fd f = fdget(group_fds[i]); ++ struct fd f = fdget(group_fds[group_idx]); + if (!f.file) { + ret = -EBADF; + break; +@@ -1058,8 +1083,9 @@ static long vfio_pci_ioctl(void *device_data, + break; + } + +- groups[i].group = group; +- groups[i].id = vfio_external_user_iommu_id(group); ++ groups[group_idx].group = group; ++ groups[group_idx].id = ++ vfio_external_user_iommu_id(group); + } + + kfree(group_fds); +@@ -1078,14 +1104,65 @@ static long vfio_pci_ioctl(void *device_data, + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_validate_devs, + &info, slot); +- if (!ret) +- /* User has access, do the reset */ +- ret = slot ? pci_try_reset_slot(vdev->pdev->slot) : +- pci_try_reset_bus(vdev->pdev->bus); ++ ++ if (ret) ++ goto hot_reset_release; ++ ++ devs.max_index = count; ++ devs.devices = kcalloc(count, sizeof(struct vfio_device *), ++ GFP_KERNEL); ++ if (!devs.devices) { ++ ret = -ENOMEM; ++ goto hot_reset_release; ++ } ++ ++ /* ++ * We need to get memory_lock for each device, but devices ++ * can share mmap_sem, therefore we need to zap and hold ++ * the vma_lock for each device, and only then get each ++ * memory_lock. ++ */ ++ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, ++ vfio_pci_try_zap_and_vma_lock_cb, ++ &devs, slot); ++ if (ret) ++ goto hot_reset_release; ++ ++ for (; mem_idx < devs.cur_index; mem_idx++) { ++ struct vfio_pci_device *tmp; ++ ++ tmp = vfio_device_data(devs.devices[mem_idx]); ++ ++ ret = down_write_trylock(&tmp->memory_lock); ++ if (!ret) { ++ ret = -EBUSY; ++ goto hot_reset_release; ++ } ++ mutex_unlock(&tmp->vma_lock); ++ } ++ ++ /* User has access, do the reset */ ++ ret = slot ? pci_try_reset_slot(vdev->pdev->slot) : ++ pci_try_reset_bus(vdev->pdev->bus); + + hot_reset_release: +- for (i--; i >= 0; i--) +- vfio_group_put_external_user(groups[i].group); ++ for (i = 0; i < devs.cur_index; i++) { ++ struct vfio_device *device; ++ struct vfio_pci_device *tmp; ++ ++ device = devs.devices[i]; ++ tmp = vfio_device_data(device); ++ ++ if (i < mem_idx) ++ up_write(&tmp->memory_lock); ++ else ++ mutex_unlock(&tmp->vma_lock); ++ vfio_device_put(device); ++ } ++ kfree(devs.devices); ++ ++ for (group_idx--; group_idx >= 0; group_idx--) ++ vfio_group_put_external_user(groups[group_idx].group); + + kfree(groups); + return ret; +@@ -1144,6 +1221,201 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf, + return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true); + } + ++/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */ ++static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try) ++{ ++ struct vfio_pci_mmap_vma *mmap_vma, *tmp; ++ ++ /* ++ * Lock ordering: ++ * vma_lock is nested under mmap_sem for vm_ops callback paths. ++ * The memory_lock semaphore is used by both code paths calling ++ * into this function to zap vmas and the vm_ops.fault callback ++ * to protect the memory enable state of the device. ++ * ++ * When zapping vmas we need to maintain the mmap_sem => vma_lock ++ * ordering, which requires using vma_lock to walk vma_list to ++ * acquire an mm, then dropping vma_lock to get the mmap_sem and ++ * reacquiring vma_lock. This logic is derived from similar ++ * requirements in uverbs_user_mmap_disassociate(). ++ * ++ * mmap_sem must always be the top-level lock when it is taken. 
++ * Therefore we can only hold the memory_lock write lock when ++ * vma_list is empty, as we'd need to take mmap_sem to clear ++ * entries. vma_list can only be guaranteed empty when holding ++ * vma_lock, thus memory_lock is nested under vma_lock. ++ * ++ * This enables the vm_ops.fault callback to acquire vma_lock, ++ * followed by memory_lock read lock, while already holding ++ * mmap_sem without risk of deadlock. ++ */ ++ while (1) { ++ struct mm_struct *mm = NULL; ++ ++ if (try) { ++ if (!mutex_trylock(&vdev->vma_lock)) ++ return 0; ++ } else { ++ mutex_lock(&vdev->vma_lock); ++ } ++ while (!list_empty(&vdev->vma_list)) { ++ mmap_vma = list_first_entry(&vdev->vma_list, ++ struct vfio_pci_mmap_vma, ++ vma_next); ++ mm = mmap_vma->vma->vm_mm; ++ if (mmget_not_zero(mm)) ++ break; ++ ++ list_del(&mmap_vma->vma_next); ++ kfree(mmap_vma); ++ mm = NULL; ++ } ++ if (!mm) ++ return 1; ++ mutex_unlock(&vdev->vma_lock); ++ ++ if (try) { ++ if (!down_read_trylock(&mm->mmap_sem)) { ++ mmput(mm); ++ return 0; ++ } ++ } else { ++ down_read(&mm->mmap_sem); ++ } ++ if (mmget_still_valid(mm)) { ++ if (try) { ++ if (!mutex_trylock(&vdev->vma_lock)) { ++ up_read(&mm->mmap_sem); ++ mmput(mm); ++ return 0; ++ } ++ } else { ++ mutex_lock(&vdev->vma_lock); ++ } ++ list_for_each_entry_safe(mmap_vma, tmp, ++ &vdev->vma_list, vma_next) { ++ struct vm_area_struct *vma = mmap_vma->vma; ++ ++ if (vma->vm_mm != mm) ++ continue; ++ ++ list_del(&mmap_vma->vma_next); ++ kfree(mmap_vma); ++ ++ zap_vma_ptes(vma, vma->vm_start, ++ vma->vm_end - vma->vm_start); ++ } ++ mutex_unlock(&vdev->vma_lock); ++ } ++ up_read(&mm->mmap_sem); ++ mmput(mm); ++ } ++} ++ ++void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev) ++{ ++ vfio_pci_zap_and_vma_lock(vdev, false); ++ down_write(&vdev->memory_lock); ++ mutex_unlock(&vdev->vma_lock); ++} ++ ++u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev) ++{ ++ u16 cmd; ++ ++ down_write(&vdev->memory_lock); ++ pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd); ++ if (!(cmd & PCI_COMMAND_MEMORY)) ++ pci_write_config_word(vdev->pdev, PCI_COMMAND, ++ cmd | PCI_COMMAND_MEMORY); ++ ++ return cmd; ++} ++ ++void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd) ++{ ++ pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd); ++ up_write(&vdev->memory_lock); ++} ++ ++/* Caller holds vma_lock */ ++static int __vfio_pci_add_vma(struct vfio_pci_device *vdev, ++ struct vm_area_struct *vma) ++{ ++ struct vfio_pci_mmap_vma *mmap_vma; ++ ++ mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL); ++ if (!mmap_vma) ++ return -ENOMEM; ++ ++ mmap_vma->vma = vma; ++ list_add(&mmap_vma->vma_next, &vdev->vma_list); ++ ++ return 0; ++} ++ ++/* ++ * Zap mmaps on open so that we can fault them in on access and therefore ++ * our vma_list only tracks mappings accessed since last zap. 
++ */ ++static void vfio_pci_mmap_open(struct vm_area_struct *vma) ++{ ++ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); ++} ++ ++static void vfio_pci_mmap_close(struct vm_area_struct *vma) ++{ ++ struct vfio_pci_device *vdev = vma->vm_private_data; ++ struct vfio_pci_mmap_vma *mmap_vma; ++ ++ mutex_lock(&vdev->vma_lock); ++ list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) { ++ if (mmap_vma->vma == vma) { ++ list_del(&mmap_vma->vma_next); ++ kfree(mmap_vma); ++ break; ++ } ++ } ++ mutex_unlock(&vdev->vma_lock); ++} ++ ++static int vfio_pci_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ struct vfio_pci_device *vdev = vma->vm_private_data; ++ int ret = VM_FAULT_NOPAGE; ++ ++ mutex_lock(&vdev->vma_lock); ++ down_read(&vdev->memory_lock); ++ ++ if (!__vfio_pci_memory_enabled(vdev)) { ++ ret = VM_FAULT_SIGBUS; ++ mutex_unlock(&vdev->vma_lock); ++ goto up_out; ++ } ++ ++ if (__vfio_pci_add_vma(vdev, vma)) { ++ ret = VM_FAULT_OOM; ++ mutex_unlock(&vdev->vma_lock); ++ goto up_out; ++ } ++ ++ mutex_unlock(&vdev->vma_lock); ++ ++ if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, ++ vma->vm_end - vma->vm_start, vma->vm_page_prot)) ++ ret = VM_FAULT_SIGBUS; ++ ++up_out: ++ up_read(&vdev->memory_lock); ++ return ret; ++} ++ ++static const struct vm_operations_struct vfio_pci_mmap_ops = { ++ .open = vfio_pci_mmap_open, ++ .close = vfio_pci_mmap_close, ++ .fault = vfio_pci_mmap_fault, ++}; ++ + static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) + { + struct vfio_pci_device *vdev = device_data; +@@ -1209,8 +1481,14 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff; + +- return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, +- req_len, vma->vm_page_prot); ++ /* ++ * See remap_pfn_range(), called from vfio_pci_fault() but we can't ++ * change vm_flags within the fault handler. Set them now. 
++ */ ++ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; ++ vma->vm_ops = &vfio_pci_mmap_ops; ++ ++ return 0; + } + + static void vfio_pci_request(void *device_data, unsigned int count) +@@ -1267,7 +1545,10 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + vdev->irq_type = VFIO_PCI_NUM_IRQS; + mutex_init(&vdev->igate); + spin_lock_init(&vdev->irqlock); +- ++ INIT_LIST_HEAD(&vdev->dummy_resources_list); ++ mutex_init(&vdev->vma_lock); ++ INIT_LIST_HEAD(&vdev->vma_list); ++ init_rwsem(&vdev->memory_lock); + ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev); + if (ret) { + vfio_iommu_group_put(group, &pdev->dev); +@@ -1361,12 +1642,6 @@ static struct pci_driver vfio_pci_driver = { + .err_handler = &vfio_err_handlers, + }; + +-struct vfio_devices { +- struct vfio_device **devices; +- int cur_index; +- int max_index; +-}; +- + static int vfio_pci_get_devs(struct pci_dev *pdev, void *data) + { + struct vfio_devices *devs = data; +@@ -1388,6 +1663,39 @@ static int vfio_pci_get_devs(struct pci_dev *pdev, void *data) + return 0; + } + ++static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data) ++{ ++ struct vfio_devices *devs = data; ++ struct vfio_device *device; ++ struct vfio_pci_device *vdev; ++ ++ if (devs->cur_index == devs->max_index) ++ return -ENOSPC; ++ ++ device = vfio_device_get_from_dev(&pdev->dev); ++ if (!device) ++ return -EINVAL; ++ ++ if (pci_dev_driver(pdev) != &vfio_pci_driver) { ++ vfio_device_put(device); ++ return -EBUSY; ++ } ++ ++ vdev = vfio_device_data(device); ++ ++ /* ++ * Locking multiple devices is prone to deadlock, runaway and ++ * unwind if we hit contention. ++ */ ++ if (!vfio_pci_zap_and_vma_lock(vdev, true)) { ++ vfio_device_put(device); ++ return -EBUSY; ++ } ++ ++ devs->devices[devs->cur_index++] = device; ++ return 0; ++} ++ + /* + * Attempt to do a bus/slot reset if there are devices affected by a reset for + * this device that are needs_reset and all of the affected devices are unused +diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c +index 84905d074c4f..5b0f09b211be 100644 +--- a/drivers/vfio/pci/vfio_pci_config.c ++++ b/drivers/vfio/pci/vfio_pci_config.c +@@ -400,6 +400,20 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write) + *(__le32 *)(&p->write[off]) = cpu_to_le32(write); + } + ++/* Caller should hold memory_lock semaphore */ ++bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev) ++{ ++ struct pci_dev *pdev = vdev->pdev; ++ u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); ++ ++ /* ++ * SR-IOV VF memory enable is handled by the MSE bit in the ++ * PF SR-IOV capability, there's therefore no need to trigger ++ * faults based on the virtual value. ++ */ ++ return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY); ++} ++ + /* + * Restore the *real* BARs after we detect a FLR or backdoor reset. 
+ * (backdoor = some device specific technique that we didn't catch) +@@ -560,13 +574,18 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, + + new_cmd = le32_to_cpu(val); + ++ phys_io = !!(phys_cmd & PCI_COMMAND_IO); ++ virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO); ++ new_io = !!(new_cmd & PCI_COMMAND_IO); ++ + phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY); + virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY); + new_mem = !!(new_cmd & PCI_COMMAND_MEMORY); + +- phys_io = !!(phys_cmd & PCI_COMMAND_IO); +- virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO); +- new_io = !!(new_cmd & PCI_COMMAND_IO); ++ if (!new_mem) ++ vfio_pci_zap_and_down_write_memory_lock(vdev); ++ else ++ down_write(&vdev->memory_lock); + + /* + * If the user is writing mem/io enable (new_mem/io) and we +@@ -583,8 +602,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, + } + + count = vfio_default_config_write(vdev, pos, count, perm, offset, val); +- if (count < 0) ++ if (count < 0) { ++ if (offset == PCI_COMMAND) ++ up_write(&vdev->memory_lock); + return count; ++ } + + /* + * Save current memory/io enable bits in vconfig to allow for +@@ -595,6 +617,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, + + *virt_cmd &= cpu_to_le16(~mask); + *virt_cmd |= cpu_to_le16(new_cmd & mask); ++ ++ up_write(&vdev->memory_lock); + } + + /* Emulate INTx disable */ +@@ -832,8 +856,11 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos, + pos - offset + PCI_EXP_DEVCAP, + &cap); + +- if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) ++ if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) { ++ vfio_pci_zap_and_down_write_memory_lock(vdev); + pci_try_reset_function(vdev->pdev); ++ up_write(&vdev->memory_lock); ++ } + } + + /* +@@ -911,8 +938,11 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos, + pos - offset + PCI_AF_CAP, + &cap); + +- if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) ++ if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) { ++ vfio_pci_zap_and_down_write_memory_lock(vdev); + pci_try_reset_function(vdev->pdev); ++ up_write(&vdev->memory_lock); ++ } + } + + return count; +@@ -1461,7 +1491,12 @@ static int vfio_cap_init(struct vfio_pci_device *vdev) + if (ret) + return ret; + +- if (cap <= PCI_CAP_ID_MAX) { ++ /* ++ * ID 0 is a NULL capability, conflicting with our fake ++ * PCI_CAP_ID_BASIC. As it has no content, consider it ++ * hidden for now. ++ */ ++ if (cap && cap <= PCI_CAP_ID_MAX) { + len = pci_cap_length[cap]; + if (len == 0xFF) { /* Variable length */ + len = vfio_cap_len(vdev, cap, pos); +@@ -1541,7 +1576,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev) + if (len == 0xFF) { + len = vfio_ext_cap_len(vdev, ecap, epos); + if (len < 0) +- return ret; ++ return len; + } + } + +@@ -1700,6 +1735,15 @@ int vfio_config_init(struct vfio_pci_device *vdev) + vconfig[PCI_INTERRUPT_PIN]); + + vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */ ++ ++ /* ++ * VFs do no implement the memory enable bit of the COMMAND ++ * register therefore we'll not have it set in our initial ++ * copy of config space after pci_enable_device(). For ++ * consistency with PFs, set the virtual enable bit here. 
++ */ ++ *(__le16 *)&vconfig[PCI_COMMAND] |= ++ cpu_to_le16(PCI_COMMAND_MEMORY); + } + + if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx) +@@ -1729,8 +1773,11 @@ void vfio_config_free(struct vfio_pci_device *vdev) + vdev->vconfig = NULL; + kfree(vdev->pci_config_map); + vdev->pci_config_map = NULL; +- kfree(vdev->msi_perm); +- vdev->msi_perm = NULL; ++ if (vdev->msi_perm) { ++ free_perm_bits(vdev->msi_perm); ++ kfree(vdev->msi_perm); ++ vdev->msi_perm = NULL; ++ } + } + + /* +diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c +index 94594dc63c41..c989f777bf77 100644 +--- a/drivers/vfio/pci/vfio_pci_intrs.c ++++ b/drivers/vfio/pci/vfio_pci_intrs.c +@@ -252,6 +252,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) + struct pci_dev *pdev = vdev->pdev; + unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI; + int ret; ++ u16 cmd; + + if (!is_irq_none(vdev)) + return -EINVAL; +@@ -261,13 +262,16 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) + return -ENOMEM; + + /* return the number of supported vectors if we can't get all: */ ++ cmd = vfio_pci_memory_lock_and_enable(vdev); + ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag); + if (ret < nvec) { + if (ret > 0) + pci_free_irq_vectors(pdev); ++ vfio_pci_memory_unlock_and_restore(vdev, cmd); + kfree(vdev->ctx); + return ret; + } ++ vfio_pci_memory_unlock_and_restore(vdev, cmd); + + vdev->num_ctx = nvec; + vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX : +@@ -290,6 +294,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, + struct pci_dev *pdev = vdev->pdev; + struct eventfd_ctx *trigger; + int irq, ret; ++ u16 cmd; + + if (vector < 0 || vector >= vdev->num_ctx) + return -EINVAL; +@@ -298,7 +303,11 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, + + if (vdev->ctx[vector].trigger) { + irq_bypass_unregister_producer(&vdev->ctx[vector].producer); ++ ++ cmd = vfio_pci_memory_lock_and_enable(vdev); + free_irq(irq, vdev->ctx[vector].trigger); ++ vfio_pci_memory_unlock_and_restore(vdev, cmd); ++ + kfree(vdev->ctx[vector].name); + eventfd_ctx_put(vdev->ctx[vector].trigger); + vdev->ctx[vector].trigger = NULL; +@@ -326,6 +335,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, + * such a reset it would be unsuccessful. To avoid this, restore the + * cached value of the message prior to enabling. 
+ */ ++ cmd = vfio_pci_memory_lock_and_enable(vdev); + if (msix) { + struct msi_msg msg; + +@@ -335,6 +345,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, + + ret = request_irq(irq, vfio_msihandler, 0, + vdev->ctx[vector].name, trigger); ++ vfio_pci_memory_unlock_and_restore(vdev, cmd); + if (ret) { + kfree(vdev->ctx[vector].name); + eventfd_ctx_put(trigger); +@@ -344,11 +355,13 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, + vdev->ctx[vector].producer.token = trigger; + vdev->ctx[vector].producer.irq = irq; + ret = irq_bypass_register_producer(&vdev->ctx[vector].producer); +- if (unlikely(ret)) ++ if (unlikely(ret)) { + dev_info(&pdev->dev, + "irq bypass producer (token %p) registration fails: %d\n", + vdev->ctx[vector].producer.token, ret); + ++ vdev->ctx[vector].producer.token = NULL; ++ } + vdev->ctx[vector].trigger = trigger; + + return 0; +@@ -379,6 +392,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) + { + struct pci_dev *pdev = vdev->pdev; + int i; ++ u16 cmd; + + for (i = 0; i < vdev->num_ctx; i++) { + vfio_virqfd_disable(&vdev->ctx[i].unmask); +@@ -387,7 +401,9 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) + + vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); + ++ cmd = vfio_pci_memory_lock_and_enable(vdev); + pci_free_irq_vectors(pdev); ++ vfio_pci_memory_unlock_and_restore(vdev, cmd); + + /* + * Both disable paths above use pci_intx_for_msi() to clear DisINTx +diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h +index f561ac1c78a0..f896cebb5c2c 100644 +--- a/drivers/vfio/pci/vfio_pci_private.h ++++ b/drivers/vfio/pci/vfio_pci_private.h +@@ -63,6 +63,11 @@ struct vfio_pci_dummy_resource { + struct list_head res_next; + }; + ++struct vfio_pci_mmap_vma { ++ struct vm_area_struct *vma; ++ struct list_head vma_next; ++}; ++ + struct vfio_pci_device { + struct pci_dev *pdev; + void __iomem *barmap[PCI_STD_RESOURCE_END + 1]; +@@ -95,6 +100,9 @@ struct vfio_pci_device { + struct eventfd_ctx *err_trigger; + struct eventfd_ctx *req_trigger; + struct list_head dummy_resources_list; ++ struct mutex vma_lock; ++ struct list_head vma_list; ++ struct rw_semaphore memory_lock; + }; + + #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) +@@ -130,6 +138,14 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, + unsigned int type, unsigned int subtype, + const struct vfio_pci_regops *ops, + size_t size, u32 flags, void *data); ++ ++extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev); ++extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device ++ *vdev); ++extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev); ++extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, ++ u16 cmd); ++ + #ifdef CONFIG_VFIO_PCI_IGD + extern int vfio_pci_igd_init(struct vfio_pci_device *vdev); + #else +diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c +index 357243d76f10..6445461a5601 100644 +--- a/drivers/vfio/pci/vfio_pci_rdwr.c ++++ b/drivers/vfio/pci/vfio_pci_rdwr.c +@@ -122,6 +122,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, + size_t x_start = 0, x_end = 0; + resource_size_t end; + void __iomem *io; ++ struct resource *res = &vdev->pdev->resource[bar]; + ssize_t done; + + if (pci_resource_start(pdev, bar)) +@@ -137,6 +138,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, + + 
count = min(count, (size_t)(end - pos)); + ++ if (res->flags & IORESOURCE_MEM) { ++ down_read(&vdev->memory_lock); ++ if (!__vfio_pci_memory_enabled(vdev)) { ++ up_read(&vdev->memory_lock); ++ return -EIO; ++ } ++ } ++ + if (bar == PCI_ROM_RESOURCE) { + /* + * The ROM can fill less space than the BAR, so we start the +@@ -144,20 +153,21 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, + * filling large ROM BARs much faster. + */ + io = pci_map_rom(pdev, &x_start); +- if (!io) +- return -ENOMEM; ++ if (!io) { ++ done = -ENOMEM; ++ goto out; ++ } + x_end = end; + } else if (!vdev->barmap[bar]) { +- int ret; +- +- ret = pci_request_selected_regions(pdev, 1 << bar, "vfio"); +- if (ret) +- return ret; ++ done = pci_request_selected_regions(pdev, 1 << bar, "vfio"); ++ if (done) ++ goto out; + + io = pci_iomap(pdev, bar, 0); + if (!io) { + pci_release_selected_regions(pdev, 1 << bar); +- return -ENOMEM; ++ done = -ENOMEM; ++ goto out; + } + + vdev->barmap[bar] = io; +@@ -176,6 +186,9 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, + + if (bar == PCI_ROM_RESOURCE) + pci_unmap_rom(pdev, io); ++out: ++ if (res->flags & IORESOURCE_MEM) ++ up_read(&vdev->memory_lock); + + return done; + } +diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c +index d143d08c4f0f..9b1b6c1e218d 100644 +--- a/drivers/vfio/platform/vfio_platform_common.c ++++ b/drivers/vfio/platform/vfio_platform_common.c +@@ -288,7 +288,7 @@ static int vfio_platform_open(void *device_data) + vfio_platform_regions_cleanup(vdev); + err_reg: + mutex_unlock(&driver_lock); +- module_put(THIS_MODULE); ++ module_put(vdev->parent_module); + return ret; + } + +diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c +index a9f58f3867f0..ccef02ceaad9 100644 +--- a/drivers/vfio/vfio_iommu_type1.c ++++ b/drivers/vfio/vfio_iommu_type1.c +@@ -213,6 +213,32 @@ static int put_pfn(unsigned long pfn, int prot) + return 0; + } + ++static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, ++ unsigned long vaddr, unsigned long *pfn, ++ bool write_fault) ++{ ++ int ret; ++ ++ ret = follow_pfn(vma, vaddr, pfn); ++ if (ret) { ++ bool unlocked = false; ++ ++ ret = fixup_user_fault(NULL, mm, vaddr, ++ FAULT_FLAG_REMOTE | ++ (write_fault ? 
FAULT_FLAG_WRITE : 0), ++ &unlocked); ++ if (unlocked) ++ return -EAGAIN; ++ ++ if (ret) ++ return ret; ++ ++ ret = follow_pfn(vma, vaddr, pfn); ++ } ++ ++ return ret; ++} ++ + static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn) + { + struct page *page[1]; +@@ -226,12 +252,16 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn) + + down_read(¤t->mm->mmap_sem); + ++retry: + vma = find_vma_intersection(current->mm, vaddr, vaddr + 1); + + if (vma && vma->vm_flags & VM_PFNMAP) { +- if (!follow_pfn(vma, vaddr, pfn) && +- is_invalid_reserved_pfn(*pfn)) +- ret = 0; ++ ret = follow_fault_pfn(vma, current->mm, vaddr, pfn, prot & IOMMU_WRITE); ++ if (ret == -EAGAIN) ++ goto retry; ++ ++ if (!ret && !is_invalid_reserved_pfn(*pfn)) ++ ret = -EFAULT; + } + + up_read(¤t->mm->mmap_sem); +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c +index 861f43f8f9ce..66663757cd70 100644 +--- a/drivers/vhost/net.c ++++ b/drivers/vhost/net.c +@@ -377,6 +377,7 @@ static void handle_tx(struct vhost_net *net) + size_t hdr_size; + struct socket *sock; + struct vhost_net_ubuf_ref *uninitialized_var(ubufs); ++ struct ubuf_info *ubuf; + bool zcopy, zcopy_used; + int sent_pkts = 0; + +@@ -444,9 +445,7 @@ static void handle_tx(struct vhost_net *net) + + /* use msg_control to pass vhost zerocopy ubuf info to skb */ + if (zcopy_used) { +- struct ubuf_info *ubuf; + ubuf = nvq->ubuf_info + nvq->upend_idx; +- + vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head); + vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS; + ubuf->callback = vhost_zerocopy_callback; +@@ -465,7 +464,8 @@ static void handle_tx(struct vhost_net *net) + err = sock->ops->sendmsg(sock, &msg, len); + if (unlikely(err < 0)) { + if (zcopy_used) { +- vhost_net_ubuf_put(ubufs); ++ if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS) ++ vhost_net_ubuf_put(ubufs); + nvq->upend_idx = ((unsigned)nvq->upend_idx - 1) + % UIO_MAXIOV; + } +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c +index b14e62f11075..62c61a283b35 100644 +--- a/drivers/vhost/vhost.c ++++ b/drivers/vhost/vhost.c +@@ -306,8 +306,8 @@ static void vhost_vq_reset(struct vhost_dev *dev, + vq->call_ctx = NULL; + vq->call = NULL; + vq->log_ctx = NULL; +- vhost_reset_is_le(vq); + vhost_disable_cross_endian(vq); ++ vhost_reset_is_le(vq); + vq->busyloop_timeout = 0; + vq->umem = NULL; + vq->iotlb = NULL; +@@ -675,10 +675,16 @@ static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) + (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8); + } + ++/* Make sure 64 bit math will not overflow. */ + static bool vhost_overflow(u64 uaddr, u64 size) + { +- /* Make sure 64 bit math will not overflow. */ +- return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size; ++ if (uaddr > ULONG_MAX || size > ULONG_MAX) ++ return true; ++ ++ if (!size) ++ return false; ++ ++ return uaddr > ULONG_MAX - size + 1; + } + + /* Caller should have vq mutex and device mutex. */ +diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c +index 3bb02c60a2f5..da47542496cc 100644 +--- a/drivers/vhost/vringh.c ++++ b/drivers/vhost/vringh.c +@@ -272,13 +272,14 @@ __vringh_iov(struct vringh *vrh, u16 i, + desc_max = vrh->vring.num; + up_next = -1; + ++ /* You must want something! */ ++ if (WARN_ON(!riov && !wiov)) ++ return -EINVAL; ++ + if (riov) + riov->i = riov->used = 0; +- else if (wiov) ++ if (wiov) + wiov->i = wiov->used = 0; +- else +- /* You must want something! 
*/ +- BUG(); + + for (;;) { + void *addr; +@@ -328,7 +329,7 @@ __vringh_iov(struct vringh *vrh, u16 i, + iov = wiov; + else { + iov = riov; +- if (unlikely(wiov && wiov->i)) { ++ if (unlikely(wiov && wiov->used)) { + vringh_bad("Readable desc %p after writable", + &descs[i]); + err = -EINVAL; +diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c +index 1771220b2437..90a24b975240 100644 +--- a/drivers/video/backlight/lm3630a_bl.c ++++ b/drivers/video/backlight/lm3630a_bl.c +@@ -183,7 +183,7 @@ static int lm3630a_bank_a_update_status(struct backlight_device *bl) + if ((pwm_ctrl & LM3630A_PWM_BANK_A) != 0) { + lm3630a_pwm_ctrl(pchip, bl->props.brightness, + bl->props.max_brightness); +- return bl->props.brightness; ++ return 0; + } + + /* disable sleep */ +@@ -203,8 +203,8 @@ static int lm3630a_bank_a_update_status(struct backlight_device *bl) + return 0; + + out_i2c_err: +- dev_err(pchip->dev, "i2c failed to access\n"); +- return bl->props.brightness; ++ dev_err(pchip->dev, "i2c failed to access (%pe)\n", ERR_PTR(ret)); ++ return ret; + } + + static int lm3630a_bank_a_get_brightness(struct backlight_device *bl) +@@ -260,7 +260,7 @@ static int lm3630a_bank_b_update_status(struct backlight_device *bl) + if ((pwm_ctrl & LM3630A_PWM_BANK_B) != 0) { + lm3630a_pwm_ctrl(pchip, bl->props.brightness, + bl->props.max_brightness); +- return bl->props.brightness; ++ return 0; + } + + /* disable sleep */ +@@ -280,8 +280,8 @@ static int lm3630a_bank_b_update_status(struct backlight_device *bl) + return 0; + + out_i2c_err: +- dev_err(pchip->dev, "i2c failed to access REG_CTRL\n"); +- return bl->props.brightness; ++ dev_err(pchip->dev, "i2c failed to access (%pe)\n", ERR_PTR(ret)); ++ return ret; + } + + static int lm3630a_bank_b_get_brightness(struct backlight_device *bl) +diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c +index 939f057836e1..4cdc7a3f6dc5 100644 +--- a/drivers/video/backlight/lp855x_bl.c ++++ b/drivers/video/backlight/lp855x_bl.c +@@ -460,7 +460,7 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id) + ret = regulator_enable(lp->enable); + if (ret < 0) { + dev_err(lp->dev, "failed to enable vddio: %d\n", ret); +- return ret; ++ goto disable_supply; + } + + /* +@@ -475,24 +475,34 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id) + ret = lp855x_configure(lp); + if (ret) { + dev_err(lp->dev, "device config err: %d", ret); +- return ret; ++ goto disable_vddio; + } + + ret = lp855x_backlight_register(lp); + if (ret) { + dev_err(lp->dev, + "failed to register backlight. err: %d\n", ret); +- return ret; ++ goto disable_vddio; + } + + ret = sysfs_create_group(&lp->dev->kobj, &lp855x_attr_group); + if (ret) { + dev_err(lp->dev, "failed to register sysfs. 
err: %d\n", ret); +- return ret; ++ goto disable_vddio; + } + + backlight_update_status(lp->bl); ++ + return 0; ++ ++disable_vddio: ++ if (lp->enable) ++ regulator_disable(lp->enable); ++disable_supply: ++ if (lp->supply) ++ regulator_disable(lp->supply); ++ ++ return ret; + } + + static int lp855x_remove(struct i2c_client *cl) +@@ -501,6 +511,8 @@ static int lp855x_remove(struct i2c_client *cl) + + lp->bl->props.brightness = 0; + backlight_update_status(lp->bl); ++ if (lp->enable) ++ regulator_disable(lp->enable); + if (lp->supply) + regulator_disable(lp->supply); + sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group); +diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c +index d414c7a3acf5..a2f77625b717 100644 +--- a/drivers/video/backlight/sky81452-backlight.c ++++ b/drivers/video/backlight/sky81452-backlight.c +@@ -207,6 +207,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt( + num_entry); + if (ret < 0) { + dev_err(dev, "led-sources node is invalid.\n"); ++ of_node_put(np); + return ERR_PTR(-EINVAL); + } + +diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig +index 38da6e299149..c31715019cb4 100644 +--- a/drivers/video/console/Kconfig ++++ b/drivers/video/console/Kconfig +@@ -22,31 +22,6 @@ config VGA_CONSOLE + + Say Y. + +-config VGACON_SOFT_SCROLLBACK +- bool "Enable Scrollback Buffer in System RAM" +- depends on VGA_CONSOLE +- default n +- help +- The scrollback buffer of the standard VGA console is located in +- the VGA RAM. The size of this RAM is fixed and is quite small. +- If you require a larger scrollback buffer, this can be placed in +- System RAM which is dynamically allocated during initialization. +- Placing the scrollback buffer in System RAM will slightly slow +- down the console. +- +- If you want this feature, say 'Y' here and enter the amount of +- RAM to allocate for this buffer. If unsure, say 'N'. +- +-config VGACON_SOFT_SCROLLBACK_SIZE +- int "Scrollback Buffer Size (in KB)" +- depends on VGACON_SOFT_SCROLLBACK +- range 1 1024 +- default "64" +- help +- Enter the amount of System RAM to allocate for the scrollback +- buffer. 
Each 64KB will give you approximately 16 80x25 +- screenfuls of scrollback buffer +- + config MDA_CONSOLE + depends on !M68K && !PARISC && ISA + tristate "MDA text console (dual-headed)" +diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c +index dbfe4eecf12e..a7ab8323304d 100644 +--- a/drivers/video/console/bitblit.c ++++ b/drivers/video/console/bitblit.c +@@ -216,7 +216,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, + region.color = 0; + region.rop = ROP_COPY; + +- if (rw && !bottom_only) { ++ if ((int) rw > 0 && !bottom_only) { + region.dx = info->var.xoffset + rs; + region.dy = 0; + region.width = rw; +@@ -224,7 +224,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, + info->fbops->fb_fillrect(info, ®ion); + } + +- if (bh) { ++ if ((int) bh > 0) { + region.dx = info->var.xoffset; + region.dy = info->var.yoffset + bs; + region.width = rs; +@@ -234,7 +234,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, + } + + static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, +- int softback_lines, int fg, int bg) ++ int fg, int bg) + { + struct fb_cursor cursor; + struct fbcon_ops *ops = info->fbcon_par; +@@ -247,15 +247,6 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, + + cursor.set = 0; + +- if (softback_lines) { +- if (y + softback_lines >= vc->vc_rows) { +- mode = CM_ERASE; +- ops->cursor_flash = 0; +- return; +- } else +- y += softback_lines; +- } +- + c = scr_readw((u16 *) vc->vc_pos); + attribute = get_attribute(info, c); + src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height)); +diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c +index 178b507a6fe0..510bc3f51dcc 100644 +--- a/drivers/video/console/fbcon.c ++++ b/drivers/video/console/fbcon.c +@@ -101,12 +101,6 @@ static int logo_lines; + /* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO + enums. */ + static int logo_shown = FBCON_LOGO_CANSHOW; +-/* Software scrollback */ +-static int fbcon_softback_size = 32768; +-static unsigned long softback_buf, softback_curr; +-static unsigned long softback_in; +-static unsigned long softback_top, softback_end; +-static int softback_lines; + /* console mappings */ + static int first_fb_vc; + static int last_fb_vc = MAX_NR_CONSOLES - 1; +@@ -140,8 +134,6 @@ static int fbcon_has_sysfs; + + static const struct consw fb_con; + +-#define CM_SOFTBACK (8) +- + #define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row) + + static int fbcon_set_origin(struct vc_data *); +@@ -349,18 +341,6 @@ static int get_color(struct vc_data *vc, struct fb_info *info, + return color; + } + +-static void fbcon_update_softback(struct vc_data *vc) +-{ +- int l = fbcon_softback_size / vc->vc_size_row; +- +- if (l > 5) +- softback_end = softback_buf + l * vc->vc_size_row; +- else +- /* Smaller scrollback makes no sense, and 0 would screw +- the operation totally */ +- softback_top = 0; +-} +- + static void fb_flashcursor(struct work_struct *work) + { + struct fb_info *info = container_of(work, struct fb_info, queue); +@@ -390,7 +370,7 @@ static void fb_flashcursor(struct work_struct *work) + c = scr_readw((u16 *) vc->vc_pos); + mode = (!ops->cursor_flash || ops->cursor_state.enable) ? 
+ CM_ERASE : CM_DRAW; +- ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1), ++ ops->cursor(vc, info, mode, get_color(vc, info, c, 1), + get_color(vc, info, c, 0)); + console_unlock(); + } +@@ -450,13 +430,7 @@ static int __init fb_console_setup(char *this_opt) + } + + if (!strncmp(options, "scrollback:", 11)) { +- options += 11; +- if (*options) { +- fbcon_softback_size = simple_strtoul(options, &options, 0); +- if (*options == 'k' || *options == 'K') { +- fbcon_softback_size *= 1024; +- } +- } ++ pr_warn("Ignoring scrollback size option\n"); + continue; + } + +@@ -961,31 +935,6 @@ static const char *fbcon_startup(void) + p->con_rotate = initial_rotation; + set_blitting_type(vc, info); + +- if (info->fix.type != FB_TYPE_TEXT) { +- if (fbcon_softback_size) { +- if (!softback_buf) { +- softback_buf = +- (unsigned long) +- kmalloc(fbcon_softback_size, +- GFP_KERNEL); +- if (!softback_buf) { +- fbcon_softback_size = 0; +- softback_top = 0; +- } +- } +- } else { +- if (softback_buf) { +- kfree((void *) softback_buf); +- softback_buf = 0; +- softback_top = 0; +- } +- } +- if (softback_buf) +- softback_in = softback_top = softback_curr = +- softback_buf; +- softback_lines = 0; +- } +- + /* Setup default font */ + if (!p->fontdata && !vc->vc_font.data) { + if (!fontname[0] || !(font = find_font(fontname))) +@@ -1148,9 +1097,6 @@ static void fbcon_init(struct vc_data *vc, int init) + if (logo) + fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows); + +- if (vc == svc && softback_buf) +- fbcon_update_softback(vc); +- + if (ops->rotate_font && ops->rotate_font(info, vc)) { + ops->rotate = FB_ROTATE_UR; + set_blitting_type(vc, info); +@@ -1310,7 +1256,6 @@ static void fbcon_cursor(struct vc_data *vc, int mode) + { + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; +- int y; + int c = scr_readw((u16 *) vc->vc_pos); + + ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); +@@ -1324,16 +1269,8 @@ static void fbcon_cursor(struct vc_data *vc, int mode) + fbcon_add_cursor_timer(info); + + ops->cursor_flash = (mode == CM_ERASE) ? 
0 : 1; +- if (mode & CM_SOFTBACK) { +- mode &= ~CM_SOFTBACK; +- y = softback_lines; +- } else { +- if (softback_lines) +- fbcon_set_origin(vc); +- y = 0; +- } + +- ops->cursor(vc, info, mode, y, get_color(vc, info, c, 1), ++ ops->cursor(vc, info, mode, get_color(vc, info, c, 1), + get_color(vc, info, c, 0)); + } + +@@ -1404,8 +1341,6 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, + + if (con_is_visible(vc)) { + update_screen(vc); +- if (softback_buf) +- fbcon_update_softback(vc); + } + } + +@@ -1543,99 +1478,6 @@ static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count) + scrollback_current = 0; + } + +-static void fbcon_redraw_softback(struct vc_data *vc, struct display *p, +- long delta) +-{ +- int count = vc->vc_rows; +- unsigned short *d, *s; +- unsigned long n; +- int line = 0; +- +- d = (u16 *) softback_curr; +- if (d == (u16 *) softback_in) +- d = (u16 *) vc->vc_origin; +- n = softback_curr + delta * vc->vc_size_row; +- softback_lines -= delta; +- if (delta < 0) { +- if (softback_curr < softback_top && n < softback_buf) { +- n += softback_end - softback_buf; +- if (n < softback_top) { +- softback_lines -= +- (softback_top - n) / vc->vc_size_row; +- n = softback_top; +- } +- } else if (softback_curr >= softback_top +- && n < softback_top) { +- softback_lines -= +- (softback_top - n) / vc->vc_size_row; +- n = softback_top; +- } +- } else { +- if (softback_curr > softback_in && n >= softback_end) { +- n += softback_buf - softback_end; +- if (n > softback_in) { +- n = softback_in; +- softback_lines = 0; +- } +- } else if (softback_curr <= softback_in && n > softback_in) { +- n = softback_in; +- softback_lines = 0; +- } +- } +- if (n == softback_curr) +- return; +- softback_curr = n; +- s = (u16 *) softback_curr; +- if (s == (u16 *) softback_in) +- s = (u16 *) vc->vc_origin; +- while (count--) { +- unsigned short *start; +- unsigned short *le; +- unsigned short c; +- int x = 0; +- unsigned short attr = 1; +- +- start = s; +- le = advance_row(s, 1); +- do { +- c = scr_readw(s); +- if (attr != (c & 0xff00)) { +- attr = c & 0xff00; +- if (s > start) { +- fbcon_putcs(vc, start, s - start, +- line, x); +- x += s - start; +- start = s; +- } +- } +- if (c == scr_readw(d)) { +- if (s > start) { +- fbcon_putcs(vc, start, s - start, +- line, x); +- x += s - start + 1; +- start = s + 1; +- } else { +- x++; +- start++; +- } +- } +- s++; +- d++; +- } while (s < le); +- if (s > start) +- fbcon_putcs(vc, start, s - start, line, x); +- line++; +- if (d == (u16 *) softback_end) +- d = (u16 *) softback_buf; +- if (d == (u16 *) softback_in) +- d = (u16 *) vc->vc_origin; +- if (s == (u16 *) softback_end) +- s = (u16 *) softback_buf; +- if (s == (u16 *) softback_in) +- s = (u16 *) vc->vc_origin; +- } +-} +- + static void fbcon_redraw_move(struct vc_data *vc, struct display *p, + int line, int count, int dy) + { +@@ -1775,31 +1617,6 @@ static void fbcon_redraw(struct vc_data *vc, struct display *p, + } + } + +-static inline void fbcon_softback_note(struct vc_data *vc, int t, +- int count) +-{ +- unsigned short *p; +- +- if (vc->vc_num != fg_console) +- return; +- p = (unsigned short *) (vc->vc_origin + t * vc->vc_size_row); +- +- while (count) { +- scr_memcpyw((u16 *) softback_in, p, vc->vc_size_row); +- count--; +- p = advance_row(p, 1); +- softback_in += vc->vc_size_row; +- if (softback_in == softback_end) +- softback_in = softback_buf; +- if (softback_in == softback_top) { +- softback_top += vc->vc_size_row; +- if (softback_top == softback_end) +- 
softback_top = softback_buf; +- } +- } +- softback_curr = softback_in; +-} +- + static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir, + int count) + { +@@ -1822,8 +1639,6 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir, + case SM_UP: + if (count > vc->vc_rows) /* Maximum realistic size */ + count = vc->vc_rows; +- if (softback_top) +- fbcon_softback_note(vc, t, count); + if (logo_shown >= 0) + goto redraw_up; + switch (p->scrollmode) { +@@ -2116,6 +1931,9 @@ static void updatescrollmode(struct display *p, + } + } + ++#define PITCH(w) (((w) + 7) >> 3) ++#define CALC_FONTSZ(h, p, c) ((h) * (p) * (c)) /* size = height * pitch * charcount */ ++ + static int fbcon_resize(struct vc_data *vc, unsigned int width, + unsigned int height, unsigned int user) + { +@@ -2125,6 +1943,24 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width, + struct fb_var_screeninfo var = info->var; + int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh; + ++ if (p->userfont && FNTSIZE(vc->vc_font.data)) { ++ int size; ++ int pitch = PITCH(vc->vc_font.width); ++ ++ /* ++ * If user font, ensure that a possible change to user font ++ * height or width will not allow a font data out-of-bounds access. ++ * NOTE: must use original charcount in calculation as font ++ * charcount can change and cannot be used to determine the ++ * font data allocated size. ++ */ ++ if (pitch <= 0) ++ return -EINVAL; ++ size = CALC_FONTSZ(vc->vc_font.height, pitch, FNTCHARCNT(vc->vc_font.data)); ++ if (size > FNTSIZE(vc->vc_font.data)) ++ return -EINVAL; ++ } ++ + virt_w = FBCON_SWAP(ops->rotate, width, height); + virt_h = FBCON_SWAP(ops->rotate, height, width); + virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width, +@@ -2150,7 +1986,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width, + return -EINVAL; + + DPRINTK("resize now %ix%i\n", var.xres, var.yres); +- if (con_is_visible(vc)) { ++ if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) { + var.activate = FB_ACTIVATE_NOW | + FB_ACTIVATE_FORCE; + fb_set_var(info, &var); +@@ -2173,14 +2009,6 @@ static int fbcon_switch(struct vc_data *vc) + info = registered_fb[con2fb_map[vc->vc_num]]; + ops = info->fbcon_par; + +- if (softback_top) { +- if (softback_lines) +- fbcon_set_origin(vc); +- softback_top = softback_curr = softback_in = softback_buf; +- softback_lines = 0; +- fbcon_update_softback(vc); +- } +- + if (logo_shown >= 0) { + struct vc_data *conp2 = vc_cons[logo_shown].d; + +@@ -2406,6 +2234,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) + + if (font->width <= 8) { + j = vc->vc_font.height; ++ if (font->charcount * j > FNTSIZE(fontdata)) ++ return -EINVAL; ++ + for (i = 0; i < font->charcount; i++) { + memcpy(data, fontdata, j); + memset(data + j, 0, 32 - j); +@@ -2414,6 +2245,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) + } + } else if (font->width <= 16) { + j = vc->vc_font.height * 2; ++ if (font->charcount * j > FNTSIZE(fontdata)) ++ return -EINVAL; ++ + for (i = 0; i < font->charcount; i++) { + memcpy(data, fontdata, j); + memset(data + j, 0, 64 - j); +@@ -2421,6 +2255,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) + fontdata += j; + } + } else if (font->width <= 24) { ++ if (font->charcount * (vc->vc_font.height * sizeof(u32)) > FNTSIZE(fontdata)) ++ return -EINVAL; ++ + for (i = 0; i < font->charcount; i++) { + for (j = 0; j < vc->vc_font.height; j++) { + *data++ = fontdata[0]; +@@ -2433,6 +2270,9 @@ static int fbcon_get_font(struct 
vc_data *vc, struct console_font *font) + } + } else { + j = vc->vc_font.height * 4; ++ if (font->charcount * j > FNTSIZE(fontdata)) ++ return -EINVAL; ++ + for (i = 0; i < font->charcount; i++) { + memcpy(data, fontdata, j); + memset(data + j, 0, 128 - j); +@@ -2514,9 +2354,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, + int cnt; + char *old_data = NULL; + +- if (con_is_visible(vc) && softback_lines) +- fbcon_set_origin(vc); +- + resize = (w != vc->vc_font.width) || (h != vc->vc_font.height); + if (p->userfont) + old_data = vc->vc_font.data; +@@ -2542,8 +2379,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, + cols /= w; + rows /= h; + vc_resize(vc, cols, rows); +- if (con_is_visible(vc) && softback_buf) +- fbcon_update_softback(vc); + } else if (con_is_visible(vc) + && vc->vc_mode == KD_TEXT) { + fbcon_clear_margins(vc, 0); +@@ -2586,7 +2421,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, unsigne + int size; + int i, csum; + u8 *new_data, *data = font->data; +- int pitch = (font->width+7) >> 3; ++ int pitch = PITCH(font->width); + + /* Is there a reason why fbconsole couldn't handle any charcount >256? + * If not this check should be changed to charcount < 256 */ +@@ -2602,7 +2437,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, unsigne + if (fbcon_invalid_charcount(info, charcount)) + return -EINVAL; + +- size = h * pitch * charcount; ++ size = CALC_FONTSZ(h, pitch, charcount); + + new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER); + +@@ -2701,19 +2536,7 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table) + + static u16 *fbcon_screen_pos(struct vc_data *vc, int offset) + { +- unsigned long p; +- int line; +- +- if (vc->vc_num != fg_console || !softback_lines) +- return (u16 *) (vc->vc_origin + offset); +- line = offset / vc->vc_size_row; +- if (line >= softback_lines) +- return (u16 *) (vc->vc_origin + offset - +- softback_lines * vc->vc_size_row); +- p = softback_curr + offset; +- if (p >= softback_end) +- p += softback_buf - softback_end; +- return (u16 *) p; ++ return (u16 *) (vc->vc_origin + offset); + } + + static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos, +@@ -2725,24 +2548,9 @@ static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos, + if (pos >= vc->vc_origin && pos < vc->vc_scr_end) { + unsigned long offset = (pos - vc->vc_origin) / 2; + +- x = offset % vc->vc_cols; +- y = offset / vc->vc_cols; +- if (vc->vc_num == fg_console) +- y += softback_lines; +- ret = pos + (vc->vc_cols - x) * 2; +- } else if (vc->vc_num == fg_console && softback_lines) { +- unsigned long offset = pos - softback_curr; +- +- if (pos < softback_curr) +- offset += softback_end - softback_buf; +- offset /= 2; + x = offset % vc->vc_cols; + y = offset / vc->vc_cols; + ret = pos + (vc->vc_cols - x) * 2; +- if (ret == softback_end) +- ret = softback_buf; +- if (ret == softback_in) +- ret = vc->vc_origin; + } else { + /* Should not happen */ + x = y = 0; +@@ -2770,106 +2578,11 @@ static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt) + a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) | + (((a) & 0x0700) << 4); + scr_writew(a, p++); +- if (p == (u16 *) softback_end) +- p = (u16 *) softback_buf; +- if (p == (u16 *) softback_in) +- p = (u16 *) vc->vc_origin; + } + } + +-static void fbcon_scrolldelta(struct vc_data *vc, int lines) +-{ +- struct fb_info *info = registered_fb[con2fb_map[fg_console]]; +- struct fbcon_ops *ops = 
info->fbcon_par; +- struct display *disp = &fb_display[fg_console]; +- int offset, limit, scrollback_old; +- +- if (softback_top) { +- if (vc->vc_num != fg_console) +- return; +- if (vc->vc_mode != KD_TEXT || !lines) +- return; +- if (logo_shown >= 0) { +- struct vc_data *conp2 = vc_cons[logo_shown].d; +- +- if (conp2->vc_top == logo_lines +- && conp2->vc_bottom == conp2->vc_rows) +- conp2->vc_top = 0; +- if (logo_shown == vc->vc_num) { +- unsigned long p, q; +- int i; +- +- p = softback_in; +- q = vc->vc_origin + +- logo_lines * vc->vc_size_row; +- for (i = 0; i < logo_lines; i++) { +- if (p == softback_top) +- break; +- if (p == softback_buf) +- p = softback_end; +- p -= vc->vc_size_row; +- q -= vc->vc_size_row; +- scr_memcpyw((u16 *) q, (u16 *) p, +- vc->vc_size_row); +- } +- softback_in = softback_curr = p; +- update_region(vc, vc->vc_origin, +- logo_lines * vc->vc_cols); +- } +- logo_shown = FBCON_LOGO_CANSHOW; +- } +- fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK); +- fbcon_redraw_softback(vc, disp, lines); +- fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK); +- return; +- } +- +- if (!scrollback_phys_max) +- return; +- +- scrollback_old = scrollback_current; +- scrollback_current -= lines; +- if (scrollback_current < 0) +- scrollback_current = 0; +- else if (scrollback_current > scrollback_max) +- scrollback_current = scrollback_max; +- if (scrollback_current == scrollback_old) +- return; +- +- if (fbcon_is_inactive(vc, info)) +- return; +- +- fbcon_cursor(vc, CM_ERASE); +- +- offset = disp->yscroll - scrollback_current; +- limit = disp->vrows; +- switch (disp->scrollmode) { +- case SCROLL_WRAP_MOVE: +- info->var.vmode |= FB_VMODE_YWRAP; +- break; +- case SCROLL_PAN_MOVE: +- case SCROLL_PAN_REDRAW: +- limit -= vc->vc_rows; +- info->var.vmode &= ~FB_VMODE_YWRAP; +- break; +- } +- if (offset < 0) +- offset += limit; +- else if (offset >= limit) +- offset -= limit; +- +- ops->var.xoffset = 0; +- ops->var.yoffset = offset * vc->vc_font.height; +- ops->update_start(info); +- +- if (!scrollback_current) +- fbcon_cursor(vc, CM_DRAW); +-} +- + static int fbcon_set_origin(struct vc_data *vc) + { +- if (softback_lines) +- fbcon_scrolldelta(vc, softback_lines); + return 0; + } + +@@ -2933,8 +2646,6 @@ static void fbcon_modechanged(struct fb_info *info) + + fbcon_set_palette(vc, color_table); + update_screen(vc); +- if (softback_buf) +- fbcon_update_softback(vc); + } + } + +@@ -3354,7 +3065,6 @@ static const struct consw fb_con = { + .con_font_default = fbcon_set_def_font, + .con_font_copy = fbcon_copy_font, + .con_set_palette = fbcon_set_palette, +- .con_scrolldelta = fbcon_scrolldelta, + .con_set_origin = fbcon_set_origin, + .con_invert_region = fbcon_invert_region, + .con_screen_pos = fbcon_screen_pos, +@@ -3563,9 +3273,6 @@ static void fbcon_exit(void) + if (fbcon_has_exited) + return; + +- kfree((void *)softback_buf); +- softback_buf = 0UL; +- + for (i = 0; i < FB_MAX; i++) { + int pending = 0; + +diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h +index 7aaa4eabbba0..701eecab3317 100644 +--- a/drivers/video/console/fbcon.h ++++ b/drivers/video/console/fbcon.h +@@ -62,7 +62,7 @@ struct fbcon_ops { + void (*clear_margins)(struct vc_data *vc, struct fb_info *info, + int bottom_only); + void (*cursor)(struct vc_data *vc, struct fb_info *info, int mode, +- int softback_lines, int fg, int bg); ++ int fg, int bg); + int (*update_start)(struct fb_info *info); + int (*rotate_font)(struct fb_info *info, struct vc_data *vc); + struct fb_var_screeninfo var; /* copy of the current 
fb_var_screeninfo */ +@@ -151,13 +151,6 @@ static inline int attr_col_ec(int shift, struct vc_data *vc, + #define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0) + #define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1) + +-/* Font */ +-#define REFCOUNT(fd) (((int *)(fd))[-1]) +-#define FNTSIZE(fd) (((int *)(fd))[-2]) +-#define FNTCHARCNT(fd) (((int *)(fd))[-3]) +-#define FNTSUM(fd) (((int *)(fd))[-4]) +-#define FONT_EXTRA_WORDS 4 +- + /* + * Scroll Method + */ +diff --git a/drivers/video/console/fbcon_ccw.c b/drivers/video/console/fbcon_ccw.c +index 5a3cbf6dff4d..586702752005 100644 +--- a/drivers/video/console/fbcon_ccw.c ++++ b/drivers/video/console/fbcon_ccw.c +@@ -201,7 +201,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, + region.color = 0; + region.rop = ROP_COPY; + +- if (rw && !bottom_only) { ++ if ((int) rw > 0 && !bottom_only) { + region.dx = 0; + region.dy = info->var.yoffset; + region.height = rw; +@@ -209,7 +209,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, + info->fbops->fb_fillrect(info, ®ion); + } + +- if (bh) { ++ if ((int) bh > 0) { + region.dx = info->var.xoffset + bs; + region.dy = 0; + region.height = info->var.yres_virtual; +@@ -219,7 +219,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, + } + + static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, +- int softback_lines, int fg, int bg) ++ int fg, int bg) + { + struct fb_cursor cursor; + struct fbcon_ops *ops = info->fbcon_par; +@@ -236,15 +236,6 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, + + cursor.set = 0; + +- if (softback_lines) { +- if (y + softback_lines >= vc->vc_rows) { +- mode = CM_ERASE; +- ops->cursor_flash = 0; +- return; +- } else +- y += softback_lines; +- } +- + c = scr_readw((u16 *) vc->vc_pos); + attribute = get_attribute(info, c); + src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); +diff --git a/drivers/video/console/fbcon_cw.c b/drivers/video/console/fbcon_cw.c +index e7ee44db4e98..f5a1134049f8 100644 +--- a/drivers/video/console/fbcon_cw.c ++++ b/drivers/video/console/fbcon_cw.c +@@ -184,7 +184,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, + region.color = 0; + region.rop = ROP_COPY; + +- if (rw && !bottom_only) { ++ if ((int) rw > 0 && !bottom_only) { + region.dx = 0; + region.dy = info->var.yoffset + rs; + region.height = rw; +@@ -192,7 +192,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, + info->fbops->fb_fillrect(info, ®ion); + } + +- if (bh) { ++ if ((int) bh > 0) { + region.dx = info->var.xoffset; + region.dy = info->var.yoffset; + region.height = info->var.yres; +@@ -202,7 +202,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, + } + + static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, +- int softback_lines, int fg, int bg) ++ int fg, int bg) + { + struct fb_cursor cursor; + struct fbcon_ops *ops = info->fbcon_par; +@@ -219,15 +219,6 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, + + cursor.set = 0; + +- if (softback_lines) { +- if (y + softback_lines >= vc->vc_rows) { +- mode = CM_ERASE; +- ops->cursor_flash = 0; +- return; +- } else +- y += softback_lines; +- } +- + c = scr_readw((u16 *) vc->vc_pos); + attribute = get_attribute(info, c); + src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); +diff --git a/drivers/video/console/fbcon_rotate.c 
b/drivers/video/console/fbcon_rotate.c +index db6528f2d3f2..0e3321081945 100644 +--- a/drivers/video/console/fbcon_rotate.c ++++ b/drivers/video/console/fbcon_rotate.c +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + #include + #include "fbcon.h" + #include "fbcon_rotate.h" +diff --git a/drivers/video/console/fbcon_ud.c b/drivers/video/console/fbcon_ud.c +index 19e3714abfe8..cf8dac9ca2bb 100644 +--- a/drivers/video/console/fbcon_ud.c ++++ b/drivers/video/console/fbcon_ud.c +@@ -231,7 +231,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, + region.color = 0; + region.rop = ROP_COPY; + +- if (rw && !bottom_only) { ++ if ((int) rw > 0 && !bottom_only) { + region.dy = 0; + region.dx = info->var.xoffset; + region.width = rw; +@@ -239,7 +239,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, + info->fbops->fb_fillrect(info, ®ion); + } + +- if (bh) { ++ if ((int) bh > 0) { + region.dy = info->var.yoffset; + region.dx = info->var.xoffset; + region.height = bh; +@@ -249,7 +249,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, + } + + static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, +- int softback_lines, int fg, int bg) ++ int fg, int bg) + { + struct fb_cursor cursor; + struct fbcon_ops *ops = info->fbcon_par; +@@ -267,15 +267,6 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, + + cursor.set = 0; + +- if (softback_lines) { +- if (y + softback_lines >= vc->vc_rows) { +- mode = CM_ERASE; +- ops->cursor_flash = 0; +- return; +- } else +- y += softback_lines; +- } +- + c = scr_readw((u16 *) vc->vc_pos); + attribute = get_attribute(info, c); + src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.height)); +diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c +index e3b9521e4ec3..1a3a2f1d9817 100644 +--- a/drivers/video/console/newport_con.c ++++ b/drivers/video/console/newport_con.c +@@ -31,17 +31,14 @@ + #include + #include + +-#define FONT_DATA ((unsigned char *)font_vga_8x16.data) ++#define NEWPORT_LEN 0x10000 + +-/* borrowed from fbcon.c */ +-#define REFCOUNT(fd) (((int *)(fd))[-1]) +-#define FNTSIZE(fd) (((int *)(fd))[-2]) +-#define FNTCHARCNT(fd) (((int *)(fd))[-3]) +-#define FONT_EXTRA_WORDS 3 ++#define FONT_DATA ((unsigned char *)font_vga_8x16.data) + + static unsigned char *font_data[MAX_NR_CONSOLES]; + + static struct newport_regs *npregs; ++static unsigned long newport_addr; + + static int logo_active; + static int topscan; +@@ -519,6 +516,7 @@ static int newport_set_font(int unit, struct console_font *op) + FNTSIZE(new_data) = size; + FNTCHARCNT(new_data) = op->charcount; + REFCOUNT(new_data) = 0; /* usage counter */ ++ FNTSUM(new_data) = 0; + + p = new_data; + for (i = 0; i < op->charcount; i++) { +@@ -701,7 +699,6 @@ const struct consw newport_con = { + static int newport_probe(struct gio_device *dev, + const struct gio_device_id *id) + { +- unsigned long newport_addr; + int err; + + if (!dev->resource.start) +@@ -711,7 +708,7 @@ static int newport_probe(struct gio_device *dev, + return -EBUSY; /* we only support one Newport as console */ + + newport_addr = dev->resource.start + 0xF0000; +- if (!request_mem_region(newport_addr, 0x10000, "Newport")) ++ if (!request_mem_region(newport_addr, NEWPORT_LEN, "Newport")) + return -ENODEV; + + npregs = (struct newport_regs *)/* ioremap cannot fail */ +@@ -719,6 +716,11 @@ static int newport_probe(struct gio_device *dev, + console_lock(); + err = 
do_take_over_console(&newport_con, 0, MAX_NR_CONSOLES - 1, 1); + console_unlock(); ++ ++ if (err) { ++ iounmap((void *)npregs); ++ release_mem_region(newport_addr, NEWPORT_LEN); ++ } + return err; + } + +@@ -726,6 +728,7 @@ static void newport_remove(struct gio_device *dev) + { + give_up_console(&newport_con); + iounmap((void *)npregs); ++ release_mem_region(newport_addr, NEWPORT_LEN); + } + + static struct gio_device_id newport_ids[] = { +diff --git a/drivers/video/console/tileblit.c b/drivers/video/console/tileblit.c +index 15e8e1a89c45..691717276c3e 100644 +--- a/drivers/video/console/tileblit.c ++++ b/drivers/video/console/tileblit.c +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + #include + #include "fbcon.h" + +@@ -80,7 +81,7 @@ static void tile_clear_margins(struct vc_data *vc, struct fb_info *info, + } + + static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode, +- int softback_lines, int fg, int bg) ++ int fg, int bg) + { + struct fb_tilecursor cursor; + int use_sw = (vc->vc_cursor_type & 0x10); +diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c +index 42c0a26646f6..cb9b87963129 100644 +--- a/drivers/video/console/vgacon.c ++++ b/drivers/video/console/vgacon.c +@@ -179,153 +179,6 @@ static inline void vga_set_mem_top(struct vc_data *c) + write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2); + } + +-#ifdef CONFIG_VGACON_SOFT_SCROLLBACK +-/* software scrollback */ +-static void *vgacon_scrollback; +-static int vgacon_scrollback_tail; +-static int vgacon_scrollback_size; +-static int vgacon_scrollback_rows; +-static int vgacon_scrollback_cnt; +-static int vgacon_scrollback_cur; +-static int vgacon_scrollback_save; +-static int vgacon_scrollback_restore; +- +-static void vgacon_scrollback_init(int pitch) +-{ +- int rows = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024/pitch; +- +- if (vgacon_scrollback) { +- vgacon_scrollback_cnt = 0; +- vgacon_scrollback_tail = 0; +- vgacon_scrollback_cur = 0; +- vgacon_scrollback_rows = rows - 1; +- vgacon_scrollback_size = rows * pitch; +- } +-} +- +-static void vgacon_scrollback_startup(void) +-{ +- vgacon_scrollback = kcalloc(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, GFP_NOWAIT); +- vgacon_scrollback_init(vga_video_num_columns * 2); +-} +- +-static void vgacon_scrollback_update(struct vc_data *c, int t, int count) +-{ +- void *p; +- +- if (!vgacon_scrollback_size || c->vc_num != fg_console) +- return; +- +- p = (void *) (c->vc_origin + t * c->vc_size_row); +- +- while (count--) { +- scr_memcpyw(vgacon_scrollback + vgacon_scrollback_tail, +- p, c->vc_size_row); +- vgacon_scrollback_cnt++; +- p += c->vc_size_row; +- vgacon_scrollback_tail += c->vc_size_row; +- +- if (vgacon_scrollback_tail >= vgacon_scrollback_size) +- vgacon_scrollback_tail = 0; +- +- if (vgacon_scrollback_cnt > vgacon_scrollback_rows) +- vgacon_scrollback_cnt = vgacon_scrollback_rows; +- +- vgacon_scrollback_cur = vgacon_scrollback_cnt; +- } +-} +- +-static void vgacon_restore_screen(struct vc_data *c) +-{ +- vgacon_scrollback_save = 0; +- +- if (!vga_is_gfx && !vgacon_scrollback_restore) { +- scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf, +- c->vc_screenbuf_size > vga_vram_size ? 
+- vga_vram_size : c->vc_screenbuf_size); +- vgacon_scrollback_restore = 1; +- vgacon_scrollback_cur = vgacon_scrollback_cnt; +- } +-} +- +-static void vgacon_scrolldelta(struct vc_data *c, int lines) +-{ +- int start, end, count, soff; +- +- if (!lines) { +- c->vc_visible_origin = c->vc_origin; +- vga_set_mem_top(c); +- return; +- } +- +- if (!vgacon_scrollback) +- return; +- +- if (!vgacon_scrollback_save) { +- vgacon_cursor(c, CM_ERASE); +- vgacon_save_screen(c); +- vgacon_scrollback_save = 1; +- } +- +- vgacon_scrollback_restore = 0; +- start = vgacon_scrollback_cur + lines; +- end = start + abs(lines); +- +- if (start < 0) +- start = 0; +- +- if (start > vgacon_scrollback_cnt) +- start = vgacon_scrollback_cnt; +- +- if (end < 0) +- end = 0; +- +- if (end > vgacon_scrollback_cnt) +- end = vgacon_scrollback_cnt; +- +- vgacon_scrollback_cur = start; +- count = end - start; +- soff = vgacon_scrollback_tail - ((vgacon_scrollback_cnt - end) * +- c->vc_size_row); +- soff -= count * c->vc_size_row; +- +- if (soff < 0) +- soff += vgacon_scrollback_size; +- +- count = vgacon_scrollback_cnt - start; +- +- if (count > c->vc_rows) +- count = c->vc_rows; +- +- if (count) { +- int copysize; +- +- int diff = c->vc_rows - count; +- void *d = (void *) c->vc_origin; +- void *s = (void *) c->vc_screenbuf; +- +- count *= c->vc_size_row; +- /* how much memory to end of buffer left? */ +- copysize = min(count, vgacon_scrollback_size - soff); +- scr_memcpyw(d, vgacon_scrollback + soff, copysize); +- d += copysize; +- count -= copysize; +- +- if (count) { +- scr_memcpyw(d, vgacon_scrollback, count); +- d += count; +- } +- +- if (diff) +- scr_memcpyw(d, s, diff * c->vc_size_row); +- } else +- vgacon_cursor(c, CM_MOVE); +-} +-#else +-#define vgacon_scrollback_startup(...) do { } while (0) +-#define vgacon_scrollback_init(...) do { } while (0) +-#define vgacon_scrollback_update(...) do { } while (0) +- + static void vgacon_restore_screen(struct vc_data *c) + { + if (c->vc_origin != c->vc_visible_origin) +@@ -361,7 +214,6 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) + } + vga_set_mem_top(c); + } +-#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */ + + static const char *vgacon_startup(void) + { +@@ -558,10 +410,7 @@ static const char *vgacon_startup(void) + vgacon_xres = screen_info.orig_video_cols * VGA_FONTWIDTH; + vgacon_yres = vga_scan_lines; + +- if (!vga_init_done) { +- vgacon_scrollback_startup(); +- vga_init_done = 1; +- } ++ vga_init_done = 1; + + return display_desc; + } +@@ -585,7 +434,7 @@ static void vgacon_init(struct vc_data *c, int init) + vc_resize(c, vga_video_num_columns, vga_video_num_lines); + + c->vc_scan_lines = vga_scan_lines; +- c->vc_font.height = vga_video_font_height; ++ c->vc_font.height = c->vc_cell_height = vga_video_font_height; + c->vc_complement_mask = 0x7700; + if (vga_512_chars) + c->vc_hi_font_mask = 0x0800; +@@ -723,32 +572,32 @@ static void vgacon_cursor(struct vc_data *c, int mode) + switch (c->vc_cursor_type & 0x0f) { + case CUR_UNDERLINE: + vgacon_set_cursor_size(c->vc_x, +- c->vc_font.height - +- (c->vc_font.height < ++ c->vc_cell_height - ++ (c->vc_cell_height < + 10 ? 2 : 3), +- c->vc_font.height - +- (c->vc_font.height < ++ c->vc_cell_height - ++ (c->vc_cell_height < + 10 ? 1 : 2)); + break; + case CUR_TWO_THIRDS: + vgacon_set_cursor_size(c->vc_x, +- c->vc_font.height / 3, +- c->vc_font.height - +- (c->vc_font.height < ++ c->vc_cell_height / 3, ++ c->vc_cell_height - ++ (c->vc_cell_height < + 10 ? 
1 : 2)); + break; + case CUR_LOWER_THIRD: + vgacon_set_cursor_size(c->vc_x, +- (c->vc_font.height * 2) / 3, +- c->vc_font.height - +- (c->vc_font.height < ++ (c->vc_cell_height * 2) / 3, ++ c->vc_cell_height - ++ (c->vc_cell_height < + 10 ? 1 : 2)); + break; + case CUR_LOWER_HALF: + vgacon_set_cursor_size(c->vc_x, +- c->vc_font.height / 2, +- c->vc_font.height - +- (c->vc_font.height < ++ c->vc_cell_height / 2, ++ c->vc_cell_height - ++ (c->vc_cell_height < + 10 ? 1 : 2)); + break; + case CUR_NONE: +@@ -759,7 +608,7 @@ static void vgacon_cursor(struct vc_data *c, int mode) + break; + default: + vgacon_set_cursor_size(c->vc_x, 1, +- c->vc_font.height); ++ c->vc_cell_height); + break; + } + break; +@@ -770,13 +619,13 @@ static int vgacon_doresize(struct vc_data *c, + unsigned int width, unsigned int height) + { + unsigned long flags; +- unsigned int scanlines = height * c->vc_font.height; ++ unsigned int scanlines = height * c->vc_cell_height; + u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan; + + raw_spin_lock_irqsave(&vga_lock, flags); + + vgacon_xres = width * VGA_FONTWIDTH; +- vgacon_yres = height * c->vc_font.height; ++ vgacon_yres = height * c->vc_cell_height; + if (vga_video_type >= VIDEO_TYPE_VGAC) { + outb_p(VGA_CRTC_MAX_SCAN, vga_video_port_reg); + max_scan = inb_p(vga_video_port_val); +@@ -831,9 +680,9 @@ static int vgacon_doresize(struct vc_data *c, + static int vgacon_switch(struct vc_data *c) + { + int x = c->vc_cols * VGA_FONTWIDTH; +- int y = c->vc_rows * c->vc_font.height; ++ int y = c->vc_rows * c->vc_cell_height; + int rows = screen_info.orig_video_lines * vga_default_font_height/ +- c->vc_font.height; ++ c->vc_cell_height; + /* + * We need to save screen size here as it's the only way + * we can spot the screen has been resized and we need to +@@ -857,7 +706,6 @@ static int vgacon_switch(struct vc_data *c) + vgacon_doresize(c, c->vc_cols, c->vc_rows); + } + +- vgacon_scrollback_init(c->vc_size_row); + return 0; /* Redrawing not needed */ + } + +@@ -1272,7 +1120,7 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight) + cursor_size_lastto = 0; + c->vc_sw->con_cursor(c, CM_DRAW); + } +- c->vc_font.height = fontheight; ++ c->vc_font.height = c->vc_cell_height = fontheight; + vc_resize(c, 0, rows); /* Adjust console size */ + } + } +@@ -1326,12 +1174,20 @@ static int vgacon_resize(struct vc_data *c, unsigned int width, + if ((width << 1) * height > vga_vram_size) + return -EINVAL; + ++ if (user) { ++ /* ++ * Ho ho! Someone (svgatextmode, eh?) may have reprogrammed ++ * the video mode! Set the new defaults then and go away. ++ */ ++ screen_info.orig_video_cols = width; ++ screen_info.orig_video_lines = height; ++ vga_default_font_height = c->vc_cell_height; ++ return 0; ++ } + if (width % 2 || width > screen_info.orig_video_cols || + height > (screen_info.orig_video_lines * vga_default_font_height)/ +- c->vc_font.height) +- /* let svgatextmode tinker with video timings and +- return success */ +- return (user) ? 
0 : -EINVAL; ++ c->vc_cell_height) ++ return -EINVAL; + + if (con_is_visible(c) && !vga_is_gfx) /* who knows */ + vgacon_doresize(c, width, height); +@@ -1387,7 +1243,6 @@ static int vgacon_scroll(struct vc_data *c, int t, int b, int dir, + oldo = c->vc_origin; + delta = lines * c->vc_size_row; + if (dir == SM_UP) { +- vgacon_scrollback_update(c, t, lines); + if (c->vc_scr_end + delta >= vga_vram_end) { + scr_memcpyw((u16 *) vga_vram_base, + (u16 *) (oldo + delta), +diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig +index 1809e6663b2d..b0a94c154c7b 100644 +--- a/drivers/video/fbdev/Kconfig ++++ b/drivers/video/fbdev/Kconfig +@@ -1405,6 +1405,7 @@ config FB_ATY + select FB_CFB_IMAGEBLIT + select FB_BACKLIGHT if FB_ATY_BACKLIGHT + select FB_MACMODES if PPC ++ select FB_ATY_CT if SPARC64 && PCI + help + This driver supports graphics boards with the ATI Mach64 chips. + Say Y if you have such a graphics board. +@@ -1415,7 +1416,6 @@ config FB_ATY + config FB_ATY_CT + bool "Mach64 CT/VT/GT/LT (incl. 3D RAGE) support" + depends on PCI && FB_ATY +- default y if SPARC64 && PCI + help + Say Y here to support use of ATI's 64-bit Rage boards (or other + boards based on the Mach64 CT, VT, GT, and LT chipsets) as a +diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c +index 2811c4afde01..e8ea76848104 100644 +--- a/drivers/video/fbdev/core/fbcmap.c ++++ b/drivers/video/fbdev/core/fbcmap.c +@@ -101,17 +101,17 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags) + if (!len) + return 0; + +- cmap->red = kmalloc(size, flags); ++ cmap->red = kzalloc(size, flags); + if (!cmap->red) + goto fail; +- cmap->green = kmalloc(size, flags); ++ cmap->green = kzalloc(size, flags); + if (!cmap->green) + goto fail; +- cmap->blue = kmalloc(size, flags); ++ cmap->blue = kzalloc(size, flags); + if (!cmap->blue) + goto fail; + if (transp) { +- cmap->transp = kmalloc(size, flags); ++ cmap->transp = kzalloc(size, flags); + if (!cmap->transp) + goto fail; + } else { +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c +index 50c6bf9834ee..a1a21382ab32 100644 +--- a/drivers/video/fbdev/core/fbmem.c ++++ b/drivers/video/fbdev/core/fbmem.c +@@ -1001,6 +1001,10 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) + goto done; + } + ++ /* bitfill_aligned() assumes that it's at least 8x8 */ ++ if (var->xres < 8 || var->yres < 8) ++ return -EINVAL; ++ + ret = info->fbops->fb_check_var(var, info); + + if (ret) +diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c +index 59e1cae57948..03c0b1b8747b 100644 +--- a/drivers/video/fbdev/hgafb.c ++++ b/drivers/video/fbdev/hgafb.c +@@ -286,7 +286,7 @@ static int hga_card_detect(void) + + hga_vram = ioremap(0xb0000, hga_vram_len); + if (!hga_vram) +- goto error; ++ return -ENOMEM; + + if (request_region(0x3b0, 12, "hgafb")) + release_io_ports = 1; +@@ -346,13 +346,18 @@ static int hga_card_detect(void) + hga_type_name = "Hercules"; + break; + } +- return 1; ++ return 0; + error: + if (release_io_ports) + release_region(0x3b0, 12); + if (release_io_port) + release_region(0x3bf, 1); +- return 0; ++ ++ iounmap(hga_vram); ++ ++ pr_err("hgafb: HGA card not detected.\n"); ++ ++ return -EINVAL; + } + + /** +@@ -550,13 +555,11 @@ static struct fb_ops hgafb_ops = { + static int hgafb_probe(struct platform_device *pdev) + { + struct fb_info *info; ++ int ret; + +- if (! 
hga_card_detect()) { +- printk(KERN_INFO "hgafb: HGA card not detected.\n"); +- if (hga_vram) +- iounmap(hga_vram); +- return -EINVAL; +- } ++ ret = hga_card_detect(); ++ if (ret) ++ return ret; + + printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n", + hga_type_name, hga_vram_len/1024); +diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c +index 2fd49b2358f8..6e680007cf6b 100644 +--- a/drivers/video/fbdev/hyperv_fb.c ++++ b/drivers/video/fbdev/hyperv_fb.c +@@ -712,7 +712,10 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) + goto err1; + } + +- fb_virt = ioremap(par->mem->start, screen_fb_size); ++ /* ++ * Map the VRAM cacheable for performance. ++ */ ++ fb_virt = ioremap_wc(par->mem->start, screen_fb_size); + if (!fb_virt) + goto err2; + +diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c +index 4ef9dc94e813..4363c64d74e8 100644 +--- a/drivers/video/fbdev/imsttfb.c ++++ b/drivers/video/fbdev/imsttfb.c +@@ -1516,11 +1516,6 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + info->fix.smem_start = addr; + info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ? + 0x400000 : 0x800000); +- if (!info->screen_base) { +- release_mem_region(addr, size); +- framebuffer_release(info); +- return -ENOMEM; +- } + info->fix.mmio_start = addr + 0x800000; + par->dc_regs = ioremap(addr + 0x800000, 0x1000); + par->cmap_regs_phys = addr + 0x840000; +diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.c b/drivers/video/fbdev/msm/mdp3_ctrl.c +index 55da18f8b6b0..87050de57669 100644 +--- a/drivers/video/fbdev/msm/mdp3_ctrl.c ++++ b/drivers/video/fbdev/msm/mdp3_ctrl.c +@@ -303,6 +303,9 @@ void vsync_notify_handler(void *arg) + { + struct mdp3_session_data *session = (struct mdp3_session_data *)arg; + ++ if (!session) ++ return; ++ + session->vsync_time = ktime_get(); + MDSS_XLOG(ktime_to_ms(session->vsync_time)); + sysfs_notify_dirent(session->vsync_event_sd); +diff --git a/drivers/video/fbdev/msm/mdss_spi_display.c b/drivers/video/fbdev/msm/mdss_spi_display.c +index 798ead9f3762..1e138dcd4858 100644 +--- a/drivers/video/fbdev/msm/mdss_spi_display.c ++++ b/drivers/video/fbdev/msm/mdss_spi_display.c +@@ -303,7 +303,6 @@ int mdss_spi_display_pre_commit(struct msm_fb_data_type *mfd, + mdss_spi_display_notify(ctrl_pdata, MDP_NOTIFY_FRAME_TIMEOUT); + return -EINVAL; + } +- mdss_spi_display_notify(ctrl_pdata, MDP_NOTIFY_FRAME_DONE); + + /* swap buffer */ + temp_buf = ctrl_pdata->front_buf; +diff --git a/drivers/video/fbdev/msm/mdss_spi_panel.c b/drivers/video/fbdev/msm/mdss_spi_panel.c +index 63c73a08b319..d62e0336cad8 100644 +--- a/drivers/video/fbdev/msm/mdss_spi_panel.c ++++ b/drivers/video/fbdev/msm/mdss_spi_panel.c +@@ -535,6 +535,7 @@ int mdss_spi_panel_kickoff(struct mdss_panel_data *pdata, + + rc = mdp3_spi_tx_pixel(tx_buf, ctrl_pdata->byte_per_frame); + mutex_unlock(&ctrl_pdata->spi_tx_mutex); ++ enable_spi_panel_te_irq(ctrl_pdata, false); + + return rc; + } +@@ -549,6 +550,7 @@ void mdss_spi_tx_fb_complete(void *ctx) + atomic_read(&ctrl_pdata->koff_cnt)); + } + wake_up_all(&ctrl_pdata->tx_done_waitq); ++ mdss_spi_display_notify(ctrl_pdata, MDP_NOTIFY_FRAME_DONE); + } + } + #endif +diff --git a/drivers/video/fbdev/msm/mdss_spi_panel.h b/drivers/video/fbdev/msm/mdss_spi_panel.h +index 08042fe8e3b5..5d6b67bd41b2 100644 +--- a/drivers/video/fbdev/msm/mdss_spi_panel.h ++++ b/drivers/video/fbdev/msm/mdss_spi_panel.h +@@ -167,7 +167,7 @@ int mdss_spi_wait_tx_done(struct spi_panel_data *ctrl_pdata); 
+ int mdss_spi_panel_reset(struct mdss_panel_data *pdata, int enable); + int mdss_spi_panel_on(struct mdss_panel_data *pdata); + int mdss_spi_panel_off(struct mdss_panel_data *pdata); +- ++int mdss_spi_display_notify(struct spi_panel_data *ctrl_pdata, int event); + #else + static inline int is_spi_panel_continuous_splash_on( + struct mdss_panel_data *pdata) +diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c +index db023a97d1ea..e243254a5721 100644 +--- a/drivers/video/fbdev/neofb.c ++++ b/drivers/video/fbdev/neofb.c +@@ -1820,6 +1820,7 @@ static int neo_scan_monitor(struct fb_info *info) + #else + printk(KERN_ERR + "neofb: Only 640x480, 800x600/480 and 1024x768 panels are currently supported\n"); ++ kfree(info->monspecs.modedb); + return -1; + #endif + default: +diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c +index 7a75dfda9845..00f5a54aaf9b 100644 +--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c ++++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c +@@ -531,8 +531,11 @@ int dispc_runtime_get(void) + DSSDBG("dispc_runtime_get\n"); + + r = pm_runtime_get_sync(&dispc.pdev->dev); +- WARN_ON(r < 0); +- return r < 0 ? r : 0; ++ if (WARN_ON(r < 0)) { ++ pm_runtime_put_sync(&dispc.pdev->dev); ++ return r; ++ } ++ return 0; + } + EXPORT_SYMBOL(dispc_runtime_get); + +diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c +index 30d49f3800b3..2bfd9063cdfc 100644 +--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c ++++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c +@@ -1148,8 +1148,11 @@ static int dsi_runtime_get(struct platform_device *dsidev) + DSSDBG("dsi_runtime_get\n"); + + r = pm_runtime_get_sync(&dsi->pdev->dev); +- WARN_ON(r < 0); +- return r < 0 ? r : 0; ++ if (WARN_ON(r < 0)) { ++ pm_runtime_put_sync(&dsi->pdev->dev); ++ return r; ++ } ++ return 0; + } + + static void dsi_runtime_put(struct platform_device *dsidev) +diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c +index 48c6500c24e1..acecee5b1c10 100644 +--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c ++++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c +@@ -778,8 +778,11 @@ int dss_runtime_get(void) + DSSDBG("dss_runtime_get\n"); + + r = pm_runtime_get_sync(&dss.pdev->dev); +- WARN_ON(r < 0); +- return r < 0 ? 
r : 0; ++ if (WARN_ON(r < 0)) { ++ pm_runtime_put_sync(&dss.pdev->dev); ++ return r; ++ } ++ return 0; + } + + void dss_runtime_put(void) +@@ -843,7 +846,7 @@ static const struct dss_features omap34xx_dss_feats = { + }; + + static const struct dss_features omap3630_dss_feats = { +- .fck_div_max = 32, ++ .fck_div_max = 31, + .dss_fck_multiplier = 1, + .parent_clk_name = "dpll4_ck", + .dpi_select_source = &dss_dpi_select_source_omap2_omap3, +diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c +index 156a254705ea..ab64bf0215e8 100644 +--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c ++++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c +@@ -50,9 +50,10 @@ static int hdmi_runtime_get(void) + DSSDBG("hdmi_runtime_get\n"); + + r = pm_runtime_get_sync(&hdmi.pdev->dev); +- WARN_ON(r < 0); +- if (r < 0) ++ if (WARN_ON(r < 0)) { ++ pm_runtime_put_sync(&hdmi.pdev->dev); + return r; ++ } + + return 0; + } +diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c +index 4da36bcab977..c6efaca3235a 100644 +--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c ++++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c +@@ -54,9 +54,10 @@ static int hdmi_runtime_get(void) + DSSDBG("hdmi_runtime_get\n"); + + r = pm_runtime_get_sync(&hdmi.pdev->dev); +- WARN_ON(r < 0); +- if (r < 0) ++ if (WARN_ON(r < 0)) { ++ pm_runtime_put_sync(&hdmi.pdev->dev); + return r; ++ } + + return 0; + } +diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c +index 392464da12e4..96714b4596d2 100644 +--- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c ++++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c +@@ -402,8 +402,11 @@ static int venc_runtime_get(void) + DSSDBG("venc_runtime_get\n"); + + r = pm_runtime_get_sync(&venc.pdev->dev); +- WARN_ON(r < 0); +- return r < 0 ? 
r : 0; ++ if (WARN_ON(r < 0)) { ++ pm_runtime_put_sync(&venc.pdev->dev); ++ return r; ++ } ++ return 0; + } + + static void venc_runtime_put(void) +diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c +index a2564ab91e62..27478ffeeacd 100644 +--- a/drivers/video/fbdev/pvr2fb.c ++++ b/drivers/video/fbdev/pvr2fb.c +@@ -1029,6 +1029,8 @@ static int __init pvr2fb_setup(char *options) + if (!options || !*options) + return 0; + ++ cable_arg[0] = output_arg[0] = 0; ++ + while ((this_opt = strsep(&options, ","))) { + if (!*this_opt) + continue; +diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c +index 8503310a3816..7f8b6af29aab 100644 +--- a/drivers/video/fbdev/pxafb.c ++++ b/drivers/video/fbdev/pxafb.c +@@ -2447,8 +2447,8 @@ static int pxafb_remove(struct platform_device *dev) + + free_pages_exact(fbi->video_mem, fbi->video_mem_size); + +- dma_free_wc(&dev->dev, fbi->dma_buff_size, fbi->dma_buff, +- fbi->dma_buff_phys); ++ dma_free_coherent(&dev->dev, fbi->dma_buff_size, fbi->dma_buff, ++ fbi->dma_buff_phys); + + iounmap(fbi->mmio_base); + +diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c +index dfe3eb769638..fde27feae5d0 100644 +--- a/drivers/video/fbdev/sis/init.c ++++ b/drivers/video/fbdev/sis/init.c +@@ -2428,6 +2428,11 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, + + i = 0; + ++ if (SiS_Pr->ChipType == SIS_730) ++ queuedata = &FQBQData730[0]; ++ else ++ queuedata = &FQBQData[0]; ++ + if(ModeNo > 0x13) { + + /* Get VCLK */ +@@ -2445,12 +2450,6 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, + /* Get half colordepth */ + colorth = colortharray[(SiS_Pr->SiS_ModeType - ModeEGA)]; + +- if(SiS_Pr->ChipType == SIS_730) { +- queuedata = &FQBQData730[0]; +- } else { +- queuedata = &FQBQData[0]; +- } +- + do { + templ = SiS_CalcDelay2(SiS_Pr, queuedata[i]) * VCLK * colorth; + +diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c +index 0d92ff366a7b..17efcdd4dc99 100644 +--- a/drivers/video/fbdev/sm712fb.c ++++ b/drivers/video/fbdev/sm712fb.c +@@ -1428,6 +1428,8 @@ static int smtc_map_smem(struct smtcfb_info *sfb, + static void smtc_unmap_smem(struct smtcfb_info *sfb) + { + if (sfb && sfb->fb->screen_base) { ++ if (sfb->chip_id == 0x720) ++ sfb->fb->screen_base -= 0x00200000; + iounmap(sfb->fb->screen_base); + sfb->fb->screen_base = NULL; + } +diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c +index 5f0690c8fc93..aea8fd85cbf7 100644 +--- a/drivers/video/fbdev/vga16fb.c ++++ b/drivers/video/fbdev/vga16fb.c +@@ -243,7 +243,7 @@ static void vga16fb_update_fix(struct fb_info *info) + } + + static void vga16fb_clock_chip(struct vga16fb_par *par, +- unsigned int pixclock, ++ unsigned int *pixclock, + const struct fb_info *info, + int mul, int div) + { +@@ -259,14 +259,14 @@ static void vga16fb_clock_chip(struct vga16fb_par *par, + { 0 /* bad */, 0x00, 0x00}}; + int err; + +- pixclock = (pixclock * mul) / div; ++ *pixclock = (*pixclock * mul) / div; + best = vgaclocks; +- err = pixclock - best->pixclock; ++ err = *pixclock - best->pixclock; + if (err < 0) err = -err; + for (ptr = vgaclocks + 1; ptr->pixclock; ptr++) { + int tmp; + +- tmp = pixclock - ptr->pixclock; ++ tmp = *pixclock - ptr->pixclock; + if (tmp < 0) tmp = -tmp; + if (tmp < err) { + err = tmp; +@@ -275,7 +275,7 @@ static void vga16fb_clock_chip(struct vga16fb_par *par, + } + par->misc |= best->misc; + par->clkdiv = best->seq_clock_mode; +- pixclock = (best->pixclock * div) / 
mul; ++ *pixclock = (best->pixclock * div) / mul; + } + + #define FAIL(X) return -EINVAL +@@ -497,10 +497,10 @@ static int vga16fb_check_var(struct fb_var_screeninfo *var, + + if (mode & MODE_8BPP) + /* pixel clock == vga clock / 2 */ +- vga16fb_clock_chip(par, var->pixclock, info, 1, 2); ++ vga16fb_clock_chip(par, &var->pixclock, info, 1, 2); + else + /* pixel clock == vga clock */ +- vga16fb_clock_chip(par, var->pixclock, info, 1, 1); ++ vga16fb_clock_chip(par, &var->pixclock, info, 1, 1); + + var->red.offset = var->green.offset = var->blue.offset = + var->transp.offset = 0; +@@ -1122,7 +1122,7 @@ static void vga_8planes_imageblit(struct fb_info *info, const struct fb_image *i + char oldop = setop(0); + char oldsr = setsr(0); + char oldmask = selectmask(); +- const char *cdat = image->data; ++ const unsigned char *cdat = image->data; + u32 dx = image->dx; + char __iomem *where; + int y; +diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c +index 10951c82f6ed..7bd4c27cfb14 100644 +--- a/drivers/video/fbdev/w100fb.c ++++ b/drivers/video/fbdev/w100fb.c +@@ -583,6 +583,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par) + memsize=par->mach->mem->size; + memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize); + vfree(par->saved_extmem); ++ par->saved_extmem = NULL; + } + if (par->saved_intmem) { + memsize=MEM_INT_SIZE; +@@ -591,6 +592,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par) + else + memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, memsize); + vfree(par->saved_intmem); ++ par->saved_intmem = NULL; + } + } + +diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c +index 732e9abdcf96..29b968003525 100644 +--- a/drivers/virt/fsl_hypervisor.c ++++ b/drivers/virt/fsl_hypervisor.c +@@ -157,7 +157,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) + + unsigned int i; + long ret = 0; +- int num_pinned; /* return value from get_user_pages() */ ++ int num_pinned = 0; /* return value from get_user_pages_fast() */ + phys_addr_t remote_paddr; /* The next address in the remote buffer */ + uint32_t count; /* The number of bytes left to copy */ + +@@ -174,7 +174,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) + return -EINVAL; + + /* +- * The array of pages returned by get_user_pages() covers only ++ * The array of pages returned by get_user_pages_fast() covers only + * page-aligned memory. Since the user buffer is probably not + * page-aligned, we need to handle the discrepancy. + * +@@ -224,7 +224,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) + + /* + * 'pages' is an array of struct page pointers that's initialized by +- * get_user_pages(). ++ * get_user_pages_fast(). + */ + pages = kzalloc(num_pages * sizeof(struct page *), GFP_KERNEL); + if (!pages) { +@@ -241,7 +241,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) + if (!sg_list_unaligned) { + pr_debug("fsl-hv: could not allocate S/G list\n"); + ret = -ENOMEM; +- goto exit; ++ goto free_pages; + } + sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list)); + +@@ -253,7 +253,6 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) + up_read(¤t->mm->mmap_sem); + + if (num_pinned != num_pages) { +- /* get_user_pages() failed */ + pr_debug("fsl-hv: could not lock source buffer\n"); + ret = (num_pinned < 0) ? 
num_pinned : -EFAULT; + goto exit; +@@ -295,13 +294,13 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p) + virt_to_phys(sg_list), num_pages); + + exit: +- if (pages) { +- for (i = 0; i < num_pages; i++) +- if (pages[i]) +- put_page(pages[i]); ++ if (pages && (num_pinned > 0)) { ++ for (i = 0; i < num_pinned; i++) ++ put_page(pages[i]); + } + + kfree(sg_list_unaligned); ++free_pages: + kfree(pages); + + if (!ret) +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c +index e459cd7302e2..cf7eccfe3469 100644 +--- a/drivers/virtio/virtio_ring.c ++++ b/drivers/virtio/virtio_ring.c +@@ -785,6 +785,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) + { + struct vring_virtqueue *vq = to_vvq(_vq); + ++ if (unlikely(vq->broken)) ++ return false; ++ + virtio_mb(vq->weak_barriers); + return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx); + } +@@ -1147,7 +1150,7 @@ bool virtqueue_is_broken(struct virtqueue *_vq) + { + struct vring_virtqueue *vq = to_vvq(_vq); + +- return vq->broken; ++ return READ_ONCE(vq->broken); + } + EXPORT_SYMBOL_GPL(virtqueue_is_broken); + +@@ -1161,7 +1164,9 @@ void virtio_break_device(struct virtio_device *dev) + + list_for_each_entry(_vq, &dev->vqs, list) { + struct vring_virtqueue *vq = to_vvq(_vq); +- vq->broken = true; ++ ++ /* Pairs with READ_ONCE() in virtqueue_is_broken(). */ ++ WRITE_ONCE(vq->broken, true); + } + } + EXPORT_SYMBOL_GPL(virtio_break_device); +diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c +index dacb5919970c..d2e9d2f6a784 100644 +--- a/drivers/w1/masters/mxc_w1.c ++++ b/drivers/w1/masters/mxc_w1.c +@@ -15,7 +15,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + +@@ -48,12 +48,12 @@ struct mxc_w1_device { + static u8 mxc_w1_ds2_reset_bus(void *data) + { + struct mxc_w1_device *dev = data; +- unsigned long timeout; ++ ktime_t timeout; + + writeb(MXC_W1_CONTROL_RPP, dev->regs + MXC_W1_CONTROL); + + /* Wait for reset sequence 511+512us, use 1500us for sure */ +- timeout = jiffies + usecs_to_jiffies(1500); ++ timeout = ktime_add_us(ktime_get(), 1500); + + udelay(511 + 512); + +@@ -63,7 +63,7 @@ static u8 mxc_w1_ds2_reset_bus(void *data) + /* PST bit is valid after the RPP bit is self-cleared */ + if (!(ctrl & MXC_W1_CONTROL_RPP)) + return !(ctrl & MXC_W1_CONTROL_PST); +- } while (time_is_after_jiffies(timeout)); ++ } while (ktime_before(ktime_get(), timeout)); + + return 1; + } +@@ -76,12 +76,12 @@ static u8 mxc_w1_ds2_reset_bus(void *data) + static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit) + { + struct mxc_w1_device *dev = data; +- unsigned long timeout; ++ ktime_t timeout; + + writeb(MXC_W1_CONTROL_WR(bit), dev->regs + MXC_W1_CONTROL); + + /* Wait for read/write bit (60us, Max 120us), use 200us for sure */ +- timeout = jiffies + usecs_to_jiffies(200); ++ timeout = ktime_add_us(ktime_get(), 200); + + udelay(60); + +@@ -91,7 +91,7 @@ static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit) + /* RDST bit is valid after the WR1/RD bit is self-cleared */ + if (!(ctrl & MXC_W1_CONTROL_WR(bit))) + return !!(ctrl & MXC_W1_CONTROL_RDST); +- } while (time_is_after_jiffies(timeout)); ++ } while (ktime_before(ktime_get(), timeout)); + + return 0; + } +diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c +index 86637fec4eaa..6bc6823f81fa 100644 +--- a/drivers/w1/masters/omap_hdq.c ++++ b/drivers/w1/masters/omap_hdq.c +@@ -204,7 +204,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status) + /* 
check irqstatus */ + if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) { + dev_dbg(hdq_data->dev, "timeout waiting for" +- " TXCOMPLETE/RXCOMPLETE, %x", *status); ++ " TXCOMPLETE/RXCOMPLETE, %x\n", *status); + ret = -ETIMEDOUT; + goto out; + } +@@ -215,7 +215,7 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status) + OMAP_HDQ_FLAG_CLEAR, &tmp_status); + if (ret) { + dev_dbg(hdq_data->dev, "timeout waiting GO bit" +- " return to zero, %x", tmp_status); ++ " return to zero, %x\n", tmp_status); + } + + out: +@@ -231,7 +231,7 @@ static irqreturn_t hdq_isr(int irq, void *_hdq) + spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); + hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); + spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); +- dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus); ++ dev_dbg(hdq_data->dev, "hdq_isr: %x\n", hdq_data->hdq_irqstatus); + + if (hdq_data->hdq_irqstatus & + (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE +@@ -339,7 +339,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data) + tmp_status = hdq_data->hdq_irqstatus; + /* check irqstatus */ + if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) { +- dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x", ++ dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x\n", + tmp_status); + ret = -ETIMEDOUT; + goto out; +@@ -366,7 +366,7 @@ static int omap_hdq_break(struct hdq_data *hdq_data) + &tmp_status); + if (ret) + dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits" +- " return to zero, %x", tmp_status); ++ " return to zero, %x\n", tmp_status); + + out: + mutex_unlock(&hdq_data->hdq_mutex); +diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c +index f5ad8023c2e6..1c47e6345b57 100644 +--- a/drivers/watchdog/aspeed_wdt.c ++++ b/drivers/watchdog/aspeed_wdt.c +@@ -100,7 +100,7 @@ static int aspeed_wdt_set_timeout(struct watchdog_device *wdd, + + wdd->timeout = timeout; + +- actual = min(timeout, wdd->max_hw_heartbeat_ms * 1000); ++ actual = min(timeout, wdd->max_hw_heartbeat_ms / 1000); + + writel(actual * WDT_RATE_1MHZ, wdt->base + WDT_RELOAD_VALUE); + writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART); +diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c +index daeb645fcea8..519419136ce8 100644 +--- a/drivers/watchdog/da9062_wdt.c ++++ b/drivers/watchdog/da9062_wdt.c +@@ -94,11 +94,6 @@ static int da9062_wdt_update_timeout_register(struct da9062_watchdog *wdt, + unsigned int regval) + { + struct da9062 *chip = wdt->hw; +- int ret; +- +- ret = da9062_reset_watchdog_timer(wdt); +- if (ret) +- return ret; + + return regmap_update_bits(chip->regmap, + DA9062AA_CONTROL_D, +diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c +index 88cd2a52d8d3..ae4974701e5c 100644 +--- a/drivers/watchdog/f71808e_wdt.c ++++ b/drivers/watchdog/f71808e_wdt.c +@@ -688,9 +688,9 @@ static int __init watchdog_init(int sioaddr) + * into the module have been registered yet. 
+ */ + watchdog.sioaddr = sioaddr; +- watchdog.ident.options = WDIOC_SETTIMEOUT +- | WDIOF_MAGICCLOSE +- | WDIOF_KEEPALIVEPING; ++ watchdog.ident.options = WDIOF_MAGICCLOSE ++ | WDIOF_KEEPALIVEPING ++ | WDIOF_CARDRESET; + + snprintf(watchdog.ident.identity, + sizeof(watchdog.ident.identity), "%s watchdog", +@@ -704,6 +704,13 @@ static int __init watchdog_init(int sioaddr) + wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF); + watchdog.caused_reboot = wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS); + ++ /* ++ * We don't want WDTMOUT_STS to stick around till regular reboot. ++ * Write 1 to the bit to clear it to zero. ++ */ ++ superio_outb(sioaddr, F71808FG_REG_WDT_CONF, ++ wdt_conf | BIT(F71808FG_FLAG_WDTMOUT_STS)); ++ + superio_exit(sioaddr); + + err = watchdog_set_timeout(timeout); +diff --git a/drivers/watchdog/lpc18xx_wdt.c b/drivers/watchdog/lpc18xx_wdt.c +index fd171e6caa16..1b63d1d85aa0 100644 +--- a/drivers/watchdog/lpc18xx_wdt.c ++++ b/drivers/watchdog/lpc18xx_wdt.c +@@ -300,7 +300,7 @@ static int lpc18xx_wdt_remove(struct platform_device *pdev) + struct lpc18xx_wdt_dev *lpc18xx_wdt = platform_get_drvdata(pdev); + + dev_warn(&pdev->dev, "I quit now, hardware will probably reboot!\n"); +- del_timer(&lpc18xx_wdt->timer); ++ del_timer_sync(&lpc18xx_wdt->timer); + + watchdog_unregister_device(&lpc18xx_wdt->wdt_dev); + clk_disable_unprepare(lpc18xx_wdt->wdt_clk); +diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c +index 4f47b5e90956..8b88824a88dc 100644 +--- a/drivers/watchdog/qcom-wdt.c ++++ b/drivers/watchdog/qcom-wdt.c +@@ -121,7 +121,7 @@ static int qcom_wdt_restart(struct watchdog_device *wdd, unsigned long action, + */ + wmb(); + +- msleep(150); ++ mdelay(150); + return 0; + } + +diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c +index 47a8f1b1087d..4568af9a165b 100644 +--- a/drivers/watchdog/rdc321x_wdt.c ++++ b/drivers/watchdog/rdc321x_wdt.c +@@ -244,6 +244,8 @@ static int rdc321x_wdt_probe(struct platform_device *pdev) + + rdc321x_wdt_device.sb_pdev = pdata->sb_pdev; + rdc321x_wdt_device.base_reg = r->start; ++ rdc321x_wdt_device.queue = 0; ++ rdc321x_wdt_device.default_ticks = ticks; + + err = misc_register(&rdc321x_wdt_misc); + if (err < 0) { +@@ -258,14 +260,11 @@ static int rdc321x_wdt_probe(struct platform_device *pdev) + rdc321x_wdt_device.base_reg, RDC_WDT_RST); + + init_completion(&rdc321x_wdt_device.stop); +- rdc321x_wdt_device.queue = 0; + + clear_bit(0, &rdc321x_wdt_device.inuse); + + setup_timer(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0); + +- rdc321x_wdt_device.default_ticks = ticks; +- + dev_info(&pdev->dev, "watchdog init success\n"); + + return 0; +diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c +index 2eef58a0cf05..152db059d5aa 100644 +--- a/drivers/watchdog/sbc60xxwdt.c ++++ b/drivers/watchdog/sbc60xxwdt.c +@@ -152,7 +152,7 @@ static void wdt_startup(void) + static void wdt_turnoff(void) + { + /* Stop the timer */ +- del_timer(&timer); ++ del_timer_sync(&timer); + inb_p(wdt_stop); + pr_info("Watchdog timer is now disabled...\n"); + } +diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c +index 1cfd3f6a13d5..08500db8324f 100644 +--- a/drivers/watchdog/sc520_wdt.c ++++ b/drivers/watchdog/sc520_wdt.c +@@ -190,7 +190,7 @@ static int wdt_startup(void) + static int wdt_turnoff(void) + { + /* Stop the timer */ +- del_timer(&timer); ++ del_timer_sync(&timer); + + /* Stop the watchdog */ + wdt_config(0); +diff --git a/drivers/watchdog/w83877f_wdt.c 
b/drivers/watchdog/w83877f_wdt.c +index f0483c75ed32..4b52cf321747 100644 +--- a/drivers/watchdog/w83877f_wdt.c ++++ b/drivers/watchdog/w83877f_wdt.c +@@ -170,7 +170,7 @@ static void wdt_startup(void) + static void wdt_turnoff(void) + { + /* Stop the timer */ +- del_timer(&timer); ++ del_timer_sync(&timer); + + wdt_change(WDT_DISABLE); + +diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c +index 05f9f5983ee1..1b76e8a99c40 100644 +--- a/drivers/xen/balloon.c ++++ b/drivers/xen/balloon.c +@@ -634,11 +634,13 @@ static int add_ballooned_pages(int nr_pages) + if (xen_hotplug_unpopulated) { + st = reserve_additional_memory(); + if (st != BP_ECANCELED) { ++ int rc; ++ + mutex_unlock(&balloon_mutex); +- wait_event(balloon_wq, ++ rc = wait_event_interruptible(balloon_wq, + !list_empty(&ballooned_pages)); + mutex_lock(&balloon_mutex); +- return 0; ++ return rc ? -ENOMEM : 0; + } + } + +@@ -694,6 +696,12 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages) + out_undo: + mutex_unlock(&balloon_mutex); + free_xenballooned_pages(pgno, pages); ++ /* ++ * NB: free_xenballooned_pages will only subtract pgno pages, but since ++ * target_unpopulated is incremented with nr_pages at the start we need ++ * to remove the remaining ones also, or accounting will be screwed. ++ */ ++ balloon_stats.target_unpopulated -= nr_pages - pgno; + return ret; + } + EXPORT_SYMBOL(alloc_xenballooned_pages); +diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c +index bdff01095f54..ec3417a9e1a4 100644 +--- a/drivers/xen/events/events_2l.c ++++ b/drivers/xen/events/events_2l.c +@@ -46,6 +46,11 @@ static unsigned evtchn_2l_max_channels(void) + return EVTCHN_2L_NR_CHANNELS; + } + ++static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu) ++{ ++ clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu))); ++} ++ + static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu) + { + clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu))); +@@ -70,12 +75,6 @@ static bool evtchn_2l_is_pending(unsigned port) + return sync_test_bit(port, BM(&s->evtchn_pending[0])); + } + +-static bool evtchn_2l_test_and_set_mask(unsigned port) +-{ +- struct shared_info *s = HYPERVISOR_shared_info; +- return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0])); +-} +- + static void evtchn_2l_mask(unsigned port) + { + struct shared_info *s = HYPERVISOR_shared_info; +@@ -90,6 +89,8 @@ static void evtchn_2l_unmask(unsigned port) + + BUG_ON(!irqs_disabled()); + ++ smp_wmb(); /* All writes before unmask must be visible. */ ++ + if (unlikely((cpu != cpu_from_evtchn(port)))) + do_hypercall = 1; + else { +@@ -158,7 +159,7 @@ static inline xen_ulong_t active_evtchns(unsigned int cpu, + * a bitset of words which contain pending event bits. The second + * level is a bitset of pending events themselves. + */ +-static void evtchn_2l_handle_events(unsigned cpu) ++static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl) + { + int irq; + xen_ulong_t pending_words; +@@ -239,10 +240,7 @@ static void evtchn_2l_handle_events(unsigned cpu) + + /* Process port. 
*/ + port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx; +- irq = get_evtchn_to_irq(port); +- +- if (irq != -1) +- generic_handle_irq(irq); ++ handle_irq_for_port(port, ctrl); + + bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD; + +@@ -354,18 +352,27 @@ static void evtchn_2l_resume(void) + EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD); + } + ++static int evtchn_2l_percpu_deinit(unsigned int cpu) ++{ ++ memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) * ++ EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD); ++ ++ return 0; ++} ++ + static const struct evtchn_ops evtchn_ops_2l = { + .max_channels = evtchn_2l_max_channels, + .nr_channels = evtchn_2l_max_channels, ++ .remove = evtchn_2l_remove, + .bind_to_cpu = evtchn_2l_bind_to_cpu, + .clear_pending = evtchn_2l_clear_pending, + .set_pending = evtchn_2l_set_pending, + .is_pending = evtchn_2l_is_pending, +- .test_and_set_mask = evtchn_2l_test_and_set_mask, + .mask = evtchn_2l_mask, + .unmask = evtchn_2l_unmask, + .handle_events = evtchn_2l_handle_events, + .resume = evtchn_2l_resume, ++ .percpu_deinit = evtchn_2l_percpu_deinit, + }; + + void __init xen_evtchn_2l_init(void) +diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c +index 4b0cc9d0ca53..fbb6a4701ea3 100644 +--- a/drivers/xen/events/events_base.c ++++ b/drivers/xen/events/events_base.c +@@ -32,6 +32,10 @@ + #include + #include + #include ++#include ++#include ++#include ++#include + + #ifdef CONFIG_X86 + #include +@@ -62,6 +66,15 @@ + + #include "events_internal.h" + ++#undef MODULE_PARAM_PREFIX ++#define MODULE_PARAM_PREFIX "xen." ++ ++static uint __read_mostly event_loop_timeout = 2; ++module_param(event_loop_timeout, uint, 0644); ++ ++static uint __read_mostly event_eoi_delay = 10; ++module_param(event_eoi_delay, uint, 0644); ++ + const struct evtchn_ops *evtchn_ops; + + /* +@@ -70,6 +83,25 @@ const struct evtchn_ops *evtchn_ops; + */ + static DEFINE_MUTEX(irq_mapping_update_lock); + ++/* ++ * Lock protecting event handling loop against removing event channels. ++ * Adding of event channels is no issue as the associated IRQ becomes active ++ * only after everything is setup (before request_[threaded_]irq() the handler ++ * can't be entered for an event, as the event channel will be unmasked only ++ * then). ++ */ ++static DEFINE_RWLOCK(evtchn_rwlock); ++ ++/* ++ * Lock hierarchy: ++ * ++ * irq_mapping_update_lock ++ * evtchn_rwlock ++ * IRQ-desc lock ++ * percpu eoi_list_lock ++ * irq_info->lock ++ */ ++ + static LIST_HEAD(xen_irq_list_head); + + /* IRQ <-> VIRQ mapping. */ +@@ -91,18 +123,23 @@ static bool (*pirq_needs_eoi)(unsigned irq); + /* Xen will never allocate port zero for any purpose. 
*/ + #define VALID_EVTCHN(chn) ((chn) != 0) + ++static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY]; ++ + static struct irq_chip xen_dynamic_chip; ++static struct irq_chip xen_lateeoi_chip; + static struct irq_chip xen_percpu_chip; + static struct irq_chip xen_pirq_chip; + static void enable_dynirq(struct irq_data *data); + static void disable_dynirq(struct irq_data *data); + +-static void clear_evtchn_to_irq_row(unsigned row) ++static DEFINE_PER_CPU(unsigned int, irq_epoch); ++ ++static void clear_evtchn_to_irq_row(int *evtchn_row) + { + unsigned col; + + for (col = 0; col < EVTCHN_PER_ROW; col++) +- evtchn_to_irq[row][col] = -1; ++ WRITE_ONCE(evtchn_row[col], -1); + } + + static void clear_evtchn_to_irq_all(void) +@@ -112,7 +149,7 @@ static void clear_evtchn_to_irq_all(void) + for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) { + if (evtchn_to_irq[row] == NULL) + continue; +- clear_evtchn_to_irq_row(row); ++ clear_evtchn_to_irq_row(evtchn_to_irq[row]); + } + } + +@@ -120,6 +157,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq) + { + unsigned row; + unsigned col; ++ int *evtchn_row; + + if (evtchn >= xen_evtchn_max_channels()) + return -EINVAL; +@@ -132,14 +170,21 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq) + if (irq == -1) + return 0; + +- evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL); +- if (evtchn_to_irq[row] == NULL) ++ evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0); ++ if (evtchn_row == NULL) + return -ENOMEM; + +- clear_evtchn_to_irq_row(row); ++ clear_evtchn_to_irq_row(evtchn_row); ++ ++ /* ++ * We've prepared an empty row for the mapping. If a different ++ * thread was faster inserting it, we can drop ours. ++ */ ++ if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL) ++ free_page((unsigned long) evtchn_row); + } + +- evtchn_to_irq[row][col] = irq; ++ WRITE_ONCE(evtchn_to_irq[row][col], irq); + return 0; + } + +@@ -149,13 +194,24 @@ int get_evtchn_to_irq(unsigned evtchn) + return -1; + if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL) + return -1; +- return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]; ++ return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]); + } + + /* Get info for IRQ */ + struct irq_info *info_for_irq(unsigned irq) + { +- return irq_get_handler_data(irq); ++ if (irq < nr_legacy_irqs()) ++ return legacy_info_ptrs[irq]; ++ else ++ return irq_get_chip_data(irq); ++} ++ ++static void set_info_for_irq(unsigned int irq, struct irq_info *info) ++{ ++ if (irq < nr_legacy_irqs()) ++ legacy_info_ptrs[irq] = info; ++ else ++ irq_set_chip_data(irq, info); + } + + /* Constructors for packed IRQ information. 
*/ +@@ -173,6 +229,8 @@ static int xen_irq_info_common_setup(struct irq_info *info, + info->irq = irq; + info->evtchn = evtchn; + info->cpu = cpu; ++ info->mask_reason = EVT_MASK_REASON_EXPLICIT; ++ raw_spin_lock_init(&info->lock); + + ret = set_evtchn_to_irq(evtchn, irq); + if (ret < 0) +@@ -239,6 +297,7 @@ static int xen_irq_info_pirq_setup(unsigned irq, + static void xen_irq_info_cleanup(struct irq_info *info) + { + set_evtchn_to_irq(info->evtchn, -1); ++ xen_evtchn_port_remove(info->evtchn, info->cpu); + info->evtchn = 0; + } + +@@ -247,10 +306,14 @@ static void xen_irq_info_cleanup(struct irq_info *info) + */ + unsigned int evtchn_from_irq(unsigned irq) + { +- if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq))) ++ const struct irq_info *info = NULL; ++ ++ if (likely(irq < nr_irqs)) ++ info = info_for_irq(irq); ++ if (!info) + return 0; + +- return info_for_irq(irq)->evtchn; ++ return info->evtchn; + } + + unsigned irq_from_evtchn(unsigned int evtchn) +@@ -315,6 +378,34 @@ unsigned int cpu_from_evtchn(unsigned int evtchn) + return ret; + } + ++static void do_mask(struct irq_info *info, u8 reason) ++{ ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&info->lock, flags); ++ ++ if (!info->mask_reason) ++ mask_evtchn(info->evtchn); ++ ++ info->mask_reason |= reason; ++ ++ raw_spin_unlock_irqrestore(&info->lock, flags); ++} ++ ++static void do_unmask(struct irq_info *info, u8 reason) ++{ ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&info->lock, flags); ++ ++ info->mask_reason &= ~reason; ++ ++ if (!info->mask_reason) ++ unmask_evtchn(info->evtchn); ++ ++ raw_spin_unlock_irqrestore(&info->lock, flags); ++} ++ + #ifdef CONFIG_X86 + static bool pirq_check_eoi_map(unsigned irq) + { +@@ -369,9 +460,160 @@ void notify_remote_via_irq(int irq) + } + EXPORT_SYMBOL_GPL(notify_remote_via_irq); + ++struct lateeoi_work { ++ struct delayed_work delayed; ++ spinlock_t eoi_list_lock; ++ struct list_head eoi_list; ++}; ++ ++static DEFINE_PER_CPU(struct lateeoi_work, lateeoi); ++ ++static void lateeoi_list_del(struct irq_info *info) ++{ ++ struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&eoi->eoi_list_lock, flags); ++ list_del_init(&info->eoi_list); ++ spin_unlock_irqrestore(&eoi->eoi_list_lock, flags); ++} ++ ++static void lateeoi_list_add(struct irq_info *info) ++{ ++ struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu); ++ struct irq_info *elem; ++ u64 now = get_jiffies_64(); ++ unsigned long delay; ++ unsigned long flags; ++ ++ if (now < info->eoi_time) ++ delay = info->eoi_time - now; ++ else ++ delay = 1; ++ ++ spin_lock_irqsave(&eoi->eoi_list_lock, flags); ++ ++ if (list_empty(&eoi->eoi_list)) { ++ list_add(&info->eoi_list, &eoi->eoi_list); ++ mod_delayed_work_on(info->eoi_cpu, system_wq, ++ &eoi->delayed, delay); ++ } else { ++ list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) { ++ if (elem->eoi_time <= info->eoi_time) ++ break; ++ } ++ list_add(&info->eoi_list, &elem->eoi_list); ++ } ++ ++ spin_unlock_irqrestore(&eoi->eoi_list_lock, flags); ++} ++ ++static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious) ++{ ++ evtchn_port_t evtchn; ++ unsigned int cpu; ++ unsigned int delay = 0; ++ ++ evtchn = info->evtchn; ++ if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list)) ++ return; ++ ++ if (spurious) { ++ if ((1 << info->spurious_cnt) < (HZ << 2)) ++ info->spurious_cnt++; ++ if (info->spurious_cnt > 1) { ++ delay = 1 << (info->spurious_cnt - 2); ++ if (delay > HZ) ++ delay = HZ; ++ if 
(!info->eoi_time) ++ info->eoi_cpu = smp_processor_id(); ++ info->eoi_time = get_jiffies_64() + delay; ++ } ++ } else { ++ info->spurious_cnt = 0; ++ } ++ ++ cpu = info->eoi_cpu; ++ if (info->eoi_time && ++ (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) { ++ lateeoi_list_add(info); ++ return; ++ } ++ ++ info->eoi_time = 0; ++ ++ /* is_active hasn't been reset yet, do it now. */ ++ smp_store_release(&info->is_active, 0); ++ do_unmask(info, EVT_MASK_REASON_EOI_PENDING); ++} ++ ++static void xen_irq_lateeoi_worker(struct work_struct *work) ++{ ++ struct lateeoi_work *eoi; ++ struct irq_info *info; ++ u64 now = get_jiffies_64(); ++ unsigned long flags; ++ ++ eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed); ++ ++ read_lock_irqsave(&evtchn_rwlock, flags); ++ ++ while (true) { ++ spin_lock(&eoi->eoi_list_lock); ++ ++ info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info, ++ eoi_list); ++ ++ if (info == NULL || now < info->eoi_time) { ++ spin_unlock(&eoi->eoi_list_lock); ++ break; ++ } ++ ++ list_del_init(&info->eoi_list); ++ ++ spin_unlock(&eoi->eoi_list_lock); ++ ++ info->eoi_time = 0; ++ ++ xen_irq_lateeoi_locked(info, false); ++ } ++ ++ if (info) ++ mod_delayed_work_on(info->eoi_cpu, system_wq, ++ &eoi->delayed, info->eoi_time - now); ++ ++ read_unlock_irqrestore(&evtchn_rwlock, flags); ++} ++ ++static void xen_cpu_init_eoi(unsigned int cpu) ++{ ++ struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu); ++ ++ INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker); ++ spin_lock_init(&eoi->eoi_list_lock); ++ INIT_LIST_HEAD(&eoi->eoi_list); ++} ++ ++void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags) ++{ ++ struct irq_info *info; ++ unsigned long flags; ++ ++ read_lock_irqsave(&evtchn_rwlock, flags); ++ ++ info = info_for_irq(irq); ++ ++ if (info) ++ xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS); ++ ++ read_unlock_irqrestore(&evtchn_rwlock, flags); ++} ++EXPORT_SYMBOL_GPL(xen_irq_lateeoi); ++ + static void xen_irq_init(unsigned irq) + { + struct irq_info *info; ++ + #ifdef CONFIG_SMP + /* By default all event channels notify CPU#0. */ + cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0)); +@@ -384,8 +626,9 @@ static void xen_irq_init(unsigned irq) + info->type = IRQT_UNBOUND; + info->refcnt = -1; + +- irq_set_handler_data(irq, info); ++ set_info_for_irq(irq, info); + ++ INIT_LIST_HEAD(&info->eoi_list); + list_add_tail(&info->list, &xen_irq_list_head); + } + +@@ -433,17 +676,25 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) + + static void xen_free_irq(unsigned irq) + { +- struct irq_info *info = irq_get_handler_data(irq); ++ struct irq_info *info = info_for_irq(irq); ++ unsigned long flags; + + if (WARN_ON(!info)) + return; + ++ write_lock_irqsave(&evtchn_rwlock, flags); ++ ++ if (!list_empty(&info->eoi_list)) ++ lateeoi_list_del(info); ++ + list_del(&info->list); + +- irq_set_handler_data(irq, NULL); ++ set_info_for_irq(irq, NULL); + + WARN_ON(info->refcnt > 0); + ++ write_unlock_irqrestore(&evtchn_rwlock, flags); ++ + kfree(info); + + /* Legacy IRQ descriptors are managed by the arch. 
*/ +@@ -462,6 +713,12 @@ static void xen_evtchn_close(unsigned int port) + BUG(); + } + ++static void event_handler_exit(struct irq_info *info) ++{ ++ smp_store_release(&info->is_active, 0); ++ clear_evtchn(info->evtchn); ++} ++ + static void pirq_query_unmask(int irq) + { + struct physdev_irq_status_query irq_status; +@@ -480,7 +737,8 @@ static void pirq_query_unmask(int irq) + + static void eoi_pirq(struct irq_data *data) + { +- int evtchn = evtchn_from_irq(data->irq); ++ struct irq_info *info = info_for_irq(data->irq); ++ int evtchn = info ? info->evtchn : 0; + struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; + int rc = 0; + +@@ -489,16 +747,15 @@ static void eoi_pirq(struct irq_data *data) + + if (unlikely(irqd_is_setaffinity_pending(data)) && + likely(!irqd_irq_disabled(data))) { +- int masked = test_and_set_mask(evtchn); ++ do_mask(info, EVT_MASK_REASON_TEMPORARY); + +- clear_evtchn(evtchn); ++ event_handler_exit(info); + + irq_move_masked_irq(data); + +- if (!masked) +- unmask_evtchn(evtchn); ++ do_unmask(info, EVT_MASK_REASON_TEMPORARY); + } else +- clear_evtchn(evtchn); ++ event_handler_exit(info); + + if (pirq_needs_eoi(data->irq)) { + rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); +@@ -549,7 +806,8 @@ static unsigned int __startup_pirq(unsigned int irq) + goto err; + + out: +- unmask_evtchn(evtchn); ++ do_unmask(info, EVT_MASK_REASON_EXPLICIT); ++ + eoi_pirq(irq_get_irq_data(irq)); + + return 0; +@@ -576,7 +834,7 @@ static void shutdown_pirq(struct irq_data *data) + if (!VALID_EVTCHN(evtchn)) + return; + +- mask_evtchn(evtchn); ++ do_mask(info, EVT_MASK_REASON_EXPLICIT); + xen_evtchn_close(evtchn); + xen_irq_info_cleanup(info); + } +@@ -610,7 +868,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi); + static void __unbind_from_irq(unsigned int irq) + { + int evtchn = evtchn_from_irq(irq); +- struct irq_info *info = irq_get_handler_data(irq); ++ struct irq_info *info = info_for_irq(irq); + + if (info->refcnt > 0) { + info->refcnt--; +@@ -835,7 +1093,7 @@ int xen_pirq_from_irq(unsigned irq) + } + EXPORT_SYMBOL_GPL(xen_pirq_from_irq); + +-int bind_evtchn_to_irq(unsigned int evtchn) ++static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip) + { + int irq; + int ret; +@@ -852,7 +1110,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) + if (irq < 0) + goto out; + +- irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, ++ irq_set_chip_and_handler_name(irq, chip, + handle_edge_irq, "event"); + + ret = xen_irq_info_evtchn_setup(irq, evtchn); +@@ -873,8 +1131,19 @@ int bind_evtchn_to_irq(unsigned int evtchn) + + return irq; + } ++ ++int bind_evtchn_to_irq(evtchn_port_t evtchn) ++{ ++ return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip); ++} + EXPORT_SYMBOL_GPL(bind_evtchn_to_irq); + ++int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn) ++{ ++ return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip); ++} ++EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi); ++ + static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) + { + struct evtchn_bind_ipi bind_ipi; +@@ -916,8 +1185,9 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) + return irq; + } + +-int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, +- unsigned int remote_port) ++static int bind_interdomain_evtchn_to_irq_chip(unsigned int remote_domain, ++ evtchn_port_t remote_port, ++ struct irq_chip *chip) + { + struct evtchn_bind_interdomain bind_interdomain; + int err; +@@ -928,10 +1198,26 @@ int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, + err = 
HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, + &bind_interdomain); + +- return err ? : bind_evtchn_to_irq(bind_interdomain.local_port); ++ return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port, ++ chip); ++} ++ ++int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, ++ evtchn_port_t remote_port) ++{ ++ return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port, ++ &xen_dynamic_chip); + } + EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq); + ++int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain, ++ evtchn_port_t remote_port) ++{ ++ return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port, ++ &xen_lateeoi_chip); ++} ++EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi); ++ + static int find_virq(unsigned int virq, unsigned int cpu) + { + struct evtchn_status status; +@@ -1027,14 +1313,15 @@ static void unbind_from_irq(unsigned int irq) + mutex_unlock(&irq_mapping_update_lock); + } + +-int bind_evtchn_to_irqhandler(unsigned int evtchn, +- irq_handler_t handler, +- unsigned long irqflags, +- const char *devname, void *dev_id) ++static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn, ++ irq_handler_t handler, ++ unsigned long irqflags, ++ const char *devname, void *dev_id, ++ struct irq_chip *chip) + { + int irq, retval; + +- irq = bind_evtchn_to_irq(evtchn); ++ irq = bind_evtchn_to_irq_chip(evtchn, chip); + if (irq < 0) + return irq; + retval = request_irq(irq, handler, irqflags, devname, dev_id); +@@ -1045,18 +1332,38 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn, + + return irq; + } ++ ++int bind_evtchn_to_irqhandler(evtchn_port_t evtchn, ++ irq_handler_t handler, ++ unsigned long irqflags, ++ const char *devname, void *dev_id) ++{ ++ return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags, ++ devname, dev_id, ++ &xen_dynamic_chip); ++} + EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler); + +-int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, +- unsigned int remote_port, +- irq_handler_t handler, +- unsigned long irqflags, +- const char *devname, +- void *dev_id) ++int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn, ++ irq_handler_t handler, ++ unsigned long irqflags, ++ const char *devname, void *dev_id) ++{ ++ return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags, ++ devname, dev_id, ++ &xen_lateeoi_chip); ++} ++EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi); ++ ++static int bind_interdomain_evtchn_to_irqhandler_chip( ++ unsigned int remote_domain, evtchn_port_t remote_port, ++ irq_handler_t handler, unsigned long irqflags, ++ const char *devname, void *dev_id, struct irq_chip *chip) + { + int irq, retval; + +- irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port); ++ irq = bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port, ++ chip); + if (irq < 0) + return irq; + +@@ -1068,8 +1375,33 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, + + return irq; + } ++ ++int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, ++ evtchn_port_t remote_port, ++ irq_handler_t handler, ++ unsigned long irqflags, ++ const char *devname, ++ void *dev_id) ++{ ++ return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain, ++ remote_port, handler, irqflags, devname, ++ dev_id, &xen_dynamic_chip); ++} + EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler); + ++int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain, ++ evtchn_port_t remote_port, ++ irq_handler_t handler, ++ unsigned 
long irqflags, ++ const char *devname, ++ void *dev_id) ++{ ++ return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain, ++ remote_port, handler, irqflags, devname, ++ dev_id, &xen_lateeoi_chip); ++} ++EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi); ++ + int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, + irq_handler_t handler, + unsigned long irqflags, const char *devname, void *dev_id) +@@ -1114,7 +1446,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, + + void unbind_from_irqhandler(unsigned int irq, void *dev_id) + { +- struct irq_info *info = irq_get_handler_data(irq); ++ struct irq_info *info = info_for_irq(irq); + + if (WARN_ON(!info)) + return; +@@ -1148,7 +1480,7 @@ int evtchn_make_refcounted(unsigned int evtchn) + if (irq == -1) + return -ENOENT; + +- info = irq_get_handler_data(irq); ++ info = info_for_irq(irq); + + if (!info) + return -ENOENT; +@@ -1176,13 +1508,13 @@ int evtchn_get(unsigned int evtchn) + if (irq == -1) + goto done; + +- info = irq_get_handler_data(irq); ++ info = info_for_irq(irq); + + if (!info) + goto done; + + err = -EINVAL; +- if (info->refcnt <= 0) ++ if (info->refcnt <= 0 || info->refcnt == SHRT_MAX) + goto done; + + info->refcnt++; +@@ -1221,6 +1553,56 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector) + notify_remote_via_irq(irq); + } + ++struct evtchn_loop_ctrl { ++ ktime_t timeout; ++ unsigned count; ++ bool defer_eoi; ++}; ++ ++void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl) ++{ ++ int irq; ++ struct irq_info *info; ++ ++ irq = get_evtchn_to_irq(port); ++ if (irq == -1) ++ return; ++ ++ /* ++ * Check for timeout every 256 events. ++ * We are setting the timeout value only after the first 256 ++ * events in order to not hurt the common case of few loop ++ * iterations. The 256 is basically an arbitrary value. ++ * ++ * In case we are hitting the timeout we need to defer all further ++ * EOIs in order to ensure to leave the event handling loop rather ++ * sooner than later. 
++ */ ++ if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) { ++ ktime_t kt = ktime_get(); ++ ++ if (!ctrl->timeout.tv64) { ++ kt = ktime_add_ms(kt, ++ jiffies_to_msecs(event_loop_timeout)); ++ ctrl->timeout = kt; ++ } else if (kt.tv64 > ctrl->timeout.tv64) { ++ ctrl->defer_eoi = true; ++ } ++ } ++ ++ info = info_for_irq(irq); ++ if (xchg_acquire(&info->is_active, 1)) ++ return; ++ ++ if (ctrl->defer_eoi) { ++ info->eoi_cpu = smp_processor_id(); ++ info->irq_epoch = __this_cpu_read(irq_epoch); ++ info->eoi_time = get_jiffies_64() + event_eoi_delay; ++ } ++ ++ generic_handle_irq(irq); ++} ++ + static DEFINE_PER_CPU(unsigned, xed_nesting_count); + + static void __xen_evtchn_do_upcall(void) +@@ -1228,6 +1610,9 @@ static void __xen_evtchn_do_upcall(void) + struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); + int cpu = get_cpu(); + unsigned count; ++ struct evtchn_loop_ctrl ctrl = { 0 }; ++ ++ read_lock(&evtchn_rwlock); + + do { + vcpu_info->evtchn_upcall_pending = 0; +@@ -1235,7 +1620,7 @@ static void __xen_evtchn_do_upcall(void) + if (__this_cpu_inc_return(xed_nesting_count) - 1) + goto out; + +- xen_evtchn_handle_events(cpu); ++ xen_evtchn_handle_events(cpu, &ctrl); + + BUG_ON(!irqs_disabled()); + +@@ -1244,6 +1629,14 @@ static void __xen_evtchn_do_upcall(void) + } while (count != 1 || vcpu_info->evtchn_upcall_pending); + + out: ++ read_unlock(&evtchn_rwlock); ++ ++ /* ++ * Increment irq_epoch only now to defer EOIs only for ++ * xen_irq_lateeoi() invocations occurring from inside the loop ++ * above. ++ */ ++ __this_cpu_inc(irq_epoch); + + put_cpu(); + } +@@ -1306,8 +1699,8 @@ void rebind_evtchn_irq(int evtchn, int irq) + static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) + { + struct evtchn_bind_vcpu bind_vcpu; +- int evtchn = evtchn_from_irq(irq); +- int masked; ++ struct irq_info *info = info_for_irq(irq); ++ int evtchn = info ? info->evtchn : 0; + + if (!VALID_EVTCHN(evtchn)) + return -1; +@@ -1323,7 +1716,7 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) + * Mask the event while changing the VCPU binding to prevent + * it being delivered on an unexpected VCPU. + */ +- masked = test_and_set_mask(evtchn); ++ do_mask(info, EVT_MASK_REASON_TEMPORARY); + + /* + * If this fails, it usually just indicates that we're dealing with a +@@ -1333,8 +1726,7 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) + if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) + bind_evtchn_to_cpu(evtchn, tcpu); + +- if (!masked) +- unmask_evtchn(evtchn); ++ do_unmask(info, EVT_MASK_REASON_TEMPORARY); + + return 0; + } +@@ -1349,39 +1741,41 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, + + static void enable_dynirq(struct irq_data *data) + { +- int evtchn = evtchn_from_irq(data->irq); ++ struct irq_info *info = info_for_irq(data->irq); ++ evtchn_port_t evtchn = info ? info->evtchn : 0; + + if (VALID_EVTCHN(evtchn)) +- unmask_evtchn(evtchn); ++ do_unmask(info, EVT_MASK_REASON_EXPLICIT); + } + + static void disable_dynirq(struct irq_data *data) + { +- int evtchn = evtchn_from_irq(data->irq); ++ struct irq_info *info = info_for_irq(data->irq); ++ evtchn_port_t evtchn = info ? info->evtchn : 0; + + if (VALID_EVTCHN(evtchn)) +- mask_evtchn(evtchn); ++ do_mask(info, EVT_MASK_REASON_EXPLICIT); + } + + static void ack_dynirq(struct irq_data *data) + { +- int evtchn = evtchn_from_irq(data->irq); ++ struct irq_info *info = info_for_irq(data->irq); ++ evtchn_port_t evtchn = info ? 
info->evtchn : 0; + + if (!VALID_EVTCHN(evtchn)) + return; + + if (unlikely(irqd_is_setaffinity_pending(data)) && + likely(!irqd_irq_disabled(data))) { +- int masked = test_and_set_mask(evtchn); ++ do_mask(info, EVT_MASK_REASON_TEMPORARY); + +- clear_evtchn(evtchn); ++ event_handler_exit(info); + + irq_move_masked_irq(data); + +- if (!masked) +- unmask_evtchn(evtchn); ++ do_unmask(info, EVT_MASK_REASON_TEMPORARY); + } else +- clear_evtchn(evtchn); ++ event_handler_exit(info); + } + + static void mask_ack_dynirq(struct irq_data *data) +@@ -1390,18 +1784,51 @@ static void mask_ack_dynirq(struct irq_data *data) + ack_dynirq(data); + } + ++static void lateeoi_ack_dynirq(struct irq_data *data) ++{ ++ struct irq_info *info = info_for_irq(data->irq); ++ evtchn_port_t evtchn = info ? info->evtchn : 0; ++ ++ if (!VALID_EVTCHN(evtchn)) ++ return; ++ ++ do_mask(info, EVT_MASK_REASON_EOI_PENDING); ++ ++ if (unlikely(irqd_is_setaffinity_pending(data)) && ++ likely(!irqd_irq_disabled(data))) { ++ do_mask(info, EVT_MASK_REASON_TEMPORARY); ++ ++ clear_evtchn(evtchn); ++ ++ irq_move_masked_irq(data); ++ ++ do_unmask(info, EVT_MASK_REASON_TEMPORARY); ++ } else ++ clear_evtchn(evtchn); ++} ++ ++static void lateeoi_mask_ack_dynirq(struct irq_data *data) ++{ ++ struct irq_info *info = info_for_irq(data->irq); ++ evtchn_port_t evtchn = info ? info->evtchn : 0; ++ ++ if (VALID_EVTCHN(evtchn)) { ++ do_mask(info, EVT_MASK_REASON_EXPLICIT); ++ ack_dynirq(data); ++ } ++} ++ + static int retrigger_dynirq(struct irq_data *data) + { +- unsigned int evtchn = evtchn_from_irq(data->irq); +- int masked; ++ struct irq_info *info = info_for_irq(data->irq); ++ evtchn_port_t evtchn = info ? info->evtchn : 0; + + if (!VALID_EVTCHN(evtchn)) + return 0; + +- masked = test_and_set_mask(evtchn); ++ do_mask(info, EVT_MASK_REASON_TEMPORARY); + set_evtchn(evtchn); +- if (!masked) +- unmask_evtchn(evtchn); ++ do_unmask(info, EVT_MASK_REASON_TEMPORARY); + + return 1; + } +@@ -1496,10 +1923,11 @@ static void restore_cpu_ipis(unsigned int cpu) + /* Clear an irq's pending state, in preparation for polling on it */ + void xen_clear_irq_pending(int irq) + { +- int evtchn = evtchn_from_irq(irq); ++ struct irq_info *info = info_for_irq(irq); ++ evtchn_port_t evtchn = info ? info->evtchn : 0; + + if (VALID_EVTCHN(evtchn)) +- clear_evtchn(evtchn); ++ event_handler_exit(info); + } + EXPORT_SYMBOL(xen_clear_irq_pending); + void xen_set_irq_pending(int irq) +@@ -1600,6 +2028,21 @@ static struct irq_chip xen_dynamic_chip __read_mostly = { + .irq_retrigger = retrigger_dynirq, + }; + ++static struct irq_chip xen_lateeoi_chip __read_mostly = { ++ /* The chip name needs to contain "xen-dyn" for irqbalance to work. */ ++ .name = "xen-dyn-lateeoi", ++ ++ .irq_disable = disable_dynirq, ++ .irq_mask = disable_dynirq, ++ .irq_unmask = enable_dynirq, ++ ++ .irq_ack = lateeoi_ack_dynirq, ++ .irq_mask_ack = lateeoi_mask_ack_dynirq, ++ ++ .irq_set_affinity = set_affinity_irq, ++ .irq_retrigger = retrigger_dynirq, ++}; ++ + static struct irq_chip xen_pirq_chip __read_mostly = { + .name = "xen-pirq", + +@@ -1667,12 +2110,31 @@ void xen_callback_vector(void) + void xen_callback_vector(void) {} + #endif + +-#undef MODULE_PARAM_PREFIX +-#define MODULE_PARAM_PREFIX "xen." 
+- + static bool fifo_events = true; + module_param(fifo_events, bool, 0); + ++static int xen_evtchn_cpu_prepare(unsigned int cpu) ++{ ++ int ret = 0; ++ ++ xen_cpu_init_eoi(cpu); ++ ++ if (evtchn_ops->percpu_init) ++ ret = evtchn_ops->percpu_init(cpu); ++ ++ return ret; ++} ++ ++static int xen_evtchn_cpu_dead(unsigned int cpu) ++{ ++ int ret = 0; ++ ++ if (evtchn_ops->percpu_deinit) ++ ret = evtchn_ops->percpu_deinit(cpu); ++ ++ return ret; ++} ++ + void __init xen_init_IRQ(void) + { + int ret = -EINVAL; +@@ -1682,6 +2144,12 @@ void __init xen_init_IRQ(void) + if (ret < 0) + xen_evtchn_2l_init(); + ++ xen_cpu_init_eoi(smp_processor_id()); ++ ++ cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE, ++ "CPUHP_XEN_EVTCHN_PREPARE", ++ xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead); ++ + evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()), + sizeof(*evtchn_to_irq), GFP_KERNEL); + BUG_ON(!evtchn_to_irq); +diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c +index 7ef27c6ed72f..3f7d325d2be4 100644 +--- a/drivers/xen/events/events_fifo.c ++++ b/drivers/xen/events/events_fifo.c +@@ -209,12 +209,6 @@ static bool evtchn_fifo_is_pending(unsigned port) + return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); + } + +-static bool evtchn_fifo_test_and_set_mask(unsigned port) +-{ +- event_word_t *word = event_word_from_port(port); +- return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); +-} +- + static void evtchn_fifo_mask(unsigned port) + { + event_word_t *word = event_word_from_port(port); +@@ -227,19 +221,25 @@ static bool evtchn_fifo_is_masked(unsigned port) + return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); + } + /* +- * Clear MASKED, spinning if BUSY is set. ++ * Clear MASKED if not PENDING, spinning if BUSY is set. ++ * Return true if mask was cleared. 
+ */ +-static void clear_masked(volatile event_word_t *word) ++static bool clear_masked_cond(volatile event_word_t *word) + { + event_word_t new, old, w; + + w = *word; + + do { ++ if (w & (1 << EVTCHN_FIFO_PENDING)) ++ return false; ++ + old = w & ~(1 << EVTCHN_FIFO_BUSY); + new = old & ~(1 << EVTCHN_FIFO_MASKED); + w = sync_cmpxchg(word, old, new); + } while (w != old); ++ ++ return true; + } + + static void evtchn_fifo_unmask(unsigned port) +@@ -248,8 +248,7 @@ static void evtchn_fifo_unmask(unsigned port) + + BUG_ON(!irqs_disabled()); + +- clear_masked(word); +- if (evtchn_fifo_is_pending(port)) { ++ if (!clear_masked_cond(word)) { + struct evtchn_unmask unmask = { .port = port }; + (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask); + } +@@ -270,19 +269,9 @@ static uint32_t clear_linked(volatile event_word_t *word) + return w & EVTCHN_FIFO_LINK_MASK; + } + +-static void handle_irq_for_port(unsigned port) +-{ +- int irq; +- +- irq = get_evtchn_to_irq(port); +- if (irq != -1) +- generic_handle_irq(irq); +-} +- +-static void consume_one_event(unsigned cpu, ++static void consume_one_event(unsigned cpu, struct evtchn_loop_ctrl *ctrl, + struct evtchn_fifo_control_block *control_block, +- unsigned priority, unsigned long *ready, +- bool drop) ++ unsigned priority, unsigned long *ready) + { + struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); + uint32_t head; +@@ -315,16 +304,17 @@ static void consume_one_event(unsigned cpu, + clear_bit(priority, ready); + + if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) { +- if (unlikely(drop)) ++ if (unlikely(!ctrl)) + pr_warn("Dropping pending event for port %u\n", port); + else +- handle_irq_for_port(port); ++ handle_irq_for_port(port, ctrl); + } + + q->head[priority] = head; + } + +-static void __evtchn_fifo_handle_events(unsigned cpu, bool drop) ++static void __evtchn_fifo_handle_events(unsigned cpu, ++ struct evtchn_loop_ctrl *ctrl) + { + struct evtchn_fifo_control_block *control_block; + unsigned long ready; +@@ -336,14 +326,15 @@ static void __evtchn_fifo_handle_events(unsigned cpu, bool drop) + + while (ready) { + q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES); +- consume_one_event(cpu, control_block, q, &ready, drop); ++ consume_one_event(cpu, ctrl, control_block, q, &ready); + ready |= xchg(&control_block->ready, 0); + } + } + +-static void evtchn_fifo_handle_events(unsigned cpu) ++static void evtchn_fifo_handle_events(unsigned cpu, ++ struct evtchn_loop_ctrl *ctrl) + { +- __evtchn_fifo_handle_events(cpu, false); ++ __evtchn_fifo_handle_events(cpu, ctrl); + } + + static void evtchn_fifo_resume(void) +@@ -381,21 +372,6 @@ static void evtchn_fifo_resume(void) + event_array_pages = 0; + } + +-static const struct evtchn_ops evtchn_ops_fifo = { +- .max_channels = evtchn_fifo_max_channels, +- .nr_channels = evtchn_fifo_nr_channels, +- .setup = evtchn_fifo_setup, +- .bind_to_cpu = evtchn_fifo_bind_to_cpu, +- .clear_pending = evtchn_fifo_clear_pending, +- .set_pending = evtchn_fifo_set_pending, +- .is_pending = evtchn_fifo_is_pending, +- .test_and_set_mask = evtchn_fifo_test_and_set_mask, +- .mask = evtchn_fifo_mask, +- .unmask = evtchn_fifo_unmask, +- .handle_events = evtchn_fifo_handle_events, +- .resume = evtchn_fifo_resume, +-}; +- + static int evtchn_fifo_alloc_control_block(unsigned cpu) + { + void *control_block = NULL; +@@ -418,19 +394,35 @@ static int evtchn_fifo_alloc_control_block(unsigned cpu) + return ret; + } + +-static int xen_evtchn_cpu_prepare(unsigned int cpu) ++static int 
evtchn_fifo_percpu_init(unsigned int cpu) + { + if (!per_cpu(cpu_control_block, cpu)) + return evtchn_fifo_alloc_control_block(cpu); + return 0; + } + +-static int xen_evtchn_cpu_dead(unsigned int cpu) ++static int evtchn_fifo_percpu_deinit(unsigned int cpu) + { +- __evtchn_fifo_handle_events(cpu, true); ++ __evtchn_fifo_handle_events(cpu, NULL); + return 0; + } + ++static const struct evtchn_ops evtchn_ops_fifo = { ++ .max_channels = evtchn_fifo_max_channels, ++ .nr_channels = evtchn_fifo_nr_channels, ++ .setup = evtchn_fifo_setup, ++ .bind_to_cpu = evtchn_fifo_bind_to_cpu, ++ .clear_pending = evtchn_fifo_clear_pending, ++ .set_pending = evtchn_fifo_set_pending, ++ .is_pending = evtchn_fifo_is_pending, ++ .mask = evtchn_fifo_mask, ++ .unmask = evtchn_fifo_unmask, ++ .handle_events = evtchn_fifo_handle_events, ++ .resume = evtchn_fifo_resume, ++ .percpu_init = evtchn_fifo_percpu_init, ++ .percpu_deinit = evtchn_fifo_percpu_deinit, ++}; ++ + int __init xen_evtchn_fifo_init(void) + { + int cpu = get_cpu(); +@@ -444,9 +436,6 @@ int __init xen_evtchn_fifo_init(void) + + evtchn_ops = &evtchn_ops_fifo; + +- cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE, +- "CPUHP_XEN_EVTCHN_PREPARE", +- xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead); + out: + put_cpu(); + return ret; +diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h +index 50c2050a1e32..cc37b711491c 100644 +--- a/drivers/xen/events/events_internal.h ++++ b/drivers/xen/events/events_internal.h +@@ -32,11 +32,22 @@ enum xen_irq_type { + */ + struct irq_info { + struct list_head list; +- int refcnt; +- enum xen_irq_type type; /* type */ ++ struct list_head eoi_list; ++ short refcnt; ++ short spurious_cnt; ++ short type; /* type */ ++ u8 mask_reason; /* Why is event channel masked */ ++#define EVT_MASK_REASON_EXPLICIT 0x01 ++#define EVT_MASK_REASON_TEMPORARY 0x02 ++#define EVT_MASK_REASON_EOI_PENDING 0x04 ++ u8 is_active; /* Is event just being handled? */ + unsigned irq; + unsigned int evtchn; /* event channel */ + unsigned short cpu; /* cpu bound */ ++ unsigned short eoi_cpu; /* EOI must happen on this cpu */ ++ unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */ ++ u64 eoi_time; /* Time in jiffies when to EOI. 
*/ ++ raw_spinlock_t lock; + + union { + unsigned short virq; +@@ -55,28 +66,34 @@ struct irq_info { + #define PIRQ_SHAREABLE (1 << 1) + #define PIRQ_MSI_GROUP (1 << 2) + ++struct evtchn_loop_ctrl; ++ + struct evtchn_ops { + unsigned (*max_channels)(void); + unsigned (*nr_channels)(void); + + int (*setup)(struct irq_info *info); ++ void (*remove)(evtchn_port_t port, unsigned int cpu); + void (*bind_to_cpu)(struct irq_info *info, unsigned cpu); + + void (*clear_pending)(unsigned port); + void (*set_pending)(unsigned port); + bool (*is_pending)(unsigned port); +- bool (*test_and_set_mask)(unsigned port); + void (*mask)(unsigned port); + void (*unmask)(unsigned port); + +- void (*handle_events)(unsigned cpu); ++ void (*handle_events)(unsigned cpu, struct evtchn_loop_ctrl *ctrl); + void (*resume)(void); ++ ++ int (*percpu_init)(unsigned int cpu); ++ int (*percpu_deinit)(unsigned int cpu); + }; + + extern const struct evtchn_ops *evtchn_ops; + + extern int **evtchn_to_irq; + int get_evtchn_to_irq(unsigned int evtchn); ++void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl); + + struct irq_info *info_for_irq(unsigned irq); + unsigned cpu_from_irq(unsigned irq); +@@ -98,6 +115,13 @@ static inline int xen_evtchn_port_setup(struct irq_info *info) + return 0; + } + ++static inline void xen_evtchn_port_remove(evtchn_port_t evtchn, ++ unsigned int cpu) ++{ ++ if (evtchn_ops->remove) ++ evtchn_ops->remove(evtchn, cpu); ++} ++ + static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info, + unsigned cpu) + { +@@ -119,11 +143,6 @@ static inline bool test_evtchn(unsigned port) + return evtchn_ops->is_pending(port); + } + +-static inline bool test_and_set_mask(unsigned port) +-{ +- return evtchn_ops->test_and_set_mask(port); +-} +- + static inline void mask_evtchn(unsigned port) + { + return evtchn_ops->mask(port); +@@ -134,9 +153,10 @@ static inline void unmask_evtchn(unsigned port) + return evtchn_ops->unmask(port); + } + +-static inline void xen_evtchn_handle_events(unsigned cpu) ++static inline void xen_evtchn_handle_events(unsigned cpu, ++ struct evtchn_loop_ctrl *ctrl) + { +- return evtchn_ops->handle_events(cpu); ++ return evtchn_ops->handle_events(cpu, ctrl); + } + + static inline void xen_evtchn_resume(void) +diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c +index e8c7f09d01be..bcf0b1e60f2c 100644 +--- a/drivers/xen/evtchn.c ++++ b/drivers/xen/evtchn.c +@@ -178,7 +178,6 @@ static irqreturn_t evtchn_interrupt(int irq, void *data) + "Interrupt for port %d, but apparently not enabled; per-user %p\n", + evtchn->port, u); + +- disable_irq_nosync(irq); + evtchn->enabled = false; + + spin_lock(&u->ring_prod_lock); +@@ -304,7 +303,7 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf, + evtchn = find_evtchn(u, port); + if (evtchn && !evtchn->enabled) { + evtchn->enabled = true; +- enable_irq(irq_from_evtchn(port)); ++ xen_irq_lateeoi(irq_from_evtchn(port), 0); + } + } + +@@ -404,8 +403,8 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port) + if (rc < 0) + goto err; + +- rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0, +- u->name, evtchn); ++ rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, 0, ++ u->name, evtchn); + if (rc < 0) + goto err; + +diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c +index 910b5d40c6e9..69d59102ff1b 100644 +--- a/drivers/xen/gntdev.c ++++ b/drivers/xen/gntdev.c +@@ -293,36 +293,47 @@ static int map_grant_pages(struct grant_map *map) + * to the kernel linear addresses of the 
struct pages. + * These ptes are completely different from the user ptes dealt + * with find_grant_ptes. ++ * Note that GNTMAP_device_map isn't needed here: The ++ * dev_bus_addr output field gets consumed only from ->map_ops, ++ * and by not requesting it when mapping we also avoid needing ++ * to mirror dev_bus_addr into ->unmap_ops (and holding an extra ++ * reference to the page in the hypervisor). + */ ++ unsigned int flags = (map->flags & ~GNTMAP_device_map) | ++ GNTMAP_host_map; ++ + for (i = 0; i < map->count; i++) { + unsigned long address = (unsigned long) + pfn_to_kaddr(page_to_pfn(map->pages[i])); + BUG_ON(PageHighMem(map->pages[i])); + +- gnttab_set_map_op(&map->kmap_ops[i], address, +- map->flags | GNTMAP_host_map, ++ gnttab_set_map_op(&map->kmap_ops[i], address, flags, + map->grants[i].ref, + map->grants[i].domid); + gnttab_set_unmap_op(&map->kunmap_ops[i], address, +- map->flags | GNTMAP_host_map, -1); ++ flags, -1); + } + } + + pr_debug("map %d+%d\n", map->index, map->count); + err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL, + map->pages, map->count); +- if (err) +- return err; + + for (i = 0; i < map->count; i++) { +- if (map->map_ops[i].status) { ++ if (map->map_ops[i].status == GNTST_okay) ++ map->unmap_ops[i].handle = map->map_ops[i].handle; ++ else if (!err) + err = -EINVAL; +- continue; +- } + +- map->unmap_ops[i].handle = map->map_ops[i].handle; +- if (use_ptemod) +- map->kunmap_ops[i].handle = map->kmap_ops[i].handle; ++ if (map->flags & GNTMAP_device_map) ++ map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr; ++ ++ if (use_ptemod) { ++ if (map->kmap_ops[i].status == GNTST_okay) ++ map->kunmap_ops[i].handle = map->kmap_ops[i].handle; ++ else if (!err) ++ err = -EINVAL; ++ } + } + return err; + } +diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c +index 5f6b77ea34fb..128375ff80b8 100644 +--- a/drivers/xen/preempt.c ++++ b/drivers/xen/preempt.c +@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall); + asmlinkage __visible void xen_maybe_preempt_hcall(void) + { + if (unlikely(__this_cpu_read(xen_in_preemptible_hcall) +- && need_resched())) { ++ && need_resched() && !preempt_count())) { + /* + * Clear flag as we may be rescheduled on a different + * cpu. +diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c +index ee5ce9286d61..83d798d12400 100644 +--- a/drivers/xen/xen-pciback/pci_stub.c ++++ b/drivers/xen/xen-pciback/pci_stub.c +@@ -733,10 +733,17 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev, + wmb(); + notify_remote_via_irq(pdev->evtchn_irq); + ++ /* Enable IRQ to signal "request done". */ ++ xen_pcibk_lateeoi(pdev, 0); ++ + ret = wait_event_timeout(xen_pcibk_aer_wait_queue, + !(test_bit(_XEN_PCIB_active, (unsigned long *) + &sh_info->flags)), 300*HZ); + ++ /* Enable IRQ for pcifront request if not already active. 
*/ ++ if (!test_bit(_PDEVF_op_active, &pdev->flags)) ++ xen_pcibk_lateeoi(pdev, 0); ++ + if (!ret) { + if (test_bit(_XEN_PCIB_active, + (unsigned long *)&sh_info->flags)) { +@@ -750,13 +757,6 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev, + } + clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags); + +- if (test_bit(_XEN_PCIF_active, +- (unsigned long *)&sh_info->flags)) { +- dev_dbg(&psdev->dev->dev, +- "schedule pci_conf service in " DRV_NAME "\n"); +- xen_pcibk_test_and_schedule_op(psdev->pdev); +- } +- + res = (pci_ers_result_t)aer_op->err; + return res; + } +diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h +index 7af369b6aaa2..b97cf348cdc1 100644 +--- a/drivers/xen/xen-pciback/pciback.h ++++ b/drivers/xen/xen-pciback/pciback.h +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + #include + + #define DRV_NAME "xen-pciback" +@@ -26,6 +27,8 @@ struct pci_dev_entry { + #define PDEVF_op_active (1<<(_PDEVF_op_active)) + #define _PCIB_op_pending (1) + #define PCIB_op_pending (1<<(_PCIB_op_pending)) ++#define _EOI_pending (2) ++#define EOI_pending (1<<(_EOI_pending)) + + struct xen_pcibk_device { + void *pci_dev_data; +@@ -181,12 +184,17 @@ static inline void xen_pcibk_release_devices(struct xen_pcibk_device *pdev) + irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id); + void xen_pcibk_do_op(struct work_struct *data); + ++static inline void xen_pcibk_lateeoi(struct xen_pcibk_device *pdev, ++ unsigned int eoi_flag) ++{ ++ if (test_and_clear_bit(_EOI_pending, &pdev->flags)) ++ xen_irq_lateeoi(pdev->evtchn_irq, eoi_flag); ++} ++ + int xen_pcibk_xenbus_register(void); + void xen_pcibk_xenbus_unregister(void); + + extern int verbose_request; +- +-void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev); + #endif + + /* Handles shared IRQs that can to device domain and control domain. */ +diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c +index e7fbed56c044..eb5dd80530e7 100644 +--- a/drivers/xen/xen-pciback/pciback_ops.c ++++ b/drivers/xen/xen-pciback/pciback_ops.c +@@ -296,26 +296,41 @@ int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev, + return 0; + } + #endif ++ ++static inline bool xen_pcibk_test_op_pending(struct xen_pcibk_device *pdev) ++{ ++ return test_bit(_XEN_PCIF_active, ++ (unsigned long *)&pdev->sh_info->flags) && ++ !test_and_set_bit(_PDEVF_op_active, &pdev->flags); ++} ++ + /* + * Now the same evtchn is used for both pcifront conf_read_write request + * as well as pcie aer front end ack. We use a new work_queue to schedule + * xen_pcibk conf_read_write service for avoiding confict with aer_core + * do_recovery job which also use the system default work_queue + */ +-void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev) ++static void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev) + { ++ bool eoi = true; ++ + /* Check that frontend is requesting an operation and that we are not + * already processing a request */ +- if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags) +- && !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) { ++ if (xen_pcibk_test_op_pending(pdev)) { + schedule_work(&pdev->op_work); ++ eoi = false; + } + /*_XEN_PCIB_active should have been cleared by pcifront. 
And also make + sure xen_pcibk is waiting for ack by checking _PCIB_op_pending*/ + if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags) + && test_bit(_PCIB_op_pending, &pdev->flags)) { + wake_up(&xen_pcibk_aer_wait_queue); ++ eoi = false; + } ++ ++ /* EOI if there was nothing to do. */ ++ if (eoi) ++ xen_pcibk_lateeoi(pdev, XEN_EOI_FLAG_SPURIOUS); + } + + /* Performing the configuration space reads/writes must not be done in atomic +@@ -323,10 +338,8 @@ void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev) + * use of semaphores). This function is intended to be called from a work + * queue in process context taking a struct xen_pcibk_device as a parameter */ + +-void xen_pcibk_do_op(struct work_struct *data) ++static void xen_pcibk_do_one_op(struct xen_pcibk_device *pdev) + { +- struct xen_pcibk_device *pdev = +- container_of(data, struct xen_pcibk_device, op_work); + struct pci_dev *dev; + struct xen_pcibk_dev_data *dev_data = NULL; + struct xen_pci_op *op = &pdev->op; +@@ -399,16 +412,31 @@ void xen_pcibk_do_op(struct work_struct *data) + smp_mb__before_atomic(); /* /after/ clearing PCIF_active */ + clear_bit(_PDEVF_op_active, &pdev->flags); + smp_mb__after_atomic(); /* /before/ final check for work */ ++} + +- /* Check to see if the driver domain tried to start another request in +- * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active. +- */ +- xen_pcibk_test_and_schedule_op(pdev); ++void xen_pcibk_do_op(struct work_struct *data) ++{ ++ struct xen_pcibk_device *pdev = ++ container_of(data, struct xen_pcibk_device, op_work); ++ ++ do { ++ xen_pcibk_do_one_op(pdev); ++ } while (xen_pcibk_test_op_pending(pdev)); ++ ++ xen_pcibk_lateeoi(pdev, 0); + } + + irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id) + { + struct xen_pcibk_device *pdev = dev_id; ++ bool eoi; ++ ++ /* IRQs might come in before pdev->evtchn_irq is written. */ ++ if (unlikely(pdev->evtchn_irq != irq)) ++ pdev->evtchn_irq = irq; ++ ++ eoi = test_and_set_bit(_EOI_pending, &pdev->flags); ++ WARN(eoi, "IRQ while EOI pending\n"); + + xen_pcibk_test_and_schedule_op(pdev); + +diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c +index c99f8bb1c56c..e6c7509a3d87 100644 +--- a/drivers/xen/xen-pciback/vpci.c ++++ b/drivers/xen/xen-pciback/vpci.c +@@ -68,7 +68,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, + struct pci_dev *dev, int devid, + publish_pci_dev_cb publish_cb) + { +- int err = 0, slot, func = -1; ++ int err = 0, slot, func = PCI_FUNC(dev->devfn); + struct pci_dev_entry *t, *dev_entry; + struct vpci_dev_data *vpci_dev = pdev->pci_dev_data; + +@@ -93,23 +93,26 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, + + /* + * Keep multi-function devices together on the virtual PCI bus, except +- * virtual functions. ++ * that we want to keep virtual functions at func 0 on their own. They ++ * aren't multi-function devices and hence their presence at func 0 ++ * may cause guests to not scan the other functions. 
+ */ +- if (!dev->is_virtfn) { ++ if (!dev->is_virtfn || func) { + for (slot = 0; slot < PCI_SLOT_MAX; slot++) { + if (list_empty(&vpci_dev->dev_list[slot])) + continue; + + t = list_entry(list_first(&vpci_dev->dev_list[slot]), + struct pci_dev_entry, list); ++ if (t->dev->is_virtfn && !PCI_FUNC(t->dev->devfn)) ++ continue; + + if (match_slot(dev, t->dev)) { + pr_info("vpci: %s: assign to virtual slot %d func %d\n", + pci_name(dev), slot, +- PCI_FUNC(dev->devfn)); ++ func); + list_add_tail(&dev_entry->list, + &vpci_dev->dev_list[slot]); +- func = PCI_FUNC(dev->devfn); + goto unlock; + } + } +@@ -122,7 +125,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, + pci_name(dev), slot); + list_add_tail(&dev_entry->list, + &vpci_dev->dev_list[slot]); +- func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn); + goto unlock; + } + } +diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c +index 5ce878c51d03..291290533102 100644 +--- a/drivers/xen/xen-pciback/xenbus.c ++++ b/drivers/xen/xen-pciback/xenbus.c +@@ -122,7 +122,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref, + + pdev->sh_info = vaddr; + +- err = bind_interdomain_evtchn_to_irqhandler( ++ err = bind_interdomain_evtchn_to_irqhandler_lateeoi( + pdev->xdev->otherend_id, remote_evtchn, xen_pcibk_handle_event, + 0, DRV_NAME, pdev); + if (err < 0) { +@@ -357,7 +357,8 @@ static int xen_pcibk_publish_pci_root(struct xen_pcibk_device *pdev, + return err; + } + +-static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev) ++static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev, ++ enum xenbus_state state) + { + int err = 0; + int num_devs; +@@ -371,9 +372,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev) + dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n"); + + mutex_lock(&pdev->dev_lock); +- /* Make sure we only reconfigure once */ +- if (xenbus_read_driver_state(pdev->xdev->nodename) != +- XenbusStateReconfiguring) ++ if (xenbus_read_driver_state(pdev->xdev->nodename) != state) + goto out; + + err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d", +@@ -500,6 +499,10 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev) + } + } + ++ if (state != XenbusStateReconfiguring) ++ /* Make sure we only reconfigure once. */ ++ goto out; ++ + err = xenbus_switch_state(pdev->xdev, XenbusStateReconfigured); + if (err) { + xenbus_dev_fatal(pdev->xdev, err, +@@ -525,7 +528,7 @@ static void xen_pcibk_frontend_changed(struct xenbus_device *xdev, + break; + + case XenbusStateReconfiguring: +- xen_pcibk_reconfigure(pdev); ++ xen_pcibk_reconfigure(pdev, XenbusStateReconfiguring); + break; + + case XenbusStateConnected: +@@ -664,6 +667,15 @@ static void xen_pcibk_be_watch(struct xenbus_watch *watch, + xen_pcibk_setup_backend(pdev); + break; + ++ case XenbusStateInitialised: ++ /* ++ * We typically move to Initialised when the first device was ++ * added. Hence subsequent devices getting added may need ++ * reconfiguring. 
++ */ ++ xen_pcibk_reconfigure(pdev, XenbusStateInitialised); ++ break; ++ + default: + break; + } +@@ -689,7 +701,7 @@ static int xen_pcibk_xenbus_probe(struct xenbus_device *dev, + + /* watch the backend node for backend configuration information */ + err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch, +- xen_pcibk_be_watch); ++ NULL, xen_pcibk_be_watch); + if (err) + goto out; + +diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c +index 992cb8fa272c..4bba877ef547 100644 +--- a/drivers/xen/xen-scsiback.c ++++ b/drivers/xen/xen-scsiback.c +@@ -91,7 +91,6 @@ struct vscsibk_info { + unsigned int irq; + + struct vscsiif_back_ring ring; +- int ring_error; + + spinlock_t ring_lock; + atomic_t nr_unreplied_reqs; +@@ -424,12 +423,12 @@ static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map, + return 0; + + err = gnttab_map_refs(map, NULL, pg, cnt); +- BUG_ON(err); + for (i = 0; i < cnt; i++) { + if (unlikely(map[i].status != GNTST_okay)) { + pr_err("invalid buffer -- could not remap it\n"); + map[i].handle = SCSIBACK_INVALID_HANDLE; +- err = -ENOMEM; ++ if (!err) ++ err = -ENOMEM; + } else { + get_page(pg[i]); + } +@@ -723,7 +722,8 @@ static struct vscsibk_pend *prepare_pending_reqs(struct vscsibk_info *info, + return pending_req; + } + +-static int scsiback_do_cmd_fn(struct vscsibk_info *info) ++static int scsiback_do_cmd_fn(struct vscsibk_info *info, ++ unsigned int *eoi_flags) + { + struct vscsiif_back_ring *ring = &info->ring; + struct vscsiif_request ring_req; +@@ -740,11 +740,12 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info) + rc = ring->rsp_prod_pvt; + pr_warn("Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n", + info->domid, rp, rc, rp - rc); +- info->ring_error = 1; +- return 0; ++ return -EINVAL; + } + + while ((rc != rp)) { ++ *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS; ++ + if (RING_REQUEST_CONS_OVERFLOW(ring, rc)) + break; + +@@ -803,13 +804,16 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info) + static irqreturn_t scsiback_irq_fn(int irq, void *dev_id) + { + struct vscsibk_info *info = dev_id; ++ int rc; ++ unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS; + +- if (info->ring_error) +- return IRQ_HANDLED; +- +- while (scsiback_do_cmd_fn(info)) ++ while ((rc = scsiback_do_cmd_fn(info, &eoi_flags)) > 0) + cond_resched(); + ++ /* In case of a ring error we keep the event channel masked. 
*/ ++ if (!rc) ++ xen_irq_lateeoi(irq, eoi_flags); ++ + return IRQ_HANDLED; + } + +@@ -830,7 +834,7 @@ static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref, + sring = (struct vscsiif_sring *)area; + BACK_RING_INIT(&info->ring, sring, PAGE_SIZE); + +- err = bind_interdomain_evtchn_to_irq(info->domid, evtchn); ++ err = bind_interdomain_evtchn_to_irq_lateeoi(info->domid, evtchn); + if (err < 0) + goto unmap_page; + +@@ -1253,7 +1257,6 @@ static int scsiback_probe(struct xenbus_device *dev, + + info->domid = dev->otherend_id; + spin_lock_init(&info->ring_lock); +- info->ring_error = 0; + atomic_set(&info->nr_unreplied_reqs, 0); + init_waitqueue_head(&info->waiting_to_free); + info->dev = dev; +diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c +index df27cefb2fa3..8bbd887ca422 100644 +--- a/drivers/xen/xenbus/xenbus_client.c ++++ b/drivers/xen/xenbus/xenbus_client.c +@@ -114,18 +114,22 @@ EXPORT_SYMBOL_GPL(xenbus_strstate); + */ + int xenbus_watch_path(struct xenbus_device *dev, const char *path, + struct xenbus_watch *watch, ++ bool (*will_handle)(struct xenbus_watch *, ++ const char **, unsigned int), + void (*callback)(struct xenbus_watch *, + const char **, unsigned int)) + { + int err; + + watch->node = path; ++ watch->will_handle = will_handle; + watch->callback = callback; + + err = register_xenbus_watch(watch); + + if (err) { + watch->node = NULL; ++ watch->will_handle = NULL; + watch->callback = NULL; + xenbus_dev_fatal(dev, err, "adding watch on %s", path); + } +@@ -152,6 +156,8 @@ EXPORT_SYMBOL_GPL(xenbus_watch_path); + */ + int xenbus_watch_pathfmt(struct xenbus_device *dev, + struct xenbus_watch *watch, ++ bool (*will_handle)(struct xenbus_watch *, ++ const char **, unsigned int), + void (*callback)(struct xenbus_watch *, + const char **, unsigned int), + const char *pathfmt, ...) 
+@@ -168,7 +174,7 @@ int xenbus_watch_pathfmt(struct xenbus_device *dev, + xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); + return -ENOMEM; + } +- err = xenbus_watch_path(dev, path, watch, callback); ++ err = xenbus_watch_path(dev, path, watch, will_handle, callback); + + if (err) + kfree(path); +@@ -384,8 +390,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, + int i, j; + + for (i = 0; i < nr_pages; i++) { +- err = gnttab_grant_foreign_access(dev->otherend_id, +- virt_to_gfn(vaddr), 0); ++ unsigned long gfn; ++ ++ if (is_vmalloc_addr(vaddr)) ++ gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr)); ++ else ++ gfn = virt_to_gfn(vaddr); ++ ++ err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0); + if (err < 0) { + xenbus_dev_fatal(dev, err, + "granting access to ring page"); +diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c +index c2d447687e33..ba7590d75985 100644 +--- a/drivers/xen/xenbus/xenbus_probe.c ++++ b/drivers/xen/xenbus/xenbus_probe.c +@@ -137,6 +137,7 @@ static int watch_otherend(struct xenbus_device *dev) + container_of(dev->dev.bus, struct xen_bus_type, bus); + + return xenbus_watch_pathfmt(dev, &dev->otherend_watch, ++ bus->otherend_will_handle, + bus->otherend_changed, + "%s/%s", dev->otherend, "state"); + } +diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h +index c9ec7ca1f7ab..2c394c6ba605 100644 +--- a/drivers/xen/xenbus/xenbus_probe.h ++++ b/drivers/xen/xenbus/xenbus_probe.h +@@ -42,6 +42,8 @@ struct xen_bus_type { + int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); + int (*probe)(struct xen_bus_type *bus, const char *type, + const char *dir); ++ bool (*otherend_will_handle)(struct xenbus_watch *watch, ++ const char **vec, unsigned int len); + void (*otherend_changed)(struct xenbus_watch *watch, const char **vec, + unsigned int len); + struct bus_type bus; +diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c +index 04f7f85a5edf..597c0b038454 100644 +--- a/drivers/xen/xenbus/xenbus_probe_backend.c ++++ b/drivers/xen/xenbus/xenbus_probe_backend.c +@@ -181,6 +181,12 @@ static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type, + return err; + } + ++static bool frontend_will_handle(struct xenbus_watch *watch, ++ const char **vec, unsigned int len) ++{ ++ return watch->nr_pending == 0; ++} ++ + static void frontend_changed(struct xenbus_watch *watch, + const char **vec, unsigned int len) + { +@@ -192,6 +198,7 @@ static struct xen_bus_type xenbus_backend = { + .levels = 3, /* backend/type// */ + .get_bus_id = backend_bus_id, + .probe = xenbus_probe_backend, ++ .otherend_will_handle = frontend_will_handle, + .otherend_changed = frontend_changed, + .bus = { + .name = "xen-backend", +diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c +index 22f7cd711c57..88b443637106 100644 +--- a/drivers/xen/xenbus/xenbus_xs.c ++++ b/drivers/xen/xenbus/xenbus_xs.c +@@ -699,6 +699,8 @@ int register_xenbus_watch(struct xenbus_watch *watch) + + sprintf(token, "%lX", (long)watch); + ++ watch->nr_pending = 0; ++ + down_read(&xs_state.watch_mutex); + + spin_lock(&watches_lock); +@@ -748,12 +750,15 @@ void unregister_xenbus_watch(struct xenbus_watch *watch) + + /* Cancel pending watch events. 
*/ + spin_lock(&watch_events_lock); +- list_for_each_entry_safe(msg, tmp, &watch_events, list) { +- if (msg->u.watch.handle != watch) +- continue; +- list_del(&msg->list); +- kfree(msg->u.watch.vec); +- kfree(msg); ++ if (watch->nr_pending) { ++ list_for_each_entry_safe(msg, tmp, &watch_events, list) { ++ if (msg->u.watch.handle != watch) ++ continue; ++ list_del(&msg->list); ++ kfree(msg->u.watch.vec); ++ kfree(msg); ++ } ++ watch->nr_pending = 0; + } + spin_unlock(&watch_events_lock); + +@@ -800,7 +805,6 @@ void xs_suspend_cancel(void) + + static int xenwatch_thread(void *unused) + { +- struct list_head *ent; + struct xs_stored_msg *msg; + + for (;;) { +@@ -813,13 +817,15 @@ static int xenwatch_thread(void *unused) + mutex_lock(&xenwatch_mutex); + + spin_lock(&watch_events_lock); +- ent = watch_events.next; +- if (ent != &watch_events) +- list_del(ent); ++ msg = list_first_entry_or_null(&watch_events, ++ struct xs_stored_msg, list); ++ if (msg) { ++ list_del(&msg->list); ++ msg->u.watch.handle->nr_pending--; ++ } + spin_unlock(&watch_events_lock); + +- if (ent != &watch_events) { +- msg = list_entry(ent, struct xs_stored_msg, list); ++ if (msg) { + msg->u.watch.handle->callback( + msg->u.watch.handle, + (const char **)msg->u.watch.vec, +@@ -901,9 +907,15 @@ static int process_msg(void) + spin_lock(&watches_lock); + msg->u.watch.handle = find_watch( + msg->u.watch.vec[XS_WATCH_TOKEN]); +- if (msg->u.watch.handle != NULL) { ++ if (msg->u.watch.handle != NULL && ++ (!msg->u.watch.handle->will_handle || ++ msg->u.watch.handle->will_handle( ++ msg->u.watch.handle, ++ (const char **)msg->u.watch.vec, ++ msg->u.watch.vec_size))) { + spin_lock(&watch_events_lock); + list_add_tail(&msg->list, &watch_events); ++ msg->u.watch.handle->nr_pending++; + wake_up(&watch_events_waitq); + spin_unlock(&watch_events_lock); + } else { +diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c +index a8ff43068619..719c737ace26 100644 +--- a/fs/9p/v9fs.c ++++ b/fs/9p/v9fs.c +@@ -457,10 +457,9 @@ void v9fs_session_close(struct v9fs_session_info *v9ses) + } + + #ifdef CONFIG_9P_FSCACHE +- if (v9ses->fscache) { ++ if (v9ses->fscache) + v9fs_cache_session_put_cookie(v9ses); +- kfree(v9ses->cachetag); +- } ++ kfree(v9ses->cachetag); + #endif + kfree(v9ses->uname); + kfree(v9ses->aname); +diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c +index e963b83afc71..6be16c74e3e9 100644 +--- a/fs/9p/vfs_file.c ++++ b/fs/9p/vfs_file.c +@@ -624,9 +624,9 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma) + struct writeback_control wbc = { + .nr_to_write = LONG_MAX, + .sync_mode = WB_SYNC_ALL, +- .range_start = vma->vm_pgoff * PAGE_SIZE, ++ .range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE, + /* absolute end, byte at end included */ +- .range_end = vma->vm_pgoff * PAGE_SIZE + ++ .range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE + + (vma->vm_end - vma->vm_start - 1), + }; + +diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c +index 0ec65c133b93..e57f12317ab6 100644 +--- a/fs/affs/amigaffs.c ++++ b/fs/affs/amigaffs.c +@@ -391,23 +391,23 @@ prot_to_mode(u32 prot) + umode_t mode = 0; + + if (!(prot & FIBF_NOWRITE)) +- mode |= S_IWUSR; ++ mode |= 0200; + if (!(prot & FIBF_NOREAD)) +- mode |= S_IRUSR; ++ mode |= 0400; + if (!(prot & FIBF_NOEXECUTE)) +- mode |= S_IXUSR; ++ mode |= 0100; + if (prot & FIBF_GRP_WRITE) +- mode |= S_IWGRP; ++ mode |= 0020; + if (prot & FIBF_GRP_READ) +- mode |= S_IRGRP; ++ mode |= 0040; + if (prot & FIBF_GRP_EXECUTE) +- mode |= S_IXGRP; ++ mode |= 0010; + if (prot & FIBF_OTR_WRITE) +- mode |= S_IWOTH; ++ mode |= 
0002; + if (prot & FIBF_OTR_READ) +- mode |= S_IROTH; ++ mode |= 0004; + if (prot & FIBF_OTR_EXECUTE) +- mode |= S_IXOTH; ++ mode |= 0001; + + return mode; + } +@@ -418,24 +418,51 @@ mode_to_prot(struct inode *inode) + u32 prot = AFFS_I(inode)->i_protect; + umode_t mode = inode->i_mode; + +- if (!(mode & S_IXUSR)) ++ /* ++ * First, clear all RWED bits for owner, group, other. ++ * Then, recalculate them afresh. ++ * ++ * We'll always clear the delete-inhibit bit for the owner, as that is ++ * the classic single-user mode AmigaOS protection bit and we need to ++ * stay compatible with all scenarios. ++ * ++ * Since multi-user AmigaOS is an extension, we'll only set the ++ * delete-allow bit if any of the other bits in the same user class ++ * (group/other) are used. ++ */ ++ prot &= ~(FIBF_NOEXECUTE | FIBF_NOREAD ++ | FIBF_NOWRITE | FIBF_NODELETE ++ | FIBF_GRP_EXECUTE | FIBF_GRP_READ ++ | FIBF_GRP_WRITE | FIBF_GRP_DELETE ++ | FIBF_OTR_EXECUTE | FIBF_OTR_READ ++ | FIBF_OTR_WRITE | FIBF_OTR_DELETE); ++ ++ /* Classic single-user AmigaOS flags. These are inverted. */ ++ if (!(mode & 0100)) + prot |= FIBF_NOEXECUTE; +- if (!(mode & S_IRUSR)) ++ if (!(mode & 0400)) + prot |= FIBF_NOREAD; +- if (!(mode & S_IWUSR)) ++ if (!(mode & 0200)) + prot |= FIBF_NOWRITE; +- if (mode & S_IXGRP) ++ ++ /* Multi-user extended flags. Not inverted. */ ++ if (mode & 0010) + prot |= FIBF_GRP_EXECUTE; +- if (mode & S_IRGRP) ++ if (mode & 0040) + prot |= FIBF_GRP_READ; +- if (mode & S_IWGRP) ++ if (mode & 0020) + prot |= FIBF_GRP_WRITE; +- if (mode & S_IXOTH) ++ if (mode & 0070) ++ prot |= FIBF_GRP_DELETE; ++ ++ if (mode & 0001) + prot |= FIBF_OTR_EXECUTE; +- if (mode & S_IROTH) ++ if (mode & 0004) + prot |= FIBF_OTR_READ; +- if (mode & S_IWOTH) ++ if (mode & 0002) + prot |= FIBF_OTR_WRITE; ++ if (mode & 0007) ++ prot |= FIBF_OTR_DELETE; + + AFFS_I(inode)->i_protect = prot; + } +diff --git a/fs/affs/file.c b/fs/affs/file.c +index 0deec9cc2362..0daca9d00cd8 100644 +--- a/fs/affs/file.c ++++ b/fs/affs/file.c +@@ -427,6 +427,24 @@ static int affs_write_begin(struct file *file, struct address_space *mapping, + return ret; + } + ++static int affs_write_end(struct file *file, struct address_space *mapping, ++ loff_t pos, unsigned int len, unsigned int copied, ++ struct page *page, void *fsdata) ++{ ++ struct inode *inode = mapping->host; ++ int ret; ++ ++ ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); ++ ++ /* Clear Archived bit on file writes, as AmigaOS would do */ ++ if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { ++ AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED; ++ mark_inode_dirty(inode); ++ } ++ ++ return ret; ++} ++ + static sector_t _affs_bmap(struct address_space *mapping, sector_t block) + { + return generic_block_bmap(mapping,block,affs_get_block); +@@ -436,7 +454,7 @@ const struct address_space_operations affs_aops = { + .readpage = affs_readpage, + .writepage = affs_writepage, + .write_begin = affs_write_begin, +- .write_end = generic_write_end, ++ .write_end = affs_write_end, + .direct_IO = affs_direct_IO, + .bmap = _affs_bmap + }; +@@ -793,6 +811,12 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, + if (tmp > inode->i_size) + inode->i_size = AFFS_I(inode)->mmu_private = tmp; + ++ /* Clear Archived bit on file writes, as AmigaOS would do */ ++ if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { ++ AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED; ++ mark_inode_dirty(inode); ++ } ++ + err_first_bh: + unlock_page(page); + put_page(page); +diff --git 
a/fs/binfmt_misc.c b/fs/binfmt_misc.c +index f842261ce973..2bda9245cabe 100644 +--- a/fs/binfmt_misc.c ++++ b/fs/binfmt_misc.c +@@ -695,12 +695,24 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer, + struct super_block *sb = file_inode(file)->i_sb; + struct dentry *root = sb->s_root, *dentry; + int err = 0; ++ struct file *f = NULL; + + e = create_entry(buffer, count); + + if (IS_ERR(e)) + return PTR_ERR(e); + ++ if (e->flags & MISC_FMT_OPEN_FILE) { ++ f = open_exec(e->interpreter); ++ if (IS_ERR(f)) { ++ pr_notice("register: failed to install interpreter file %s\n", ++ e->interpreter); ++ kfree(e); ++ return PTR_ERR(f); ++ } ++ e->interp_file = f; ++ } ++ + inode_lock(d_inode(root)); + dentry = lookup_one_len(e->name, root, strlen(e->name)); + err = PTR_ERR(dentry); +@@ -724,21 +736,6 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer, + goto out2; + } + +- if (e->flags & MISC_FMT_OPEN_FILE) { +- struct file *f; +- +- f = open_exec(e->interpreter); +- if (IS_ERR(f)) { +- err = PTR_ERR(f); +- pr_notice("register: failed to install interpreter file %s\n", e->interpreter); +- simple_release_fs(&bm_mnt, &entry_count); +- iput(inode); +- inode = NULL; +- goto out2; +- } +- e->interp_file = f; +- } +- + e->dentry = dget(dentry); + inode->i_private = e; + inode->i_fop = &bm_entry_operations; +@@ -755,6 +752,8 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer, + inode_unlock(d_inode(root)); + + if (err) { ++ if (f) ++ filp_close(f, NULL); + kfree(e); + return err; + } +diff --git a/fs/block_dev.c b/fs/block_dev.c +index af27a71ec344..7be7b046d68e 100644 +--- a/fs/block_dev.c ++++ b/fs/block_dev.c +@@ -1620,6 +1620,16 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) + struct gendisk *disk = bdev->bd_disk; + struct block_device *victim = NULL; + ++ /* ++ * Sync early if it looks like we're the last one. If someone else ++ * opens the block device between now and the decrement of bd_openers ++ * then we did a sync that we didn't need to, but that's not the end ++ * of the world and we want to avoid long (could be several minute) ++ * syncs while holding the mutex. ++ */ ++ if (bdev->bd_openers == 1) ++ sync_blockdev(bdev); ++ + mutex_lock_nested(&bdev->bd_mutex, for_part); + if (for_part) + bdev->bd_part_count--; +diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig +index 80e9c18ea64f..fd6b67c40d9d 100644 +--- a/fs/btrfs/Kconfig ++++ b/fs/btrfs/Kconfig +@@ -9,6 +9,8 @@ config BTRFS_FS + select RAID6_PQ + select XOR_BLOCKS + select SRCU ++ depends on !PPC_256K_PAGES # powerpc ++ depends on !PAGE_SIZE_256KB # hexagon + + help + Btrfs is a general purpose copy-on-write filesystem with extents, +diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c +index 2973d256bb44..bb008ac507fe 100644 +--- a/fs/btrfs/backref.c ++++ b/fs/btrfs/backref.c +@@ -1550,6 +1550,7 @@ static int __btrfs_find_all_roots(struct btrfs_trans_handle *trans, + if (ret < 0 && ret != -ENOENT) { + ulist_free(tmp); + ulist_free(*roots); ++ *roots = NULL; + return ret; + } + node = ulist_next(tmp, &uiter); +diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c +index d4d8b7e36b2f..2534e44cfd40 100644 +--- a/fs/btrfs/compression.c ++++ b/fs/btrfs/compression.c +@@ -290,7 +290,7 @@ static void end_compressed_bio_write(struct bio *bio) + cb->start, + cb->start + cb->len - 1, + NULL, +- bio->bi_error ? 
0 : 1); ++ !cb->errors); + cb->compressed_pages[0]->mapping = NULL; + + end_compressed_writeback(inode, cb); +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c +index b5ebb43b1824..cad06c60ad66 100644 +--- a/fs/btrfs/ctree.c ++++ b/fs/btrfs/ctree.c +@@ -279,9 +279,12 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, + ret = btrfs_inc_ref(trans, root, cow, 1); + else + ret = btrfs_inc_ref(trans, root, cow, 0); +- +- if (ret) ++ if (ret) { ++ btrfs_tree_unlock(cow); ++ free_extent_buffer(cow); ++ btrfs_abort_transaction(trans, ret); + return ret; ++ } + + btrfs_mark_buffer_dirty(cow); + *cow_ret = cow; +@@ -1122,6 +1125,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, + + ret = update_ref_for_cow(trans, root, buf, cow, &last_ref); + if (ret) { ++ btrfs_tree_unlock(cow); ++ free_extent_buffer(cow); + btrfs_abort_transaction(trans, ret); + return ret; + } +@@ -1129,6 +1134,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, + if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) { + ret = btrfs_reloc_cow_block(trans, root, buf, cow); + if (ret) { ++ btrfs_tree_unlock(cow); ++ free_extent_buffer(cow); + btrfs_abort_transaction(trans, ret); + return ret; + } +@@ -1160,6 +1167,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, + if (last_ref) { + ret = tree_mod_log_free_eb(root->fs_info, buf); + if (ret) { ++ btrfs_tree_unlock(cow); ++ free_extent_buffer(cow); + btrfs_abort_transaction(trans, ret); + return ret; + } +@@ -1360,7 +1369,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path, + btrfs_tree_read_unlock_blocking(eb); + free_extent_buffer(eb); + +- extent_buffer_get(eb_rewin); ++ btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin), ++ eb_rewin, btrfs_header_level(eb_rewin)); + btrfs_tree_read_lock(eb_rewin); + __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm); + WARN_ON(btrfs_header_nritems(eb_rewin) > +@@ -1412,7 +1422,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq) + btrfs_warn(root->fs_info, + "failed to read tree block %llu from get_old_root", logical); + } else { ++ btrfs_tree_read_lock(old); + eb = btrfs_clone_extent_buffer(old); ++ btrfs_tree_read_unlock(old); + free_extent_buffer(old); + } + } else if (old_root) { +@@ -1430,8 +1442,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq) + + if (!eb) + return NULL; +- extent_buffer_get(eb); +- btrfs_tree_read_lock(eb); + if (old_root) { + btrfs_set_header_bytenr(eb, eb->start); + btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); +@@ -1439,6 +1449,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq) + btrfs_set_header_level(eb, old_root->level); + btrfs_set_header_generation(eb, old_generation); + } ++ btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb, ++ btrfs_header_level(eb)); ++ btrfs_tree_read_lock(eb); + if (tm) + __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm); + else +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 2bc37d03d407..abfc09051048 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -3261,6 +3261,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size); + int btrfs_parse_options(struct btrfs_root *root, char *options, + unsigned long new_flags); + int btrfs_sync_fs(struct super_block *sb, int wait); ++char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, ++ u64 subvol_objectid); + + static inline __printf(2, 3) + void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) 
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c +index 4d8f8a8c9c90..29e75fba5376 100644 +--- a/fs/btrfs/delayed-inode.c ++++ b/fs/btrfs/delayed-inode.c +@@ -1076,6 +1076,14 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, + btrfs_delayed_inode_release_metadata(root, node); + btrfs_release_delayed_inode(node); + ++ /* ++ * If we fail to update the delayed inode we need to abort the ++ * transaction, because we could leave the inode with the improper ++ * counts behind. ++ */ ++ if (ret && ret != -ENOENT) ++ btrfs_abort_transaction(trans, ret); ++ + return ret; + + search: +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index ee4b78da7d4a..9e2cc7b815f1 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -1527,9 +1527,16 @@ int btrfs_init_fs_root(struct btrfs_root *root) + spin_lock_init(&root->ino_cache_lock); + init_waitqueue_head(&root->ino_cache_wait); + +- ret = get_anon_bdev(&root->anon_dev); +- if (ret) +- goto fail; ++ /* ++ * Don't assign anonymous block device to roots that are not exposed to ++ * userspace, the id pool is limited to 1M ++ */ ++ if (is_fstree(root->root_key.objectid) && ++ btrfs_root_refs(&root->root_item) > 0) { ++ ret = get_anon_bdev(&root->anon_dev); ++ if (ret) ++ goto fail; ++ } + + mutex_lock(&root->objectid_mutex); + ret = btrfs_find_highest_objectid(root, +@@ -4425,6 +4432,7 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache) + cache->io_ctl.inode = NULL; + iput(inode); + } ++ ASSERT(cache->io_ctl.pages == NULL); + btrfs_put_block_group(cache); + } + +diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c +index 2513a7f53334..92f80ed64219 100644 +--- a/fs/btrfs/export.c ++++ b/fs/btrfs/export.c +@@ -55,9 +55,9 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len, + return type; + } + +-static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, +- u64 root_objectid, u32 generation, +- int check_generation) ++struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, ++ u64 root_objectid, u32 generation, ++ int check_generation) + { + struct btrfs_fs_info *fs_info = btrfs_sb(sb); + struct btrfs_root *root; +@@ -150,7 +150,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh, + return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1); + } + +-static struct dentry *btrfs_get_parent(struct dentry *child) ++struct dentry *btrfs_get_parent(struct dentry *child) + { + struct inode *dir = d_inode(child); + struct btrfs_root *root = BTRFS_I(dir)->root; +diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h +index 074348a95841..7a305e554999 100644 +--- a/fs/btrfs/export.h ++++ b/fs/btrfs/export.h +@@ -16,4 +16,9 @@ struct btrfs_fid { + u64 parent_root_objectid; + } __attribute__ ((packed)); + ++struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, ++ u64 root_objectid, u32 generation, ++ int check_generation); ++struct dentry *btrfs_get_parent(struct dentry *child); ++ + #endif +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index a83f353e4418..b5bff1e760a3 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -9435,8 +9435,6 @@ int btrfs_drop_snapshot(struct btrfs_root *root, + */ + if (!for_reloc && root_dropped == false) + btrfs_add_dead_root(root); +- if (err && err != -EAGAIN) +- btrfs_handle_fs_error(fs_info, err, NULL); + return err; + } + +@@ -10645,7 +10643,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, + path = 
btrfs_alloc_path(); + if (!path) { + ret = -ENOMEM; +- goto out_put_group; ++ goto out; + } + + /* +@@ -10684,7 +10682,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, + ret = btrfs_orphan_add(trans, inode); + if (ret) { + btrfs_add_delayed_iput(inode); +- goto out_put_group; ++ goto out; + } + clear_nlink(inode); + /* One for the block groups ref */ +@@ -10707,13 +10705,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, + + ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); + if (ret < 0) +- goto out_put_group; ++ goto out; + if (ret > 0) + btrfs_release_path(path); + if (ret == 0) { + ret = btrfs_del_item(trans, tree_root, path); + if (ret) +- goto out_put_group; ++ goto out; + btrfs_release_path(path); + } + +@@ -10722,6 +10720,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, + &root->fs_info->block_group_cache_tree); + RB_CLEAR_NODE(&block_group->cache_node); + ++ /* Once for the block groups rbtree */ ++ btrfs_put_block_group(block_group); ++ + if (root->fs_info->first_logical_byte == block_group->key.objectid) + root->fs_info->first_logical_byte = (u64)-1; + spin_unlock(&root->fs_info->block_group_cache_lock); +@@ -10871,10 +10872,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, + + ret = remove_block_group_free_space(trans, root->fs_info, block_group); + if (ret) +- goto out_put_group; +- +- /* Once for the block groups rbtree */ +- btrfs_put_block_group(block_group); ++ goto out; + + ret = btrfs_search_slot(trans, root, &key, path, -1, 1); + if (ret > 0) +@@ -10884,10 +10882,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, + + ret = btrfs_del_item(trans, root, path); + +-out_put_group: ++out: + /* Once for the lookup reference */ + btrfs_put_block_group(block_group); +-out: + btrfs_free_path(path); + return ret; + } +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index 6e0b6bc8ae9f..e7c8a134c447 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -3874,6 +3874,10 @@ int btree_write_cache_pages(struct address_space *mapping, + if (!ret) { + free_extent_buffer(eb); + continue; ++ } else if (ret < 0) { ++ done = 1; ++ free_extent_buffer(eb); ++ break; + } + + ret = write_one_eb(eb, fs_info, wbc, &epd); +@@ -4329,6 +4333,8 @@ int try_release_extent_mapping(struct extent_map_tree *map, + + /* once for us */ + free_extent_map(em); ++ ++ cond_resched(); /* Allow large-extent preemption. */ + } + } + return try_release_extent_state(map, tree, page, mask); +@@ -4896,25 +4902,28 @@ struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, + static void check_buffer_tree_ref(struct extent_buffer *eb) + { + int refs; +- /* the ref bit is tricky. We have to make sure it is set +- * if we have the buffer dirty. Otherwise the +- * code to free a buffer can end up dropping a dirty +- * page ++ /* ++ * The TREE_REF bit is first set when the extent_buffer is added ++ * to the radix tree. It is also reset, if unset, when a new reference ++ * is created by find_extent_buffer. + * +- * Once the ref bit is set, it won't go away while the +- * buffer is dirty or in writeback, and it also won't +- * go away while we have the reference count on the +- * eb bumped. ++ * It is only cleared in two cases: freeing the last non-tree ++ * reference to the extent_buffer when its STALE bit is set or ++ * calling releasepage when the tree reference is the only reference. 
+ * +- * We can't just set the ref bit without bumping the +- * ref on the eb because free_extent_buffer might +- * see the ref bit and try to clear it. If this happens +- * free_extent_buffer might end up dropping our original +- * ref by mistake and freeing the page before we are able +- * to add one more ref. ++ * In both cases, care is taken to ensure that the extent_buffer's ++ * pages are not under io. However, releasepage can be concurrently ++ * called with creating new references, which is prone to race ++ * conditions between the calls to check_buffer_tree_ref in those ++ * codepaths and clearing TREE_REF in try_release_extent_buffer. + * +- * So bump the ref count first, then set the bit. If someone +- * beat us to it, drop the ref we added. ++ * The actual lifetime of the extent_buffer in the radix tree is ++ * adequately protected by the refcount, but the TREE_REF bit and ++ * its corresponding reference are not. To protect against this ++ * class of races, we call check_buffer_tree_ref from the codepaths ++ * which trigger io after they set eb->io_pages. Note that once io is ++ * initiated, TREE_REF can no longer be cleared, so that is the ++ * moment at which any such race is best fixed. + */ + refs = atomic_read(&eb->refs); + if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) +@@ -5378,6 +5387,11 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, + clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); + eb->read_mirror = 0; + atomic_set(&eb->io_pages, num_reads); ++ /* ++ * It is possible for releasepage to clear the TREE_REF bit before we ++ * set io_pages. See check_buffer_tree_ref for a more detailed comment. ++ */ ++ check_buffer_tree_ref(eb); + for (i = 0; i < num_pages; i++) { + page = eb->pages[i]; + +@@ -5467,9 +5481,9 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv, + } + } + +-int read_extent_buffer_to_user(const struct extent_buffer *eb, +- void __user *dstv, +- unsigned long start, unsigned long len) ++int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, ++ void __user *dstv, ++ unsigned long start, unsigned long len) + { + size_t cur; + size_t offset; +@@ -5490,7 +5504,7 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb, + + cur = min(len, (PAGE_SIZE - offset)); + kaddr = page_address(page); +- if (copy_to_user(dst, kaddr + offset, cur)) { ++ if (probe_user_write(dst, kaddr + offset, cur)) { + ret = -EFAULT; + break; + } +diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h +index 9ecdc9584df7..75c03aa1800f 100644 +--- a/fs/btrfs/extent_io.h ++++ b/fs/btrfs/extent_io.h +@@ -401,9 +401,9 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, + void read_extent_buffer(const struct extent_buffer *eb, void *dst, + unsigned long start, + unsigned long len); +-int read_extent_buffer_to_user(const struct extent_buffer *eb, +- void __user *dst, unsigned long start, +- unsigned long len); ++int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, ++ void __user *dst, unsigned long start, ++ unsigned long len); + void write_extent_buffer(struct extent_buffer *eb, const void *src, + unsigned long start, unsigned long len); + void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src, +diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c +index d0d571c47d33..374d91830a90 100644 +--- a/fs/btrfs/file-item.c ++++ b/fs/btrfs/file-item.c +@@ -608,7 +608,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, + u64 end_byte = bytenr + len; + 
u64 csum_end; + struct extent_buffer *leaf; +- int ret; ++ int ret = 0; + u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); + int blocksize_bits = root->fs_info->sb->s_blocksize_bits; + +@@ -626,6 +626,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, + path->leave_spinning = 1; + ret = btrfs_search_slot(trans, root, &key, path, -1, 1); + if (ret > 0) { ++ ret = 0; + if (path->slots[0] == 0) + break; + path->slots[0]--; +@@ -656,7 +657,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, + if (key.offset >= bytenr && csum_end <= end_byte) { + ret = btrfs_del_item(trans, root, path); + if (ret) +- goto out; ++ break; + if (key.offset == bytenr) + break; + } else if (key.offset < bytenr && csum_end > end_byte) { +@@ -700,8 +701,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, + ret = btrfs_split_item(trans, root, path, &key, offset); + if (ret && ret != -EAGAIN) { + btrfs_abort_transaction(trans, ret); +- goto out; ++ break; + } ++ ret = 0; + + key.offset = end_byte - 1; + } else { +@@ -711,8 +713,6 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, + } + btrfs_release_path(path); + } +- ret = 0; +-out: + btrfs_free_path(path); + return ret; + } +@@ -779,10 +779,12 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, + nritems = btrfs_header_nritems(path->nodes[0]); + if (!nritems || (path->slots[0] >= nritems - 1)) { + ret = btrfs_next_leaf(root, path); +- if (ret == 1) ++ if (ret < 0) { ++ goto out; ++ } else if (ret > 0) { + found_next = 1; +- if (ret != 0) + goto insert; ++ } + slot = path->slots[0]; + } + btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c +index 03661b744eaf..79454ea77153 100644 +--- a/fs/btrfs/file.c ++++ b/fs/btrfs/file.c +@@ -1089,7 +1089,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, + int del_nr = 0; + int del_slot = 0; + int recow; +- int ret; ++ int ret = 0; + u64 ino = btrfs_ino(inode); + + path = btrfs_alloc_path(); +@@ -1309,7 +1309,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, + } + out: + btrfs_free_path(path); +- return 0; ++ return ret; + } + + /* +diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c +index a84a1ceb260a..f74cb39a64e5 100644 +--- a/fs/btrfs/free-space-cache.c ++++ b/fs/btrfs/free-space-cache.c +@@ -753,8 +753,10 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, + while (num_entries) { + e = kmem_cache_zalloc(btrfs_free_space_cachep, + GFP_NOFS); +- if (!e) ++ if (!e) { ++ ret = -ENOMEM; + goto free_cache; ++ } + + ret = io_ctl_read_entry(&io_ctl, e, &type); + if (ret) { +@@ -763,6 +765,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, + } + + if (!e->bytes) { ++ ret = -1; + kmem_cache_free(btrfs_free_space_cachep, e); + goto free_cache; + } +@@ -782,6 +785,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, + num_bitmaps--; + e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS); + if (!e->bitmap) { ++ ret = -ENOMEM; + kmem_cache_free( + btrfs_free_space_cachep, e); + goto free_cache; +@@ -1165,7 +1169,6 @@ int btrfs_wait_cache_io(struct btrfs_root *root, + ret = update_cache_item(trans, root, inode, path, offset, + io_ctl->entries, io_ctl->bitmaps); + out: +- io_ctl_free(io_ctl); + if (ret) { + invalidate_inode_pages2(inode->i_mapping); + BTRFS_I(inode)->generation = 0; +@@ -1314,6 +1317,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, + * 
them out later + */ + io_ctl_drop_pages(io_ctl); ++ io_ctl_free(io_ctl); + + unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, + i_size_read(inode) - 1, &cached_state, GFP_NOFS); +@@ -2152,7 +2156,7 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, + static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info, bool update_stat) + { +- struct btrfs_free_space *left_info; ++ struct btrfs_free_space *left_info = NULL; + struct btrfs_free_space *right_info; + bool merged = false; + u64 offset = info->offset; +@@ -2167,7 +2171,7 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, + if (right_info && rb_prev(&right_info->offset_index)) + left_info = rb_entry(rb_prev(&right_info->offset_index), + struct btrfs_free_space, offset_index); +- else ++ else if (!right_info) + left_info = tree_search_offset(ctl, offset - 1, 0, 0); + + if (right_info && !right_info->bitmap) { +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 250c8403ec67..b744e7d33d87 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -484,7 +484,7 @@ static noinline void compress_file_range(struct inode *inode, + * inode has not been flagged as nocompress. This flag can + * change at any time if we discover bad compression ratios. + */ +- if (inode_need_compress(inode)) { ++ if (nr_pages > 1 && inode_need_compress(inode)) { + WARN_ON(pages); + pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); + if (!pages) { +@@ -947,7 +947,7 @@ static noinline int cow_file_range(struct inode *inode, + u64 alloc_hint = 0; + u64 num_bytes; + unsigned long ram_size; +- u64 disk_num_bytes; ++ u64 min_alloc_size; + u64 cur_alloc_size; + u64 blocksize = root->sectorsize; + struct btrfs_key ins; +@@ -963,7 +963,6 @@ static noinline int cow_file_range(struct inode *inode, + + num_bytes = ALIGN(end - start + 1, blocksize); + num_bytes = max(blocksize, num_bytes); +- disk_num_bytes = num_bytes; + + /* if this is a small write inside eof, kick off defrag */ + if (num_bytes < SZ_64K && +@@ -992,18 +991,33 @@ static noinline int cow_file_range(struct inode *inode, + } + } + +- BUG_ON(disk_num_bytes > +- btrfs_super_total_bytes(root->fs_info->super_copy)); ++ BUG_ON(num_bytes > btrfs_super_total_bytes(root->fs_info->super_copy)); + + alloc_hint = get_extent_allocation_hint(inode, start, num_bytes); + btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); + +- while (disk_num_bytes > 0) { ++ /* ++ * Relocation relies on the relocated extents to have exactly the same ++ * size as the original extents. Normally writeback for relocation data ++ * extents follows a NOCOW path because relocation preallocates the ++ * extents. However, due to an operation such as scrub turning a block ++ * group to RO mode, it may fallback to COW mode, so we must make sure ++ * an extent allocated during COW has exactly the requested size and can ++ * not be split into smaller extents, otherwise relocation breaks and ++ * fails during the stage where it updates the bytenr of file extent ++ * items. 
++ */ ++ if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) ++ min_alloc_size = num_bytes; ++ else ++ min_alloc_size = root->sectorsize; ++ ++ while (num_bytes > 0) { + unsigned long op; + +- cur_alloc_size = disk_num_bytes; ++ cur_alloc_size = num_bytes; + ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size, +- root->sectorsize, 0, alloc_hint, ++ min_alloc_size, 0, alloc_hint, + &ins, 1, 1); + if (ret < 0) + goto out_unlock; +@@ -1058,7 +1072,7 @@ static noinline int cow_file_range(struct inode *inode, + + btrfs_dec_block_group_reservations(root->fs_info, ins.objectid); + +- if (disk_num_bytes < cur_alloc_size) ++ if (num_bytes < cur_alloc_size) + break; + + /* we're not doing compressed IO, don't unlock the first +@@ -1076,8 +1090,10 @@ static noinline int cow_file_range(struct inode *inode, + delalloc_end, locked_page, + EXTENT_LOCKED | EXTENT_DELALLOC, + op); +- disk_num_bytes -= cur_alloc_size; +- num_bytes -= cur_alloc_size; ++ if (num_bytes < cur_alloc_size) ++ num_bytes = 0; ++ else ++ num_bytes -= cur_alloc_size; + alloc_hint = ins.objectid + ins.offset; + start += cur_alloc_size; + } +@@ -5424,11 +5440,13 @@ void btrfs_evict_inode(struct inode *inode) + } + + /* +- * this returns the key found in the dir entry in the location pointer. ++ * Return the key found in the dir entry in the location pointer, fill @type ++ * with BTRFS_FT_*, and return 0. ++ * + * If no dir entries were found, location->objectid is 0. + */ + static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, +- struct btrfs_key *location) ++ struct btrfs_key *location, u8 *type) + { + const char *name = dentry->d_name.name; + int namelen = dentry->d_name.len; +@@ -5450,6 +5468,8 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, + goto out_err; + + btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); ++ if (!ret) ++ *type = btrfs_dir_type(path->nodes[0], di); + out: + btrfs_free_path(path); + return ret; +@@ -5739,19 +5759,25 @@ static struct inode *new_simple_dir(struct super_block *s, + return inode; + } + ++static inline u8 btrfs_inode_type(struct inode *inode) ++{ ++ return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT]; ++} ++ + struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) + { + struct inode *inode; + struct btrfs_root *root = BTRFS_I(dir)->root; + struct btrfs_root *sub_root = root; + struct btrfs_key location; ++ u8 di_type = 0; + int index; + int ret = 0; + + if (dentry->d_name.len > BTRFS_NAME_LEN) + return ERR_PTR(-ENAMETOOLONG); + +- ret = btrfs_inode_by_name(dir, dentry, &location); ++ ret = btrfs_inode_by_name(dir, dentry, &location, &di_type); + if (ret < 0) + return ERR_PTR(ret); + +@@ -5760,6 +5786,18 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) + + if (location.type == BTRFS_INODE_ITEM_KEY) { + inode = btrfs_iget(dir->i_sb, &location, root, NULL); ++ if (IS_ERR(inode)) ++ return inode; ++ ++ /* Do extra check against inode mode with di_type */ ++ if (btrfs_inode_type(inode) != di_type) { ++ btrfs_crit(root->fs_info, ++"inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u", ++ inode->i_mode, btrfs_inode_type(inode), ++ di_type); ++ iput(inode); ++ return ERR_PTR(-EUCLEAN); ++ } + return inode; + } + +@@ -6375,11 +6413,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, + return ERR_PTR(ret); + } + +-static inline u8 btrfs_inode_type(struct inode *inode) +-{ +- return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> 
S_SHIFT]; +-} +- + /* + * utility function to add 'inode' into 'parent_inode' with + * a give name and a given sequence number. +@@ -6965,6 +6998,14 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, + extent_start = found_key.offset; + if (found_type == BTRFS_FILE_EXTENT_REG || + found_type == BTRFS_FILE_EXTENT_PREALLOC) { ++ /* Only regular file could have regular/prealloc extent */ ++ if (!S_ISREG(inode->i_mode)) { ++ err = -EUCLEAN; ++ btrfs_crit(root->fs_info, ++ "regular/prealloc extent found for non-regular inode %llu", ++ btrfs_ino(inode)); ++ goto out; ++ } + extent_end = extent_start + + btrfs_file_extent_num_bytes(leaf, item); + } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { +@@ -8494,7 +8535,6 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip, + bio->bi_private = dip; + bio->bi_end_io = btrfs_end_dio_bio; + btrfs_io_bio(bio)->logical = file_offset; +- atomic_inc(&dip->pending_bios); + + while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) { + nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info, bvec->bv_len); +@@ -8560,7 +8600,8 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip, + if (!ret) + return 0; + +- bio_put(bio); ++ if (bio != orig_bio) ++ bio_put(bio); + out_err: + dip->errors = 1; + /* +@@ -8607,7 +8648,7 @@ static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, + io_bio->bi_private = dip; + dip->orig_bio = io_bio; + dip->dio_bio = dio_bio; +- atomic_set(&dip->pending_bios, 0); ++ atomic_set(&dip->pending_bios, 1); + btrfs_bio = btrfs_io_bio(io_bio); + btrfs_bio->logical = file_offset; + +@@ -9591,8 +9632,14 @@ static int btrfs_rename_exchange(struct inode *old_dir, + bool root_log_pinned = false; + bool dest_log_pinned = false; + +- /* we only allow rename subvolume link between subvolumes */ +- if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) ++ /* ++ * For non-subvolumes allow exchange only within one subvolume, in the ++ * same inode namespace. Two subvolumes (represented as directory) can ++ * be exchanged as they're a logical link and have a fixed inode number. ++ */ ++ if (root != dest && ++ (old_ino != BTRFS_FIRST_FREE_OBJECTID || ++ new_ino != BTRFS_FIRST_FREE_OBJECTID)) + return -EXDEV; + + /* close the race window with snapshot create/destroy ioctl */ +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index eefe103c65da..cb6e1ce1e25f 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -557,8 +557,6 @@ static noinline int create_subvol(struct inode *dir, + btrfs_set_root_otransid(root_item, trans->transid); + + btrfs_tree_unlock(leaf); +- free_extent_buffer(leaf); +- leaf = NULL; + + btrfs_set_root_dirid(root_item, new_dirid); + +@@ -567,8 +565,22 @@ static noinline int create_subvol(struct inode *dir, + key.type = BTRFS_ROOT_ITEM_KEY; + ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key, + root_item); +- if (ret) ++ if (ret) { ++ /* ++ * Since we don't abort the transaction in this case, free the ++ * tree block so that we don't leak space and leave the ++ * filesystem in an inconsistent state (an extent item in the ++ * extent tree without backreferences). Also no need to have ++ * the tree block locked since it is not in any tree at this ++ * point, so no other task can find it and use it. 
++ */ ++ btrfs_free_tree_block(trans, root, leaf, 0, 1); ++ free_extent_buffer(leaf); + goto fail; ++ } ++ ++ free_extent_buffer(leaf); ++ leaf = NULL; + + key.offset = (u64)-1; + new_root = btrfs_read_fs_root_no_name(root->fs_info, &key); +@@ -2041,9 +2053,14 @@ static noinline int copy_to_sk(struct btrfs_path *path, + sh.len = item_len; + sh.transid = found_transid; + +- /* copy search result header */ +- if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) { +- ret = -EFAULT; ++ /* ++ * Copy search result header. If we fault then loop again so we ++ * can fault in the pages and -EFAULT there if there's a ++ * problem. Otherwise we'll fault and then copy the buffer in ++ * properly this next time through ++ */ ++ if (probe_user_write(ubuf + *sk_offset, &sh, sizeof(sh))) { ++ ret = 0; + goto out; + } + +@@ -2051,10 +2068,14 @@ static noinline int copy_to_sk(struct btrfs_path *path, + + if (item_len) { + char __user *up = ubuf + *sk_offset; +- /* copy the item */ +- if (read_extent_buffer_to_user(leaf, up, +- item_off, item_len)) { +- ret = -EFAULT; ++ /* ++ * Copy the item, same behavior as above, but reset the ++ * * sk_offset so we copy the full thing again. ++ */ ++ if (read_extent_buffer_to_user_nofault(leaf, up, ++ item_off, item_len)) { ++ ret = 0; ++ *sk_offset -= sizeof(sh); + goto out; + } + +@@ -2142,6 +2163,11 @@ static noinline int search_ioctl(struct inode *inode, + key.offset = sk->min_offset; + + while (1) { ++ ret = fault_in_pages_writeable(ubuf + sk_offset, ++ *buf_size - sk_offset); ++ if (ret) ++ break; ++ + ret = btrfs_search_forward(root, &key, path, sk->min_transid); + if (ret != 0) { + if (ret > 0) +@@ -3840,6 +3866,8 @@ static int btrfs_clone(struct inode *src, struct inode *inode, + ret = -EINTR; + goto out; + } ++ ++ cond_resched(); + } + ret = 0; + +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c +index 0355e6d9e21c..d6795c6fdd66 100644 +--- a/fs/btrfs/qgroup.c ++++ b/fs/btrfs/qgroup.c +@@ -461,13 +461,13 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info) + break; + } + out: ++ btrfs_free_path(path); + fs_info->qgroup_flags |= flags; + if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) + clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); + else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN && + ret >= 0) + ret = qgroup_rescan_init(fs_info, rescan_progress, 0); +- btrfs_free_path(path); + + if (ret < 0) { + ulist_free(fs_info->qgroup_ulist); +@@ -2340,8 +2340,10 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path, + } + btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem); + +- if (done && !ret) ++ if (done && !ret) { + ret = 1; ++ fs_info->qgroup_rescan_progress.objectid = (u64)-1; ++ } + return ret; + } + +diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c +index 5aa07de5750e..4b5d806e8fc6 100644 +--- a/fs/btrfs/raid56.c ++++ b/fs/btrfs/raid56.c +@@ -1179,22 +1179,19 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) + int nr_data = rbio->nr_data; + int stripe; + int pagenr; +- int p_stripe = -1; +- int q_stripe = -1; ++ bool has_qstripe; + struct bio_list bio_list; + struct bio *bio; + int ret; + + bio_list_init(&bio_list); + +- if (rbio->real_stripes - rbio->nr_data == 1) { +- p_stripe = rbio->real_stripes - 1; +- } else if (rbio->real_stripes - rbio->nr_data == 2) { +- p_stripe = rbio->real_stripes - 2; +- q_stripe = rbio->real_stripes - 1; +- } else { ++ if (rbio->real_stripes - rbio->nr_data == 1) ++ has_qstripe = false; ++ else if (rbio->real_stripes - rbio->nr_data == 2) ++ 
has_qstripe = true; ++ else + BUG(); +- } + + /* at this point we either have a full stripe, + * or we've read the full stripe from the drive. +@@ -1238,7 +1235,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) + SetPageUptodate(p); + pointers[stripe++] = kmap(p); + +- if (q_stripe != -1) { ++ if (has_qstripe) { + + /* + * raid6, add the qstripe and call the +@@ -2306,8 +2303,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, + int nr_data = rbio->nr_data; + int stripe; + int pagenr; +- int p_stripe = -1; +- int q_stripe = -1; ++ bool has_qstripe; + struct page *p_page = NULL; + struct page *q_page = NULL; + struct bio_list bio_list; +@@ -2317,14 +2313,12 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, + + bio_list_init(&bio_list); + +- if (rbio->real_stripes - rbio->nr_data == 1) { +- p_stripe = rbio->real_stripes - 1; +- } else if (rbio->real_stripes - rbio->nr_data == 2) { +- p_stripe = rbio->real_stripes - 2; +- q_stripe = rbio->real_stripes - 1; +- } else { ++ if (rbio->real_stripes - rbio->nr_data == 1) ++ has_qstripe = false; ++ else if (rbio->real_stripes - rbio->nr_data == 2) ++ has_qstripe = true; ++ else + BUG(); +- } + + if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) { + is_replace = 1; +@@ -2346,17 +2340,22 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, + goto cleanup; + SetPageUptodate(p_page); + +- if (q_stripe != -1) { ++ if (has_qstripe) { ++ /* RAID6, allocate and map temp space for the Q stripe */ + q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (!q_page) { + __free_page(p_page); + goto cleanup; + } + SetPageUptodate(q_page); ++ pointers[rbio->real_stripes - 1] = kmap(q_page); + } + + atomic_set(&rbio->error, 0); + ++ /* Map the parity stripe just once */ ++ pointers[nr_data] = kmap(p_page); ++ + for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { + struct page *p; + void *parity; +@@ -2366,17 +2365,8 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, + pointers[stripe] = kmap(p); + } + +- /* then add the parity stripe */ +- pointers[stripe++] = kmap(p_page); +- +- if (q_stripe != -1) { +- +- /* +- * raid6, add the qstripe and call the +- * library function to fill in our p/q +- */ +- pointers[stripe++] = kmap(q_page); +- ++ if (has_qstripe) { ++ /* RAID6, call the library function to fill in our P/Q */ + raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, + pointers); + } else { +@@ -2397,12 +2387,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, + + for (stripe = 0; stripe < nr_data; stripe++) + kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); +- kunmap(p_page); + } + ++ kunmap(p_page); + __free_page(p_page); +- if (q_page) ++ if (q_page) { ++ kunmap(q_page); + __free_page(q_page); ++ } + + writeback: + /* +diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c +index 0d1565d71231..d4ab8e185ef2 100644 +--- a/fs/btrfs/reada.c ++++ b/fs/btrfs/reada.c +@@ -456,6 +456,8 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root, + } + have_zone = 1; + } ++ if (!have_zone) ++ radix_tree_delete(&fs_info->reada_tree, index); + spin_unlock(&fs_info->reada_lock); + btrfs_dev_replace_unlock(&fs_info->dev_replace, 0); + +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c +index 1003b983a8d7..5caf4dbdd801 100644 +--- a/fs/btrfs/relocation.c ++++ b/fs/btrfs/relocation.c +@@ -1335,9 +1335,7 @@ static void __del_reloc_root(struct btrfs_root *root) + RB_CLEAR_NODE(&node->rb_node); + } + 
spin_unlock(&rc->reloc_root_tree.lock); +- if (!node) +- return; +- BUG_ON((struct btrfs_root *)node->data != root); ++ ASSERT(!node || (struct btrfs_root *)node->data == root); + } + + spin_lock(&root->fs_info->trans_lock); +@@ -1803,8 +1801,8 @@ int replace_path(struct btrfs_trans_handle *trans, + int ret; + int slot; + +- BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); +- BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID); ++ ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID); ++ ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); + + last_snapshot = btrfs_root_last_snapshot(&src->root_item); + again: +@@ -1836,7 +1834,7 @@ int replace_path(struct btrfs_trans_handle *trans, + parent = eb; + while (1) { + level = btrfs_header_level(parent); +- BUG_ON(level < lowest_level); ++ ASSERT(level >= lowest_level); + + ret = btrfs_bin_search(parent, &key, level, &slot); + if (ret && slot > 0) +diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c +index 16c0585cd81c..9fd7bc699aae 100644 +--- a/fs/btrfs/scrub.c ++++ b/fs/btrfs/scrub.c +@@ -919,11 +919,6 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) + have_csum = sblock_to_check->pagev[0]->have_csum; + dev = sblock_to_check->pagev[0]->dev; + +- if (sctx->is_dev_replace && !is_metadata && !have_csum) { +- sblocks_for_recheck = NULL; +- goto nodatasum_case; +- } +- + /* + * read all mirrors one after the other. This includes to + * re-read the extent or metadata block that failed (that was +@@ -1036,13 +1031,19 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) + goto out; + } + +- if (!is_metadata && !have_csum) { ++ /* ++ * NOTE: Even for nodatasum case, it's still possible that it's a ++ * compressed data extent, thus scrub_fixup_nodatasum(), which write ++ * inode page cache onto disk, could cause serious data corruption. ++ * ++ * So here we could only read from disk, and hope our recovery could ++ * reach disk before the newer write. ++ */ ++ if (0 && !is_metadata && !have_csum) { + struct scrub_fixup_nodatasum *fixup_nodatasum; + + WARN_ON(sctx->is_dev_replace); + +-nodatasum_case: +- + /* + * !is_metadata and !have_csum, this means that the data + * might not be COWed, that it might be modified +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c +index edfc7ba38b33..080b12d65b0c 100644 +--- a/fs/btrfs/send.c ++++ b/fs/btrfs/send.c +@@ -35,6 +35,7 @@ + #include "btrfs_inode.h" + #include "transaction.h" + #include "compression.h" ++#include "xattr.h" + + /* + * Maximum number of references an extent can have in order for us to attempt to +@@ -4368,6 +4369,10 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key, + struct fs_path *p; + struct posix_acl_xattr_header dummy_acl; + ++ /* Capabilities are emitted by finish_inode_if_needed */ ++ if (!strncmp(name, XATTR_NAME_CAPS, name_len)) ++ return 0; ++ + p = fs_path_alloc(); + if (!p) + return -ENOMEM; +@@ -4904,6 +4909,64 @@ static int send_extent_data(struct send_ctx *sctx, + return 0; + } + ++/* ++ * Search for a capability xattr related to sctx->cur_ino. If the capability is ++ * found, call send_set_xattr function to emit it. ++ * ++ * Return 0 if there isn't a capability, or when the capability was emitted ++ * successfully, or < 0 if an error occurred. 
++ */ ++static int send_capabilities(struct send_ctx *sctx) ++{ ++ struct fs_path *fspath = NULL; ++ struct btrfs_path *path; ++ struct btrfs_dir_item *di; ++ struct extent_buffer *leaf; ++ unsigned long data_ptr; ++ char *buf = NULL; ++ int buf_len; ++ int ret = 0; ++ ++ path = alloc_path_for_send(); ++ if (!path) ++ return -ENOMEM; ++ ++ di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino, ++ XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0); ++ if (!di) { ++ /* There is no xattr for this inode */ ++ goto out; ++ } else if (IS_ERR(di)) { ++ ret = PTR_ERR(di); ++ goto out; ++ } ++ ++ leaf = path->nodes[0]; ++ buf_len = btrfs_dir_data_len(leaf, di); ++ ++ fspath = fs_path_alloc(); ++ buf = kmalloc(buf_len, GFP_KERNEL); ++ if (!fspath || !buf) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); ++ if (ret < 0) ++ goto out; ++ ++ data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di); ++ read_extent_buffer(leaf, buf, data_ptr, buf_len); ++ ++ ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS, ++ strlen(XATTR_NAME_CAPS), buf, buf_len); ++out: ++ kfree(buf); ++ fs_path_free(fspath); ++ btrfs_free_path(path); ++ return ret; ++} ++ + static int clone_range(struct send_ctx *sctx, + struct clone_root *clone_root, + const u64 disk_byte, +@@ -5615,6 +5678,10 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) + goto out; + } + ++ ret = send_capabilities(sctx); ++ if (ret < 0) ++ goto out; ++ + /* + * If other directory inodes depended on our current directory + * inode's move/rename, now do their move/rename operations. +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c +index 9286603a6a98..3a0cb745164f 100644 +--- a/fs/btrfs/super.c ++++ b/fs/btrfs/super.c +@@ -948,8 +948,8 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, + return error; + } + +-static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, +- u64 subvol_objectid) ++char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, ++ u64 subvol_objectid) + { + struct btrfs_root *root = fs_info->tree_root; + struct btrfs_root *fs_root; +@@ -1225,6 +1225,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) + struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb); + struct btrfs_root *root = info->tree_root; + char *compress_type; ++ const char *subvol_name; + + if (btrfs_test_opt(info, DEGRADED)) + seq_puts(seq, ",degraded"); +@@ -1311,8 +1312,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) + #endif + seq_printf(seq, ",subvolid=%llu", + BTRFS_I(d_inode(dentry))->root->root_key.objectid); +- seq_puts(seq, ",subvol="); +- seq_dentry(seq, dentry, " \t\n\\"); ++ subvol_name = btrfs_get_subvol_name_from_objectid(info, ++ BTRFS_I(d_inode(dentry))->root->root_key.objectid); ++ if (!IS_ERR(subvol_name)) { ++ seq_puts(seq, ",subvol="); ++ seq_escape(seq, subvol_name, " \t\n\\"); ++ kfree(subvol_name); ++ } + return 0; + } + +@@ -1430,8 +1436,8 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid, + goto out; + } + } +- subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb), +- subvol_objectid); ++ subvol_name = btrfs_get_subvol_name_from_objectid( ++ btrfs_sb(mnt->mnt_sb), subvol_objectid); + if (IS_ERR(subvol_name)) { + root = ERR_CAST(subvol_name); + subvol_name = NULL; +diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c +index 9edc2674b8a7..ace3209f3a39 100644 +--- 
a/fs/btrfs/tests/btrfs-tests.c ++++ b/fs/btrfs/tests/btrfs-tests.c +@@ -51,7 +51,13 @@ static struct file_system_type test_type = { + + struct inode *btrfs_new_test_inode(void) + { +- return new_inode(test_mnt->mnt_sb); ++ struct inode *inode; ++ ++ inode = new_inode(test_mnt->mnt_sb); ++ if (inode) ++ inode_init_owner(inode, NULL, S_IFREG); ++ ++ return inode; + } + + static int btrfs_init_test_fs(void) +diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c +index 0bf46808ce8f..ee89de7f4d61 100644 +--- a/fs/btrfs/tests/inode-tests.c ++++ b/fs/btrfs/tests/inode-tests.c +@@ -245,6 +245,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) + return ret; + } + ++ inode->i_mode = S_IFREG; + BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; + BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; + BTRFS_I(inode)->location.offset = 0; +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c +index 31df020634cd..d0fbe49420fc 100644 +--- a/fs/btrfs/transaction.c ++++ b/fs/btrfs/transaction.c +@@ -1287,8 +1287,10 @@ int btrfs_defrag_root(struct btrfs_root *root) + + while (1) { + trans = btrfs_start_transaction(root, 0); +- if (IS_ERR(trans)) +- return PTR_ERR(trans); ++ if (IS_ERR(trans)) { ++ ret = PTR_ERR(trans); ++ break; ++ } + + ret = btrfs_defrag_leaves(trans, root); + +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index f79682937faf..11ecd798864c 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -1529,6 +1529,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, + break; + + if (ret == 1) { ++ ret = 0; + if (path->slots[0] == 0) + break; + path->slots[0]--; +@@ -1541,17 +1542,19 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, + + ret = btrfs_del_item(trans, root, path); + if (ret) +- goto out; ++ break; + + btrfs_release_path(path); + inode = read_one_inode(root, key.offset); +- if (!inode) +- return -EIO; ++ if (!inode) { ++ ret = -EIO; ++ break; ++ } + + ret = fixup_inode_link_count(trans, root, inode); + iput(inode); + if (ret) +- goto out; ++ break; + + /* + * fixup on a directory may create new entries, +@@ -1560,8 +1563,6 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, + */ + key.offset = (u64)-1; + } +- ret = 0; +-out: + btrfs_release_path(path); + return ret; + } +@@ -1600,8 +1601,6 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, + ret = btrfs_update_inode(trans, root, inode); + } else if (ret == -EEXIST) { + ret = 0; +- } else { +- BUG(); /* Logic Error */ + } + iput(inode); + +@@ -3191,11 +3190,13 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, + btrfs_free_path(path); + out_unlock: + mutex_unlock(&BTRFS_I(dir)->log_mutex); +- if (ret == -ENOSPC) { ++ if (err == -ENOSPC) { + btrfs_set_log_full_commit(root->fs_info, trans); +- ret = 0; +- } else if (ret < 0) +- btrfs_abort_transaction(trans, ret); ++ err = 0; ++ } else if (err < 0 && err != -ENOENT) { ++ /* ENOENT can be returned if the entry hasn't been fsynced yet */ ++ btrfs_abort_transaction(trans, err); ++ } + + btrfs_end_log_trans(root); + +@@ -3355,6 +3356,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, + * search and this search we'll not find the key again and can just + * bail. 
+ */ ++search: + ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); + if (ret != 0) + goto done; +@@ -3374,6 +3376,13 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, + + if (min_key.objectid != ino || min_key.type != key_type) + goto done; ++ ++ if (need_resched()) { ++ btrfs_release_path(path); ++ cond_resched(); ++ goto search; ++ } ++ + ret = overwrite_item(trans, log, dst_path, src, i, + &min_key); + if (ret) { +@@ -3755,11 +3764,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, + log->fs_info->csum_root, + ds + cs, ds + cs + cl - 1, + &ordered_sums, 0); +- if (ret) { +- btrfs_release_path(dst_path); +- kfree(ins_data); +- return ret; +- } ++ if (ret) ++ break; + } + } + } +@@ -3772,7 +3778,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, + * we have to do this after the loop above to avoid changing the + * log tree while trying to change the log tree. + */ +- ret = 0; + while (!list_empty(&ordered_sums)) { + struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, + struct btrfs_ordered_sum, +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index 8a847ef934cf..d194cef9e2e6 100644 +--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -2431,9 +2431,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) + btrfs_set_super_num_devices(root->fs_info->super_copy, + tmp + 1); + +- /* add sysfs device entry */ +- btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device); +- + /* + * we've got more storage, clear any full flags on the space + * infos +@@ -2441,6 +2438,10 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) + btrfs_clear_space_info_full(root->fs_info); + + unlock_chunks(root); ++ ++ /* add sysfs device entry */ ++ btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device); ++ + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + + if (seeding_dev) { +@@ -4181,6 +4182,7 @@ static int btrfs_uuid_scan_kthread(void *data) + goto skip; + } + update_tree: ++ btrfs_release_path(path); + if (!btrfs_is_empty_uuid(root_item.uuid)) { + ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, + root_item.uuid, +@@ -4206,6 +4208,7 @@ static int btrfs_uuid_scan_kthread(void *data) + } + + skip: ++ btrfs_release_path(path); + if (trans) { + ret = btrfs_end_transaction(trans, fs_info->uuid_root); + trans = NULL; +@@ -4213,7 +4216,6 @@ static int btrfs_uuid_scan_kthread(void *data) + break; + } + +- btrfs_release_path(path); + if (key.offset < (u64)-1) { + key.offset++; + } else if (key.type < BTRFS_ROOT_ITEM_KEY) { +@@ -6413,6 +6415,13 @@ static int btrfs_check_chunk_valid(struct btrfs_root *root, + return -EIO; + } + ++ if (!is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) && ++ (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) { ++ btrfs_err(root->fs_info, ++ "invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set", ++ type & BTRFS_BLOCK_GROUP_PROFILE_MASK); ++ return -EUCLEAN; ++ } + if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) { + btrfs_err(root->fs_info, "missing chunk type flag: 0x%llx", type); + return -EIO; +@@ -6854,6 +6863,14 @@ int btrfs_read_chunk_tree(struct btrfs_root *root) + mutex_lock(&uuid_mutex); + lock_chunks(root); + ++ /* ++ * It is possible for mount and umount to race in such a way that ++ * we execute this code path, but open_fs_devices failed to clear ++ * total_rw_bytes. We certainly want it cleared before reading the ++ * device items, so clear it here. 
++ */ ++ root->fs_info->fs_devices->total_rw_bytes = 0; ++ + /* + * Read all device items, and then all the chunk items. All + * device items are found before any chunk item (their object id +diff --git a/fs/buffer.c b/fs/buffer.c +index 991596618349..44eb74a00e9a 100644 +--- a/fs/buffer.c ++++ b/fs/buffer.c +@@ -2789,16 +2789,6 @@ int nobh_writepage(struct page *page, get_block_t *get_block, + /* Is the page fully outside i_size? (truncate in progress) */ + offset = i_size & (PAGE_SIZE-1); + if (page->index >= end_index+1 || !offset) { +- /* +- * The page may have dirty, unmapped buffers. For example, +- * they may have been added in ext3_writepage(). Make them +- * freeable here, so the page does not leak. +- */ +-#if 0 +- /* Not really sure about this - do we need this ? */ +- if (page->mapping->a_ops->invalidatepage) +- page->mapping->a_ops->invalidatepage(page, offset); +-#endif + unlock_page(page); + return 0; /* don't care */ + } +@@ -2993,12 +2983,6 @@ int block_write_full_page(struct page *page, get_block_t *get_block, + /* Is the page fully outside i_size? (truncate in progress) */ + offset = i_size & (PAGE_SIZE-1); + if (page->index >= end_index+1 || !offset) { +- /* +- * The page may have dirty, unmapped buffers. For example, +- * they may have been added in ext3_writepage(). Make them +- * freeable here, so the page does not leak. +- */ +- do_invalidatepage(page, 0, PAGE_SIZE); + unlock_page(page); + return 0; /* don't care */ + } +@@ -3239,6 +3223,15 @@ int __sync_dirty_buffer(struct buffer_head *bh, int op_flags) + WARN_ON(atomic_read(&bh->b_count) < 1); + lock_buffer(bh); + if (test_clear_buffer_dirty(bh)) { ++ /* ++ * The bh should be mapped, but it might not be if the ++ * device was hot-removed. Not much we can do but fail the I/O. 
++ */ ++ if (!buffer_mapped(bh)) { ++ unlock_buffer(bh); ++ return -EIO; ++ } ++ + get_bh(bh); + bh->b_end_io = end_buffer_write_sync; + ret = submit_bh(REQ_OP_WRITE, op_flags, bh); +diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c +index 7dba96d5fef1..496eeceae040 100644 +--- a/fs/cachefiles/rdwr.c ++++ b/fs/cachefiles/rdwr.c +@@ -125,7 +125,7 @@ static int cachefiles_read_reissue(struct cachefiles_object *object, + _debug("reissue read"); + ret = bmapping->a_ops->readpage(NULL, backpage); + if (ret < 0) +- goto unlock_discard; ++ goto discard; + } + + /* but the page may have been read before the monitor was installed, so +@@ -142,6 +142,7 @@ static int cachefiles_read_reissue(struct cachefiles_object *object, + + unlock_discard: + unlock_page(backpage); ++discard: + spin_lock_irq(&object->work_lock); + list_del(&monitor->op_link); + spin_unlock_irq(&object->work_lock); +diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c +index 546d643b09d4..72ff09d0438c 100644 +--- a/fs/ceph/addr.c ++++ b/fs/ceph/addr.c +@@ -72,10 +72,6 @@ static int ceph_set_page_dirty(struct page *page) + struct inode *inode; + struct ceph_inode_info *ci; + struct ceph_snap_context *snapc; +- int ret; +- +- if (unlikely(!mapping)) +- return !TestSetPageDirty(page); + + if (PageDirty(page)) { + dout("%p set_page_dirty %p idx %lu -- already dirty\n", +@@ -121,11 +117,7 @@ static int ceph_set_page_dirty(struct page *page) + page->private = (unsigned long)snapc; + SetPagePrivate(page); + +- ret = __set_page_dirty_nobuffers(page); +- WARN_ON(!PageLocked(page)); +- WARN_ON(!page->mapping); +- +- return ret; ++ return __set_page_dirty_nobuffers(page); + } + + /* +@@ -1380,7 +1372,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_file_info *fi = vma->vm_file->private_data; + struct page *pinned_page = NULL; +- loff_t off = vmf->pgoff << PAGE_SHIFT; ++ loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT; + int want, got, ret; + sigset_t oldset; + +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c +index e11aacb35d6b..0eb2ada032c7 100644 +--- a/fs/ceph/caps.c ++++ b/fs/ceph/caps.c +@@ -927,12 +927,19 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) + { + struct ceph_mds_session *session = cap->session; + struct ceph_inode_info *ci = cap->ci; +- struct ceph_mds_client *mdsc = +- ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; ++ struct ceph_mds_client *mdsc; + int removed = 0; + ++ /* 'ci' being NULL means the remove have already occurred */ ++ if (!ci) { ++ dout("%s: cap inode is NULL\n", __func__); ++ return; ++ } ++ + dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode); + ++ mdsc = ceph_inode_to_client(&ci->vfs_inode)->mdsc; ++ + /* remove from inode's cap rbtree, and clear auth cap */ + rb_erase(&cap->ci_node, &ci->i_caps); + if (ci->i_auth_cap == cap) +@@ -1570,6 +1577,7 @@ static int try_nonblocking_invalidate(struct inode *inode) + u32 invalidating_gen = ci->i_rdcache_gen; + + spin_unlock(&ci->i_ceph_lock); ++ ceph_fscache_invalidate(inode); + invalidate_mapping_pages(&inode->i_data, 0, -1); + spin_lock(&ci->i_ceph_lock); + +@@ -1807,12 +1815,24 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags, + if (mutex_trylock(&session->s_mutex) == 0) { + dout("inverting session/ino locks on %p\n", + session); ++ session = ceph_get_mds_session(session); + spin_unlock(&ci->i_ceph_lock); + if (took_snap_rwsem) { + up_read(&mdsc->snap_rwsem); + took_snap_rwsem = 0; + } +- mutex_lock(&session->s_mutex); ++ if 
(session) { ++ mutex_lock(&session->s_mutex); ++ ceph_put_mds_session(session); ++ } else { ++ /* ++ * Because we take the reference while ++ * holding the i_ceph_lock, it should ++ * never be NULL. Throw a warning if it ++ * ever is. ++ */ ++ WARN_ON_ONCE(true); ++ } + goto retry; + } + } +diff --git a/fs/ceph/file.c b/fs/ceph/file.c +index e7ddb23d9bb7..e818344a052c 100644 +--- a/fs/ceph/file.c ++++ b/fs/ceph/file.c +@@ -1773,6 +1773,7 @@ const struct file_operations ceph_file_fops = { + .mmap = ceph_mmap, + .fsync = ceph_fsync, + .lock = ceph_lock, ++ .setlease = simple_nosetlease, + .flock = ceph_flock, + .splice_write = iter_file_splice_write, + .unlocked_ioctl = ceph_ioctl, +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c +index 049cff197d2a..5e12ea92f7cd 100644 +--- a/fs/ceph/inode.c ++++ b/fs/ceph/inode.c +@@ -1762,6 +1762,7 @@ static void ceph_invalidate_work(struct work_struct *work) + orig_gen = ci->i_rdcache_gen; + spin_unlock(&ci->i_ceph_lock); + ++ ceph_fscache_invalidate(inode); + if (invalidate_inode_pages2(inode->i_mapping) < 0) { + pr_err("invalidate_pages %p fails\n", inode); + } +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c +index 3139fbd4c34e..4ec5a109df82 100644 +--- a/fs/ceph/mds_client.c ++++ b/fs/ceph/mds_client.c +@@ -3386,6 +3386,9 @@ static void delayed_work(struct work_struct *work) + dout("mdsc delayed_work\n"); + ceph_check_delayed_caps(mdsc); + ++ if (mdsc->stopping) ++ return; ++ + mutex_lock(&mdsc->mutex); + renew_interval = mdsc->mdsmap->m_session_timeout >> 2; + renew_caps = time_after_eq(jiffies, HZ*renew_interval + +@@ -3717,7 +3720,16 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc) + static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) + { + dout("stop\n"); +- cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ ++ /* ++ * Make sure the delayed work stopped before releasing ++ * the resources. ++ * ++ * Because the cancel_delayed_work_sync() will only ++ * guarantee that the work finishes executing. But the ++ * delayed work will re-arm itself again after that. 
++ */ ++ flush_delayed_work(&mdsc->delayed_work); ++ + if (mdsc->mdsmap) + ceph_mdsmap_destroy(mdsc->mdsmap); + kfree(mdsc->sessions); +diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c +index a3b56544c21b..ae1f2817bd6a 100644 +--- a/fs/cifs/asn1.c ++++ b/fs/cifs/asn1.c +@@ -541,8 +541,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, + return 0; + } else if ((cls != ASN1_CTX) || (con != ASN1_CON) + || (tag != ASN1_EOC)) { +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n", +- cls, con, tag, end, *end); ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n", ++ cls, con, tag, end); + return 0; + } + +@@ -552,8 +552,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, + return 0; + } else if ((cls != ASN1_UNI) || (con != ASN1_CON) + || (tag != ASN1_SEQ)) { +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n", +- cls, con, tag, end, *end); ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 1\n", ++ cls, con, tag, end); + return 0; + } + +@@ -563,8 +563,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, + return 0; + } else if ((cls != ASN1_CTX) || (con != ASN1_CON) + || (tag != ASN1_EOC)) { +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 0\n", +- cls, con, tag, end, *end); ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p exit 0\n", ++ cls, con, tag, end); + return 0; + } + +@@ -575,8 +575,8 @@ decode_negTokenInit(unsigned char *security_blob, int length, + return 0; + } else if ((cls != ASN1_UNI) || (con != ASN1_CON) + || (tag != ASN1_SEQ)) { +- cifs_dbg(FYI, "cls = %d con = %d tag = %d end = %p (%d) exit 1\n", +- cls, con, tag, end, *end); ++ cifs_dbg(FYI, "cls = %d con = %d tag = %d sequence_end = %p exit 1\n", ++ cls, con, tag, sequence_end); + return 0; + } + +diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c +index 211ac472cb9d..942874257a09 100644 +--- a/fs/cifs/cifs_unicode.c ++++ b/fs/cifs/cifs_unicode.c +@@ -493,7 +493,13 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen, + else if (map_chars == SFM_MAP_UNI_RSVD) { + bool end_of_string; + +- if (i == srclen - 1) ++ /** ++ * Remap spaces and periods found at the end of every ++ * component of the path. The special cases of '.' and ++ * '..' do not need to be dealt with explicitly because ++ * they are addressed in namei.c:link_path_walk(). 
++ **/ ++ if ((i == srclen - 1) || (source[i+1] == '\\')) + end_of_string = true; + else + end_of_string = false; +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c +index be84d49f2406..375ccd209206 100644 +--- a/fs/cifs/cifsfs.c ++++ b/fs/cifs/cifsfs.c +@@ -208,7 +208,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf) + rc = server->ops->queryfs(xid, tcon, buf); + + free_xid(xid); +- return 0; ++ return rc; + } + + static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len) +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h +index 7ae21ad420fb..a12258c32e8a 100644 +--- a/fs/cifs/cifsglob.h ++++ b/fs/cifs/cifsglob.h +@@ -242,8 +242,9 @@ struct smb_version_operations { + int (*check_message)(char *, unsigned int, struct TCP_Server_Info *); + bool (*is_oplock_break)(char *, struct TCP_Server_Info *); + int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *); +- void (*downgrade_oplock)(struct TCP_Server_Info *, +- struct cifsInodeInfo *, bool); ++ void (*downgrade_oplock)(struct TCP_Server_Info *server, ++ struct cifsInodeInfo *cinode, __u32 oplock, ++ unsigned int epoch, bool *purge_cache); + /* process transaction2 response */ + bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *, + char *, int); +@@ -1080,6 +1081,8 @@ struct cifsFileInfo { + unsigned int f_flags; + bool invalidHandle:1; /* file closed via session abend */ + bool oplock_break_cancelled:1; ++ unsigned int oplock_epoch; /* epoch from the lease break */ ++ __u32 oplock_level; /* oplock/lease level from the lease break */ + int count; + spinlock_t file_info_lock; /* protects four flag/count fields above */ + struct mutex fh_mutex; /* prevents reopen race after dead ses*/ +@@ -1191,7 +1194,7 @@ struct cifsInodeInfo { + unsigned int epoch; /* used to track lease state changes */ + #define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */ + #define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */ +-#define CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2 (2) /* Downgrade oplock to L2 */ ++#define CIFS_INODE_FLAG_UNUSED (2) /* Unused flag */ + #define CIFS_INO_DELETE_PENDING (3) /* delete pending on server */ + #define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */ + #define CIFS_INO_LOCK (5) /* lock bit for synchronization */ +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c +index 803a7663da2a..f7e957eaa064 100644 +--- a/fs/cifs/connect.c ++++ b/fs/cifs/connect.c +@@ -730,6 +730,8 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) + list_del_init(&server->tcp_ses_list); + spin_unlock(&cifs_tcp_ses_lock); + ++ cancel_delayed_work_sync(&server->echo); ++ + spin_lock(&GlobalMid_Lock); + server->tcpStatus = CifsExiting; + spin_unlock(&GlobalMid_Lock); +@@ -4213,9 +4215,12 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) + vol_info->retry = master_tcon->retry; + vol_info->nocase = master_tcon->nocase; + vol_info->local_lease = master_tcon->local_lease; ++ vol_info->resilient = master_tcon->use_resilient; ++ vol_info->persistent = master_tcon->use_persistent; + vol_info->no_linux_ext = !master_tcon->unix_ext; + vol_info->sectype = master_tcon->ses->sectype; + vol_info->sign = master_tcon->ses->sign; ++ vol_info->seal = master_tcon->seal; + + rc = cifs_set_vol_auth(vol_info, master_tcon->ses); + if (rc) { +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c +index 0262c8f7e7c7..09f49dab7739 100644 +--- a/fs/cifs/dir.c ++++ b/fs/cifs/dir.c +@@ -830,6 +830,7 @@ static int + cifs_d_revalidate(struct dentry *direntry, unsigned int flags) + 
{ + struct inode *inode; ++ int rc; + + if (flags & LOOKUP_RCU) + return -ECHILD; +@@ -839,8 +840,25 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags) + if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode))) + CIFS_I(inode)->time = 0; /* force reval */ + +- if (cifs_revalidate_dentry(direntry)) +- return 0; ++ rc = cifs_revalidate_dentry(direntry); ++ if (rc) { ++ cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc); ++ switch (rc) { ++ case -ENOENT: ++ case -ESTALE: ++ /* ++ * Those errors mean the dentry is invalid ++ * (file was deleted or recreated) ++ */ ++ return 0; ++ default: ++ /* ++ * Otherwise some unexpected error happened ++ * report it as-is to VFS layer ++ */ ++ return rc; ++ } ++ } + else { + /* + * If the inode wasn't known to be a dfs entry when +diff --git a/fs/cifs/file.c b/fs/cifs/file.c +index b2919166855f..e2ce90fc504e 100644 +--- a/fs/cifs/file.c ++++ b/fs/cifs/file.c +@@ -163,6 +163,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode, + goto posix_open_ret; + } + } else { ++ cifs_revalidate_mapping(*pinode); + cifs_fattr_to_inode(*pinode, &fattr); + } + +@@ -3531,7 +3532,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, + break; + + __SetPageLocked(page); +- if (add_to_page_cache_locked(page, mapping, page->index, gfp)) { ++ rc = add_to_page_cache_locked(page, mapping, page->index, gfp); ++ if (rc) { + __ClearPageLocked(page); + break; + } +@@ -3547,6 +3549,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, + struct list_head *page_list, unsigned num_pages) + { + int rc; ++ int err = 0; + struct list_head tmplist; + struct cifsFileInfo *open_file = file->private_data; + struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); +@@ -3587,7 +3590,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, + * the order of declining indexes. When we put the pages in + * the rdata->pages, then we want them in increasing order. 
+ */ +- while (!list_empty(page_list)) { ++ while (!list_empty(page_list) && !err) { + unsigned int i, nr_pages, bytes, rsize; + loff_t offset; + struct page *page, *tpage; +@@ -3610,9 +3613,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, + return 0; + } + +- rc = readpages_get_pages(mapping, page_list, rsize, &tmplist, ++ nr_pages = 0; ++ err = readpages_get_pages(mapping, page_list, rsize, &tmplist, + &nr_pages, &offset, &bytes); +- if (rc) { ++ if (!nr_pages) { + add_credits_and_wake_if(server, credits, 0); + break; + } +@@ -3912,12 +3916,13 @@ void cifs_oplock_break(struct work_struct *work) + struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); + struct TCP_Server_Info *server = tcon->ses->server; + int rc = 0; ++ bool purge_cache = false; + + wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, + TASK_UNINTERRUPTIBLE); + +- server->ops->downgrade_oplock(server, cinode, +- test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags)); ++ server->ops->downgrade_oplock(server, cinode, cfile->oplock_level, ++ cfile->oplock_epoch, &purge_cache); + + if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) && + cifs_has_mand_locks(cinode)) { +@@ -3932,18 +3937,21 @@ void cifs_oplock_break(struct work_struct *work) + else + break_lease(inode, O_WRONLY); + rc = filemap_fdatawrite(inode->i_mapping); +- if (!CIFS_CACHE_READ(cinode)) { ++ if (!CIFS_CACHE_READ(cinode) || purge_cache) { + rc = filemap_fdatawait(inode->i_mapping); + mapping_set_error(inode->i_mapping, rc); + cifs_zap_mapping(inode); + } + cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc); ++ if (CIFS_CACHE_WRITE(cinode)) ++ goto oplock_break_ack; + } + + rc = cifs_push_locks(cfile); + if (rc) + cifs_dbg(VFS, "Push locks rc = %d\n", rc); + ++oplock_break_ack: + /* + * releasing stale oplock after recent reconnect of smb session using + * a now incorrect file handle is not a data integrity issue but do +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c +index dfa85ad5b481..5478902a0100 100644 +--- a/fs/cifs/inode.c ++++ b/fs/cifs/inode.c +@@ -2188,6 +2188,15 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, + if (rc == 0) { + cifsInode->server_eof = attrs->ia_size; + cifs_setsize(inode, attrs->ia_size); ++ ++ /* ++ * The man page of truncate says if the size changed, ++ * then the st_ctime and st_mtime fields for the file ++ * are updated. ++ */ ++ attrs->ia_ctime = attrs->ia_mtime = current_time(inode); ++ attrs->ia_valid |= ATTR_CTIME | ATTR_MTIME; ++ + cifs_truncate_page(inode->i_mapping, inode->i_size); + } + +diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c +index 5e75df69062d..bdf151e94916 100644 +--- a/fs/cifs/misc.c ++++ b/fs/cifs/misc.c +@@ -481,21 +481,10 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) + set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, + &pCifsInode->flags); + +- /* +- * Set flag if the server downgrades the oplock +- * to L2 else clear. 
+- */ +- if (pSMB->OplockLevel) +- set_bit( +- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, +- &pCifsInode->flags); +- else +- clear_bit( +- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, +- &pCifsInode->flags); +- +- cifs_queue_oplock_break(netfile); ++ netfile->oplock_epoch = 0; ++ netfile->oplock_level = pSMB->OplockLevel; + netfile->oplock_break_cancelled = false; ++ cifs_queue_oplock_break(netfile); + + spin_unlock(&tcon->open_file_lock); + spin_unlock(&cifs_tcp_ses_lock); +diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c +index 6f5d78b172ba..9a1f01c2f020 100644 +--- a/fs/cifs/smb1ops.c ++++ b/fs/cifs/smb1ops.c +@@ -378,12 +378,10 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr) + + static void + cifs_downgrade_oplock(struct TCP_Server_Info *server, +- struct cifsInodeInfo *cinode, bool set_level2) ++ struct cifsInodeInfo *cinode, __u32 oplock, ++ unsigned int epoch, bool *purge_cache) + { +- if (set_level2) +- cifs_set_oplock_level(cinode, OPLOCK_READ); +- else +- cifs_set_oplock_level(cinode, 0); ++ cifs_set_oplock_level(cinode, oplock); + } + + static bool +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c +index 7b7b47e26dbd..075b285bbd3e 100644 +--- a/fs/cifs/smb2misc.c ++++ b/fs/cifs/smb2misc.c +@@ -491,7 +491,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, + + cifs_dbg(FYI, "found in the open list\n"); + cifs_dbg(FYI, "lease key match, lease break 0x%x\n", +- le32_to_cpu(rsp->NewLeaseState)); ++ lease_state); + + if (ack_req) + cfile->oplock_break_cancelled = false; +@@ -500,17 +500,8 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, + + set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags); + +- /* +- * Set or clear flags depending on the lease state being READ. +- * HANDLE caching flag should be added when the client starts +- * to defer closing remote file handles with HANDLE leases. +- */ +- if (lease_state & SMB2_LEASE_READ_CACHING_HE) +- set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, +- &cinode->flags); +- else +- clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, +- &cinode->flags); ++ cfile->oplock_epoch = le16_to_cpu(rsp->Epoch); ++ cfile->oplock_level = lease_state; + + cifs_queue_oplock_break(cfile); + kfree(lw); +@@ -533,7 +524,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, + + cifs_dbg(FYI, "found in the pending open list\n"); + cifs_dbg(FYI, "lease key match, lease break 0x%x\n", +- le32_to_cpu(rsp->NewLeaseState)); ++ lease_state); + + open->oplock = lease_state; + } +@@ -645,18 +636,9 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) + set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, + &cinode->flags); + +- /* +- * Set flag if the server downgrades the oplock +- * to L2 else clear. 
+- */ +- if (rsp->OplockLevel) +- set_bit( +- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, +- &cinode->flags); +- else +- clear_bit( +- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, +- &cinode->flags); ++ cfile->oplock_epoch = 0; ++ cfile->oplock_level = rsp->OplockLevel; ++ + spin_unlock(&cfile->file_info_lock); + + cifs_queue_oplock_break(cfile); +@@ -669,8 +651,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) + } + } + spin_unlock(&cifs_tcp_ses_lock); +- cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n"); +- return false; ++ cifs_dbg(FYI, "No file id matched, oplock break ignored\n"); ++ return true; + } + + void +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c +index 67d9b7a277a3..fffba1b1da2e 100644 +--- a/fs/cifs/smb2ops.c ++++ b/fs/cifs/smb2ops.c +@@ -629,6 +629,8 @@ smb2_clone_range(const unsigned int xid, + cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk)); + + /* Request server copy to target from src identified by key */ ++ kfree(retbuf); ++ retbuf = NULL; + rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, + trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, + true /* is_fsctl */, (char *)pcchunk, +@@ -1220,6 +1222,12 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, + inode = d_inode(cfile->dentry); + cifsi = CIFS_I(inode); + ++ /* ++ * We zero the range through ioctl, so we need remove the page caches ++ * first, otherwise the data may be inconsistent with the server. ++ */ ++ truncate_pagecache_range(inode, offset, offset + len - 1); ++ + /* if file not oplocked can't be sure whether asking to extend size */ + if (!CIFS_CACHE_READ(cifsi)) + if (keep_size == false) +@@ -1276,6 +1284,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, + if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) + return -EOPNOTSUPP; + ++ /* ++ * We implement the punch hole through ioctl, so we need remove the page ++ * caches first, otherwise the data may be inconsistent with the server. ++ */ ++ truncate_pagecache_range(inode, offset, offset + len - 1); ++ + cifs_dbg(FYI, "offset %lld len %lld", offset, len); + + fsctl_buf.FileOffset = cpu_to_le64(offset); +@@ -1367,22 +1381,38 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode, + + static void + smb2_downgrade_oplock(struct TCP_Server_Info *server, +- struct cifsInodeInfo *cinode, bool set_level2) ++ struct cifsInodeInfo *cinode, __u32 oplock, ++ unsigned int epoch, bool *purge_cache) + { +- if (set_level2) +- server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II, +- 0, NULL); +- else +- server->ops->set_oplock_level(cinode, 0, 0, NULL); ++ server->ops->set_oplock_level(cinode, oplock, 0, NULL); + } + + static void +-smb21_downgrade_oplock(struct TCP_Server_Info *server, +- struct cifsInodeInfo *cinode, bool set_level2) ++smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, ++ unsigned int epoch, bool *purge_cache); ++ ++static void ++smb3_downgrade_oplock(struct TCP_Server_Info *server, ++ struct cifsInodeInfo *cinode, __u32 oplock, ++ unsigned int epoch, bool *purge_cache) + { +- server->ops->set_oplock_level(cinode, +- set_level2 ? 
SMB2_LEASE_READ_CACHING_HE : +- 0, 0, NULL); ++ unsigned int old_state = cinode->oplock; ++ unsigned int old_epoch = cinode->epoch; ++ unsigned int new_state; ++ ++ if (epoch > old_epoch) { ++ smb21_set_oplock_level(cinode, oplock, 0, NULL); ++ cinode->epoch = epoch; ++ } ++ ++ new_state = cinode->oplock; ++ *purge_cache = false; ++ ++ if ((old_state & CIFS_CACHE_READ_FLG) != 0 && ++ (new_state & CIFS_CACHE_READ_FLG) == 0) ++ *purge_cache = true; ++ else if (old_state == new_state && (epoch - old_epoch > 1)) ++ *purge_cache = true; + } + + static void +@@ -1697,7 +1727,7 @@ struct smb_version_operations smb21_operations = { + .print_stats = smb2_print_stats, + .is_oplock_break = smb2_is_valid_oplock_break, + .handle_cancelled_mid = smb2_handle_cancelled_mid, +- .downgrade_oplock = smb21_downgrade_oplock, ++ .downgrade_oplock = smb2_downgrade_oplock, + .need_neg = smb2_need_neg, + .negotiate = smb2_negotiate, + .negotiate_wsize = smb2_negotiate_wsize, +@@ -1781,7 +1811,7 @@ struct smb_version_operations smb30_operations = { + .dump_share_caps = smb2_dump_share_caps, + .is_oplock_break = smb2_is_valid_oplock_break, + .handle_cancelled_mid = smb2_handle_cancelled_mid, +- .downgrade_oplock = smb21_downgrade_oplock, ++ .downgrade_oplock = smb3_downgrade_oplock, + .need_neg = smb2_need_neg, + .negotiate = smb2_negotiate, + .negotiate_wsize = smb2_negotiate_wsize, +@@ -1871,7 +1901,7 @@ struct smb_version_operations smb311_operations = { + .dump_share_caps = smb2_dump_share_caps, + .is_oplock_break = smb2_is_valid_oplock_break, + .handle_cancelled_mid = smb2_handle_cancelled_mid, +- .downgrade_oplock = smb21_downgrade_oplock, ++ .downgrade_oplock = smb3_downgrade_oplock, + .need_neg = smb2_need_neg, + .negotiate = smb2_negotiate, + .negotiate_wsize = smb2_negotiate_wsize, +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index 0a23b6002ff1..cf1a3d2f6ad8 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -776,6 +776,8 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data) + spnego_key = cifs_get_spnego_key(ses); + if (IS_ERR(spnego_key)) { + rc = PTR_ERR(spnego_key); ++ if (rc == -ENOKEY) ++ cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n"); + spnego_key = NULL; + goto out; + } +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h +index 1af7afae3ad1..1a0c48074573 100644 +--- a/fs/cifs/smb2pdu.h ++++ b/fs/cifs/smb2pdu.h +@@ -1025,7 +1025,7 @@ struct smb2_oplock_break { + struct smb2_lease_break { + struct smb2_hdr hdr; + __le16 StructureSize; /* Must be 44 */ +- __le16 Reserved; ++ __le16 Epoch; + __le32 Flags; + __u8 LeaseKey[16]; + __le32 CurrentLeaseState; +diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c +index 02ac9067a354..9fa3285425fe 100644 +--- a/fs/compat_ioctl.c ++++ b/fs/compat_ioctl.c +@@ -1340,23 +1340,6 @@ COMPATIBLE_IOCTL(DMX_GET_PES_PIDS) + COMPATIBLE_IOCTL(DMX_GET_CAPS) + COMPATIBLE_IOCTL(DMX_SET_SOURCE) + COMPATIBLE_IOCTL(DMX_GET_STC) +-COMPATIBLE_IOCTL(FE_GET_INFO) +-COMPATIBLE_IOCTL(FE_DISEQC_RESET_OVERLOAD) +-COMPATIBLE_IOCTL(FE_DISEQC_SEND_MASTER_CMD) +-COMPATIBLE_IOCTL(FE_DISEQC_RECV_SLAVE_REPLY) +-COMPATIBLE_IOCTL(FE_DISEQC_SEND_BURST) +-COMPATIBLE_IOCTL(FE_SET_TONE) +-COMPATIBLE_IOCTL(FE_SET_VOLTAGE) +-COMPATIBLE_IOCTL(FE_ENABLE_HIGH_LNB_VOLTAGE) +-COMPATIBLE_IOCTL(FE_READ_STATUS) +-COMPATIBLE_IOCTL(FE_READ_BER) +-COMPATIBLE_IOCTL(FE_READ_SIGNAL_STRENGTH) +-COMPATIBLE_IOCTL(FE_READ_SNR) +-COMPATIBLE_IOCTL(FE_READ_UNCORRECTED_BLOCKS) +-COMPATIBLE_IOCTL(FE_SET_FRONTEND) +-COMPATIBLE_IOCTL(FE_GET_FRONTEND) 
+-COMPATIBLE_IOCTL(FE_GET_EVENT) +-COMPATIBLE_IOCTL(FE_DISHNETWORK_SEND_LEGACY_CMD) + COMPATIBLE_IOCTL(VIDEO_STOP) + COMPATIBLE_IOCTL(VIDEO_PLAY) + COMPATIBLE_IOCTL(VIDEO_FREEZE) +diff --git a/fs/configfs/file.c b/fs/configfs/file.c +index 7285440bc62e..71f665eb0316 100644 +--- a/fs/configfs/file.c ++++ b/fs/configfs/file.c +@@ -392,7 +392,7 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type + + attr = to_attr(dentry); + if (!attr) +- goto out_put_item; ++ goto out_free_buffer; + + if (type & CONFIGFS_ITEM_BIN_ATTR) { + buffer->bin_attr = to_bin_attr(dentry); +@@ -405,7 +405,7 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type + /* Grab the module reference for this attribute if we have one */ + error = -ENODEV; + if (!try_module_get(buffer->owner)) +- goto out_put_item; ++ goto out_free_buffer; + + error = -EACCES; + if (!buffer->item->ci_type) +@@ -449,8 +449,6 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type + + out_put_module: + module_put(buffer->owner); +-out_put_item: +- config_item_put(buffer->item); + out_free_buffer: + up_read(&frag->frag_sem); + kfree(buffer); +@@ -498,13 +496,13 @@ static int configfs_release_bin_file(struct inode *inode, struct file *file) + buffer->bin_buffer_size); + } + up_read(&frag->frag_sem); +- /* vfree on NULL is safe */ +- vfree(buffer->bin_buffer); +- buffer->bin_buffer = NULL; +- buffer->bin_buffer_size = 0; +- buffer->needs_read_fill = 1; + } + ++ vfree(buffer->bin_buffer); ++ buffer->bin_buffer = NULL; ++ buffer->bin_buffer_size = 0; ++ buffer->needs_read_fill = 1; ++ + configfs_release(inode, file); + return 0; + } +diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c +index 019247b51718..8b208406dca8 100644 +--- a/fs/crypto/fname.c ++++ b/fs/crypto/fname.c +@@ -276,13 +276,8 @@ int fscrypt_fname_disk_to_usr(struct inode *inode, + oname->name); + return 0; + } +- if (hash) { +- digested_name.hash = hash; +- digested_name.minor_hash = minor_hash; +- } else { +- digested_name.hash = 0; +- digested_name.minor_hash = 0; +- } ++ digested_name.hash = hash; ++ digested_name.minor_hash = minor_hash; + memcpy(digested_name.digest, + FSCRYPT_FNAME_DIGEST(iname->name, iname->len), + FSCRYPT_FNAME_DIGEST_SIZE); +diff --git a/fs/direct-io.c b/fs/direct-io.c +index 205548ec4cba..beec6efce8ac 100644 +--- a/fs/direct-io.c ++++ b/fs/direct-io.c +@@ -837,6 +837,7 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page, + struct buffer_head *map_bh) + { + int ret = 0; ++ int boundary = sdio->boundary; /* dio_send_cur_page may clear it */ + + if (dio->op == REQ_OP_WRITE) { + /* +@@ -875,10 +876,10 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page, + sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits; + out: + /* +- * If sdio->boundary then we want to schedule the IO now to ++ * If boundary then we want to schedule the IO now to + * avoid metadata seeks. 
+ */ +- if (sdio->boundary) { ++ if (boundary) { + ret = dio_send_cur_page(dio, sdio, map_bh); + if (sdio->bio) + dio_bio_submit(dio, sdio); +diff --git a/fs/dlm/config.c b/fs/dlm/config.c +index df955d2209ce..10d25b7830bf 100644 +--- a/fs/dlm/config.c ++++ b/fs/dlm/config.c +@@ -80,6 +80,9 @@ struct dlm_cluster { + unsigned int cl_new_rsb_count; + unsigned int cl_recover_callbacks; + char cl_cluster_name[DLM_LOCKSPACE_LEN]; ++ ++ struct dlm_spaces *sps; ++ struct dlm_comms *cms; + }; + + static struct dlm_cluster *config_item_to_cluster(struct config_item *i) +@@ -218,6 +221,7 @@ struct dlm_space { + struct list_head members; + struct mutex members_lock; + int members_count; ++ struct dlm_nodes *nds; + }; + + struct dlm_comms { +@@ -355,6 +359,9 @@ static struct config_group *make_cluster(struct config_group *g, + if (!cl || !sps || !cms) + goto fail; + ++ cl->sps = sps; ++ cl->cms = cms; ++ + config_group_init_type_name(&cl->group, name, &cluster_type); + config_group_init_type_name(&sps->ss_group, "spaces", &spaces_type); + config_group_init_type_name(&cms->cs_group, "comms", &comms_type); +@@ -404,6 +411,9 @@ static void drop_cluster(struct config_group *g, struct config_item *i) + static void release_cluster(struct config_item *i) + { + struct dlm_cluster *cl = config_item_to_cluster(i); ++ ++ kfree(cl->sps); ++ kfree(cl->cms); + kfree(cl); + } + +@@ -426,6 +436,7 @@ static struct config_group *make_space(struct config_group *g, const char *name) + INIT_LIST_HEAD(&sp->members); + mutex_init(&sp->members_lock); + sp->members_count = 0; ++ sp->nds = nds; + return &sp->group; + + fail: +@@ -447,6 +458,7 @@ static void drop_space(struct config_group *g, struct config_item *i) + static void release_space(struct config_item *i) + { + struct dlm_space *sp = config_item_to_space(i); ++ kfree(sp->nds); + kfree(sp); + } + +diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c +index 466f7d60edc2..fabce23fdbac 100644 +--- a/fs/dlm/debug_fs.c ++++ b/fs/dlm/debug_fs.c +@@ -545,6 +545,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos) + + if (bucket >= ls->ls_rsbtbl_size) { + kfree(ri); ++ ++*pos; + return NULL; + } + tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; +diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h +index 216b61604ef9..c211156aabe2 100644 +--- a/fs/dlm/dlm_internal.h ++++ b/fs/dlm/dlm_internal.h +@@ -100,7 +100,6 @@ do { \ + __LINE__, __FILE__, #x, jiffies); \ + {do} \ + printk("\n"); \ +- BUG(); \ + panic("DLM: Record message above and reboot.\n"); \ + } \ + } +diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c +index b14bb2c46042..499f54f99891 100644 +--- a/fs/dlm/lockspace.c ++++ b/fs/dlm/lockspace.c +@@ -626,6 +626,9 @@ static int new_lockspace(const char *name, const char *cluster, + wait_event(ls->ls_recover_lock_wait, + test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags)); + ++ /* let kobject handle freeing of ls if there's an error */ ++ do_unreg = 1; ++ + ls->ls_kobj.kset = dlm_kset; + error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL, + "%s", ls->ls_name); +@@ -633,9 +636,6 @@ static int new_lockspace(const char *name, const char *cluster, + goto out_recoverd; + kobject_uevent(&ls->ls_kobj, KOBJ_ADD); + +- /* let kobject handle freeing of ls if there's an error */ +- do_unreg = 1; +- + /* This uevent triggers dlm_controld in userspace to add us to the + group of nodes that are members of this lockspace (managed by the + cluster infrastructure.) 
Once it's done that, it tells us who the +diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c +index 609998de533e..0d8aaf9c61be 100644 +--- a/fs/dlm/lowcomms.c ++++ b/fs/dlm/lowcomms.c +@@ -599,7 +599,7 @@ static void close_connection(struct connection *con, bool and_other, + } + if (con->othercon && and_other) { + /* Will only re-enter once. */ +- close_connection(con->othercon, false, true, true); ++ close_connection(con->othercon, false, tx, rx); + } + if (con->rx_page) { + __free_page(con->rx_page); +diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c +index ff6cf23be8a2..cb77e7ee2c9f 100644 +--- a/fs/ecryptfs/crypto.c ++++ b/fs/ecryptfs/crypto.c +@@ -339,10 +339,8 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat, + struct extent_crypt_result ecr; + int rc = 0; + +- if (!crypt_stat || !crypt_stat->tfm +- || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)) +- return -EINVAL; +- ++ BUG_ON(!crypt_stat || !crypt_stat->tfm ++ || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)); + if (unlikely(ecryptfs_verbosity > 0)) { + ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n", + crypt_stat->key_size); +diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c +index 151872dcc1f4..7468f2753e99 100644 +--- a/fs/ecryptfs/main.c ++++ b/fs/ecryptfs/main.c +@@ -506,6 +506,12 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags + goto out; + } + ++ if (!dev_name) { ++ rc = -EINVAL; ++ err = "Device name cannot be null"; ++ goto out; ++ } ++ + rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid); + if (rc) { + err = "Error parsing options"; +diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c +index 71fccccf317e..5decb3e06563 100644 +--- a/fs/efivarfs/inode.c ++++ b/fs/efivarfs/inode.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -104,6 +105,7 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry, + var->var.VariableName[i] = '\0'; + + inode->i_private = var; ++ kmemleak_ignore(var); + + err = efivar_entry_add(var, &efivarfs_list); + if (err) +diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c +index d7a7c53803c1..b1e6acec5304 100644 +--- a/fs/efivarfs/super.c ++++ b/fs/efivarfs/super.c +@@ -146,6 +146,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, + + name[len + EFI_VARIABLE_GUID_LEN+1] = '\0'; + ++ /* replace invalid slashes like kobject_set_name_vargs does for /sys/firmware/efi/vars. 
*/ ++ strreplace(name, '/', '!'); ++ + inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0, + is_removable); + if (!inode) +diff --git a/fs/eventpoll.c b/fs/eventpoll.c +index 4bcbab679afb..bb3f4be68fff 100644 +--- a/fs/eventpoll.c ++++ b/fs/eventpoll.c +@@ -223,8 +223,7 @@ struct eventpoll { + struct file *file; + + /* used to optimize loop detection check */ +- int visited; +- struct list_head visited_list_link; ++ u64 gen; + }; + + /* Wait structure used by the poll hooks */ +@@ -268,6 +267,8 @@ static long max_user_watches __read_mostly; + */ + static DEFINE_MUTEX(epmutex); + ++static u64 loop_check_gen = 0; ++ + /* Used to check for epoll file descriptor inclusion loops */ + static struct nested_calls poll_loop_ncalls; + +@@ -283,9 +284,6 @@ static struct kmem_cache *epi_cache __read_mostly; + /* Slab cache used to allocate "struct eppoll_entry" */ + static struct kmem_cache *pwq_cache __read_mostly; + +-/* Visited nodes during ep_loop_check(), so we can unset them when we finish */ +-static LIST_HEAD(visited_list); +- + /* + * List of files with newly added links, where we may need to limit the number + * of emanating paths. Protected by the epmutex. +@@ -1263,7 +1261,7 @@ static int reverse_path_check(void) + + static int ep_create_wakeup_source(struct epitem *epi) + { +- const char *name; ++ struct name_snapshot n; + struct wakeup_source *ws; + + if (!epi->ep->ws) { +@@ -1272,8 +1270,9 @@ static int ep_create_wakeup_source(struct epitem *epi) + return -ENOMEM; + } + +- name = epi->ffd.file->f_path.dentry->d_name.name; +- ws = wakeup_source_register(name); ++ take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry); ++ ws = wakeup_source_register(n.name); ++ release_dentry_name_snapshot(&n); + + if (!ws) + return -ENOMEM; +@@ -1333,6 +1332,22 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, + RCU_INIT_POINTER(epi->ws, NULL); + } + ++ /* Add the current item to the list of active epoll hook for this file */ ++ spin_lock(&tfile->f_lock); ++ list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links); ++ spin_unlock(&tfile->f_lock); ++ ++ /* ++ * Add the current item to the RB tree. All RB tree operations are ++ * protected by "mtx", and ep_insert() is called with "mtx" held. ++ */ ++ ep_rbtree_insert(ep, epi); ++ ++ /* now check if we've created too many backpaths */ ++ error = -EINVAL; ++ if (full_check && reverse_path_check()) ++ goto error_remove_epi; ++ + /* Initialize the poll table using the queue callback */ + epq.epi = epi; + init_poll_funcptr(&epq.pt, ep_ptable_queue_proc); +@@ -1355,22 +1370,6 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, + if (epi->nwait < 0) + goto error_unregister; + +- /* Add the current item to the list of active epoll hook for this file */ +- spin_lock(&tfile->f_lock); +- list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links); +- spin_unlock(&tfile->f_lock); +- +- /* +- * Add the current item to the RB tree. All RB tree operations are +- * protected by "mtx", and ep_insert() is called with "mtx" held. 
+- */ +- ep_rbtree_insert(ep, epi); +- +- /* now check if we've created too many backpaths */ +- error = -EINVAL; +- if (full_check && reverse_path_check()) +- goto error_remove_epi; +- + /* We have to drop the new item inside our item list to keep track of it */ + spin_lock_irqsave(&ep->lock, flags); + +@@ -1396,6 +1395,8 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, + + return 0; + ++error_unregister: ++ ep_unregister_pollwait(ep, epi); + error_remove_epi: + spin_lock(&tfile->f_lock); + list_del_rcu(&epi->fllink); +@@ -1403,9 +1404,6 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, + + rb_erase(&epi->rbn, &ep->rbr); + +-error_unregister: +- ep_unregister_pollwait(ep, epi); +- + /* + * We need to do this because an event could have been arrived on some + * allocated wait queue. Note that we don't care about the ep->ovflist +@@ -1727,13 +1725,12 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) + struct epitem *epi; + + mutex_lock_nested(&ep->mtx, call_nests + 1); +- ep->visited = 1; +- list_add(&ep->visited_list_link, &visited_list); ++ ep->gen = loop_check_gen; + for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { + epi = rb_entry(rbp, struct epitem, rbn); + if (unlikely(is_file_epoll(epi->ffd.file))) { + ep_tovisit = epi->ffd.file->private_data; +- if (ep_tovisit->visited) ++ if (ep_tovisit->gen == loop_check_gen) + continue; + error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, + ep_loop_check_proc, epi->ffd.file, +@@ -1774,18 +1771,8 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) + */ + static int ep_loop_check(struct eventpoll *ep, struct file *file) + { +- int ret; +- struct eventpoll *ep_cur, *ep_next; +- +- ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, ++ return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, + ep_loop_check_proc, file, ep, current); +- /* clear visited list */ +- list_for_each_entry_safe(ep_cur, ep_next, &visited_list, +- visited_list_link) { +- ep_cur->visited = 0; +- list_del(&ep_cur->visited_list_link); +- } +- return ret; + } + + static void clear_tfile_check_list(void) +@@ -1942,6 +1929,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, + mutex_lock_nested(&ep->mtx, 0); + if (op == EPOLL_CTL_ADD) { + if (!list_empty(&f.file->f_ep_links) || ++ ep->gen == loop_check_gen || + is_file_epoll(tf.file)) { + full_check = 1; + mutex_unlock(&ep->mtx); +@@ -2002,6 +1990,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, + error_tgt_fput: + if (full_check) { + clear_tfile_check_list(); ++ loop_check_gen++; + mutex_unlock(&epmutex); + } + +diff --git a/fs/exec.c b/fs/exec.c +index 3291665e4739..d1ccac4df9f2 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -1021,7 +1021,7 @@ static int exec_mmap(struct mm_struct *mm) + /* Notify parent that we're no longer interested in the old VM */ + tsk = current; + old_mm = current->mm; +- mm_release(tsk, old_mm); ++ exec_mm_release(tsk, old_mm); + + if (old_mm) { + sync_mm_rss(old_mm); +diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c +index 395fc074c0db..6e1907cc1741 100644 +--- a/fs/ext2/ialloc.c ++++ b/fs/ext2/ialloc.c +@@ -79,6 +79,7 @@ static void ext2_release_inode(struct super_block *sb, int group, int dir) + if (dir) + le16_add_cpu(&desc->bg_used_dirs_count, -1); + spin_unlock(sb_bgl_lock(EXT2_SB(sb), group)); ++ percpu_counter_inc(&EXT2_SB(sb)->s_freeinodes_counter); + if (dir) + percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter); + mark_buffer_dirty(bh); +@@ -530,7 +531,7 @@ struct inode 
*ext2_new_inode(struct inode *dir, umode_t mode, + goto fail; + } + +- percpu_counter_add(&sbi->s_freeinodes_counter, -1); ++ percpu_counter_dec(&sbi->s_freeinodes_counter); + if (S_ISDIR(mode)) + percpu_counter_inc(&sbi->s_dirs_counter); + +diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c +index 45c7b0f9a8e3..c50dcda7b708 100644 +--- a/fs/ext4/block_validity.c ++++ b/fs/ext4/block_validity.c +@@ -23,6 +23,7 @@ struct ext4_system_zone { + struct rb_node node; + ext4_fsblk_t start_blk; + unsigned int count; ++ u32 ino; + }; + + static struct kmem_cache *ext4_system_zone_cachep; +@@ -43,7 +44,8 @@ void ext4_exit_system_zone(void) + static inline int can_merge(struct ext4_system_zone *entry1, + struct ext4_system_zone *entry2) + { +- if ((entry1->start_blk + entry1->count) == entry2->start_blk) ++ if ((entry1->start_blk + entry1->count) == entry2->start_blk && ++ entry1->ino == entry2->ino) + return 1; + return 0; + } +@@ -55,9 +57,9 @@ static inline int can_merge(struct ext4_system_zone *entry1, + */ + static int add_system_zone(struct ext4_sb_info *sbi, + ext4_fsblk_t start_blk, +- unsigned int count) ++ unsigned int count, u32 ino) + { +- struct ext4_system_zone *new_entry = NULL, *entry; ++ struct ext4_system_zone *new_entry, *entry; + struct rb_node **n = &sbi->system_blks.rb_node, *node; + struct rb_node *parent = NULL, *new_node = NULL; + +@@ -68,30 +70,21 @@ static int add_system_zone(struct ext4_sb_info *sbi, + n = &(*n)->rb_left; + else if (start_blk >= (entry->start_blk + entry->count)) + n = &(*n)->rb_right; +- else { +- if (start_blk + count > (entry->start_blk + +- entry->count)) +- entry->count = (start_blk + count - +- entry->start_blk); +- new_node = *n; +- new_entry = rb_entry(new_node, struct ext4_system_zone, +- node); +- break; +- } ++ else /* Unexpected overlap of system zones. */ ++ return -EFSCORRUPTED; + } + +- if (!new_entry) { +- new_entry = kmem_cache_alloc(ext4_system_zone_cachep, +- GFP_KERNEL); +- if (!new_entry) +- return -ENOMEM; +- new_entry->start_blk = start_blk; +- new_entry->count = count; +- new_node = &new_entry->node; ++ new_entry = kmem_cache_alloc(ext4_system_zone_cachep, ++ GFP_KERNEL); ++ if (!new_entry) ++ return -ENOMEM; ++ new_entry->start_blk = start_blk; ++ new_entry->count = count; ++ new_entry->ino = ino; ++ new_node = &new_entry->node; + +- rb_link_node(new_node, parent, n); +- rb_insert_color(new_node, &sbi->system_blks); +- } ++ rb_link_node(new_node, parent, n); ++ rb_insert_color(new_node, &sbi->system_blks); + + /* Can we merge to the left? 
*/ + node = rb_prev(new_node); +@@ -163,16 +156,16 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino) + if (n == 0) { + i++; + } else { +- if (!ext4_data_block_valid(sbi, map.m_pblk, n)) { +- ext4_error(sb, "blocks %llu-%llu from inode %u " ++ err = add_system_zone(sbi, map.m_pblk, n, ino); ++ if (err < 0) { ++ if (err == -EFSCORRUPTED) { ++ ext4_error(sb, ++ "blocks %llu-%llu from inode %u " + "overlap system zone", map.m_pblk, + map.m_pblk + map.m_len - 1, ino); +- err = -EFSCORRUPTED; ++ } + break; + } +- err = add_system_zone(sbi, map.m_pblk, n); +- if (err < 0) +- break; + i += n; + } + } +@@ -201,16 +194,16 @@ int ext4_setup_system_zone(struct super_block *sb) + if (ext4_bg_has_super(sb, i) && + ((i < 5) || ((i % flex_size) == 0))) + add_system_zone(sbi, ext4_group_first_block_no(sb, i), +- ext4_bg_num_gdb(sb, i) + 1); ++ ext4_bg_num_gdb(sb, i) + 1, 0); + gdp = ext4_get_group_desc(sb, i, NULL); +- ret = add_system_zone(sbi, ext4_block_bitmap(sb, gdp), 1); ++ ret = add_system_zone(sbi, ext4_block_bitmap(sb, gdp), 1, 0); + if (ret) + return ret; +- ret = add_system_zone(sbi, ext4_inode_bitmap(sb, gdp), 1); ++ ret = add_system_zone(sbi, ext4_inode_bitmap(sb, gdp), 1, 0); + if (ret) + return ret; + ret = add_system_zone(sbi, ext4_inode_table(sb, gdp), +- sbi->s_itb_per_group); ++ sbi->s_itb_per_group, 0); + if (ret) + return ret; + } +@@ -243,10 +236,11 @@ void ext4_release_system_zone(struct super_block *sb) + * start_blk+count) is valid; 0 if some part of the block region + * overlaps with filesystem metadata blocks. + */ +-int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk, +- unsigned int count) ++int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk, ++ unsigned int count) + { + struct ext4_system_zone *entry; ++ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + struct rb_node *n = sbi->system_blks.rb_node; + + if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) || +@@ -262,6 +256,8 @@ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk, + else if (start_blk >= (entry->start_blk + entry->count)) + n = n->rb_right; + else { ++ if (entry->ino == inode->i_ino) ++ return 1; + sbi->s_es->s_last_error_block = cpu_to_le64(start_blk); + return 0; + } +@@ -284,8 +280,7 @@ int ext4_check_blockref(const char *function, unsigned int line, + while (bref < p+max) { + blk = le32_to_cpu(*bref++); + if (blk && +- unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb), +- blk, 1))) { ++ unlikely(!ext4_inode_block_valid(inode, blk, 1))) { + es->s_last_error_block = cpu_to_le64(blk); + ext4_error_inode(inode, function, line, blk, + "invalid block"); +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h +index af727a4b4565..2cc22df451e8 100644 +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -2387,7 +2387,8 @@ void ext4_insert_dentry(struct inode *inode, + struct ext4_filename *fname); + static inline void ext4_update_dx_flag(struct inode *inode) + { +- if (!ext4_has_feature_dir_index(inode->i_sb)) { ++ if (!ext4_has_feature_dir_index(inode->i_sb) && ++ ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) { + /* ext4_iget() should have caught this... 
*/ + WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb)); + ext4_clear_inode_flag(inode, EXT4_INODE_INDEX); +@@ -3126,9 +3127,9 @@ extern void ext4_release_system_zone(struct super_block *sb); + extern int ext4_setup_system_zone(struct super_block *sb); + extern int __init ext4_init_system_zone(void); + extern void ext4_exit_system_zone(void); +-extern int ext4_data_block_valid(struct ext4_sb_info *sbi, +- ext4_fsblk_t start_blk, +- unsigned int count); ++extern int ext4_inode_block_valid(struct inode *inode, ++ ext4_fsblk_t start_blk, ++ unsigned int count); + extern int ext4_check_blockref(const char *, unsigned int, + struct inode *, __le32 *, unsigned int); + +diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h +index a284fb28944b..63291c265aa0 100644 +--- a/fs/ext4/ext4_extents.h ++++ b/fs/ext4/ext4_extents.h +@@ -169,10 +169,13 @@ struct ext4_ext_path { + (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1) + #define EXT_LAST_INDEX(__hdr__) \ + (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1) +-#define EXT_MAX_EXTENT(__hdr__) \ +- (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1) ++#define EXT_MAX_EXTENT(__hdr__) \ ++ ((le16_to_cpu((__hdr__)->eh_max)) ? \ ++ ((EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) \ ++ : 0) + #define EXT_MAX_INDEX(__hdr__) \ +- (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1) ++ ((le16_to_cpu((__hdr__)->eh_max)) ? \ ++ ((EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) : 0) + + static inline struct ext4_extent_header *ext_inode_hdr(struct inode *inode) + { +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index 9df320362563..eab980f4adc0 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -389,7 +389,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) + */ + if (lblock + len <= lblock) + return 0; +- return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); ++ return ext4_inode_block_valid(inode, block, len); + } + + static int ext4_valid_extent_idx(struct inode *inode, +@@ -397,7 +397,7 @@ static int ext4_valid_extent_idx(struct inode *inode, + { + ext4_fsblk_t block = ext4_idx_pblock(ext_idx); + +- return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1); ++ return ext4_inode_block_valid(inode, block, 1); + } + + static int ext4_valid_extent_entries(struct inode *inode, +@@ -554,14 +554,10 @@ __read_extent_tree_block(const char *function, unsigned int line, + } + if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE)) + return bh; +- if (!ext4_has_feature_journal(inode->i_sb) || +- (inode->i_ino != +- le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) { +- err = __ext4_ext_check(function, line, inode, +- ext_block_hdr(bh), depth, pblk); +- if (err) +- goto errout; +- } ++ err = __ext4_ext_check(function, line, inode, ++ ext_block_hdr(bh), depth, pblk); ++ if (err) ++ goto errout; + set_buffer_verified(bh); + /* + * If this is a leaf block, cache all of its entries +@@ -874,6 +870,7 @@ int ext4_ext_tree_init(handle_t *handle, struct inode *inode) + eh->eh_entries = 0; + eh->eh_magic = EXT4_EXT_MAGIC; + eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0)); ++ eh->eh_generation = 0; + ext4_mark_inode_dirty(handle, inode); + return 0; + } +@@ -1130,6 +1127,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, + neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); + neh->eh_magic = EXT4_EXT_MAGIC; + neh->eh_depth = 0; ++ neh->eh_generation = 0; + + /* 
move remainder of path[depth] to the new leaf */ + if (unlikely(path[depth].p_hdr->eh_entries != +@@ -1207,6 +1205,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, + neh->eh_magic = EXT4_EXT_MAGIC; + neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); + neh->eh_depth = cpu_to_le16(depth - i); ++ neh->eh_generation = 0; + fidx = EXT_FIRST_INDEX(neh); + fidx->ei_block = border; + ext4_idx_store_pblock(fidx, oldblock); +@@ -2916,7 +2915,7 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, + * in use to avoid freeing it when removing blocks. + */ + if (sbi->s_cluster_ratio > 1) { +- pblk = ext4_ext_pblock(ex) + end - ee_block + 2; ++ pblk = ext4_ext_pblock(ex) + end - ee_block + 1; + partial_cluster = + -(long long) EXT4_B2C(sbi, pblk); + } +@@ -3278,7 +3277,10 @@ static int ext4_split_extent_at(handle_t *handle, + ext4_ext_mark_unwritten(ex2); + + err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags); +- if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { ++ if (err != -ENOSPC && err != -EDQUOT) ++ goto out; ++ ++ if (EXT4_EXT_MAY_ZEROOUT & split_flag) { + if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { + if (split_flag & EXT4_EXT_DATA_VALID1) { + err = ext4_ext_zeroout(inode, ex2); +@@ -3304,30 +3306,30 @@ static int ext4_split_extent_at(handle_t *handle, + ext4_ext_pblock(&orig_ex)); + } + +- if (err) +- goto fix_extent_len; +- /* update the extent length and mark as initialized */ +- ex->ee_len = cpu_to_le16(ee_len); +- ext4_ext_try_to_merge(handle, inode, path, ex); +- err = ext4_ext_dirty(handle, inode, path + path->p_depth); +- if (err) +- goto fix_extent_len; +- +- /* update extent status tree */ +- err = ext4_zeroout_es(inode, &zero_ex); +- +- goto out; +- } else if (err) +- goto fix_extent_len; +- +-out: +- ext4_ext_show_leaf(inode, path); +- return err; ++ if (!err) { ++ /* update the extent length and mark as initialized */ ++ ex->ee_len = cpu_to_le16(ee_len); ++ ext4_ext_try_to_merge(handle, inode, path, ex); ++ err = ext4_ext_dirty(handle, inode, path + path->p_depth); ++ if (!err) ++ /* update extent status tree */ ++ err = ext4_zeroout_es(inode, &zero_ex); ++ /* If we failed at this point, we don't know in which ++ * state the extent tree exactly is so don't try to fix ++ * length of the original extent as it may do even more ++ * damage. 
++ */ ++ goto out; ++ } ++ } + + fix_extent_len: + ex->ee_len = orig_ex.ee_len; + ext4_ext_dirty(handle, inode, path + path->p_depth); + return err; ++out: ++ ext4_ext_show_leaf(inode, path); ++ return err; + } + + /* +diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c +index 37e059202cd2..ce0f58c84b0a 100644 +--- a/fs/ext4/extents_status.c ++++ b/fs/ext4/extents_status.c +@@ -1080,11 +1080,9 @@ static unsigned long ext4_es_scan(struct shrinker *shrink, + ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt); + trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret); + +- if (!nr_to_scan) +- return ret; +- + nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL); + ++ ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt); + trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret); + return nr_shrunk; + } +diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c +index 88effb1053c7..6dc0b89c7b55 100644 +--- a/fs/ext4/fsync.c ++++ b/fs/ext4/fsync.c +@@ -43,30 +43,28 @@ + */ + static int ext4_sync_parent(struct inode *inode) + { +- struct dentry *dentry = NULL; +- struct inode *next; ++ struct dentry *dentry, *next; + int ret = 0; + + if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) + return 0; +- inode = igrab(inode); ++ dentry = d_find_any_alias(inode); ++ if (!dentry) ++ return 0; + while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) { + ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY); +- dentry = d_find_any_alias(inode); +- if (!dentry) +- break; +- next = igrab(d_inode(dentry->d_parent)); ++ ++ next = dget_parent(dentry); + dput(dentry); +- if (!next) +- break; +- iput(inode); +- inode = next; ++ dentry = next; ++ inode = dentry->d_inode; ++ + /* + * The directory inode may have gone through rmdir by now. But + * the inode itself and its blocks are still allocated (we hold +- * a reference to the inode so it didn't go through +- * ext4_evict_inode()) and so we are safe to flush metadata +- * blocks and the inode. ++ * a reference to the inode via its dentry), so it didn't go ++ * through ext4_evict_inode()) and so we are safe to flush ++ * metadata blocks and the inode. + */ + ret = sync_mapping_buffers(inode->i_mapping); + if (ret) +@@ -75,7 +73,7 @@ static int ext4_sync_parent(struct inode *inode) + if (ret) + break; + } +- iput(inode); ++ dput(dentry); + return ret; + } + +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c +index ac11b54e2554..fbf58d87c3f6 100644 +--- a/fs/ext4/ialloc.c ++++ b/fs/ext4/ialloc.c +@@ -405,7 +405,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g, + * + * We always try to spread first-level directories. + * +- * If there are blockgroups with both free inodes and free blocks counts ++ * If there are blockgroups with both free inodes and free clusters counts + * not worse than average we return one with smallest directory count. + * Otherwise we simply return a random group. + * +@@ -414,7 +414,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g, + * It's OK to put directory into a group unless + * it has too many directories already (max_dirs) or + * it has too few free inodes left (min_inodes) or +- * it has too few free blocks left (min_blocks) or ++ * it has too few free clusters left (min_clusters) or + * Parent's group is preferred, if it doesn't satisfy these + * conditions we search cyclically through the rest. 
If none + * of the groups look good we just look for a group with more +@@ -430,7 +430,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent, + ext4_group_t real_ngroups = ext4_get_groups_count(sb); + int inodes_per_group = EXT4_INODES_PER_GROUP(sb); + unsigned int freei, avefreei, grp_free; +- ext4_fsblk_t freeb, avefreec; ++ ext4_fsblk_t freec, avefreec; + unsigned int ndirs; + int max_dirs, min_inodes; + ext4_grpblk_t min_clusters; +@@ -449,9 +449,8 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent, + + freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter); + avefreei = freei / ngroups; +- freeb = EXT4_C2B(sbi, +- percpu_counter_read_positive(&sbi->s_freeclusters_counter)); +- avefreec = freeb; ++ freec = percpu_counter_read_positive(&sbi->s_freeclusters_counter); ++ avefreec = freec; + do_div(avefreec, ngroups); + ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter); + +@@ -1290,6 +1289,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group, + handle_t *handle; + ext4_fsblk_t blk; + int num, ret = 0, used_blks = 0; ++ unsigned long used_inos = 0; + + /* This should not happen, but just to be sure check this */ + if (sb->s_flags & MS_RDONLY) { +@@ -1320,22 +1320,37 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group, + * used inodes so we need to skip blocks with used inodes in + * inode table. + */ +- if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) +- used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) - +- ext4_itable_unused_count(sb, gdp)), +- sbi->s_inodes_per_block); +- +- if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) || +- ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) - +- ext4_itable_unused_count(sb, gdp)) < +- EXT4_FIRST_INO(sb)))) { +- ext4_error(sb, "Something is wrong with group %u: " +- "used itable blocks: %d; " +- "itable unused count: %u", +- group, used_blks, +- ext4_itable_unused_count(sb, gdp)); +- ret = 1; +- goto err_out; ++ if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) { ++ used_inos = EXT4_INODES_PER_GROUP(sb) - ++ ext4_itable_unused_count(sb, gdp); ++ used_blks = DIV_ROUND_UP(used_inos, sbi->s_inodes_per_block); ++ ++ /* Bogus inode unused count? */ ++ if (used_blks < 0 || used_blks > sbi->s_itb_per_group) { ++ ext4_error(sb, "Something is wrong with group %u: " ++ "used itable blocks: %d; " ++ "itable unused count: %u", ++ group, used_blks, ++ ext4_itable_unused_count(sb, gdp)); ++ ret = 1; ++ goto err_out; ++ } ++ ++ used_inos += group * EXT4_INODES_PER_GROUP(sb); ++ /* ++ * Are there some uninitialized inodes in the inode table ++ * before the first normal inode? 
++ */ ++ if ((used_blks != sbi->s_itb_per_group) && ++ (used_inos < EXT4_FIRST_INO(sb))) { ++ ext4_error(sb, "Something is wrong with group %u: " ++ "itable unused count: %u; " ++ "itables initialized count: %ld", ++ group, ext4_itable_unused_count(sb, gdp), ++ used_inos); ++ ret = 1; ++ goto err_out; ++ } + } + + blk = ext4_inode_table(sb, gdp) + used_blks; +diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c +index d2844fe9040d..ff7e1ac6ee53 100644 +--- a/fs/ext4/indirect.c ++++ b/fs/ext4/indirect.c +@@ -840,8 +840,7 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode, + else if (ext4_should_journal_data(inode)) + flags |= EXT4_FREE_BLOCKS_FORGET; + +- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free, +- count)) { ++ if (!ext4_inode_block_valid(inode, block_to_free, count)) { + EXT4_ERROR_INODE(inode, "attempt to clear invalid " + "blocks %llu len %lu", + (unsigned long long) block_to_free, count); +@@ -1003,8 +1002,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode, + if (!nr) + continue; /* A hole */ + +- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), +- nr, 1)) { ++ if (!ext4_inode_block_valid(inode, nr, 1)) { + EXT4_ERROR_INODE(inode, + "invalid indirect mapped " + "block %lu (level %d)", +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c +index 7e777624072a..a410c3b6aa40 100644 +--- a/fs/ext4/inline.c ++++ b/fs/ext4/inline.c +@@ -1903,6 +1903,7 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline) + + ext4_write_lock_xattr(inode, &no_expand); + if (!ext4_has_inline_data(inode)) { ++ ext4_write_unlock_xattr(inode, &no_expand); + *has_inline = 0; + ext4_journal_stop(handle); + return; +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 5f2431d01df6..25fadccd9fc1 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -380,8 +380,7 @@ static int __check_block_validity(struct inode *inode, const char *func, + (inode->i_ino == + le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) + return 0; +- if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, +- map->m_len)) { ++ if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) { + ext4_error_inode(inode, func, line, map->m_pblk, + "lblock %lu mapped to illegal pblock %llu " + "(length %d)", (unsigned long) map->m_lblk, +@@ -1996,13 +1995,13 @@ static int __ext4_journalled_writepage(struct page *page, + if (!ret) + ret = err; + +- if (!ext4_has_inline_data(inode)) +- ext4_walk_page_buffers(NULL, page_bufs, 0, len, +- NULL, bput_one); + ext4_set_inode_state(inode, EXT4_STATE_JDATA); + out: + unlock_page(page); + out_no_pagelock: ++ if (!inline_data && page_bufs) ++ ext4_walk_page_buffers(NULL, page_bufs, 0, len, ++ NULL, bput_one); + brelse(inode_bh); + return ret; + } +@@ -3594,6 +3593,11 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter) + struct address_space *mapping = iocb->ki_filp->f_mapping; + struct inode *inode = mapping->host; + ssize_t ret; ++ loff_t offset = iocb->ki_pos; ++ loff_t size = i_size_read(inode); ++ ++ if (offset >= size) ++ return 0; + + /* + * Shared inode_lock is enough for us - it protects against concurrent +@@ -4765,7 +4769,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, + + ret = 0; + if (ei->i_file_acl && +- !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { ++ !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) { + ext4_error_inode(inode, function, line, 0, + "iget: bad extended attribute block %llu", + ei->i_file_acl); +@@ -4955,7 +4959,7 @@ static int 
ext4_do_update_inode(handle_t *handle, + struct ext4_inode_info *ei = EXT4_I(inode); + struct buffer_head *bh = iloc->bh; + struct super_block *sb = inode->i_sb; +- int err = 0, rc, block; ++ int err = 0, block; + int need_datasync = 0, set_large_file = 0; + uid_t i_uid; + gid_t i_gid; +@@ -5065,9 +5069,9 @@ static int ext4_do_update_inode(handle_t *handle, + bh->b_data); + + BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); +- rc = ext4_handle_dirty_metadata(handle, NULL, bh); +- if (!err) +- err = rc; ++ err = ext4_handle_dirty_metadata(handle, NULL, bh); ++ if (err) ++ goto out_brelse; + ext4_clear_inode_state(inode, EXT4_STATE_NEW); + if (set_large_file) { + BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access"); +diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c +index 6265013bc6a6..f06458ed547b 100644 +--- a/fs/ext4/ioctl.c ++++ b/fs/ext4/ioctl.c +@@ -807,7 +807,10 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + err = ext4_journal_get_write_access(handle, sbi->s_sbh); + if (err) + goto pwsalt_err_journal; ++ lock_buffer(sbi->s_sbh); + generate_random_uuid(sbi->s_es->s_encrypt_pw_salt); ++ ext4_superblock_csum_set(sb); ++ unlock_buffer(sbi->s_sbh); + err = ext4_handle_dirty_metadata(handle, NULL, + sbi->s_sbh); + pwsalt_err_journal: +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index d78793341acf..6fb1daab9215 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -2964,7 +2964,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, + block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); + + len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); +- if (!ext4_data_block_valid(sbi, block, len)) { ++ if (!ext4_inode_block_valid(ac->ac_inode, block, len)) { + ext4_error(sb, "Allocating blocks %llu-%llu which overlap " + "fs metadata", block, block+len); + /* File system mounted not to panic on error +@@ -4651,6 +4651,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, + ext4_group_first_block_no(sb, group) + + EXT4_C2B(sbi, cluster), + "Block already on to-be-freed list"); ++ kmem_cache_free(ext4_free_data_cachep, new_entry); + return 0; + } + } +@@ -4725,7 +4726,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, + + sbi = EXT4_SB(sb); + if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && +- !ext4_data_block_valid(sbi, block, count)) { ++ !ext4_inode_block_valid(inode, block, count)) { + ext4_error(sb, "Freeing blocks not in datazone - " + "block = %llu, count = %lu", block, count); + goto error_return; +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c +index 740b736065c8..861e2e87dcd8 100644 +--- a/fs/ext4/namei.c ++++ b/fs/ext4/namei.c +@@ -1290,8 +1290,8 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size, + ext4_match(fname, de)) { + /* found a match - just to be sure, do + * a full check */ +- if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, +- bh->b_size, offset)) ++ if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf, ++ buf_size, offset)) + return -1; + *res_dir = de; + return 1; +@@ -1755,7 +1755,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, + blocksize, hinfo, map); + map -= count; + dx_sort_map(map, count); +- /* Split the existing block in the middle, size-wise */ ++ /* Ensure that neither split block is over half full */ + size = 0; + move = 0; + for (i = count-1; i >= 0; i--) { +@@ -1765,8 +1765,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, + size += map[i].size; + move++; + } +- /* map index at which we will 
split */ +- split = count - move; ++ /* ++ * map index at which we will split ++ * ++ * If the sum of active entries didn't exceed half the block size, just ++ * split it in half by count; each resulting block will have at least ++ * half the space free. ++ */ ++ if (i > 0) ++ split = count - move; ++ else ++ split = count/2; ++ + hash2 = map[split].hash; + continued = hash2 == map[split - 1].hash; + dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n", +@@ -2335,7 +2345,7 @@ int ext4_generic_delete_entry(handle_t *handle, + de = (struct ext4_dir_entry_2 *)entry_buf; + while (i < buf_size - csum_size) { + if (ext4_check_dir_entry(dir, NULL, de, bh, +- bh->b_data, bh->b_size, i)) ++ entry_buf, buf_size, i)) + return -EFSCORRUPTED; + if (de == de_del) { + if (pde) +@@ -3375,12 +3385,35 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent, + return retval; + } + } +- brelse(ent->bh); +- ent->bh = NULL; + + return 0; + } + ++static void ext4_resetent(handle_t *handle, struct ext4_renament *ent, ++ unsigned ino, unsigned file_type) ++{ ++ struct ext4_renament old = *ent; ++ int retval = 0; ++ ++ /* ++ * old->de could have moved from under us during make indexed dir, ++ * so the old->de may no longer valid and need to find it again ++ * before reset old inode info. ++ */ ++ old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL); ++ if (IS_ERR(old.bh)) ++ retval = PTR_ERR(old.bh); ++ if (!old.bh) ++ retval = -ENOENT; ++ if (retval) { ++ ext4_std_error(old.dir->i_sb, retval); ++ return; ++ } ++ ++ ext4_setent(handle, &old, ino, file_type); ++ brelse(old.bh); ++} ++ + static int ext4_find_delete_entry(handle_t *handle, struct inode *dir, + const struct qstr *d_name) + { +@@ -3540,14 +3573,14 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, + */ + retval = -ENOENT; + if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino) +- goto end_rename; ++ goto release_bh; + + new.bh = ext4_find_entry(new.dir, &new.dentry->d_name, + &new.de, &new.inlined); + if (IS_ERR(new.bh)) { + retval = PTR_ERR(new.bh); + new.bh = NULL; +- goto end_rename; ++ goto release_bh; + } + if (new.bh) { + if (!new.inode) { +@@ -3564,18 +3597,17 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, + handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits); + if (IS_ERR(handle)) { + retval = PTR_ERR(handle); +- handle = NULL; +- goto end_rename; ++ goto release_bh; + } + } else { + whiteout = ext4_whiteout_for_rename(&old, credits, &handle); + if (IS_ERR(whiteout)) { + retval = PTR_ERR(whiteout); +- whiteout = NULL; +- goto end_rename; ++ goto release_bh; + } + } + ++ old_file_type = old.de->file_type; + if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir)) + ext4_handle_sync(handle); + +@@ -3603,7 +3635,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, + force_reread = (new.dir->i_ino == old.dir->i_ino && + ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA)); + +- old_file_type = old.de->file_type; + if (whiteout) { + /* + * Do this before adding a new entry, so the old entry is sure +@@ -3675,17 +3706,23 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, + retval = 0; + + end_rename: +- brelse(old.dir_bh); +- brelse(old.bh); +- brelse(new.bh); + if (whiteout) { +- if (retval) ++ if (retval) { ++ ext4_resetent(handle, &old, ++ old.inode->i_ino, old_file_type); + drop_nlink(whiteout); ++ ext4_orphan_add(handle, whiteout); ++ } + unlock_new_inode(whiteout); ++ ext4_journal_stop(handle); + 
iput(whiteout); +- } +- if (handle) ++ } else { + ext4_journal_stop(handle); ++ } ++release_bh: ++ brelse(old.dir_bh); ++ brelse(old.bh); ++ brelse(new.bh); + return retval; + } + +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index df08af6462c0..91f34e52276c 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -1578,8 +1578,8 @@ static const struct mount_opts { + {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA | + EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA), + MOPT_CLEAR | MOPT_Q}, +- {Opt_usrjquota, 0, MOPT_Q}, +- {Opt_grpjquota, 0, MOPT_Q}, ++ {Opt_usrjquota, 0, MOPT_Q | MOPT_STRING}, ++ {Opt_grpjquota, 0, MOPT_Q | MOPT_STRING}, + {Opt_offusrjquota, 0, MOPT_Q}, + {Opt_offgrpjquota, 0, MOPT_Q}, + {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT}, +@@ -4340,6 +4340,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + #ifdef CONFIG_QUOTA + failed_mount8: + ext4_unregister_sysfs(sb); ++ kobject_put(&sbi->s_kobj); + #endif + failed_mount7: + ext4_unregister_li_request(sb); +@@ -4681,15 +4682,10 @@ static int ext4_commit_super(struct super_block *sb, int sync) + struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; + int error = 0; + +- if (!sbh || block_device_ejected(sb)) +- return error; +- +- /* +- * The superblock bh should be mapped, but it might not be if the +- * device was hot-removed. Not much we can do but fail the I/O. +- */ +- if (!buffer_mapped(sbh)) +- return error; ++ if (!sbh) ++ return -EINVAL; ++ if (block_device_ejected(sb)) ++ return -ENODEV; + + /* + * If the file system is mounted read-only, don't update the +@@ -5158,7 +5154,10 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) + ext4_register_li_request(sb, first_not_zeroed); + } + +- ext4_setup_system_zone(sb); ++ err = ext4_setup_system_zone(sb); ++ if (err) ++ goto restore_opts; ++ + if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY)) + ext4_commit_super(sb, 1); + +@@ -5423,6 +5422,11 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, + /* Quotafile not on the same filesystem? */ + if (path->dentry->d_sb != sb) + return -EXDEV; ++ ++ /* Quota already enabled for this file? */ ++ if (IS_NOQUOTA(d_inode(path->dentry))) ++ return -EBUSY; ++ + /* Journaling quota? */ + if (EXT4_SB(sb)->s_qf_names[type]) { + /* Quotafile not in fs root? */ +diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c +index a28eac29234d..ada853a95986 100644 +--- a/fs/f2fs/checkpoint.c ++++ b/fs/f2fs/checkpoint.c +@@ -243,6 +243,8 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, + blkno * NAT_ENTRY_PER_BLOCK); + break; + case META_SIT: ++ if (unlikely(blkno >= TOTAL_SEGS(sbi))) ++ goto out; + /* get sit block addr */ + fio.new_blkaddr = current_sit_addr(sbi, + blkno * SIT_ENTRY_PER_BLOCK); +@@ -1044,8 +1046,12 @@ int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type) + get_pages(sbi, is_dir ? + F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA)); + retry: +- if (unlikely(f2fs_cp_error(sbi))) ++ if (unlikely(f2fs_cp_error(sbi))) { ++ trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir, ++ get_pages(sbi, is_dir ? 
++ F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA)); + return -EIO; ++ } + + spin_lock(&sbi->inode_lock[type]); + +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c +index e21905089e16..d66f59934045 100644 +--- a/fs/f2fs/file.c ++++ b/fs/f2fs/file.c +@@ -768,7 +768,8 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr) + if (ia_valid & ATTR_MODE) { + umode_t mode = attr->ia_mode; + +- if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) ++ if (!in_group_p(inode->i_gid) && ++ !capable_wrt_inode_uidgid(inode, CAP_FSETID)) + mode &= ~S_ISGID; + set_acl_inode(inode, mode); + } +diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c +index 2b40293ea9bc..cb02aeb59d45 100644 +--- a/fs/f2fs/inline.c ++++ b/fs/f2fs/inline.c +@@ -229,7 +229,8 @@ int f2fs_convert_inline_inode(struct inode *inode) + + f2fs_put_page(page, 1); + +- f2fs_balance_fs(sbi, dn.node_changed); ++ if (!err) ++ f2fs_balance_fs(sbi, dn.node_changed); + + return err; + } +diff --git a/fs/fat/inode.c b/fs/fat/inode.c +index 7e2ef060d07f..11dd089786a0 100644 +--- a/fs/fat/inode.c ++++ b/fs/fat/inode.c +@@ -1513,6 +1513,12 @@ static int fat_read_bpb(struct super_block *sb, struct fat_boot_sector *b, + goto out; + } + ++ if (bpb->fat_fat_length == 0 && bpb->fat32_length == 0) { ++ if (!silent) ++ fat_msg(sb, KERN_ERR, "bogus number of FAT sectors"); ++ goto out; ++ } ++ + error = 0; + + out: +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c +index 2a9e755877d4..e7815bebaeb8 100644 +--- a/fs/fs-writeback.c ++++ b/fs/fs-writeback.c +@@ -45,7 +45,6 @@ struct wb_completion { + struct wb_writeback_work { + long nr_pages; + struct super_block *sb; +- unsigned long *older_than_this; + enum writeback_sync_modes sync_mode; + unsigned int tagged_writepages:1; + unsigned int for_kupdate:1; +@@ -160,7 +159,9 @@ static void inode_io_list_del_locked(struct inode *inode, + struct bdi_writeback *wb) + { + assert_spin_locked(&wb->list_lock); ++ assert_spin_locked(&inode->i_lock); + ++ inode->i_state &= ~I_SYNC_QUEUED; + list_del_init(&inode->i_io_list); + wb_io_lists_depopulated(wb); + } +@@ -269,6 +270,7 @@ void __inode_attach_wb(struct inode *inode, struct page *page) + if (unlikely(cmpxchg(&inode->i_wb, NULL, wb))) + wb_put(wb); + } ++EXPORT_SYMBOL_GPL(__inode_attach_wb); + + /** + * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it +@@ -510,9 +512,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id) + /* find and pin the new wb */ + rcu_read_lock(); + memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys); +- if (memcg_css) +- isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC); ++ if (memcg_css && !css_tryget(memcg_css)) ++ memcg_css = NULL; + rcu_read_unlock(); ++ if (!memcg_css) ++ goto out_free; ++ ++ isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC); ++ css_put(memcg_css); + if (!isw->new_wb) + goto out_free; + +@@ -1038,7 +1045,9 @@ void inode_io_list_del(struct inode *inode) + struct bdi_writeback *wb; + + wb = inode_to_wb_and_lock_list(inode); ++ spin_lock(&inode->i_lock); + inode_io_list_del_locked(inode, wb); ++ spin_unlock(&inode->i_lock); + spin_unlock(&wb->list_lock); + } + +@@ -1087,8 +1096,10 @@ void sb_clear_inode_writeback(struct inode *inode) + * the case then the inode must have been redirtied while it was being written + * out and we don't reset its dirtied_when. 
+ */ +-static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) ++static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb) + { ++ assert_spin_locked(&inode->i_lock); ++ + if (!list_empty(&wb->b_dirty)) { + struct inode *tail; + +@@ -1097,6 +1108,14 @@ static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) + inode->dirtied_when = jiffies; + } + inode_io_list_move_locked(inode, wb, &wb->b_dirty); ++ inode->i_state &= ~I_SYNC_QUEUED; ++} ++ ++static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) ++{ ++ spin_lock(&inode->i_lock); ++ redirty_tail_locked(inode, wb); ++ spin_unlock(&inode->i_lock); + } + + /* +@@ -1135,16 +1154,13 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t) + #define EXPIRE_DIRTY_ATIME 0x0001 + + /* +- * Move expired (dirtied before work->older_than_this) dirty inodes from ++ * Move expired (dirtied before dirtied_before) dirty inodes from + * @delaying_queue to @dispatch_queue. + */ + static int move_expired_inodes(struct list_head *delaying_queue, + struct list_head *dispatch_queue, +- int flags, +- struct wb_writeback_work *work) ++ int flags, unsigned long dirtied_before) + { +- unsigned long *older_than_this = NULL; +- unsigned long expire_time; + LIST_HEAD(tmp); + struct list_head *pos, *node; + struct super_block *sb = NULL; +@@ -1152,21 +1168,17 @@ static int move_expired_inodes(struct list_head *delaying_queue, + int do_sb_sort = 0; + int moved = 0; + +- if ((flags & EXPIRE_DIRTY_ATIME) == 0) +- older_than_this = work->older_than_this; +- else if (!work->for_sync) { +- expire_time = jiffies - (dirtytime_expire_interval * HZ); +- older_than_this = &expire_time; +- } + while (!list_empty(delaying_queue)) { + inode = wb_inode(delaying_queue->prev); +- if (older_than_this && +- inode_dirtied_after(inode, *older_than_this)) ++ if (inode_dirtied_after(inode, dirtied_before)) + break; + list_move(&inode->i_io_list, &tmp); + moved++; ++ spin_lock(&inode->i_lock); + if (flags & EXPIRE_DIRTY_ATIME) +- set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state); ++ inode->i_state |= I_DIRTY_TIME_EXPIRED; ++ inode->i_state |= I_SYNC_QUEUED; ++ spin_unlock(&inode->i_lock); + if (sb_is_blkdev_sb(inode->i_sb)) + continue; + if (sb && sb != inode->i_sb) +@@ -1204,18 +1216,22 @@ static int move_expired_inodes(struct list_head *delaying_queue, + * | + * +--> dequeue for IO + */ +-static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work) ++static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work, ++ unsigned long dirtied_before) + { + int moved; ++ unsigned long time_expire_jif = dirtied_before; + + assert_spin_locked(&wb->list_lock); + list_splice_init(&wb->b_more_io, &wb->b_io); +- moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work); ++ moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, dirtied_before); ++ if (!work->for_sync) ++ time_expire_jif = jiffies - dirtytime_expire_interval * HZ; + moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io, +- EXPIRE_DIRTY_ATIME, work); ++ EXPIRE_DIRTY_ATIME, time_expire_jif); + if (moved) + wb_io_lists_populated(wb); +- trace_writeback_queue_io(wb, work, moved); ++ trace_writeback_queue_io(wb, work, dirtied_before, moved); + } + + static int write_inode(struct inode *inode, struct writeback_control *wbc) +@@ -1309,7 +1325,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, + * writeback is not making progress due to locked + * buffers. Skip this inode for now. 
+ */ +- redirty_tail(inode, wb); ++ redirty_tail_locked(inode, wb); + return; + } + +@@ -1329,7 +1345,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, + * retrying writeback of the dirty page/inode + * that cannot be performed immediately. + */ +- redirty_tail(inode, wb); ++ redirty_tail_locked(inode, wb); + } + } else if (inode->i_state & I_DIRTY) { + /* +@@ -1337,10 +1353,11 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, + * such as delayed allocation during submission or metadata + * updates after data IO completion. + */ +- redirty_tail(inode, wb); ++ redirty_tail_locked(inode, wb); + } else if (inode->i_state & I_DIRTY_TIME) { + inode->dirtied_when = jiffies; + inode_io_list_move_locked(inode, wb, &wb->b_dirty_time); ++ inode->i_state &= ~I_SYNC_QUEUED; + } else { + /* The inode is clean. Remove from writeback lists. */ + inode_io_list_del_locked(inode, wb); +@@ -1584,8 +1601,8 @@ static long writeback_sb_inodes(struct super_block *sb, + */ + spin_lock(&inode->i_lock); + if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { ++ redirty_tail_locked(inode, wb); + spin_unlock(&inode->i_lock); +- redirty_tail(inode, wb); + continue; + } + if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) { +@@ -1726,7 +1743,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, + blk_start_plug(&plug); + spin_lock(&wb->list_lock); + if (list_empty(&wb->b_io)) +- queue_io(wb, &work); ++ queue_io(wb, &work, jiffies); + __writeback_inodes_wb(wb, &work); + spin_unlock(&wb->list_lock); + blk_finish_plug(&plug); +@@ -1746,7 +1763,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, + * takes longer than a dirty_writeback_interval interval, then leave a + * one-second gap. + * +- * older_than_this takes precedence over nr_to_write. So we'll only write back ++ * dirtied_before takes precedence over nr_to_write. So we'll only write back + * all dirty pages if they are all attached to "old" mappings. + */ + static long wb_writeback(struct bdi_writeback *wb, +@@ -1754,14 +1771,11 @@ static long wb_writeback(struct bdi_writeback *wb, + { + unsigned long wb_start = jiffies; + long nr_pages = work->nr_pages; +- unsigned long oldest_jif; ++ unsigned long dirtied_before = jiffies; + struct inode *inode; + long progress; + struct blk_plug plug; + +- oldest_jif = jiffies; +- work->older_than_this = &oldest_jif; +- + blk_start_plug(&plug); + spin_lock(&wb->list_lock); + for (;;) { +@@ -1795,14 +1809,14 @@ static long wb_writeback(struct bdi_writeback *wb, + * safe. 
+ */ + if (work->for_kupdate) { +- oldest_jif = jiffies - ++ dirtied_before = jiffies - + msecs_to_jiffies(dirty_expire_interval * 10); + } else if (work->for_background) +- oldest_jif = jiffies; ++ dirtied_before = jiffies; + + trace_writeback_start(wb, work); + if (list_empty(&wb->b_io)) +- queue_io(wb, work); ++ queue_io(wb, work, dirtied_before); + if (work->sb) + progress = writeback_sb_inodes(work->sb, wb, work); + else +@@ -1962,7 +1976,7 @@ void wb_workfn(struct work_struct *work) + struct bdi_writeback, dwork); + long pages_written; + +- set_worker_desc("flush-%s", dev_name(wb->bdi->dev)); ++ set_worker_desc("flush-%s", bdi_dev_name(wb->bdi)); + current->flags |= PF_SWAPWRITE; + + if (likely(!current_is_workqueue_rescuer() || +@@ -2079,28 +2093,6 @@ int dirtytime_interval_handler(struct ctl_table *table, int write, + return ret; + } + +-static noinline void block_dump___mark_inode_dirty(struct inode *inode) +-{ +- if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) { +- struct dentry *dentry; +- const char *name = "?"; +- +- dentry = d_find_alias(inode); +- if (dentry) { +- spin_lock(&dentry->d_lock); +- name = (const char *) dentry->d_name.name; +- } +- printk(KERN_DEBUG +- "%s(%d): dirtied inode %lu (%s) on %s\n", +- current->comm, task_pid_nr(current), inode->i_ino, +- name, inode->i_sb->s_id); +- if (dentry) { +- spin_unlock(&dentry->d_lock); +- dput(dentry); +- } +- } +-} +- + /** + * __mark_inode_dirty - internal function + * @inode: inode to mark +@@ -2159,9 +2151,6 @@ void __mark_inode_dirty(struct inode *inode, int flags) + (dirtytime && (inode->i_state & I_DIRTY_INODE))) + return; + +- if (unlikely(block_dump > 1)) +- block_dump___mark_inode_dirty(inode); +- + spin_lock(&inode->i_lock); + if (dirtytime && (inode->i_state & I_DIRTY_INODE)) + goto out_unlock_inode; +@@ -2175,11 +2164,12 @@ void __mark_inode_dirty(struct inode *inode, int flags) + inode->i_state |= flags; + + /* +- * If the inode is being synced, just update its dirty state. +- * The unlocker will place the inode on the appropriate +- * superblock list, based upon its state. ++ * If the inode is queued for writeback by flush worker, just ++ * update its dirty state. Once the flush worker is done with ++ * the inode it will place it on the appropriate superblock ++ * list, based upon its state. 
+ */ +- if (inode->i_state & I_SYNC) ++ if (inode->i_state & I_SYNC_QUEUED) + goto out_unlock_inode; + + /* +diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c +index d9aba9700726..b83367300f48 100644 +--- a/fs/fuse/cuse.c ++++ b/fs/fuse/cuse.c +@@ -616,6 +616,8 @@ static int __init cuse_init(void) + cuse_channel_fops.owner = THIS_MODULE; + cuse_channel_fops.open = cuse_channel_open; + cuse_channel_fops.release = cuse_channel_release; ++ /* CUSE is not prepared for FUSE_DEV_IOC_CLONE */ ++ cuse_channel_fops.unlocked_ioctl = NULL; + + cuse_class = class_create(THIS_MODULE, "cuse"); + if (IS_ERR(cuse_class)) +diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c +index dd77bb929ee0..5a0f089fbc47 100644 +--- a/fs/fuse/dev.c ++++ b/fs/fuse/dev.c +@@ -835,7 +835,6 @@ static int fuse_check_page(struct page *page) + { + if (page_mapcount(page) || + page->mapping != NULL || +- page_count(page) != 1 || + (page->flags & PAGE_FLAGS_CHECK_AT_PREP & + ~(1 << PG_locked | + 1 << PG_referenced | +@@ -857,15 +856,16 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) + struct page *newpage; + struct pipe_buffer *buf = cs->pipebufs; + ++ get_page(oldpage); + err = unlock_request(cs->req); + if (err) +- return err; ++ goto out_put_old; + + fuse_copy_finish(cs); + + err = pipe_buf_confirm(cs->pipe, buf); + if (err) +- return err; ++ goto out_put_old; + + BUG_ON(!cs->nr_segs); + cs->currbuf = buf; +@@ -905,7 +905,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) + err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL); + if (err) { + unlock_page(newpage); +- return err; ++ goto out_put_old; + } + + get_page(newpage); +@@ -924,14 +924,19 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) + if (err) { + unlock_page(newpage); + put_page(newpage); +- return err; ++ goto out_put_old; + } + + unlock_page(oldpage); ++ /* Drop ref for ap->pages[] array */ + put_page(oldpage); + cs->len = 0; + +- return 0; ++ err = 0; ++out_put_old: ++ /* Drop ref obtained in this function */ ++ put_page(oldpage); ++ return err; + + out_fallback_unlock: + unlock_page(newpage); +@@ -940,10 +945,10 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) + cs->offset = buf->offset; + + err = lock_request(cs->req); +- if (err) +- return err; ++ if (!err) ++ err = 1; + +- return 1; ++ goto out_put_old; + } + + static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page, +@@ -955,14 +960,16 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page, + if (cs->nr_segs == cs->pipe->buffers) + return -EIO; + ++ get_page(page); + err = unlock_request(cs->req); +- if (err) ++ if (err) { ++ put_page(page); + return err; ++ } + + fuse_copy_finish(cs); + + buf = cs->pipebufs; +- get_page(page); + buf->page = page; + buf->offset = offset; + buf->len = count; +@@ -1301,6 +1308,15 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file, + goto restart; + } + spin_lock(&fpq->lock); ++ /* ++ * Must not put request on fpq->io queue after having been shut down by ++ * fuse_abort_conn() ++ */ ++ if (!fpq->connected) { ++ req->out.h.error = err = -ECONNABORTED; ++ goto out_end; ++ ++ } + list_add(&req->list, &fpq->io); + spin_unlock(&fpq->lock); + cs->req = req; +@@ -1877,7 +1893,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud, + } + + err = -EINVAL; +- if (oh.error <= -1000 || oh.error > 0) ++ if (oh.error <= -512 || oh.error > 0) + goto err_finish; + + spin_lock(&fpq->lock); +diff --git 
a/fs/fuse/file.c b/fs/fuse/file.c +index b1ee8b7a5a9b..1956f2abe211 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -18,6 +18,7 @@ + #include + #include + #include ++#include + + static const struct file_operations fuse_direct_io_file_operations; + +@@ -2557,7 +2558,16 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, + struct iovec *iov = iov_page; + + iov->iov_base = (void __user *)arg; +- iov->iov_len = _IOC_SIZE(cmd); ++ ++ switch (cmd) { ++ case FS_IOC_GETFLAGS: ++ case FS_IOC_SETFLAGS: ++ iov->iov_len = sizeof(int); ++ break; ++ default: ++ iov->iov_len = _IOC_SIZE(cmd); ++ break; ++ } + + if (_IOC_DIR(cmd) & _IOC_WRITE) { + in_iov = iov; +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h +index c8d9c6620090..2a5229b73dea 100644 +--- a/fs/fuse/fuse_i.h ++++ b/fs/fuse/fuse_i.h +@@ -261,7 +261,7 @@ struct fuse_io_priv { + + #define FUSE_IO_PRIV_SYNC(f) \ + { \ +- .refcnt = { ATOMIC_INIT(1) }, \ ++ .refcnt = KREF_INIT(1), \ + .async = 0, \ + .file = f, \ + } +diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c +index efd44d5645d8..3d4d35083438 100644 +--- a/fs/gfs2/glock.c ++++ b/fs/gfs2/glock.c +@@ -758,7 +758,8 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, + } + kfree(gl->gl_lksb.sb_lvbptr); + kmem_cache_free(cachep, gl); +- atomic_dec(&sdp->sd_glock_disposal); ++ if (atomic_dec_and_test(&sdp->sd_glock_disposal)) ++ wake_up(&sdp->sd_glock_wait); + *glp = tmp; + + return ret; +@@ -1349,6 +1350,7 @@ __acquires(&lru_lock) + while(!list_empty(list)) { + gl = list_entry(list->next, struct gfs2_glock, gl_lru); + list_del_init(&gl->gl_lru); ++ clear_bit(GLF_LRU, &gl->gl_flags); + if (!spin_trylock(&gl->gl_lockref.lock)) { + add_back_to_lru: + list_add(&gl->gl_lru, &lru_list); +@@ -1395,7 +1397,6 @@ static long gfs2_scan_glock_lru(int nr) + if (!test_bit(GLF_LOCK, &gl->gl_flags)) { + list_move(&gl->gl_lru, &dispose); + atomic_dec(&lru_count); +- clear_bit(GLF_LRU, &gl->gl_flags); + freed++; + continue; + } +diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c +index 3c3d037df824..3cbc9147286d 100644 +--- a/fs/gfs2/lock_dlm.c ++++ b/fs/gfs2/lock_dlm.c +@@ -284,7 +284,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl) + { + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct lm_lockstruct *ls = &sdp->sd_lockstruct; +- int lvb_needs_unlock = 0; + int error; + + if (gl->gl_lksb.sb_lkid == 0) { +@@ -297,13 +296,10 @@ static void gdlm_put_lock(struct gfs2_glock *gl) + gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT); + gfs2_update_request_times(gl); + +- /* don't want to skip dlm_unlock writing the lvb when lock is ex */ +- +- if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE)) +- lvb_needs_unlock = 1; ++ /* don't want to skip dlm_unlock writing the lvb when lock has one */ + + if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) && +- !lvb_needs_unlock) { ++ !gl->gl_lksb.sb_lvbptr) { + gfs2_glock_free(gl); + return; + } +diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c +index ebea15c168ae..eb31ae9468cc 100644 +--- a/fs/gfs2/ops_fstype.c ++++ b/fs/gfs2/ops_fstype.c +@@ -160,15 +160,19 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent) + return -EINVAL; + } + +- /* If format numbers match exactly, we're done. 
*/ +- +- if (sb->sb_fs_format == GFS2_FORMAT_FS && +- sb->sb_multihost_format == GFS2_FORMAT_MULTI) +- return 0; ++ if (sb->sb_fs_format != GFS2_FORMAT_FS || ++ sb->sb_multihost_format != GFS2_FORMAT_MULTI) { ++ fs_warn(sdp, "Unknown on-disk format, unable to mount\n"); ++ return -EINVAL; ++ } + +- fs_warn(sdp, "Unknown on-disk format, unable to mount\n"); ++ if (sb->sb_bsize < 512 || sb->sb_bsize > PAGE_SIZE || ++ (sb->sb_bsize & (sb->sb_bsize - 1))) { ++ pr_warn("Invalid superblock size\n"); ++ return -EINVAL; ++ } + +- return -EINVAL; ++ return 0; + } + + static void end_bio_io_page(struct bio *bio) +@@ -920,7 +924,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo) + } + + static const match_table_t nolock_tokens = { +- { Opt_jid, "jid=%d\n", }, ++ { Opt_jid, "jid=%d", }, + { Opt_err, NULL }, + }; + +diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c +index 0a80f6636549..56a94535c246 100644 +--- a/fs/gfs2/rgrp.c ++++ b/fs/gfs2/rgrp.c +@@ -730,9 +730,9 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp) + } + + gfs2_free_clones(rgd); ++ return_all_reservations(rgd); + kfree(rgd->rd_bits); + rgd->rd_bits = NULL; +- return_all_reservations(rgd); + kmem_cache_free(gfs2_rgrpd_cachep, rgd); + } + } +@@ -1000,6 +1000,10 @@ static int gfs2_ri_update(struct gfs2_inode *ip) + if (error < 0) + return error; + ++ if (RB_EMPTY_ROOT(&sdp->sd_rindex_tree)) { ++ fs_err(sdp, "no resource groups found in the file system.\n"); ++ return -ENOENT; ++ } + set_rgrp_preferences(sdp); + + sdp->sd_rindex_uptodate = 1; +@@ -1371,6 +1375,9 @@ int gfs2_fitrim(struct file *filp, void __user *argp) + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + ++ if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) ++ return -EROFS; ++ + if (!blk_queue_discard(q)) + return -EOPNOTSUPP; + +diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c +index 37496d83661a..ef401aecaa2c 100644 +--- a/fs/gfs2/super.c ++++ b/fs/gfs2/super.c +@@ -986,11 +986,13 @@ void gfs2_freeze_func(struct work_struct *work) + static int gfs2_freeze(struct super_block *sb) + { + struct gfs2_sbd *sdp = sb->s_fs_info; +- int error = 0; ++ int error; + + mutex_lock(&sdp->sd_freeze_mutex); +- if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) ++ if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) { ++ error = -EBUSY; + goto out; ++ } + + if (test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) { + error = -EINVAL; +@@ -1032,10 +1034,10 @@ static int gfs2_unfreeze(struct super_block *sb) + struct gfs2_sbd *sdp = sb->s_fs_info; + + mutex_lock(&sdp->sd_freeze_mutex); +- if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN || ++ if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN || + !gfs2_holder_initialized(&sdp->sd_freeze_gh)) { + mutex_unlock(&sdp->sd_freeze_mutex); +- return 0; ++ return -EINVAL; + } + + gfs2_glock_dq_uninit(&sdp->sd_freeze_gh); +diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c +index de69d8a24f6d..7f2ef95dcd05 100644 +--- a/fs/hfs/bfind.c ++++ b/fs/hfs/bfind.c +@@ -24,7 +24,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd) + fd->key = ptr + tree->max_key_len + 2; + hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n", + tree->cnid, __builtin_return_address(0)); +- mutex_lock(&tree->tree_lock); ++ switch (tree->cnid) { ++ case HFS_CAT_CNID: ++ mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX); ++ break; ++ case HFS_EXT_CNID: ++ mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX); ++ break; ++ case HFS_ATTR_CNID: ++ mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX); ++ break; ++ default: ++ return -EINVAL; ++ } + return 0; + } + 
+diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c +index d77d844b668b..1ff979c9d0a3 100644 +--- a/fs/hfs/bnode.c ++++ b/fs/hfs/bnode.c +@@ -14,16 +14,31 @@ + + #include "btree.h" + +-void hfs_bnode_read(struct hfs_bnode *node, void *buf, +- int off, int len) ++void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) + { + struct page *page; ++ int pagenum; ++ int bytes_read; ++ int bytes_to_read; ++ void *vaddr; + + off += node->page_offset; +- page = node->page[0]; ++ pagenum = off >> PAGE_SHIFT; ++ off &= ~PAGE_MASK; /* compute page offset for the first page */ + +- memcpy(buf, kmap(page) + off, len); +- kunmap(page); ++ for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) { ++ if (pagenum >= node->tree->pages_per_bnode) ++ break; ++ page = node->page[pagenum]; ++ bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off); ++ ++ vaddr = kmap_atomic(page); ++ memcpy(buf + bytes_read, vaddr + off, bytes_to_read); ++ kunmap_atomic(vaddr); ++ ++ pagenum++; ++ off = 0; /* page offset only applies to the first page */ ++ } + } + + u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off) +diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h +index 2715f416b5a8..308b5f1af65b 100644 +--- a/fs/hfs/btree.h ++++ b/fs/hfs/btree.h +@@ -12,6 +12,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *); + + #define NODE_HASH_SIZE 256 + ++/* B-tree mutex nested subclasses */ ++enum hfs_btree_mutex_classes { ++ CATALOG_BTREE_MUTEX, ++ EXTENTS_BTREE_MUTEX, ++ ATTR_BTREE_MUTEX, ++}; ++ + /* A HFS BTree held in memory */ + struct hfs_btree { + struct super_block *sb; +diff --git a/fs/hfs/super.c b/fs/hfs/super.c +index bf6304a350a6..c2a5a0ca3948 100644 +--- a/fs/hfs/super.c ++++ b/fs/hfs/super.c +@@ -427,14 +427,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) + if (!res) { + if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) { + res = -EIO; +- goto bail; ++ goto bail_hfs_find; + } + hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength); + } +- if (res) { +- hfs_find_exit(&fd); +- goto bail_no_root; +- } ++ if (res) ++ goto bail_hfs_find; + res = -EINVAL; + root_inode = hfs_iget(sb, &fd.search_key->cat, &rec); + hfs_find_exit(&fd); +@@ -450,6 +448,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) + /* everything's okay */ + return 0; + ++bail_hfs_find: ++ hfs_find_exit(&fd); + bail_no_root: + pr_err("get root inode failed\n"); + bail: +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c +index 253b03451b72..e7339a39e718 100644 +--- a/fs/hugetlbfs/inode.c ++++ b/fs/hugetlbfs/inode.c +@@ -451,7 +451,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, + if (next >= end) + break; + +- hash = hugetlb_fault_mutex_hash(h, mapping, next, 0); ++ hash = hugetlb_fault_mutex_hash(h, mapping, next); + mutex_lock(&hugetlb_fault_mutex_table[hash]); + + /* +@@ -634,7 +634,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, + addr = index * hpage_size; + + /* mutex taken here, fault path and hole punch */ +- hash = hugetlb_fault_mutex_hash(h, mapping, index, addr); ++ hash = hugetlb_fault_mutex_hash(h, mapping, index); + mutex_lock(&hugetlb_fault_mutex_table[hash]); + + /* See if already present in mapping to avoid alloc/free */ +@@ -665,8 +665,9 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, + + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + ++ set_page_huge_active(page); + /* +- * page_put due to reference from 
alloc_huge_page() ++ * put_page() due to reference from alloc_huge_page() + * unlock_page because locked by add_to_page_cache() + */ + put_page(page); +diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c +index e7599615e4e0..e876a30f9073 100644 +--- a/fs/isofs/dir.c ++++ b/fs/isofs/dir.c +@@ -151,6 +151,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *file, + printk(KERN_NOTICE "iso9660: Corrupted directory entry" + " in block %lu of inode %lu\n", block, + inode->i_ino); ++ brelse(bh); + return -EIO; + } + +diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c +index aee592767f1d..2c43de1b034d 100644 +--- a/fs/isofs/namei.c ++++ b/fs/isofs/namei.c +@@ -101,6 +101,7 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry, + printk(KERN_NOTICE "iso9660: Corrupted directory entry" + " in block %lu of inode %lu\n", block, + dir->i_ino); ++ brelse(bh); + return 0; + } + +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c +index 8de458d64134..cfbf5474bcca 100644 +--- a/fs/jbd2/transaction.c ++++ b/fs/jbd2/transaction.c +@@ -1896,6 +1896,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) + */ + static void __jbd2_journal_unfile_buffer(struct journal_head *jh) + { ++ J_ASSERT_JH(jh, jh->b_transaction != NULL); ++ J_ASSERT_JH(jh, jh->b_next_transaction == NULL); ++ + __jbd2_journal_temp_unlink_buffer(jh); + jh->b_transaction = NULL; + jbd2_journal_put_journal_head(jh); +@@ -1987,6 +1990,7 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal, + { + struct buffer_head *head; + struct buffer_head *bh; ++ bool has_write_io_error = false; + int ret = 0; + + J_ASSERT(PageLocked(page)); +@@ -2011,11 +2015,26 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal, + jbd_unlock_bh_state(bh); + if (buffer_jbd(bh)) + goto busy; ++ ++ /* ++ * If we free a metadata buffer which has been failed to ++ * write out, the jbd2 checkpoint procedure will not detect ++ * this failure and may lead to filesystem inconsistency ++ * after cleanup journal tail. 
++ */ ++ if (buffer_write_io_error(bh)) { ++ pr_err("JBD2: Error while async write back metadata bh %llu.", ++ (unsigned long long)bh->b_blocknr); ++ has_write_io_error = true; ++ } + } while ((bh = bh->b_this_page) != head); + + ret = try_to_free_buffers(page); + + busy: ++ if (has_write_io_error) ++ jbd2_journal_abort(journal, -EIO); ++ + return ret; + } + +@@ -2443,6 +2462,13 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh) + + was_dirty = test_clear_buffer_jbddirty(bh); + __jbd2_journal_temp_unlink_buffer(jh); ++ ++ /* ++ * b_transaction must be set, otherwise the new b_transaction won't ++ * be holding jh reference ++ */ ++ J_ASSERT_JH(jh, jh->b_transaction != NULL); ++ + /* + * We set b_transaction here because b_next_transaction will inherit + * our jh reference and thus __jbd2_journal_file_buffer() must not +diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c +index 406d9cc84ba8..79e771ab624f 100644 +--- a/fs/jffs2/compr_rtime.c ++++ b/fs/jffs2/compr_rtime.c +@@ -37,6 +37,9 @@ static int jffs2_rtime_compress(unsigned char *data_in, + int outpos = 0; + int pos=0; + ++ if (*dstlen <= 3) ++ return -1; ++ + memset(positions,0,sizeof(positions)); + + while (pos < (*sourcelen) && outpos <= (*dstlen)-2) { +diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c +index e5a6deb38e1e..f4a5ec92f5dc 100644 +--- a/fs/jffs2/dir.c ++++ b/fs/jffs2/dir.c +@@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry) + int ret; + uint32_t now = get_seconds(); + ++ mutex_lock(&f->sem); + for (fd = f->dents ; fd; fd = fd->next) { +- if (fd->ino) ++ if (fd->ino) { ++ mutex_unlock(&f->sem); + return -ENOTEMPTY; ++ } + } ++ mutex_unlock(&f->sem); + + ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, + dentry->d_name.len, f, now); +diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c +index 2f236cca6095..64a55015d126 100644 +--- a/fs/jffs2/readinode.c ++++ b/fs/jffs2/readinode.c +@@ -672,6 +672,22 @@ static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_r + jffs2_free_full_dirent(fd); + return -EIO; + } ++ ++#ifdef CONFIG_JFFS2_SUMMARY ++ /* ++ * we use CONFIG_JFFS2_SUMMARY because without it, we ++ * have checked it while mounting ++ */ ++ crc = crc32(0, fd->name, rd->nsize); ++ if (unlikely(crc != je32_to_cpu(rd->name_crc))) { ++ JFFS2_NOTICE("name CRC failed on dirent node at" ++ "%#08x: read %#08x,calculated %#08x\n", ++ ref_offset(ref), je32_to_cpu(rd->node_crc), crc); ++ jffs2_mark_node_obsolete(c, ref); ++ jffs2_free_full_dirent(fd); ++ return 0; ++ } ++#endif + } + + fd->nhash = full_name_hash(NULL, fd->name, rd->nsize); +diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c +index 90431dd613b8..08813789fcf0 100644 +--- a/fs/jffs2/scan.c ++++ b/fs/jffs2/scan.c +@@ -1075,7 +1075,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo + memcpy(&fd->name, rd->name, checkedlen); + fd->name[checkedlen] = 0; + +- crc = crc32(0, fd->name, rd->nsize); ++ crc = crc32(0, fd->name, checkedlen); + if (crc != je32_to_cpu(rd->name_crc)) { + pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", + __func__, ofs, je32_to_cpu(rd->name_crc), crc); +diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c +index be7c8a6a5748..4fe64519870f 100644 +--- a/fs/jffs2/summary.c ++++ b/fs/jffs2/summary.c +@@ -783,6 +783,8 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock + dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n", + je16_to_cpu(temp->u.nodetype)); + 
jffs2_sum_disable_collecting(c->summary); ++ /* The above call removes the list, nothing more to do */ ++ goto bail_rwcompat; + } else { + BUG(); /* unknown node in summary information */ + } +@@ -794,6 +796,7 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock + + c->summary->sum_num--; + } ++ bail_rwcompat: + + jffs2_sum_reset_collected(c->summary); + +diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c +index 054cc761b426..87b41edc800d 100644 +--- a/fs/jfs/inode.c ++++ b/fs/jfs/inode.c +@@ -161,7 +161,8 @@ void jfs_evict_inode(struct inode *inode) + if (test_cflag(COMMIT_Freewmap, inode)) + jfs_free_zero_link(inode); + +- diFree(inode); ++ if (JFS_SBI(inode->i_sb)->ipimap) ++ diFree(inode); + + /* + * Free the inode from the quota allocation. +diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c +index 2d514c7affc2..9ff510a489cb 100644 +--- a/fs/jfs/jfs_dmap.c ++++ b/fs/jfs/jfs_dmap.c +@@ -1669,7 +1669,7 @@ s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen) + } else if (rc == -ENOSPC) { + /* search for next smaller log2 block */ + l2nb = BLKSTOL2(nblocks) - 1; +- nblocks = 1 << l2nb; ++ nblocks = 1LL << l2nb; + } else { + /* Trim any already allocated blocks */ + jfs_error(bmp->db_ipbmap->i_sb, "-EIO\n"); +diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h +index 562b9a7e4311..f502a15c6c98 100644 +--- a/fs/jfs/jfs_dmap.h ++++ b/fs/jfs/jfs_dmap.h +@@ -196,7 +196,7 @@ typedef union dmtree { + #define dmt_leafidx t1.leafidx + #define dmt_height t1.height + #define dmt_budmin t1.budmin +-#define dmt_stree t1.stree ++#define dmt_stree t2.stree + + /* + * on-disk aggregate disk allocation map descriptor. +diff --git a/fs/jfs/jfs_filsys.h b/fs/jfs/jfs_filsys.h +index b67d64671bb4..415bfa90607a 100644 +--- a/fs/jfs/jfs_filsys.h ++++ b/fs/jfs/jfs_filsys.h +@@ -281,5 +281,6 @@ + * fsck() must be run to repair + */ + #define FM_EXTENDFS 0x00000008 /* file system extendfs() in progress */ ++#define FM_STATE_MAX 0x0000000f /* max value of s_state */ + + #endif /* _H_JFS_FILSYS */ +diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c +index a21ea8b3e5fa..12555c4eeb2b 100644 +--- a/fs/jfs/jfs_logmgr.c ++++ b/fs/jfs/jfs_logmgr.c +@@ -1338,6 +1338,7 @@ int lmLogInit(struct jfs_log * log) + } else { + if (memcmp(logsuper->uuid, log->uuid, 16)) { + jfs_warn("wrong uuid on JFS log device"); ++ rc = -EINVAL; + goto errout20; + } + log->size = le32_to_cpu(logsuper->size); +diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c +index 9895595fd2f2..103788ecc28c 100644 +--- a/fs/jfs/jfs_mount.c ++++ b/fs/jfs/jfs_mount.c +@@ -49,6 +49,7 @@ + + #include + #include ++#include + + #include "jfs_incore.h" + #include "jfs_filsys.h" +@@ -378,6 +379,15 @@ static int chkSuper(struct super_block *sb) + sbi->bsize = bsize; + sbi->l2bsize = le16_to_cpu(j_sb->s_l2bsize); + ++ /* check some fields for possible corruption */ ++ if (sbi->l2bsize != ilog2((u32)bsize) || ++ j_sb->pad != 0 || ++ le32_to_cpu(j_sb->s_state) > FM_STATE_MAX) { ++ rc = -EINVAL; ++ jfs_err("jfs_mount: Mount Failure: superblock is corrupt!"); ++ goto out; ++ } ++ + /* + * For now, ignore s_pbsize, l2bfactor. All I/O going through buffer + * cache. 
+diff --git a/fs/libfs.c b/fs/libfs.c +index 278457f22148..835d25e33509 100644 +--- a/fs/libfs.c ++++ b/fs/libfs.c +@@ -865,7 +865,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) + { + struct simple_attr *attr; +- u64 val; ++ unsigned long long val; + size_t size; + ssize_t ret; + +@@ -883,7 +883,9 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, + goto out; + + attr->set_buf[size] = '\0'; +- val = simple_strtoll(attr->set_buf, NULL, 0); ++ ret = kstrtoull(attr->set_buf, 0, &val); ++ if (ret) ++ goto out; + ret = attr->set(attr->data, val); + if (ret == 0) + ret = len; /* on success, claim we got the whole input */ +diff --git a/fs/lockd/host.c b/fs/lockd/host.c +index c7eb47f2fb6c..603fa652b965 100644 +--- a/fs/lockd/host.c ++++ b/fs/lockd/host.c +@@ -430,12 +430,7 @@ nlm_bind_host(struct nlm_host *host) + * RPC rebind is required + */ + if ((clnt = host->h_rpcclnt) != NULL) { +- if (time_after_eq(jiffies, host->h_nextrebind)) { +- rpc_force_rebind(clnt); +- host->h_nextrebind = jiffies + NLM_HOST_REBIND; +- dprintk("lockd: next rebind in %lu jiffies\n", +- host->h_nextrebind - jiffies); +- } ++ nlm_rebind_host(host); + } else { + unsigned long increment = nlmsvc_timeout; + struct rpc_timeout timeparms = { +@@ -483,13 +478,20 @@ nlm_bind_host(struct nlm_host *host) + return clnt; + } + +-/* +- * Force a portmap lookup of the remote lockd port ++/** ++ * nlm_rebind_host - If needed, force a portmap lookup of the peer's lockd port ++ * @host: NLM host handle for peer ++ * ++ * This is not needed when using a connection-oriented protocol, such as TCP. ++ * The existing autobind mechanism is sufficient to force a rebind when ++ * required, e.g. on connection state transitions. + */ + void + nlm_rebind_host(struct nlm_host *host) + { +- dprintk("lockd: rebind host %s\n", host->h_name); ++ if (host->h_proto != IPPROTO_UDP) ++ return; ++ + if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) { + rpc_force_rebind(host->h_rpcclnt); + host->h_nextrebind = jiffies + NLM_HOST_REBIND; +diff --git a/fs/minix/inode.c b/fs/minix/inode.c +index f975d667c539..1191f293ef1d 100644 +--- a/fs/minix/inode.c ++++ b/fs/minix/inode.c +@@ -155,6 +155,23 @@ static int minix_remount (struct super_block * sb, int * flags, char * data) + return 0; + } + ++static bool minix_check_superblock(struct minix_sb_info *sbi) ++{ ++ if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) ++ return false; ++ ++ /* ++ * s_max_size must not exceed the block mapping limitation. This check ++ * is only needed for V1 filesystems, since V2/V3 support an extra level ++ * of indirect blocks which places the limit well above U32_MAX. ++ */ ++ if (sbi->s_version == MINIX_V1 && ++ sbi->s_max_size > (7 + 512 + 512*512) * BLOCK_SIZE) ++ return false; ++ ++ return true; ++} ++ + static int minix_fill_super(struct super_block *s, void *data, int silent) + { + struct buffer_head *bh; +@@ -233,11 +250,12 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) + } else + goto out_no_fs; + ++ if (!minix_check_superblock(sbi)) ++ goto out_illegal_sb; ++ + /* + * Allocate the buffer map to keep the superblock small. 
+ */ +- if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) +- goto out_illegal_sb; + i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh); + map = kzalloc(i, GFP_KERNEL); + if (!map) +@@ -472,6 +490,13 @@ static struct inode *V1_minix_iget(struct inode *inode) + iget_failed(inode); + return ERR_PTR(-EIO); + } ++ if (raw_inode->i_nlinks == 0) { ++ printk("MINIX-fs: deleted inode referenced: %lu\n", ++ inode->i_ino); ++ brelse(bh); ++ iget_failed(inode); ++ return ERR_PTR(-ESTALE); ++ } + inode->i_mode = raw_inode->i_mode; + i_uid_write(inode, raw_inode->i_uid); + i_gid_write(inode, raw_inode->i_gid); +@@ -505,6 +530,13 @@ static struct inode *V2_minix_iget(struct inode *inode) + iget_failed(inode); + return ERR_PTR(-EIO); + } ++ if (raw_inode->i_nlinks == 0) { ++ printk("MINIX-fs: deleted inode referenced: %lu\n", ++ inode->i_ino); ++ brelse(bh); ++ iget_failed(inode); ++ return ERR_PTR(-ESTALE); ++ } + inode->i_mode = raw_inode->i_mode; + i_uid_write(inode, raw_inode->i_uid); + i_gid_write(inode, raw_inode->i_gid); +diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c +index 4c57c9af6946..0417c4c5ebbd 100644 +--- a/fs/minix/itree_common.c ++++ b/fs/minix/itree_common.c +@@ -74,6 +74,7 @@ static int alloc_branch(struct inode *inode, + int n = 0; + int i; + int parent = minix_new_block(inode); ++ int err = -ENOSPC; + + branch[0].key = cpu_to_block(parent); + if (parent) for (n = 1; n < num; n++) { +@@ -84,6 +85,11 @@ static int alloc_branch(struct inode *inode, + break; + branch[n].key = cpu_to_block(nr); + bh = sb_getblk(inode->i_sb, parent); ++ if (!bh) { ++ minix_free_block(inode, nr); ++ err = -ENOMEM; ++ break; ++ } + lock_buffer(bh); + memset(bh->b_data, 0, bh->b_size); + branch[n].bh = bh; +@@ -102,7 +108,7 @@ static int alloc_branch(struct inode *inode, + bforget(branch[i].bh); + for (i = 0; i < n; i++) + minix_free_block(inode, block_to_cpu(branch[i].key)); +- return -ENOSPC; ++ return err; + } + + static inline int splice_branch(struct inode *inode, +diff --git a/fs/namespace.c b/fs/namespace.c +index d7a32893c9e0..8f4ce6c56e74 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -1687,13 +1687,22 @@ static inline bool may_mount(void) + return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN); + } + ++#ifdef CONFIG_MANDATORY_FILE_LOCKING ++static bool may_mandlock(void) ++{ ++ pr_warn_once("======================================================\n" ++ "WARNING: the mand mount option is being deprecated and\n" ++ " will be removed in v5.15!\n" ++ "======================================================\n"); ++ return capable(CAP_SYS_ADMIN); ++} ++#else + static inline bool may_mandlock(void) + { +-#ifndef CONFIG_MANDATORY_FILE_LOCKING ++ pr_warn("VFS: \"mand\" mount option not supported"); + return false; +-#endif +- return capable(CAP_SYS_ADMIN); + } ++#endif + + /* + * Now umount can handle mount points as well as block devices. 
+@@ -1871,6 +1880,20 @@ void drop_collected_mounts(struct vfsmount *mnt) + namespace_unlock(); + } + ++static bool has_locked_children(struct mount *mnt, struct dentry *dentry) ++{ ++ struct mount *child; ++ ++ list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { ++ if (!is_subdir(child->mnt_mountpoint, dentry)) ++ continue; ++ ++ if (child->mnt.mnt_flags & MNT_LOCKED) ++ return true; ++ } ++ return false; ++} ++ + /** + * clone_private_mount - create a private clone of a path + * +@@ -1885,16 +1908,27 @@ struct vfsmount *clone_private_mount(struct path *path) + struct mount *old_mnt = real_mount(path->mnt); + struct mount *new_mnt; + ++ down_read(&namespace_sem); + if (IS_MNT_UNBINDABLE(old_mnt)) +- return ERR_PTR(-EINVAL); ++ goto invalid; ++ ++ if (!check_mnt(old_mnt)) ++ goto invalid; ++ ++ if (has_locked_children(old_mnt, path->dentry)) ++ goto invalid; + +- down_read(&namespace_sem); + new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE); + up_read(&namespace_sem); ++ + if (IS_ERR(new_mnt)) + return ERR_CAST(new_mnt); + + return &new_mnt->mnt; ++ ++invalid: ++ up_read(&namespace_sem); ++ return ERR_PTR(-EINVAL); + } + EXPORT_SYMBOL_GPL(clone_private_mount); + +@@ -2210,19 +2244,6 @@ static int do_change_type(struct path *path, int flag) + return err; + } + +-static bool has_locked_children(struct mount *mnt, struct dentry *dentry) +-{ +- struct mount *child; +- list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { +- if (!is_subdir(child->mnt_mountpoint, dentry)) +- continue; +- +- if (child->mnt.mnt_flags & MNT_LOCKED) +- return true; +- } +- return false; +-} +- + /* + * do loopback mount. + */ +diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig +index c3428767332c..55ebf9f4a824 100644 +--- a/fs/nfs/Kconfig ++++ b/fs/nfs/Kconfig +@@ -132,7 +132,7 @@ config PNFS_OBJLAYOUT + config PNFS_FLEXFILE_LAYOUT + tristate + depends on NFS_V4_1 && NFS_V3 +- default m ++ default NFS_V4 + + config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN + string "NFSv4.1 Implementation ID Domain" +diff --git a/fs/nfs/client.c b/fs/nfs/client.c +index 28d8a57a9908..d322ed5cbc1c 100644 +--- a/fs/nfs/client.c ++++ b/fs/nfs/client.c +@@ -379,7 +379,7 @@ nfs_get_client(const struct nfs_client_initdata *cl_init, + + if (cl_init->hostname == NULL) { + WARN_ON(1); +- return NULL; ++ return ERR_PTR(-EINVAL); + } + + dprintk("--> nfs_get_client(%s,v%u)\n", +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c +index 2517fcd423b6..d405b5a14073 100644 +--- a/fs/nfs/dir.c ++++ b/fs/nfs/dir.c +@@ -583,6 +583,9 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en + xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE); + + do { ++ if (entry->label) ++ entry->label->len = NFS4_MAXLABELLEN; ++ + status = xdr_decode(desc, entry, &stream); + if (status != 0) { + if (status == -EAGAIN) +diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c +index a3fc48ba4931..dec7e5ad525a 100644 +--- a/fs/nfs/filelayout/filelayout.c ++++ b/fs/nfs/filelayout/filelayout.c +@@ -726,7 +726,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo, + if (unlikely(!p)) + goto out_err; + fl->fh_array[i]->size = be32_to_cpup(p++); +- if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) { ++ if (fl->fh_array[i]->size > NFS_MAXFHSIZE) { + printk(KERN_ERR "NFS: Too big fh %d received %d\n", + i, fl->fh_array[i]->size); + goto out_err; +diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c +index 4539008502ce..ee1225369700 100644 +--- 
a/fs/nfs/flexfilelayout/flexfilelayout.c ++++ b/fs/nfs/flexfilelayout/flexfilelayout.c +@@ -93,7 +93,7 @@ static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh) + if (unlikely(!p)) + return -ENOBUFS; + fh->size = be32_to_cpup(p++); +- if (fh->size > sizeof(struct nfs_fh)) { ++ if (fh->size > NFS_MAXFHSIZE) { + printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n", + fh->size); + return -EOVERFLOW; +@@ -939,9 +939,8 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio, + goto out_mds; + + /* Use a direct mapping of ds_idx to pgio mirror_idx */ +- if (WARN_ON_ONCE(pgio->pg_mirror_count != +- FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))) +- goto out_mds; ++ if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)) ++ goto out_eagain; + + for (i = 0; i < pgio->pg_mirror_count; i++) { + ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true); +@@ -960,11 +959,15 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio, + } + + return; +- ++out_eagain: ++ pnfs_generic_pg_cleanup(pgio); ++ pgio->pg_error = -EAGAIN; ++ return; + out_mds: + pnfs_put_lseg(pgio->pg_lseg); + pgio->pg_lseg = NULL; + nfs_pageio_reset_write_mds(pgio); ++ pgio->pg_error = -EAGAIN; + } + + static unsigned int +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c +index 851274b25d39..7a94f5a5f8c8 100644 +--- a/fs/nfs/inode.c ++++ b/fs/nfs/inode.c +@@ -1469,10 +1469,10 @@ EXPORT_SYMBOL_GPL(_nfs_display_fhandle); + */ + static int nfs_inode_attrs_need_update(const struct inode *inode, const struct nfs_fattr *fattr) + { +- const struct nfs_inode *nfsi = NFS_I(inode); ++ unsigned long attr_gencount = NFS_I(inode)->attr_gencount; + +- return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 || +- ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0); ++ return (long)(fattr->gencount - attr_gencount) > 0 || ++ (long)(attr_gencount - nfs_read_attr_generation_counter()) > 0; + } + + static int nfs_refresh_inode_locked(struct inode *inode, struct nfs_fattr *fattr) +@@ -1882,7 +1882,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) + nfsi->attrtimeo_timestamp = now; + } + /* Set the barrier to be more recent than this fattr */ +- if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0) ++ if ((long)(fattr->gencount - nfsi->attr_gencount) > 0) + nfsi->attr_gencount = fattr->gencount; + } + +@@ -1995,7 +1995,7 @@ static int nfsiod_start(void) + { + struct workqueue_struct *wq; + dprintk("RPC: creating workqueue nfsiod\n"); +- wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM, 0); ++ wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0); + if (wq == NULL) + return -ENOMEM; + nfsiod_workqueue = wq; +diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h +index 52ea41bce038..e0f0397fc7ce 100644 +--- a/fs/nfs/internal.h ++++ b/fs/nfs/internal.h +@@ -572,12 +572,14 @@ extern int nfs4_test_session_trunk(struct rpc_clnt *, + + static inline struct inode *nfs_igrab_and_active(struct inode *inode) + { +- inode = igrab(inode); +- if (inode != NULL && !nfs_sb_active(inode->i_sb)) { +- iput(inode); +- inode = NULL; ++ struct super_block *sb = inode->i_sb; ++ ++ if (sb && nfs_sb_active(sb)) { ++ if (igrab(inode)) ++ return inode; ++ nfs_sb_deactive(sb); + } +- return inode; ++ return NULL; + } + + static inline void nfs_iput_and_deactive(struct inode *inode) +diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c +index e49d831c4e85..4f539473862a 100644 +--- a/fs/nfs/namespace.c ++++ b/fs/nfs/namespace.c +@@ -30,9 +30,9 @@ int nfs_mountpoint_expiry_timeout = 500 * 
HZ; + /* + * nfs_path - reconstruct the path given an arbitrary dentry + * @base - used to return pointer to the end of devname part of path +- * @dentry - pointer to dentry ++ * @dentry_in - pointer to dentry + * @buffer - result buffer +- * @buflen - length of buffer ++ * @buflen_in - length of buffer + * @flags - options (see below) + * + * Helper function for constructing the server pathname +@@ -47,15 +47,19 @@ int nfs_mountpoint_expiry_timeout = 500 * HZ; + * the original device (export) name + * (if unset, the original name is returned verbatim) + */ +-char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen, +- unsigned flags) ++char *nfs_path(char **p, struct dentry *dentry_in, char *buffer, ++ ssize_t buflen_in, unsigned flags) + { + char *end; + int namelen; + unsigned seq; + const char *base; ++ struct dentry *dentry; ++ ssize_t buflen; + + rename_retry: ++ buflen = buflen_in; ++ dentry = dentry_in; + end = buffer+buflen; + *--end = '\0'; + buflen--; +diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c +index dc925b531f32..41bda33b630f 100644 +--- a/fs/nfs/nfs3proc.c ++++ b/fs/nfs/nfs3proc.c +@@ -363,7 +363,7 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, + break; + + case NFS3_CREATE_UNCHECKED: +- goto out; ++ goto out_release_acls; + } + nfs_fattr_init(data->res.dir_attr); + nfs_fattr_init(data->res.fattr); +@@ -708,7 +708,7 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr, + break; + default: + status = -EINVAL; +- goto out; ++ goto out_release_acls; + } + + status = nfs3_do_create(dir, dentry, data); +diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c +index 267126d32ec0..4a68837e92ea 100644 +--- a/fs/nfs/nfs3xdr.c ++++ b/fs/nfs/nfs3xdr.c +@@ -33,6 +33,7 @@ + */ + #define NFS3_fhandle_sz (1+16) + #define NFS3_fh_sz (NFS3_fhandle_sz) /* shorthand */ ++#define NFS3_post_op_fh_sz (1+NFS3_fh_sz) + #define NFS3_sattr_sz (15) + #define NFS3_filename_sz (1+(NFS3_MAXNAMLEN>>2)) + #define NFS3_path_sz (1+(NFS3_MAXPATHLEN>>2)) +@@ -70,7 +71,7 @@ + #define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1) + #define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3) + #define NFS3_writeres_sz (1+NFS3_wcc_data_sz+4) +-#define NFS3_createres_sz (1+NFS3_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz) ++#define NFS3_createres_sz (1+NFS3_post_op_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz) + #define NFS3_renameres_sz (1+(2 * NFS3_wcc_data_sz)) + #define NFS3_linkres_sz (1+NFS3_post_op_attr_sz+NFS3_wcc_data_sz) + #define NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2) +diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c +index 5cda392028ce..7efb9e0e9f25 100644 +--- a/fs/nfs/nfs42proc.c ++++ b/fs/nfs/nfs42proc.c +@@ -56,7 +56,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep, + static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep, + loff_t offset, loff_t len) + { +- struct nfs_server *server = NFS_SERVER(file_inode(filep)); ++ struct inode *inode = file_inode(filep); ++ struct nfs_server *server = NFS_SERVER(inode); + struct nfs4_exception exception = { }; + struct nfs_lock_context *lock; + int err; +@@ -65,9 +66,13 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep, + if (IS_ERR(lock)) + return PTR_ERR(lock); + +- exception.inode = file_inode(filep); ++ exception.inode = inode; + exception.state = lock->open_context->state; + ++ err = nfs_sync_inode(inode); ++ if (err) ++ goto out; ++ + do { + err = _nfs42_proc_fallocate(msg, filep, lock, 
offset, len); + if (err == -ENOTSUPP) { +@@ -76,7 +81,7 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep, + } + err = nfs4_handle_exception(server, err, &exception); + } while (exception.retry); +- ++out: + nfs_put_lock_context(lock); + return err; + } +@@ -114,16 +119,13 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len) + return -EOPNOTSUPP; + + inode_lock(inode); +- err = nfs_sync_inode(inode); +- if (err) +- goto out_unlock; + + err = nfs42_proc_fallocate(&msg, filep, offset, len); + if (err == 0) + truncate_pagecache_range(inode, offset, (offset + len) -1); + if (err == -EOPNOTSUPP) + NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE; +-out_unlock: ++ + inode_unlock(inode); + return err; + } +@@ -292,7 +294,10 @@ static loff_t _nfs42_proc_llseek(struct file *filep, + if (status) + return status; + +- return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes); ++ if (whence == SEEK_DATA && res.sr_eof) ++ return -NFS4ERR_NXIO; ++ else ++ return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes); + } + + loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) +diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c +index 7138383382ff..80718d9999ed 100644 +--- a/fs/nfs/nfs4file.c ++++ b/fs/nfs/nfs4file.c +@@ -147,7 +147,7 @@ static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) + case SEEK_HOLE: + case SEEK_DATA: + ret = nfs42_proc_llseek(filep, offset, whence); +- if (ret != -ENOTSUPP) ++ if (ret != -EOPNOTSUPP) + return ret; + default: + return nfs_file_llseek(filep, offset, whence); +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 632d3c3f8dfb..2ea772f596e3 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -4334,12 +4334,12 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, + u64 cookie, struct page **pages, unsigned int count, int plus) + { + struct inode *dir = d_inode(dentry); ++ struct nfs_server *server = NFS_SERVER(dir); + struct nfs4_readdir_arg args = { + .fh = NFS_FH(dir), + .pages = pages, + .pgbase = 0, + .count = count, +- .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask, + .plus = plus, + }; + struct nfs4_readdir_res res; +@@ -4354,9 +4354,15 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, + dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__, + dentry, + (unsigned long long)cookie); ++ if (!(server->caps & NFS_CAP_SECURITY_LABEL)) ++ args.bitmask = server->attr_bitmask_nl; ++ else ++ args.bitmask = server->attr_bitmask; ++ + nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); + res.pgbase = args.pgbase; +- status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); ++ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, ++ &res.seq_res, 0); + if (status >= 0) { + memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); + status += args.pgbase; +@@ -5138,6 +5144,9 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl + unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); + int ret, i; + ++ /* You can't remove system.nfs4_acl: */ ++ if (buflen == 0) ++ return -EINVAL; + if (!nfs4_server_supports_acls(server)) + return -EOPNOTSUPP; + if (npages > ARRAY_SIZE(pages)) +@@ -5174,6 +5183,14 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen + do { + err = __nfs4_proc_set_acl(inode, buf, buflen); + trace_nfs4_set_acl(inode, err); ++ if (err == 
-NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) { ++ /* ++ * no need to retry since the kernel ++ * isn't involved in encoding the ACEs. ++ */ ++ err = -EINVAL; ++ break; ++ } + err = nfs4_handle_exception(NFS_SERVER(inode), err, + &exception); + } while (exception.retry); +@@ -5212,9 +5229,7 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf, + return ret; + if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) + return -ENOENT; +- if (buflen < label.len) +- return -ERANGE; +- return 0; ++ return label.len; + } + + static int nfs4_get_security_label(struct inode *inode, void *buf, +@@ -6529,7 +6544,12 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, + err = nfs4_set_lock_state(state, fl); + if (err != 0) + return err; +- err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); ++ do { ++ err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); ++ if (err != -NFS4ERR_DELAY) ++ break; ++ ssleep(1); ++ } while (err == -NFS4ERR_DELAY); + return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); + } + +@@ -7151,7 +7171,7 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) + } + + static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { +- .rpc_call_done = &nfs4_bind_one_conn_to_session_done, ++ .rpc_call_done = nfs4_bind_one_conn_to_session_done, + }; + + /* +diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c +index d7f8d5ce30e3..0a7c4e30a385 100644 +--- a/fs/nfs/nfs4xdr.c ++++ b/fs/nfs/nfs4xdr.c +@@ -4163,7 +4163,11 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap, + goto out_overflow; + if (len < NFS4_MAXLABELLEN) { + if (label) { +- memcpy(label->label, p, len); ++ if (label->len) { ++ if (label->len < len) ++ return -ERANGE; ++ memcpy(label->label, p, len); ++ } + label->len = len; + label->pi = pi; + label->lfs = lfs; +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c +index 529f3a576263..3e0fcb86390a 100644 +--- a/fs/nfs/pagelist.c ++++ b/fs/nfs/pagelist.c +@@ -952,17 +952,16 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc) + { + struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); + +- + if (!list_empty(&mirror->pg_list)) { + int error = desc->pg_ops->pg_doio(desc); + if (error < 0) + desc->pg_error = error; +- else ++ if (list_empty(&mirror->pg_list)) { + mirror->pg_bytes_written += mirror->pg_count; +- } +- if (list_empty(&mirror->pg_list)) { +- mirror->pg_count = 0; +- mirror->pg_base = 0; ++ mirror->pg_count = 0; ++ mirror->pg_base = 0; ++ mirror->pg_recoalesce = 0; ++ } + } + } + +@@ -1061,7 +1060,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc) + + do { + list_splice_init(&mirror->pg_list, &head); +- mirror->pg_bytes_written -= mirror->pg_count; + mirror->pg_count = 0; + mirror->pg_base = 0; + mirror->pg_recoalesce = 0; +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c +index c3abf92adfb7..f19cded49b29 100644 +--- a/fs/nfs/pnfs.c ++++ b/fs/nfs/pnfs.c +@@ -1070,6 +1070,11 @@ _pnfs_return_layout(struct inode *ino) + { + struct pnfs_layout_hdr *lo = NULL; + struct nfs_inode *nfsi = NFS_I(ino); ++ struct pnfs_layout_range range = { ++ .iomode = IOMODE_ANY, ++ .offset = 0, ++ .length = NFS4_MAX_UINT64, ++ }; + LIST_HEAD(tmp_list); + nfs4_stateid stateid; + int status = 0, empty; +@@ -1088,16 +1093,10 @@ _pnfs_return_layout(struct inode *ino) + pnfs_get_layout_hdr(lo); + empty = list_empty(&lo->plh_segs); + pnfs_clear_layoutcommit(ino, &tmp_list); +- pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 
0); +- +- if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) { +- struct pnfs_layout_range range = { +- .iomode = IOMODE_ANY, +- .offset = 0, +- .length = NFS4_MAX_UINT64, +- }; ++ pnfs_mark_matching_lsegs_return(lo, &tmp_list, &range, 0); ++ ++ if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) + NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range); +- } + + /* Don't send a LAYOUTRETURN if list was initially empty */ + if (empty) { +diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c +index 77d136ac8909..c21fca0dcba7 100644 +--- a/fs/nfs_common/grace.c ++++ b/fs/nfs_common/grace.c +@@ -75,10 +75,14 @@ __state_in_grace(struct net *net, bool open) + if (!open) + return !list_empty(grace_list); + ++ spin_lock(&grace_lock); + list_for_each_entry(lm, grace_list, list) { +- if (lm->block_opens) ++ if (lm->block_opens) { ++ spin_unlock(&grace_lock); + return true; ++ } + } ++ spin_unlock(&grace_lock); + return false; + } + +diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c +index 7e50248ca432..93c6fb53cc0e 100644 +--- a/fs/nfsd/nfs3xdr.c ++++ b/fs/nfsd/nfs3xdr.c +@@ -822,9 +822,14 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp, + if (isdotent(name, namlen)) { + if (namlen == 2) { + dchild = dget_parent(dparent); +- /* filesystem root - cannot return filehandle for ".." */ ++ /* ++ * Don't return filehandle for ".." if we're at ++ * the filesystem or export root: ++ */ + if (dchild == dparent) + goto out; ++ if (dparent == exp->ex_path.dentry) ++ goto out; + } else + dchild = dget(dparent); + } else +diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c +index 8d842282111b..172f697864ab 100644 +--- a/fs/nfsd/nfs4callback.c ++++ b/fs/nfsd/nfs4callback.c +@@ -1156,6 +1156,8 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb) + err = setup_callback_client(clp, &conn, ses); + if (err) { + nfsd4_mark_cb_down(clp, err); ++ if (c) ++ svc_xprt_put(c->cn_xprt); + return; + } + } +diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c +index 60291d10f8e4..6bb3f3a98d7d 100644 +--- a/fs/nfsd/nfssvc.c ++++ b/fs/nfsd/nfssvc.c +@@ -393,8 +393,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net) + return; + + nfsd_shutdown_net(net); +- printk(KERN_WARNING "nfsd: last server has exited, flushing export " +- "cache\n"); ++ pr_info("nfsd: last server has exited, flushing export cache\n"); + nfsd_export_flush(net); + } + +diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c +index b0eb58ce6b4f..3dff3d5cc0e9 100644 +--- a/fs/nilfs2/segment.c ++++ b/fs/nilfs2/segment.c +@@ -2789,6 +2789,8 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root) + if (!nilfs->ns_writer) + return -ENOMEM; + ++ inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL); ++ + err = nilfs_segctor_start_thread(nilfs->ns_writer); + if (err) { + kfree(nilfs->ns_writer); +diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c +index 490303e3d517..e9903bceb2bf 100644 +--- a/fs/nilfs2/sysfs.c ++++ b/fs/nilfs2/sysfs.c +@@ -1064,6 +1064,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs) + nilfs_sysfs_delete_superblock_group(nilfs); + nilfs_sysfs_delete_segctor_group(nilfs); + kobject_del(&nilfs->ns_dev_kobj); ++ kobject_put(&nilfs->ns_dev_kobj); + kfree(nilfs->ns_dev_subgroups); + } + +diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c +index 7c410f879412..8cd134750ebb 100644 +--- a/fs/ntfs/inode.c ++++ b/fs/ntfs/inode.c +@@ -502,7 +502,7 @@ static int ntfs_is_extended_system_file(ntfs_attr_search_ctx *ctx) + } + file_name_attr = (FILE_NAME_ATTR*)((u8*)attr + + 
le16_to_cpu(attr->data.resident.value_offset)); +- p2 = (u8*)attr + le32_to_cpu(attr->data.resident.value_length); ++ p2 = (u8 *)file_name_attr + le32_to_cpu(attr->data.resident.value_length); + if (p2 < (u8*)attr || p2 > p) + goto err_corrupt_attr; + /* This attribute is ok, but is it in the $Extend directory? */ +@@ -661,6 +661,12 @@ static int ntfs_read_locked_inode(struct inode *vi) + } + a = ctx->attr; + /* Get the standard information attribute value. */ ++ if ((u8 *)a + le16_to_cpu(a->data.resident.value_offset) ++ + le32_to_cpu(a->data.resident.value_length) > ++ (u8 *)ctx->mrec + vol->mft_record_size) { ++ ntfs_error(vi->i_sb, "Corrupt standard information attribute in inode."); ++ goto unm_err_out; ++ } + si = (STANDARD_INFORMATION*)((u8*)a + + le16_to_cpu(a->data.resident.value_offset)); + +@@ -1844,6 +1850,12 @@ int ntfs_read_inode_mount(struct inode *vi) + brelse(bh); + } + ++ if (le32_to_cpu(m->bytes_allocated) != vol->mft_record_size) { ++ ntfs_error(sb, "Incorrect mft record size %u in superblock, should be %u.", ++ le32_to_cpu(m->bytes_allocated), vol->mft_record_size); ++ goto err_out; ++ } ++ + /* Apply the mst fixups. */ + if (post_read_mst_fixup((NTFS_RECORD*)m, vol->mft_record_size)) { + /* FIXME: Try to use the $MFTMirr now. */ +diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c +index 6ad76397b31d..291a8250017a 100644 +--- a/fs/ocfs2/aops.c ++++ b/fs/ocfs2/aops.c +@@ -2301,7 +2301,7 @@ static void ocfs2_dio_end_io_write(struct inode *inode, + struct ocfs2_alloc_context *meta_ac = NULL; + handle_t *handle = NULL; + loff_t end = offset + bytes; +- int ret = 0, credits = 0, locked = 0; ++ int ret = 0, credits = 0; + + ocfs2_init_dealloc_ctxt(&dealloc); + +@@ -2312,13 +2312,6 @@ static void ocfs2_dio_end_io_write(struct inode *inode, + !dwc->dw_orphaned) + goto out; + +- /* ocfs2_file_write_iter will get i_mutex, so we need not lock if we +- * are in that context. 
*/ +- if (dwc->dw_writer_pid != task_pid_nr(current)) { +- inode_lock(inode); +- locked = 1; +- } +- + ret = ocfs2_inode_lock(inode, &di_bh, 1); + if (ret < 0) { + mlog_errno(ret); +@@ -2393,8 +2386,6 @@ static void ocfs2_dio_end_io_write(struct inode *inode, + if (meta_ac) + ocfs2_free_alloc_context(meta_ac); + ocfs2_run_deallocs(osb, &dealloc); +- if (locked) +- inode_unlock(inode); + ocfs2_dio_free_write_ctx(inode, dwc); + } + +diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c +index 5e8709aa1e7e..89c71d32dc05 100644 +--- a/fs/ocfs2/cluster/heartbeat.c ++++ b/fs/ocfs2/cluster/heartbeat.c +@@ -2155,7 +2155,7 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g + o2hb_nego_timeout_handler, + reg, NULL, ®->hr_handler_list); + if (ret) +- goto free; ++ goto remove_item; + + ret = o2net_register_handler(O2HB_NEGO_APPROVE_MSG, reg->hr_key, + sizeof(struct o2hb_nego_msg), +@@ -2174,6 +2174,12 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g + + unregister_handler: + o2net_unregister_handler_list(®->hr_handler_list); ++remove_item: ++ spin_lock(&o2hb_live_lock); ++ list_del(®->hr_all_item); ++ if (o2hb_global_heartbeat_active()) ++ clear_bit(reg->hr_region_num, o2hb_region_bitmap); ++ spin_unlock(&o2hb_live_lock); + free: + kfree(reg); + return ERR_PTR(ret); +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c +index 05a0fb9854f9..d526e86cf5bb 100644 +--- a/fs/ocfs2/file.c ++++ b/fs/ocfs2/file.c +@@ -1236,22 +1236,24 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) + goto bail_unlock; + } + } ++ down_write(&OCFS2_I(inode)->ip_alloc_sem); + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS + + 2 * ocfs2_quota_trans_credits(sb)); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); +- goto bail_unlock; ++ goto bail_unlock_alloc; + } + status = __dquot_transfer(inode, transfer_to); + if (status < 0) + goto bail_commit; + } else { ++ down_write(&OCFS2_I(inode)->ip_alloc_sem); + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); +- goto bail_unlock; ++ goto bail_unlock_alloc; + } + } + +@@ -1264,6 +1266,8 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) + + bail_commit: + ocfs2_commit_trans(osb, handle); ++bail_unlock_alloc: ++ up_write(&OCFS2_I(inode)->ip_alloc_sem); + bail_unlock: + if (status) { + ocfs2_inode_unlock(inode, 1); +@@ -1507,6 +1511,45 @@ static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start, + } + } + ++/* ++ * zero out partial blocks of one cluster. ++ * ++ * start: file offset where zero starts, will be made upper block aligned. ++ * len: it will be trimmed to the end of current cluster if "start + len" ++ * is bigger than it. 
++ */ ++static int ocfs2_zeroout_partial_cluster(struct inode *inode, ++ u64 start, u64 len) ++{ ++ int ret; ++ u64 start_block, end_block, nr_blocks; ++ u64 p_block, offset; ++ u32 cluster, p_cluster, nr_clusters; ++ struct super_block *sb = inode->i_sb; ++ u64 end = ocfs2_align_bytes_to_clusters(sb, start); ++ ++ if (start + len < end) ++ end = start + len; ++ ++ start_block = ocfs2_blocks_for_bytes(sb, start); ++ end_block = ocfs2_blocks_for_bytes(sb, end); ++ nr_blocks = end_block - start_block; ++ if (!nr_blocks) ++ return 0; ++ ++ cluster = ocfs2_bytes_to_clusters(sb, start); ++ ret = ocfs2_get_clusters(inode, cluster, &p_cluster, ++ &nr_clusters, NULL); ++ if (ret) ++ return ret; ++ if (!p_cluster) ++ return 0; ++ ++ offset = start_block - ocfs2_clusters_to_blocks(sb, cluster); ++ p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset; ++ return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS); ++} ++ + static int ocfs2_zero_partial_clusters(struct inode *inode, + u64 start, u64 len) + { +@@ -1516,6 +1559,7 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + unsigned int csize = osb->s_clustersize; + handle_t *handle; ++ loff_t isize = i_size_read(inode); + + /* + * The "start" and "end" values are NOT necessarily part of +@@ -1536,6 +1580,26 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, + if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0) + goto out; + ++ /* No page cache for EOF blocks, issue zero out to disk. */ ++ if (end > isize) { ++ /* ++ * zeroout eof blocks in last cluster starting from ++ * "isize" even "start" > "isize" because it is ++ * complicated to zeroout just at "start" as "start" ++ * may be not aligned with block size, buffer write ++ * would be required to do that, but out of eof buffer ++ * write is not supported. ++ */ ++ ret = ocfs2_zeroout_partial_cluster(inode, isize, ++ end - isize); ++ if (ret) { ++ mlog_errno(ret); ++ goto out; ++ } ++ if (start >= isize) ++ goto out; ++ end = isize; ++ } + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); +@@ -1844,7 +1908,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, + { + int ret; + s64 llen; +- loff_t size; ++ loff_t size, orig_isize; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct buffer_head *di_bh = NULL; + handle_t *handle; +@@ -1936,6 +2000,15 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, + default: + ret = -EINVAL; + } ++ ++ orig_isize = i_size_read(inode); ++ /* zeroout eof blocks in the cluster. 
*/ ++ if (!ret && change_size && orig_isize < size) { ++ ret = ocfs2_zeroout_partial_cluster(inode, orig_isize, ++ size - orig_isize); ++ if (!ret) ++ i_size_write(inode, size); ++ } + up_write(&OCFS2_I(inode)->ip_alloc_sem); + if (ret) { + mlog_errno(ret); +@@ -1952,9 +2025,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, + goto out_inode_unlock; + } + +- if (change_size && i_size_read(inode) < size) +- i_size_write(inode, size); +- + inode->i_ctime = inode->i_mtime = current_time(inode); + ret = ocfs2_mark_inode_dirty(handle, inode, di_bh); + if (ret < 0) +diff --git a/fs/ocfs2/filecheck.c b/fs/ocfs2/filecheck.c +index 2cabbcf2f28e..5571268b681c 100644 +--- a/fs/ocfs2/filecheck.c ++++ b/fs/ocfs2/filecheck.c +@@ -431,11 +431,7 @@ static ssize_t ocfs2_filecheck_show(struct kobject *kobj, + ret = snprintf(buf + total, remain, "%lu\t\t%u\t%s\n", + p->fe_ino, p->fe_done, + ocfs2_filecheck_error(p->fe_status)); +- if (ret < 0) { +- total = ret; +- break; +- } +- if (ret == remain) { ++ if (ret >= remain) { + /* snprintf() didn't fit */ + total = -E2BIG; + break; +diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h +index 594575e380e8..994a8abdd370 100644 +--- a/fs/ocfs2/ocfs2.h ++++ b/fs/ocfs2/ocfs2.h +@@ -337,8 +337,8 @@ struct ocfs2_super + spinlock_t osb_lock; + u32 s_next_generation; + unsigned long osb_flags; +- s16 s_inode_steal_slot; +- s16 s_meta_steal_slot; ++ u16 s_inode_steal_slot; ++ u16 s_meta_steal_slot; + atomic_t s_num_inodes_stolen; + atomic_t s_num_meta_stolen; + +diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h +index 44d178b8d1aa..c3697cf60500 100644 +--- a/fs/ocfs2/ocfs2_fs.h ++++ b/fs/ocfs2/ocfs2_fs.h +@@ -304,7 +304,7 @@ + #define OCFS2_MAX_SLOTS 255 + + /* Slot map indicator for an empty slot */ +-#define OCFS2_INVALID_SLOT -1 ++#define OCFS2_INVALID_SLOT ((u16)-1) + + #define OCFS2_VOL_UUID_LEN 16 + #define OCFS2_MAX_VOL_LABEL_LEN 64 +@@ -340,8 +340,8 @@ struct ocfs2_system_inode_info { + enum { + BAD_BLOCK_SYSTEM_INODE = 0, + GLOBAL_INODE_ALLOC_SYSTEM_INODE, ++#define OCFS2_FIRST_ONLINE_SYSTEM_INODE GLOBAL_INODE_ALLOC_SYSTEM_INODE + SLOT_MAP_SYSTEM_INODE, +-#define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE + HEARTBEAT_SYSTEM_INODE, + GLOBAL_BITMAP_SYSTEM_INODE, + USER_QUOTA_SYSTEM_INODE, +diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c +index 52c07346bea3..03e1c6cd6f3c 100644 +--- a/fs/ocfs2/stackglue.c ++++ b/fs/ocfs2/stackglue.c +@@ -510,11 +510,7 @@ static ssize_t ocfs2_loaded_cluster_plugins_show(struct kobject *kobj, + list_for_each_entry(p, &ocfs2_stack_list, sp_list) { + ret = snprintf(buf, remain, "%s\n", + p->sp_name); +- if (ret < 0) { +- total = ret; +- break; +- } +- if (ret == remain) { ++ if (ret >= remain) { + /* snprintf() didn't fit */ + total = -E2BIG; + break; +@@ -541,7 +537,7 @@ static ssize_t ocfs2_active_cluster_plugin_show(struct kobject *kobj, + if (active_stack) { + ret = snprintf(buf, PAGE_SIZE, "%s\n", + active_stack->sp_name); +- if (ret == PAGE_SIZE) ++ if (ret >= PAGE_SIZE) + ret = -E2BIG; + } + spin_unlock(&ocfs2_stack_lock); +diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c +index 6ad3533940ba..f8d092b80a49 100644 +--- a/fs/ocfs2/suballoc.c ++++ b/fs/ocfs2/suballoc.c +@@ -895,9 +895,9 @@ static void __ocfs2_set_steal_slot(struct ocfs2_super *osb, int slot, int type) + { + spin_lock(&osb->osb_lock); + if (type == INODE_ALLOC_SYSTEM_INODE) +- osb->s_inode_steal_slot = slot; ++ osb->s_inode_steal_slot = (u16)slot; + else if (type == EXTENT_ALLOC_SYSTEM_INODE) +- 
osb->s_meta_steal_slot = slot; ++ osb->s_meta_steal_slot = (u16)slot; + spin_unlock(&osb->osb_lock); + } + +@@ -2891,9 +2891,12 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) + goto bail; + } + +- inode_alloc_inode = +- ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE, +- suballoc_slot); ++ if (suballoc_slot == (u16)OCFS2_INVALID_SLOT) ++ inode_alloc_inode = ocfs2_get_system_file_inode(osb, ++ GLOBAL_INODE_ALLOC_SYSTEM_INODE, suballoc_slot); ++ else ++ inode_alloc_inode = ocfs2_get_system_file_inode(osb, ++ INODE_ALLOC_SYSTEM_INODE, suballoc_slot); + if (!inode_alloc_inode) { + /* the error code could be inaccurate, but we are not able to + * get the correct one. */ +diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c +index 64dfbe5755da..e0fb62f5cf63 100644 +--- a/fs/ocfs2/super.c ++++ b/fs/ocfs2/super.c +@@ -91,7 +91,7 @@ struct mount_options + unsigned long commit_interval; + unsigned long mount_opt; + unsigned int atime_quantum; +- signed short slot; ++ unsigned short slot; + int localalloc_opt; + unsigned int resv_level; + int dir_resv_level; +@@ -1369,7 +1369,7 @@ static int ocfs2_parse_options(struct super_block *sb, + goto bail; + } + if (option) +- mopt->slot = (s16)option; ++ mopt->slot = (u16)option; + break; + case Opt_commit: + if (match_int(&args[0], &option)) { +@@ -1733,6 +1733,7 @@ static void ocfs2_inode_init_once(void *data) + + oi->ip_blkno = 0ULL; + oi->ip_clusters = 0; ++ oi->ip_next_orphan = NULL; + + ocfs2_resv_init_once(&oi->ip_la_data_resv); + +diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c +index 6e35ef6521b4..8972ebc87016 100644 +--- a/fs/orangefs/super.c ++++ b/fs/orangefs/super.c +@@ -185,7 +185,7 @@ static int orangefs_statfs(struct dentry *dentry, struct kstatfs *buf) + buf->f_bavail = (sector_t) new_op->downcall.resp.statfs.blocks_avail; + buf->f_files = (sector_t) new_op->downcall.resp.statfs.files_total; + buf->f_ffree = (sector_t) new_op->downcall.resp.statfs.files_avail; +- buf->f_frsize = sb->s_blocksize; ++ buf->f_frsize = 0; + + out_op_release: + op_release(new_op); +diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c +index 0b71cb514031..8a8426c3c755 100644 +--- a/fs/overlayfs/copy_up.c ++++ b/fs/overlayfs/copy_up.c +@@ -56,7 +56,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new) + { + ssize_t list_size, size, value_size = 0; + char *buf, *name, *value = NULL; +- int uninitialized_var(error); ++ int error = 0; + size_t slen; + + if (!(old->d_inode->i_opflags & IOP_XATTR) || +@@ -92,6 +92,14 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new) + + if (ovl_is_private_xattr(name)) + continue; ++ ++ error = security_inode_copy_up_xattr(name); ++ if (error < 0 && error != -EOPNOTSUPP) ++ break; ++ if (error == 1) { ++ error = 0; ++ continue; /* Discard */ ++ } + retry: + size = vfs_getxattr(old, name, value, value_size); + if (size == -ERANGE) +@@ -115,13 +123,6 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new) + goto retry; + } + +- error = security_inode_copy_up_xattr(name); +- if (error < 0 && error != -EOPNOTSUPP) +- break; +- if (error == 1) { +- error = 0; +- continue; /* Discard */ +- } + error = vfs_setxattr(new, name, value, size, 0); + if (error) + break; +diff --git a/fs/pipe.c b/fs/pipe.c +index 347c6dc888c8..6375e625a263 100644 +--- a/fs/pipe.c ++++ b/fs/pipe.c +@@ -28,6 +28,21 @@ + + #include "internal.h" + ++/* ++ * New pipe buffers will be restricted to this size while the user is exceeding ++ * their pipe buffer quota. 
The general pipe use case needs at least two ++ * buffers: one for data yet to be read, and one for new data. If this is less ++ * than two, then a write to a non-empty pipe may block even if the pipe is not ++ * full. This can occur with GNU make jobserver or similar uses of pipes as ++ * semaphores: multiple processes may be waiting to write tokens back to the ++ * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/. ++ * ++ * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their ++ * own risk, namely: pipe writes to non-full pipes may block until the pipe is ++ * emptied. ++ */ ++#define PIPE_MIN_DEF_BUFFERS 2 ++ + /* + * The max size that a non-root user is allowed to grow the pipe. Can + * be set by root in /proc/sys/fs/pipe-max-size +@@ -653,8 +668,8 @@ struct pipe_inode_info *alloc_pipe_info(void) + user_bufs = account_pipe_buffers(user, 0, pipe_bufs); + + if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) { +- user_bufs = account_pipe_buffers(user, pipe_bufs, 1); +- pipe_bufs = 1; ++ user_bufs = account_pipe_buffers(user, pipe_bufs, PIPE_MIN_DEF_BUFFERS); ++ pipe_bufs = PIPE_MIN_DEF_BUFFERS; + } + + if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user()) +diff --git a/fs/proc/base.c b/fs/proc/base.c +index 89091588a887..3b0af99dd1fd 100644 +--- a/fs/proc/base.c ++++ b/fs/proc/base.c +@@ -868,7 +868,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf, + flags |= FOLL_WRITE; + + while (count > 0) { +- int this_len = min_t(int, count, PAGE_SIZE); ++ size_t this_len = min_t(size_t, count, PAGE_SIZE); + + if (write && copy_from_user(page, buf, this_len)) { + copied = -EFAULT; +@@ -2692,6 +2692,13 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx, + } + + #ifdef CONFIG_SECURITY ++static int proc_pid_attr_open(struct inode *inode, struct file *file) ++{ ++ file->private_data = NULL; ++ __mem_open(inode, file, PTRACE_MODE_READ_FSCREDS); ++ return 0; ++} ++ + static ssize_t proc_pid_attr_read(struct file * file, char __user * buf, + size_t count, loff_t *ppos) + { +@@ -2721,6 +2728,10 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf, + ssize_t length; + struct task_struct *task = get_proc_task(inode); + ++ /* A task may only write when it was the opener. 
*/ ++ if (file->private_data != current->mm) ++ return -EPERM; ++ + length = -ESRCH; + if (!task) + goto out_no_task; +@@ -2756,9 +2767,11 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf, + } + + static const struct file_operations proc_pid_attr_operations = { ++ .open = proc_pid_attr_open, + .read = proc_pid_attr_read, + .write = proc_pid_attr_write, + .llseek = generic_file_llseek, ++ .release = mem_release, + }; + + static const struct pid_entry attr_dir_stuff[] = { +diff --git a/fs/proc/inode.c b/fs/proc/inode.c +index 4298a3970976..e1b530cc483c 100644 +--- a/fs/proc/inode.c ++++ b/fs/proc/inode.c +@@ -417,7 +417,7 @@ const struct inode_operations proc_link_inode_operations = { + + struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) + { +- struct inode *inode = new_inode_pseudo(sb); ++ struct inode *inode = new_inode(sb); + + if (inode) { + inode->i_ino = de->low_ino; +diff --git a/fs/proc/self.c b/fs/proc/self.c +index 40245954c450..f6e2e3fb8a22 100644 +--- a/fs/proc/self.c ++++ b/fs/proc/self.c +@@ -26,6 +26,13 @@ static const char *proc_self_get_link(struct dentry *dentry, + pid_t tgid = task_tgid_nr_ns(current, ns); + char *name; + ++ /* ++ * Not currently supported. Once we can inherit all of struct pid, ++ * we can allow this. ++ */ ++ if (current->flags & PF_KTHREAD) ++ return ERR_PTR(-EOPNOTSUPP); ++ + if (!tgid) + return ERR_PTR(-ENOENT); + /* 11 for max length of signed int in decimal + NULL term */ +@@ -53,7 +60,7 @@ int proc_setup_self(struct super_block *s) + inode_lock(root_inode); + self = d_alloc_name(s->s_root, "self"); + if (self) { +- struct inode *inode = new_inode_pseudo(s); ++ struct inode *inode = new_inode(s); + if (inode) { + inode->i_ino = self_inum; + inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index f1a877b41b29..1d06ab4aaeab 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -1993,6 +1993,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, + + } while (pte++, addr += PAGE_SIZE, addr != end); + pte_unmap_unlock(orig_pte, ptl); ++ cond_resched(); + return 0; + } + #ifdef CONFIG_HUGETLB_PAGE +diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c +index 595b90a9766c..02d1db8e9723 100644 +--- a/fs/proc/thread_self.c ++++ b/fs/proc/thread_self.c +@@ -55,7 +55,7 @@ int proc_setup_thread_self(struct super_block *s) + inode_lock(root_inode); + thread_self = d_alloc_name(s->s_root, "thread-self"); + if (thread_self) { +- struct inode *inode = new_inode_pseudo(s); ++ struct inode *inode = new_inode(s); + if (inode) { + inode->i_ino = thread_self_inum; + inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); +diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c +index 0738972e8d3f..ecd9887b0d1f 100644 +--- a/fs/quota/quota_tree.c ++++ b/fs/quota/quota_tree.c +@@ -61,7 +61,7 @@ static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) + + memset(buf, 0, info->dqi_usable_bs); + return sb->s_op->quota_read(sb, info->dqi_type, buf, +- info->dqi_usable_bs, blk << info->dqi_blocksize_bits); ++ info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits); + } + + static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) +@@ -70,7 +70,7 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) + ssize_t ret; + + ret = sb->s_op->quota_write(sb, info->dqi_type, buf, +- info->dqi_usable_bs, blk << info->dqi_blocksize_bits); ++ 
info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits); + if (ret != info->dqi_usable_bs) { + quota_error(sb, "dquota write failed"); + if (ret >= 0) +@@ -283,7 +283,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, + blk); + goto out_buf; + } +- dquot->dq_off = (blk << info->dqi_blocksize_bits) + ++ dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) + + sizeof(struct qt_disk_dqdbheader) + + i * info->dqi_entry_size; + kfree(buf); +@@ -558,7 +558,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, + ret = -EIO; + goto out_buf; + } else { +- ret = (blk << info->dqi_blocksize_bits) + sizeof(struct ++ ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct + qt_disk_dqdbheader) + i * info->dqi_entry_size; + } + out_buf: +diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c +index ca71bf881ad1..4a39bb98f8ab 100644 +--- a/fs/quota/quota_v2.c ++++ b/fs/quota/quota_v2.c +@@ -266,6 +266,7 @@ static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot) + d->dqb_curspace = cpu_to_le64(m->dqb_curspace); + d->dqb_btime = cpu_to_le64(m->dqb_btime); + d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id)); ++ d->dqb_pad = 0; + if (qtree_entry_unused(info, dp)) + d->dqb_itime = cpu_to_le64(1); + } +diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c +index 9531b6c18ac7..f28999f71776 100644 +--- a/fs/reiserfs/inode.c ++++ b/fs/reiserfs/inode.c +@@ -1554,11 +1554,7 @@ void reiserfs_read_locked_inode(struct inode *inode, + * set version 1, version 2 could be used too, because stat data + * key is the same in both versions + */ +- key.version = KEY_FORMAT_3_5; +- key.on_disk_key.k_dir_id = dirino; +- key.on_disk_key.k_objectid = inode->i_ino; +- key.on_disk_key.k_offset = 0; +- key.on_disk_key.k_type = 0; ++ _make_cpu_key(&key, KEY_FORMAT_3_5, dirino, inode->i_ino, 0, 0, 3); + + /* look for the object's stat data */ + retval = search_item(inode->i_sb, &key, &path_to_sd); +@@ -2170,7 +2166,8 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, + out_inserted_sd: + clear_nlink(inode); + th->t_trans_id = 0; /* so the caller can't use this handle later */ +- unlock_new_inode(inode); /* OK to do even if we hadn't locked it */ ++ if (inode->i_state & I_NEW) ++ unlock_new_inode(inode); + iput(inode); + return err; + } +diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c +index 2a5c4813c47d..94871f611fa8 100644 +--- a/fs/reiserfs/journal.c ++++ b/fs/reiserfs/journal.c +@@ -2766,6 +2766,20 @@ int journal_init(struct super_block *sb, const char *j_dev_name, + goto free_and_return; + } + ++ /* ++ * Sanity check to see if journal first block is correct. ++ * If journal first block is invalid it can cause ++ * zeroing important superblock members. 
++ */ ++ if (!SB_ONDISK_JOURNAL_DEVICE(sb) && ++ SB_ONDISK_JOURNAL_1st_BLOCK(sb) < SB_JOURNAL_1st_RESERVED_BLOCK(sb)) { ++ reiserfs_warning(sb, "journal-1393", ++ "journal 1st super block is invalid: 1st reserved block %d, but actual 1st block is %d", ++ SB_JOURNAL_1st_RESERVED_BLOCK(sb), ++ SB_ONDISK_JOURNAL_1st_BLOCK(sb)); ++ goto free_and_return; ++ } ++ + if (journal_init_dev(sb, journal, j_dev_name) != 0) { + reiserfs_warning(sb, "sh-462", + "unable to initialize journal device"); +diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c +index 5f5fff068877..f2f7055303ca 100644 +--- a/fs/reiserfs/stree.c ++++ b/fs/reiserfs/stree.c +@@ -386,6 +386,24 @@ void pathrelse(struct treepath *search_path) + search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET; + } + ++static int has_valid_deh_location(struct buffer_head *bh, struct item_head *ih) ++{ ++ struct reiserfs_de_head *deh; ++ int i; ++ ++ deh = B_I_DEH(bh, ih); ++ for (i = 0; i < ih_entry_count(ih); i++) { ++ if (deh_location(&deh[i]) > ih_item_len(ih)) { ++ reiserfs_warning(NULL, "reiserfs-5094", ++ "directory entry location seems wrong %h", ++ &deh[i]); ++ return 0; ++ } ++ } ++ ++ return 1; ++} ++ + static int is_leaf(char *buf, int blocksize, struct buffer_head *bh) + { + struct block_head *blkh; +@@ -453,6 +471,15 @@ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh) + "(second one): %h", ih); + return 0; + } ++ if (is_direntry_le_ih(ih)) { ++ if (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE)) { ++ reiserfs_warning(NULL, "reiserfs-5093", ++ "item entry count seems wrong %h", ++ ih); ++ return 0; ++ } ++ return has_valid_deh_location(bh, ih); ++ } + prev_location = ih_location(ih); + } + +diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c +index 677608a89b08..0d324c07762a 100644 +--- a/fs/reiserfs/super.c ++++ b/fs/reiserfs/super.c +@@ -1234,6 +1234,10 @@ static int reiserfs_parse_options(struct super_block *s, + "turned on."); + return 0; + } ++ if (qf_names[qtype] != ++ REISERFS_SB(s)->s_qf_names[qtype]) ++ kfree(qf_names[qtype]); ++ qf_names[qtype] = NULL; + if (*arg) { /* Some filename specified? */ + if (REISERFS_SB(s)->s_qf_names[qtype] + && strcmp(REISERFS_SB(s)->s_qf_names[qtype], +@@ -1263,10 +1267,6 @@ static int reiserfs_parse_options(struct super_block *s, + else + *mount_options |= 1 << REISERFS_GRPQUOTA; + } else { +- if (qf_names[qtype] != +- REISERFS_SB(s)->s_qf_names[qtype]) +- kfree(qf_names[qtype]); +- qf_names[qtype] = NULL; + if (qtype == USRQUOTA) + *mount_options &= ~(1 << REISERFS_USRQUOTA); + else +@@ -2059,6 +2059,14 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) + unlock_new_inode(root_inode); + } + ++ if (!S_ISDIR(root_inode->i_mode) || !inode_get_bytes(root_inode) || ++ !root_inode->i_size) { ++ SWARN(silent, s, "", "corrupt root inode, run fsck"); ++ iput(root_inode); ++ errval = -EUCLEAN; ++ goto error; ++ } ++ + s->s_root = d_make_root(root_inode); + if (!s->s_root) + goto error; +diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c +index 07900105523f..645ee1bbd025 100644 +--- a/fs/reiserfs/xattr.c ++++ b/fs/reiserfs/xattr.c +@@ -664,6 +664,13 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer, + if (get_inode_sd_version(inode) == STAT_DATA_V1) + return -EOPNOTSUPP; + ++ /* ++ * priv_root needn't be initialized during mount so allow initial ++ * lookups to succeed. 
++ */ ++ if (!REISERFS_SB(inode->i_sb)->priv_root) ++ return 0; ++ + dentry = xattr_lookup(inode, name, XATTR_REPLACE); + if (IS_ERR(dentry)) { + err = PTR_ERR(dentry); +diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h +index 613ff5aef94e..19ca3745301f 100644 +--- a/fs/reiserfs/xattr.h ++++ b/fs/reiserfs/xattr.h +@@ -42,7 +42,7 @@ void reiserfs_security_free(struct reiserfs_security_handle *sec); + + static inline int reiserfs_xattrs_initialized(struct super_block *sb) + { +- return REISERFS_SB(sb)->priv_root != NULL; ++ return REISERFS_SB(sb)->priv_root && REISERFS_SB(sb)->xattr_root; + } + + #define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header)) +diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c +index f86f51f99ace..1dcadd22b440 100644 +--- a/fs/romfs/storage.c ++++ b/fs/romfs/storage.c +@@ -221,10 +221,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos, + size_t limit; + + limit = romfs_maxsize(sb); +- if (pos >= limit) ++ if (pos >= limit || buflen > limit - pos) + return -EIO; +- if (buflen > limit - pos) +- buflen = limit - pos; + + #ifdef CONFIG_ROMFS_ON_MTD + if (sb->s_mtd) +diff --git a/fs/select.c b/fs/select.c +index 3d4f85defeab..de675c79f479 100644 +--- a/fs/select.c ++++ b/fs/select.c +@@ -961,10 +961,9 @@ static long do_restart_poll(struct restart_block *restart_block) + + ret = do_sys_poll(ufds, nfds, to); + +- if (ret == -EINTR) { +- restart_block->fn = do_restart_poll; +- ret = -ERESTART_RESTARTBLOCK; +- } ++ if (ret == -EINTR) ++ ret = set_restart_fn(restart_block, do_restart_poll); ++ + return ret; + } + +@@ -986,7 +985,6 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds, + struct restart_block *restart_block; + + restart_block = ¤t->restart_block; +- restart_block->fn = do_restart_poll; + restart_block->poll.ufds = ufds; + restart_block->poll.nfds = nfds; + +@@ -997,7 +995,7 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds, + } else + restart_block->poll.has_timeout = 0; + +- ret = -ERESTART_RESTARTBLOCK; ++ ret = set_restart_fn(restart_block, do_restart_poll); + } + return ret; + } +diff --git a/fs/seq_file.c b/fs/seq_file.c +index 368bfb92b115..3ade39e02bb7 100644 +--- a/fs/seq_file.c ++++ b/fs/seq_file.c +@@ -28,6 +28,9 @@ static void *seq_buf_alloc(unsigned long size) + void *buf; + gfp_t gfp = GFP_KERNEL; + ++ if (unlikely(size > MAX_RW_COUNT)) ++ return NULL; ++ + /* + * For high order allocations, use __GFP_NORETRY to avoid oom-killing - + * it's better to fall back to vmalloc() than to kill things. 
For small +diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c +index 8073b6532cf0..1d406a2094a5 100644 +--- a/fs/squashfs/export.c ++++ b/fs/squashfs/export.c +@@ -54,12 +54,17 @@ static long long squashfs_inode_lookup(struct super_block *sb, int ino_num) + struct squashfs_sb_info *msblk = sb->s_fs_info; + int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1); + int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1); +- u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]); ++ u64 start; + __le64 ino; + int err; + + TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num); + ++ if (ino_num == 0 || (ino_num - 1) >= msblk->inodes) ++ return -EINVAL; ++ ++ start = le64_to_cpu(msblk->inode_lookup_table[blk]); ++ + err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino)); + if (err < 0) + return err; +@@ -124,7 +129,10 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb, + u64 lookup_table_start, u64 next_table, unsigned int inodes) + { + unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes); ++ unsigned int indexes = SQUASHFS_LOOKUP_BLOCKS(inodes); ++ int n; + __le64 *table; ++ u64 start, end; + + TRACE("In read_inode_lookup_table, length %d\n", length); + +@@ -134,20 +142,41 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb, + if (inodes == 0) + return ERR_PTR(-EINVAL); + +- /* length bytes should not extend into the next table - this check +- * also traps instances where lookup_table_start is incorrectly larger +- * than the next table start ++ /* ++ * The computed size of the lookup table (length bytes) should exactly ++ * match the table start and end points + */ +- if (lookup_table_start + length > next_table) ++ if (length != (next_table - lookup_table_start)) + return ERR_PTR(-EINVAL); + + table = squashfs_read_table(sb, lookup_table_start, length); ++ if (IS_ERR(table)) ++ return table; + + /* +- * table[0] points to the first inode lookup table metadata block, +- * this should be less than lookup_table_start ++ * table0], table[1], ... table[indexes - 1] store the locations ++ * of the compressed inode lookup blocks. Each entry should be ++ * less than the next (i.e. table[0] < table[1]), and the difference ++ * between them should be SQUASHFS_METADATA_SIZE or less. ++ * table[indexes - 1] should be less than lookup_table_start, and ++ * again the difference should be SQUASHFS_METADATA_SIZE or less + */ +- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) { ++ for (n = 0; n < (indexes - 1); n++) { ++ start = le64_to_cpu(table[n]); ++ end = le64_to_cpu(table[n + 1]); ++ ++ if (start >= end ++ || (end - start) > ++ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } ++ } ++ ++ start = le64_to_cpu(table[indexes - 1]); ++ if (start >= lookup_table_start || ++ (lookup_table_start - start) > ++ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { + kfree(table); + return ERR_PTR(-EINVAL); + } +diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c +index f1c1430ae721..0bcb83479fcb 100644 +--- a/fs/squashfs/file.c ++++ b/fs/squashfs/file.c +@@ -224,11 +224,11 @@ static long long read_indexes(struct super_block *sb, int n, + * If the skip factor is limited in this way then the file will use multiple + * slots. 
+ */ +-static inline int calculate_skip(int blocks) ++static inline int calculate_skip(u64 blocks) + { +- int skip = blocks / ((SQUASHFS_META_ENTRIES + 1) ++ u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1) + * SQUASHFS_META_INDEXES); +- return min(SQUASHFS_CACHED_BLKS - 1, skip + 1); ++ return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1); + } + + +diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c +index d38ea3dab951..d2e15baab537 100644 +--- a/fs/squashfs/id.c ++++ b/fs/squashfs/id.c +@@ -48,10 +48,15 @@ int squashfs_get_id(struct super_block *sb, unsigned int index, + struct squashfs_sb_info *msblk = sb->s_fs_info; + int block = SQUASHFS_ID_BLOCK(index); + int offset = SQUASHFS_ID_BLOCK_OFFSET(index); +- u64 start_block = le64_to_cpu(msblk->id_table[block]); ++ u64 start_block; + __le32 disk_id; + int err; + ++ if (index >= msblk->ids) ++ return -EINVAL; ++ ++ start_block = le64_to_cpu(msblk->id_table[block]); ++ + err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset, + sizeof(disk_id)); + if (err < 0) +@@ -69,7 +74,10 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb, + u64 id_table_start, u64 next_table, unsigned short no_ids) + { + unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids); ++ unsigned int indexes = SQUASHFS_ID_BLOCKS(no_ids); ++ int n; + __le64 *table; ++ u64 start, end; + + TRACE("In read_id_index_table, length %d\n", length); + +@@ -80,20 +88,38 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb, + return ERR_PTR(-EINVAL); + + /* +- * length bytes should not extend into the next table - this check +- * also traps instances where id_table_start is incorrectly larger +- * than the next table start ++ * The computed size of the index table (length bytes) should exactly ++ * match the table start and end points + */ +- if (id_table_start + length > next_table) ++ if (length != (next_table - id_table_start)) + return ERR_PTR(-EINVAL); + + table = squashfs_read_table(sb, id_table_start, length); ++ if (IS_ERR(table)) ++ return table; + + /* +- * table[0] points to the first id lookup table metadata block, this +- * should be less than id_table_start ++ * table[0], table[1], ... table[indexes - 1] store the locations ++ * of the compressed id blocks. Each entry should be less than ++ * the next (i.e. table[0] < table[1]), and the difference between them ++ * should be SQUASHFS_METADATA_SIZE or less. 
table[indexes - 1] ++ * should be less than id_table_start, and again the difference ++ * should be SQUASHFS_METADATA_SIZE or less + */ +- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) { ++ for (n = 0; n < (indexes - 1); n++) { ++ start = le64_to_cpu(table[n]); ++ end = le64_to_cpu(table[n + 1]); ++ ++ if (start >= end || (end - start) > ++ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } ++ } ++ ++ start = le64_to_cpu(table[indexes - 1]); ++ if (start >= id_table_start || (id_table_start - start) > ++ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { + kfree(table); + return ERR_PTR(-EINVAL); + } +diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h +index e66486366f02..2fd1262cc1bd 100644 +--- a/fs/squashfs/squashfs_fs.h ++++ b/fs/squashfs/squashfs_fs.h +@@ -30,6 +30,7 @@ + + /* size of metadata (inode and directory) blocks */ + #define SQUASHFS_METADATA_SIZE 8192 ++#define SQUASHFS_BLOCK_OFFSET 2 + + /* default size of block device I/O */ + #ifdef CONFIG_SQUASHFS_4K_DEVBLK_SIZE +diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h +index ef69c31947bf..5234c19a0eab 100644 +--- a/fs/squashfs/squashfs_fs_sb.h ++++ b/fs/squashfs/squashfs_fs_sb.h +@@ -77,5 +77,6 @@ struct squashfs_sb_info { + unsigned int inodes; + unsigned int fragments; + int xattr_ids; ++ unsigned int ids; + }; + #endif +diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c +index 1516bb779b8d..5abc9d03397c 100644 +--- a/fs/squashfs/super.c ++++ b/fs/squashfs/super.c +@@ -176,6 +176,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) + msblk->directory_table = le64_to_cpu(sblk->directory_table_start); + msblk->inodes = le32_to_cpu(sblk->inodes); + msblk->fragments = le32_to_cpu(sblk->fragments); ++ msblk->ids = le16_to_cpu(sblk->no_ids); + flags = le16_to_cpu(sblk->flags); + + TRACE("Found valid superblock on %pg\n", sb->s_bdev); +@@ -187,7 +188,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) + TRACE("Block size %d\n", msblk->block_size); + TRACE("Number of inodes %d\n", msblk->inodes); + TRACE("Number of fragments %d\n", msblk->fragments); +- TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids)); ++ TRACE("Number of ids %d\n", msblk->ids); + TRACE("sblk->inode_table_start %llx\n", msblk->inode_table); + TRACE("sblk->directory_table_start %llx\n", msblk->directory_table); + TRACE("sblk->fragment_table_start %llx\n", +@@ -244,8 +245,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) + allocate_id_index_table: + /* Allocate and read id index table */ + msblk->id_table = squashfs_read_id_index_table(sb, +- le64_to_cpu(sblk->id_table_start), next_table, +- le16_to_cpu(sblk->no_ids)); ++ le64_to_cpu(sblk->id_table_start), next_table, msblk->ids); + if (IS_ERR(msblk->id_table)) { + ERROR("unable to read id index table\n"); + err = PTR_ERR(msblk->id_table); +diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h +index afe70f815e3d..86b0a0073e51 100644 +--- a/fs/squashfs/xattr.h ++++ b/fs/squashfs/xattr.h +@@ -30,8 +30,16 @@ extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *, + static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb, + u64 start, u64 *xattr_table_start, int *xattr_ids) + { ++ struct squashfs_xattr_id_table *id_table; ++ ++ id_table = squashfs_read_table(sb, start, sizeof(*id_table)); ++ if (IS_ERR(id_table)) ++ return (__le64 *) id_table; ++ ++ *xattr_table_start = 
le64_to_cpu(id_table->xattr_table_start); ++ kfree(id_table); ++ + ERROR("Xattrs in filesystem, these will be ignored\n"); +- *xattr_table_start = start; + return ERR_PTR(-ENOTSUPP); + } + +diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c +index c89607d690c4..7f718d2bf357 100644 +--- a/fs/squashfs/xattr_id.c ++++ b/fs/squashfs/xattr_id.c +@@ -44,10 +44,15 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index, + struct squashfs_sb_info *msblk = sb->s_fs_info; + int block = SQUASHFS_XATTR_BLOCK(index); + int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index); +- u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]); ++ u64 start_block; + struct squashfs_xattr_id id; + int err; + ++ if (index >= msblk->xattr_ids) ++ return -EINVAL; ++ ++ start_block = le64_to_cpu(msblk->xattr_id_table[block]); ++ + err = squashfs_read_metadata(sb, &id, &start_block, &offset, + sizeof(id)); + if (err < 0) +@@ -63,13 +68,17 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index, + /* + * Read uncompressed xattr id lookup table indexes from disk into memory + */ +-__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start, ++__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start, + u64 *xattr_table_start, int *xattr_ids) + { +- unsigned int len; ++ struct squashfs_sb_info *msblk = sb->s_fs_info; ++ unsigned int len, indexes; + struct squashfs_xattr_id_table *id_table; ++ __le64 *table; ++ u64 start, end; ++ int n; + +- id_table = squashfs_read_table(sb, start, sizeof(*id_table)); ++ id_table = squashfs_read_table(sb, table_start, sizeof(*id_table)); + if (IS_ERR(id_table)) + return (__le64 *) id_table; + +@@ -83,13 +92,54 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start, + if (*xattr_ids == 0) + return ERR_PTR(-EINVAL); + +- /* xattr_table should be less than start */ +- if (*xattr_table_start >= start) ++ len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids); ++ indexes = SQUASHFS_XATTR_BLOCKS(*xattr_ids); ++ ++ /* ++ * The computed size of the index table (len bytes) should exactly ++ * match the table start and end points ++ */ ++ start = table_start + sizeof(*id_table); ++ end = msblk->bytes_used; ++ ++ if (len != (end - start)) + return ERR_PTR(-EINVAL); + +- len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids); ++ table = squashfs_read_table(sb, start, len); ++ if (IS_ERR(table)) ++ return table; ++ ++ /* table[0], table[1], ... table[indexes - 1] store the locations ++ * of the compressed xattr id blocks. Each entry should be less than ++ * the next (i.e. table[0] < table[1]), and the difference between them ++ * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1] ++ * should be less than table_start, and again the difference ++ * shouls be SQUASHFS_METADATA_SIZE or less. ++ * ++ * Finally xattr_table_start should be less than table[0]. 
++ */ ++ for (n = 0; n < (indexes - 1); n++) { ++ start = le64_to_cpu(table[n]); ++ end = le64_to_cpu(table[n + 1]); ++ ++ if (start >= end || (end - start) > ++ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } ++ } ++ ++ start = le64_to_cpu(table[indexes - 1]); ++ if (start >= table_start || (table_start - start) > ++ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } + +- TRACE("In read_xattr_index_table, length %d\n", len); ++ if (*xattr_table_start >= le64_to_cpu(table[0])) { ++ kfree(table); ++ return ERR_PTR(-EINVAL); ++ } + +- return squashfs_read_table(sb, start + sizeof(*id_table), len); ++ return table; + } +diff --git a/fs/super.c b/fs/super.c +index cdd21cd45aa7..c777a5cbcb1c 100644 +--- a/fs/super.c ++++ b/fs/super.c +@@ -1272,36 +1272,11 @@ EXPORT_SYMBOL(__sb_end_write); + */ + int __sb_start_write(struct super_block *sb, int level, bool wait) + { +- bool force_trylock = false; +- int ret = 1; ++ if (!wait) ++ return percpu_down_read_trylock(sb->s_writers.rw_sem + level-1); + +-#ifdef CONFIG_LOCKDEP +- /* +- * We want lockdep to tell us about possible deadlocks with freezing +- * but it's it bit tricky to properly instrument it. Getting a freeze +- * protection works as getting a read lock but there are subtle +- * problems. XFS for example gets freeze protection on internal level +- * twice in some cases, which is OK only because we already hold a +- * freeze protection also on higher level. Due to these cases we have +- * to use wait == F (trylock mode) which must not fail. +- */ +- if (wait) { +- int i; +- +- for (i = 0; i < level - 1; i++) +- if (percpu_rwsem_is_held(sb->s_writers.rw_sem + i)) { +- force_trylock = true; +- break; +- } +- } +-#endif +- if (wait && !force_trylock) +- percpu_down_read(sb->s_writers.rw_sem + level-1); +- else +- ret = percpu_down_read_trylock(sb->s_writers.rw_sem + level-1); +- +- WARN_ON(force_trylock && !ret); +- return ret; ++ percpu_down_read(sb->s_writers.rw_sem + level-1); ++ return 1; + } + EXPORT_SYMBOL(__sb_start_write); + +diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c +index 69e287e20732..8223e4330981 100644 +--- a/fs/ubifs/debug.c ++++ b/fs/ubifs/debug.c +@@ -1125,6 +1125,7 @@ int dbg_check_dir(struct ubifs_info *c, const struct inode *dir) + err = PTR_ERR(dent); + if (err == -ENOENT) + break; ++ kfree(pdent); + return err; + } + +diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c +index 87ab02e2d666..56eed54633cf 100644 +--- a/fs/ubifs/dir.c ++++ b/fs/ubifs/dir.c +@@ -1144,7 +1144,10 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry, + return err; + } + ++ spin_lock(&whiteout->i_lock); + whiteout->i_state |= I_LINKABLE; ++ spin_unlock(&whiteout->i_lock); ++ + whiteout_ui = ubifs_inode(whiteout); + whiteout_ui->data = dev; + whiteout_ui->data_len = ubifs_encode_dev(dev, MKDEV(0, 0)); +@@ -1239,7 +1242,11 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry, + + inc_nlink(whiteout); + mark_inode_dirty(whiteout); ++ ++ spin_lock(&whiteout->i_lock); + whiteout->i_state &= ~I_LINKABLE; ++ spin_unlock(&whiteout->i_lock); ++ + iput(whiteout); + } + +diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c +index 97be41215332..99caaae01cab 100644 +--- a/fs/ubifs/io.c ++++ b/fs/ubifs/io.c +@@ -237,7 +237,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum) + int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, + int offs, int quiet, int must_chk_crc) + { +- int err = -EINVAL, 
type, node_len; ++ int err = -EINVAL, type, node_len, dump_node = 1; + uint32_t crc, node_crc, magic; + const struct ubifs_ch *ch = buf; + +@@ -290,10 +290,22 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, + out_len: + if (!quiet) + ubifs_err(c, "bad node length %d", node_len); ++ if (type == UBIFS_DATA_NODE && node_len > UBIFS_DATA_NODE_SZ) ++ dump_node = 0; + out: + if (!quiet) { + ubifs_err(c, "bad node at LEB %d:%d", lnum, offs); +- ubifs_dump_node(c, buf); ++ if (dump_node) { ++ ubifs_dump_node(c, buf); ++ } else { ++ int safe_len = min3(node_len, c->leb_size - offs, ++ (int)UBIFS_MAX_DATA_NODE_SZ); ++ pr_err("\tprevent out-of-bounds memory access\n"); ++ pr_err("\ttruncated data node length %d\n", safe_len); ++ pr_err("\tcorrupted data node:\n"); ++ print_hex_dump(KERN_ERR, "\t", DUMP_PREFIX_OFFSET, 32, 1, ++ buf, safe_len, 0); ++ } + dump_stack(); + } + return err; +@@ -319,7 +331,7 @@ void ubifs_pad(const struct ubifs_info *c, void *buf, int pad) + { + uint32_t crc; + +- ubifs_assert(pad >= 0 && !(pad & 7)); ++ ubifs_assert(pad >= 0); + + if (pad >= UBIFS_PAD_NODE_SZ) { + struct ubifs_ch *ch = buf; +@@ -709,6 +721,10 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) + * write-buffer. + */ + memcpy(wbuf->buf + wbuf->used, buf, len); ++ if (aligned_len > len) { ++ ubifs_assert(aligned_len - len < 8); ++ ubifs_pad(c, wbuf->buf + wbuf->used + len, aligned_len - len); ++ } + + if (aligned_len == wbuf->avail) { + dbg_io("flush jhead %s wbuf to LEB %d:%d", +@@ -801,13 +817,18 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) + } + + spin_lock(&wbuf->lock); +- if (aligned_len) ++ if (aligned_len) { + /* + * And now we have what's left and what does not take whole + * max. write unit, so write it to the write-buffer and we are + * done. + */ + memcpy(wbuf->buf, buf + written, len); ++ if (aligned_len > len) { ++ ubifs_assert(aligned_len - len < 8); ++ ubifs_pad(c, wbuf->buf + len, aligned_len - len); ++ } ++ } + + if (c->leb_size - wbuf->offs >= c->max_write_size) + wbuf->size = c->max_write_size; +diff --git a/fs/udf/inode.c b/fs/udf/inode.c +index 9e66d85021fc..50607673a6a9 100644 +--- a/fs/udf/inode.c ++++ b/fs/udf/inode.c +@@ -140,21 +140,24 @@ void udf_evict_inode(struct inode *inode) + struct udf_inode_info *iinfo = UDF_I(inode); + int want_delete = 0; + +- if (!inode->i_nlink && !is_bad_inode(inode)) { +- want_delete = 1; +- udf_setsize(inode, 0); +- udf_update_inode(inode, IS_SYNC(inode)); ++ if (!is_bad_inode(inode)) { ++ if (!inode->i_nlink) { ++ want_delete = 1; ++ udf_setsize(inode, 0); ++ udf_update_inode(inode, IS_SYNC(inode)); ++ } ++ if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB && ++ inode->i_size != iinfo->i_lenExtents) { ++ udf_warn(inode->i_sb, ++ "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n", ++ inode->i_ino, inode->i_mode, ++ (unsigned long long)inode->i_size, ++ (unsigned long long)iinfo->i_lenExtents); ++ } + } + truncate_inode_pages_final(&inode->i_data); + invalidate_inode_buffers(inode); + clear_inode(inode); +- if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB && +- inode->i_size != iinfo->i_lenExtents) { +- udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. 
Filesystem need not be standards compliant.\n", +- inode->i_ino, inode->i_mode, +- (unsigned long long)inode->i_size, +- (unsigned long long)iinfo->i_lenExtents); +- } + kfree(iinfo->i_ext.i_data); + iinfo->i_ext.i_data = NULL; + udf_clear_extent_cache(inode); +@@ -545,11 +548,14 @@ static int udf_do_extend_file(struct inode *inode, + + udf_write_aext(inode, last_pos, &last_ext->extLocation, + last_ext->extLength, 1); ++ + /* +- * We've rewritten the last extent but there may be empty +- * indirect extent after it - enter it. ++ * We've rewritten the last extent. If we are going to add ++ * more extents, we may need to enter possible following ++ * empty indirect extent. + */ +- udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0); ++ if (new_block_bytes || prealloc_len) ++ udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0); + } + + /* Managed to do everything necessary? */ +diff --git a/fs/udf/namei.c b/fs/udf/namei.c +index 348b922d1b6a..bfa53dead8c8 100644 +--- a/fs/udf/namei.c ++++ b/fs/udf/namei.c +@@ -956,6 +956,10 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, + iinfo->i_location.partitionReferenceNum, + 0); + epos.bh = udf_tgetblk(sb, block); ++ if (unlikely(!epos.bh)) { ++ err = -ENOMEM; ++ goto out_no_entry; ++ } + lock_buffer(epos.bh); + memset(epos.bh->b_data, 0x00, bsize); + set_buffer_uptodate(epos.bh); +diff --git a/fs/udf/super.c b/fs/udf/super.c +index 4abdba453885..c8c037e8e57b 100644 +--- a/fs/udf/super.c ++++ b/fs/udf/super.c +@@ -1391,6 +1391,12 @@ static int udf_load_sparable_map(struct super_block *sb, + (int)spm->numSparingTables); + return -EIO; + } ++ if (le32_to_cpu(spm->sizeSparingTable) > sb->s_blocksize) { ++ udf_err(sb, "error loading logical volume descriptor: " ++ "Too big sparing table size (%u)\n", ++ le32_to_cpu(spm->sizeSparingTable)); ++ return -EIO; ++ } + + for (i = 0; i < spm->numSparingTables; i++) { + loc = le32_to_cpu(spm->locSparingTable[i]); +diff --git a/fs/ufs/super.c b/fs/ufs/super.c +index 351162ff1bfd..e320d824ee4d 100644 +--- a/fs/ufs/super.c ++++ b/fs/ufs/super.c +@@ -99,7 +99,7 @@ static struct inode *ufs_nfs_get_inode(struct super_block *sb, u64 ino, u32 gene + struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; + struct inode *inode; + +- if (ino < UFS_ROOTINO || ino > uspi->s_ncg * uspi->s_ipg) ++ if (ino < UFS_ROOTINO || ino > (u64)uspi->s_ncg * uspi->s_ipg) + return ERR_PTR(-ESTALE); + + inode = ufs_iget(sb, ino); +diff --git a/fs/xattr.c b/fs/xattr.c +index 1c91835ac431..e4c1cb9796ce 100644 +--- a/fs/xattr.c ++++ b/fs/xattr.c +@@ -203,10 +203,22 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name, + return error; + } + +- ++/** ++ * __vfs_setxattr_locked: set an extended attribute while holding the inode ++ * lock ++ * ++ * @dentry - object to perform setxattr on ++ * @name - xattr name to set ++ * @value - value to set @name to ++ * @size - size of @value ++ * @flags - flags to pass into filesystem operations ++ * @delegated_inode - on return, will contain an inode pointer that ++ * a delegation was broken on, NULL if none. 
++ */ + int +-vfs_setxattr(struct dentry *dentry, const char *name, const void *value, +- size_t size, int flags) ++__vfs_setxattr_locked(struct dentry *dentry, const char *name, ++ const void *value, size_t size, int flags, ++ struct inode **delegated_inode) + { + struct inode *inode = dentry->d_inode; + int error; +@@ -215,15 +227,40 @@ vfs_setxattr(struct dentry *dentry, const char *name, const void *value, + if (error) + return error; + +- inode_lock(inode); + error = security_inode_setxattr(dentry, name, value, size, flags); + if (error) + goto out; + ++ error = try_break_deleg(inode, delegated_inode); ++ if (error) ++ goto out; ++ + error = __vfs_setxattr_noperm(dentry, name, value, size, flags); + + out: ++ return error; ++} ++EXPORT_SYMBOL_GPL(__vfs_setxattr_locked); ++ ++int ++vfs_setxattr(struct dentry *dentry, const char *name, const void *value, ++ size_t size, int flags) ++{ ++ struct inode *inode = dentry->d_inode; ++ struct inode *delegated_inode = NULL; ++ int error; ++ ++retry_deleg: ++ inode_lock(inode); ++ error = __vfs_setxattr_locked(dentry, name, value, size, flags, ++ &delegated_inode); + inode_unlock(inode); ++ ++ if (delegated_inode) { ++ error = break_deleg_wait(&delegated_inode); ++ if (!error) ++ goto retry_deleg; ++ } + return error; + } + EXPORT_SYMBOL_GPL(vfs_setxattr); +@@ -379,8 +416,18 @@ __vfs_removexattr(struct dentry *dentry, const char *name) + } + EXPORT_SYMBOL(__vfs_removexattr); + ++/** ++ * __vfs_removexattr_locked: set an extended attribute while holding the inode ++ * lock ++ * ++ * @dentry - object to perform setxattr on ++ * @name - name of xattr to remove ++ * @delegated_inode - on return, will contain an inode pointer that ++ * a delegation was broken on, NULL if none. ++ */ + int +-vfs_removexattr(struct dentry *dentry, const char *name) ++__vfs_removexattr_locked(struct dentry *dentry, const char *name, ++ struct inode **delegated_inode) + { + struct inode *inode = dentry->d_inode; + int error; +@@ -389,11 +436,14 @@ vfs_removexattr(struct dentry *dentry, const char *name) + if (error) + return error; + +- inode_lock(inode); + error = security_inode_removexattr(dentry, name); + if (error) + goto out; + ++ error = try_break_deleg(inode, delegated_inode); ++ if (error) ++ goto out; ++ + error = __vfs_removexattr(dentry, name); + + if (!error) { +@@ -402,12 +452,32 @@ vfs_removexattr(struct dentry *dentry, const char *name) + } + + out: ++ return error; ++} ++EXPORT_SYMBOL_GPL(__vfs_removexattr_locked); ++ ++int ++vfs_removexattr(struct dentry *dentry, const char *name) ++{ ++ struct inode *inode = dentry->d_inode; ++ struct inode *delegated_inode = NULL; ++ int error; ++ ++retry_deleg: ++ inode_lock(inode); ++ error = __vfs_removexattr_locked(dentry, name, &delegated_inode); + inode_unlock(inode); ++ ++ if (delegated_inode) { ++ error = break_deleg_wait(&delegated_inode); ++ if (!error) ++ goto retry_deleg; ++ } ++ + return error; + } + EXPORT_SYMBOL_GPL(vfs_removexattr); + +- + /* + * Extended attribute SET operations + */ +diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c +index e567551402a6..b904d4634355 100644 +--- a/fs/xfs/libxfs/xfs_alloc.c ++++ b/fs/xfs/libxfs/xfs_alloc.c +@@ -2507,6 +2507,13 @@ xfs_agf_verify( + be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp))) + return false; + ++ if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_dblocks) ++ return false; ++ ++ if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) || ++ be32_to_cpu(agf->agf_freeblks) > be32_to_cpu(agf->agf_length)) ++ return false; ++ + 
if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 || + be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 || + be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS || +@@ -2518,6 +2525,10 @@ xfs_agf_verify( + be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS)) + return false; + ++ if (xfs_sb_version_hasrmapbt(&mp->m_sb) && ++ be32_to_cpu(agf->agf_rmap_blocks) > be32_to_cpu(agf->agf_length)) ++ return false; ++ + /* + * during growfs operations, the perag is not fully initialised, + * so we can't use it for any useful checking. growfs ensures we can't +@@ -2531,6 +2542,11 @@ xfs_agf_verify( + be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length)) + return false; + ++ if (xfs_sb_version_hasreflink(&mp->m_sb) && ++ be32_to_cpu(agf->agf_refcount_blocks) > ++ be32_to_cpu(agf->agf_length)) ++ return false; ++ + if (xfs_sb_version_hasreflink(&mp->m_sb) && + (be32_to_cpu(agf->agf_refcount_level) < 1 || + be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS)) +diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c +index c6c15e5717e4..537acde2c497 100644 +--- a/fs/xfs/libxfs/xfs_attr_leaf.c ++++ b/fs/xfs/libxfs/xfs_attr_leaf.c +@@ -520,8 +520,8 @@ xfs_attr_shortform_create(xfs_da_args_t *args) + ASSERT(ifp->if_flags & XFS_IFINLINE); + } + xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK); +- hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data; +- hdr->count = 0; ++ hdr = (struct xfs_attr_sf_hdr *)ifp->if_u1.if_data; ++ memset(hdr, 0, sizeof(*hdr)); + hdr->totsize = cpu_to_be16(sizeof(*hdr)); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); + } +@@ -785,9 +785,8 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args) + ASSERT(blkno == 0); + error = xfs_attr3_leaf_create(args, blkno, &bp); + if (error) { +- error = xfs_da_shrink_inode(args, 0, bp); +- bp = NULL; +- if (error) ++ /* xfs_attr3_leaf_create may not have instantiated a block */ ++ if (bp && (xfs_da_shrink_inode(args, 0, bp) != 0)) + goto out; + xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */ + memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */ +@@ -1333,7 +1332,9 @@ xfs_attr3_leaf_add_work( + for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) { + if (ichdr->freemap[i].base == tmp) { + ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t); +- ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t); ++ ichdr->freemap[i].size -= ++ min_t(uint16_t, ichdr->freemap[i].size, ++ sizeof(xfs_attr_leaf_entry_t)); + } + } + ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index); +diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c +index 9ca8809ee3d0..e390a7882933 100644 +--- a/fs/xfs/libxfs/xfs_bmap.c ++++ b/fs/xfs/libxfs/xfs_bmap.c +@@ -781,6 +781,8 @@ xfs_bmap_extents_to_btree( + *logflagsp = 0; + if ((error = xfs_alloc_vextent(&args))) { + xfs_iroot_realloc(ip, -1, whichfork); ++ ASSERT(ifp->if_broot == NULL); ++ XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + return error; + } +@@ -801,6 +803,8 @@ xfs_bmap_extents_to_btree( + } + if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) { + xfs_iroot_realloc(ip, -1, whichfork); ++ ASSERT(ifp->if_broot == NULL); ++ XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + return -ENOSPC; + } +diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c +index bbd1238852b3..df7f33e60a4f 100644 +--- a/fs/xfs/libxfs/xfs_dir2_node.c ++++ b/fs/xfs/libxfs/xfs_dir2_node.c +@@ -212,6 
+212,7 @@ __xfs_dir3_free_read( + xfs_buf_ioerror(*bpp, -EFSCORRUPTED); + xfs_verifier_error(*bpp); + xfs_trans_brelse(tp, *bpp); ++ *bpp = NULL; + return -EFSCORRUPTED; + } + +diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c +index 3a8cc7139912..89fdcc641715 100644 +--- a/fs/xfs/libxfs/xfs_rmap.c ++++ b/fs/xfs/libxfs/xfs_rmap.c +@@ -1318,7 +1318,7 @@ xfs_rmap_convert_shared( + * record for our insertion point. This will also give us the record for + * start block contiguity tests. + */ +- error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, flags, ++ error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, oldext, + &PREV, &i); + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done); + +diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c +index 86a4911520cc..69c112ddb544 100644 +--- a/fs/xfs/xfs_icache.c ++++ b/fs/xfs/xfs_icache.c +@@ -307,6 +307,46 @@ xfs_reinit_inode( + return error; + } + ++/* ++ * If we are allocating a new inode, then check what was returned is ++ * actually a free, empty inode. If we are not allocating an inode, ++ * then check we didn't find a free inode. ++ * ++ * Returns: ++ * 0 if the inode free state matches the lookup context ++ * -ENOENT if the inode is free and we are not allocating ++ * -EFSCORRUPTED if there is any state mismatch at all ++ */ ++static int ++xfs_iget_check_free_state( ++ struct xfs_inode *ip, ++ int flags) ++{ ++ if (flags & XFS_IGET_CREATE) { ++ /* should be a free inode */ ++ if (VFS_I(ip)->i_mode != 0) { ++ xfs_warn(ip->i_mount, ++"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)", ++ ip->i_ino, VFS_I(ip)->i_mode); ++ return -EFSCORRUPTED; ++ } ++ ++ if (ip->i_d.di_nblocks != 0) { ++ xfs_warn(ip->i_mount, ++"Corruption detected! Free inode 0x%llx has blocks allocated!", ++ ip->i_ino); ++ return -EFSCORRUPTED; ++ } ++ return 0; ++ } ++ ++ /* should be an allocated inode */ ++ if (VFS_I(ip)->i_mode == 0) ++ return -ENOENT; ++ ++ return 0; ++} ++ + /* + * Check the validity of the inode we just found it the cache + */ +@@ -356,12 +396,12 @@ xfs_iget_cache_hit( + } + + /* +- * If lookup is racing with unlink return an error immediately. ++ * Check the inode free state is valid. This also detects lookup ++ * racing with unlinks. + */ +- if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) { +- error = -ENOENT; ++ error = xfs_iget_check_free_state(ip, flags); ++ if (error) + goto out_error; +- } + + /* + * If IRECLAIMABLE is set, we've torn down the VFS inode already. +@@ -471,10 +511,14 @@ xfs_iget_cache_miss( + + trace_xfs_iget_miss(ip); + +- if ((VFS_I(ip)->i_mode == 0) && !(flags & XFS_IGET_CREATE)) { +- error = -ENOENT; ++ ++ /* ++ * Check the inode free state is valid. This also detects lookup ++ * racing with unlinks. 
++ */ ++ error = xfs_iget_check_free_state(ip, flags); ++ if (error) + goto out_destroy; +- } + + /* + * Preload the radix tree so we can insert safely under the +diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c +index 7bfddcd32d73..d5948fb386fa 100644 +--- a/fs/xfs/xfs_iops.c ++++ b/fs/xfs/xfs_iops.c +@@ -820,7 +820,7 @@ xfs_setattr_size( + ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL)); + ASSERT(S_ISREG(inode->i_mode)); + ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| +- ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); ++ ATTR_MTIME_SET|ATTR_TIMES_SET)) == 0); + + oldsize = inode->i_size; + newsize = iattr->ia_size; +@@ -864,6 +864,16 @@ xfs_setattr_size( + if (newsize > oldsize) { + error = xfs_zero_eof(ip, newsize, oldsize, &did_zeroing); + } else { ++ /* ++ * iomap won't detect a dirty page over an unwritten block (or a ++ * cow block over a hole) and subsequently skips zeroing the ++ * newly post-EOF portion of the page. Flush the new EOF to ++ * convert the block before the pagecache truncate. ++ */ ++ error = filemap_write_and_wait_range(inode->i_mapping, newsize, ++ newsize); ++ if (error) ++ return error; + error = iomap_truncate_page(inode, newsize, &did_zeroing, + &xfs_iomap_ops); + } +diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c +index 7bfcd09d446b..6bc949c63a75 100644 +--- a/fs/xfs/xfs_log.c ++++ b/fs/xfs/xfs_log.c +@@ -2634,7 +2634,6 @@ xlog_state_do_callback( + int funcdidcallbacks; /* flag: function did callbacks */ + int repeats; /* for issuing console warnings if + * looping too many times */ +- int wake = 0; + + spin_lock(&log->l_icloglock); + first_iclog = iclog = log->l_iclog; +@@ -2836,11 +2835,9 @@ xlog_state_do_callback( + #endif + + if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) +- wake = 1; +- spin_unlock(&log->l_icloglock); +- +- if (wake) + wake_up_all(&log->l_flush_wait); ++ ++ spin_unlock(&log->l_icloglock); + } + + +@@ -4002,7 +3999,9 @@ xfs_log_force_umount( + * item committed callback functions will do this again under lock to + * avoid races. + */ ++ spin_lock(&log->l_cilp->xc_push_lock); + wake_up_all(&log->l_cilp->xc_commit_wait); ++ spin_unlock(&log->l_cilp->xc_push_lock); + xlog_state_do_callback(log, XFS_LI_ABORTED, NULL); + + #ifdef XFSERRORDEBUG +diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c +index cecd37569ddb..353bfe9c5cdd 100644 +--- a/fs/xfs/xfs_pnfs.c ++++ b/fs/xfs/xfs_pnfs.c +@@ -144,7 +144,7 @@ xfs_fs_map_blocks( + goto out_unlock; + error = invalidate_inode_pages2(inode->i_mapping); + if (WARN_ON_ONCE(error)) +- return error; ++ goto out_unlock; + + end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length); + offset_fsb = XFS_B_TO_FSBT(mp, offset); +diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c +index 6b753b969f7b..aa99711a8ff9 100644 +--- a/fs/xfs/xfs_reflink.c ++++ b/fs/xfs/xfs_reflink.c +@@ -1108,6 +1108,7 @@ xfs_reflink_remap_extent( + xfs_filblks_t rlen; + xfs_filblks_t unmap_len; + xfs_off_t newlen; ++ int64_t qres; + int error; + + unmap_len = irec->br_startoff + irec->br_blockcount - destoff; +@@ -1135,13 +1136,19 @@ xfs_reflink_remap_extent( + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, 0); + +- /* If we're not just clearing space, then do we have enough quota? */ +- if (real_extent) { +- error = xfs_trans_reserve_quota_nblks(tp, ip, +- irec->br_blockcount, 0, XFS_QMOPT_RES_REGBLKS); +- if (error) +- goto out_cancel; +- } ++ /* ++ * Reserve quota for this operation. 
We don't know if the first unmap ++ * in the dest file will cause a bmap btree split, so we always reserve ++ * at least enough blocks for that split. If the extent being mapped ++ * in is written, we need to reserve quota for that too. ++ */ ++ qres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); ++ if (real_extent) ++ qres += irec->br_blockcount; ++ error = xfs_trans_reserve_quota_nblks(tp, ip, qres, 0, ++ XFS_QMOPT_RES_REGBLKS); ++ if (error) ++ goto out_cancel; + + trace_xfs_reflink_remap(ip, irec->br_startoff, + irec->br_blockcount, irec->br_startblock); +diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c +index 0d93d3c10fcc..3ce400158a0a 100644 +--- a/fs/xfs/xfs_rtalloc.c ++++ b/fs/xfs/xfs_rtalloc.c +@@ -257,6 +257,9 @@ xfs_rtallocate_extent_block( + end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1; + i <= end; + i++) { ++ /* Make sure we don't scan off the end of the rt volume. */ ++ maxlen = min(mp->m_sb.sb_rextents, i + maxlen) - i; ++ + /* + * See if there's a free extent of maxlen starting at i. + * If it's not so then next will contain the first non-free. +@@ -448,6 +451,14 @@ xfs_rtallocate_extent_near( + */ + if (bno >= mp->m_sb.sb_rextents) + bno = mp->m_sb.sb_rextents - 1; ++ ++ /* Make sure we don't run off the end of the rt volume. */ ++ maxlen = min(mp->m_sb.sb_rextents, bno + maxlen) - bno; ++ if (maxlen < minlen) { ++ *rtblock = NULLRTBLOCK; ++ return 0; ++ } ++ + /* + * Try the exact allocation first. + */ +@@ -1003,10 +1014,13 @@ xfs_growfs_rt( + xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL); + /* +- * Update the bitmap inode's size. ++ * Update the bitmap inode's size ondisk and incore. We need ++ * to update the incore size so that inode inactivation won't ++ * punch what it thinks are "posteof" blocks. + */ + mp->m_rbmip->i_d.di_size = + nsbp->sb_rbmblocks * nsbp->sb_blocksize; ++ i_size_write(VFS_I(mp->m_rbmip), mp->m_rbmip->i_d.di_size); + xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE); + /* + * Get the summary inode into the transaction. +@@ -1014,9 +1028,12 @@ xfs_growfs_rt( + xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL); + /* +- * Update the summary inode's size. ++ * Update the summary inode's size. We need to update the ++ * incore size so that inode inactivation won't punch what it ++ * thinks are "posteof" blocks. + */ + mp->m_rsumip->i_d.di_size = nmp->m_rsumsize; ++ i_size_write(VFS_I(mp->m_rsumip), mp->m_rsumip->i_d.di_size); + xfs_trans_log_inode(tp, mp->m_rsumip, XFS_ILOG_CORE); + /* + * Copy summary data from old to new sizes. +diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h +index d04637181ef2..980c9429abec 100644 +--- a/fs/xfs/xfs_sysfs.h ++++ b/fs/xfs/xfs_sysfs.h +@@ -44,9 +44,11 @@ xfs_sysfs_init( + struct xfs_kobj *parent_kobj, + const char *name) + { ++ struct kobject *parent; ++ ++ parent = parent_kobj ? 
&parent_kobj->kobject : NULL; + init_completion(&kobj->complete); +- return kobject_init_and_add(&kobj->kobject, ktype, +- &parent_kobj->kobject, "%s", name); ++ return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name); + } + + static inline void +diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c +index c3d547211d16..9c42e50a5cb7 100644 +--- a/fs/xfs/xfs_trans_dquot.c ++++ b/fs/xfs/xfs_trans_dquot.c +@@ -669,7 +669,7 @@ xfs_trans_dqresv( + } + } + if (ninos > 0) { +- total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos; ++ total_count = dqp->q_res_icount + ninos; + timer = be32_to_cpu(dqp->q_core.d_itimer); + warns = be16_to_cpu(dqp->q_core.d_iwarns); + warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit; +diff --git a/gen_headers_arm.bp b/gen_headers_arm.bp +index ea90cddccdeb..7d6a5504c6e0 100644 +--- a/gen_headers_arm.bp ++++ b/gen_headers_arm.bp +@@ -519,6 +519,8 @@ gen_headers_out_arm = [ + "linux/signal.h", + "linux/signalfd.h", + "linux/slatecom_interface.h", ++ "linux/smc.h", ++ "linux/smc_diag.h", + "linux/smcinvoke.h", + "linux/smiapp.h", + "linux/snmp.h", +diff --git a/gen_headers_arm64.bp b/gen_headers_arm64.bp +index 5caf1118d3ac..16bce3e74231 100644 +--- a/gen_headers_arm64.bp ++++ b/gen_headers_arm64.bp +@@ -517,6 +517,8 @@ gen_headers_out_arm64 = [ + "linux/signal.h", + "linux/signalfd.h", + "linux/slatecom_interface.h", ++ "linux/smc.h", ++ "linux/smc_diag.h", + "linux/smcinvoke.h", + "linux/smiapp.h", + "linux/snmp.h", +diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h +index c1a524de67c5..53b2a1f320f9 100644 +--- a/include/acpi/acpi_bus.h ++++ b/include/acpi/acpi_bus.h +@@ -241,6 +241,7 @@ struct acpi_pnp_type { + + struct acpi_device_pnp { + acpi_bus_id bus_id; /* Object name */ ++ int instance_no; /* Instance number of this object */ + struct acpi_pnp_type type; /* ID type */ + acpi_bus_address bus_address; /* _ADR */ + char *unique_id; /* _UID */ +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h +index 3c3519b0fce5..1c5b8e306e6b 100644 +--- a/include/asm-generic/vmlinux.lds.h ++++ b/include/asm-generic/vmlinux.lds.h +@@ -252,7 +252,8 @@ + + #define PAGE_ALIGNED_DATA(page_align) \ + . = ALIGN(page_align); \ +- *(.data..page_aligned) ++ *(.data..page_aligned) \ ++ . = ALIGN(page_align); + + #define READ_MOSTLY_DATA(align) \ + . = ALIGN(align); \ +@@ -461,10 +462,14 @@ + */ + #define TEXT_TEXT \ + ALIGN_FUNCTION(); \ +- *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ ++ *(.text.hot .text.hot.*) \ ++ *(TEXT_MAIN .text.fixup) \ ++ *(.text.unlikely .text.unlikely.*) \ ++ *(.text.unknown .text.unknown.*) \ + *(.text..ftrace) \ + *(TEXT_CFI_MAIN) \ + *(.ref.text) \ ++ *(.text.asan.* .text.tsan.*) \ + MEM_KEEP(init.text) \ + MEM_KEEP(exit.text) \ + +@@ -621,7 +626,9 @@ + . = ALIGN(bss_align); \ + .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \ + BSS_FIRST_SECTIONS \ ++ . = ALIGN(PAGE_SIZE); \ + *(.bss..page_aligned) \ ++ . 
= ALIGN(PAGE_SIZE); \ + *(.dynbss) \ + *(BSS_MAIN) \ + *(COMMON) \ +diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h +index a2bfd7843f18..4bb6b98782e9 100644 +--- a/include/crypto/if_alg.h ++++ b/include/crypto/if_alg.h +@@ -30,8 +30,8 @@ struct alg_sock { + + struct sock *parent; + +- unsigned int refcnt; +- unsigned int nokey_refcnt; ++ atomic_t refcnt; ++ atomic_t nokey_refcnt; + + const struct af_alg_type *type; + void *private; +diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h +index 5203560f992e..000c049a75f7 100644 +--- a/include/crypto/internal/hash.h ++++ b/include/crypto/internal/hash.h +@@ -80,13 +80,7 @@ int ahash_register_instance(struct crypto_template *tmpl, + struct ahash_instance *inst); + void ahash_free_instance(struct crypto_instance *inst); + +-int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, +- unsigned int keylen); +- +-static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) +-{ +- return alg->setkey != shash_no_setkey; +-} ++bool crypto_shash_alg_has_setkey(struct shash_alg *alg); + + bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); + +diff --git a/include/linux/acpi.h b/include/linux/acpi.h +index 5670bb9788bb..192b045cc56e 100644 +--- a/include/linux/acpi.h ++++ b/include/linux/acpi.h +@@ -734,6 +734,13 @@ static inline int acpi_device_modalias(struct device *dev, + return -ENODEV; + } + ++static inline struct platform_device * ++acpi_create_platform_device(struct acpi_device *adev, ++ struct property_entry *properties) ++{ ++ return NULL; ++} ++ + static inline bool acpi_dma_supported(struct acpi_device *adev) + { + return false; +diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h +index 8272fcf78c27..01602a747725 100644 +--- a/include/linux/backing-dev.h ++++ b/include/linux/backing-dev.h +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -525,4 +526,13 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi) + (1 << WB_async_congested)); + } + ++extern const char *bdi_unknown_name; ++ ++static inline const char *bdi_dev_name(struct backing_dev_info *bdi) ++{ ++ if (!bdi || !bdi->dev) ++ return bdi_unknown_name; ++ return dev_name(bdi->dev); ++} ++ + #endif /* _LINUX_BACKING_DEV_H */ +diff --git a/include/linux/bitops.h b/include/linux/bitops.h +index cee74a52b9eb..e1dee6c91ff5 100644 +--- a/include/linux/bitops.h ++++ b/include/linux/bitops.h +@@ -49,7 +49,7 @@ static inline int get_bitmask_order(unsigned int count) + + static __always_inline unsigned long hweight_long(unsigned long w) + { +- return sizeof(w) == 4 ? hweight32(w) : hweight64(w); ++ return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w); + } + + /** +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h +index 649db60fccfa..adf1c6bb3183 100644 +--- a/include/linux/blkdev.h ++++ b/include/linux/blkdev.h +@@ -860,6 +860,19 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev) + return bdev->bd_disk->queue; /* this is never NULL */ + } + ++/* ++ * The basic unit of block I/O is a sector. It is used in a number of contexts ++ * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9 ++ * bytes. Variables of type sector_t represent an offset or size that is a ++ * multiple of 512 bytes. Hence these two constants. 
++ */ ++#ifndef SECTOR_SHIFT ++#define SECTOR_SHIFT 9 ++#endif ++#ifndef SECTOR_SIZE ++#define SECTOR_SIZE (1 << SECTOR_SHIFT) ++#endif ++ + /* + * blk_rq_pos() : the current sector + * blk_rq_bytes() : bytes left in the entire request +@@ -892,19 +905,20 @@ extern unsigned int blk_rq_err_bytes(const struct request *rq); + + static inline unsigned int blk_rq_sectors(const struct request *rq) + { +- return blk_rq_bytes(rq) >> 9; ++ return blk_rq_bytes(rq) >> SECTOR_SHIFT; + } + + static inline unsigned int blk_rq_cur_sectors(const struct request *rq) + { +- return blk_rq_cur_bytes(rq) >> 9; ++ return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; + } + + static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, + int op) + { + if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)) +- return min(q->limits.max_discard_sectors, UINT_MAX >> 9); ++ return min(q->limits.max_discard_sectors, ++ UINT_MAX >> SECTOR_SHIFT); + + if (unlikely(op == REQ_OP_WRITE_SAME)) + return q->limits.max_write_same_sectors; +@@ -1178,16 +1192,21 @@ extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, + static inline int sb_issue_discard(struct super_block *sb, sector_t block, + sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) + { +- return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9), +- nr_blocks << (sb->s_blocksize_bits - 9), ++ return blkdev_issue_discard(sb->s_bdev, ++ block << (sb->s_blocksize_bits - ++ SECTOR_SHIFT), ++ nr_blocks << (sb->s_blocksize_bits - ++ SECTOR_SHIFT), + gfp_mask, flags); + } + static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, + sector_t nr_blocks, gfp_t gfp_mask) + { + return blkdev_issue_zeroout(sb->s_bdev, +- block << (sb->s_blocksize_bits - 9), +- nr_blocks << (sb->s_blocksize_bits - 9), ++ block << (sb->s_blocksize_bits - ++ SECTOR_SHIFT), ++ nr_blocks << (sb->s_blocksize_bits - ++ SECTOR_SHIFT), + gfp_mask, true); + } + +@@ -1294,7 +1313,8 @@ static inline int queue_alignment_offset(struct request_queue *q) + static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) + { + unsigned int granularity = max(lim->physical_block_size, lim->io_min); +- unsigned int alignment = sector_div(sector, granularity >> 9) << 9; ++ unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT) ++ << SECTOR_SHIFT; + + return (granularity + lim->alignment_offset - alignment) % granularity; + } +@@ -1328,8 +1348,8 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector + return 0; + + /* Why are these in bytes, not sectors? 
*/ +- alignment = lim->discard_alignment >> 9; +- granularity = lim->discard_granularity >> 9; ++ alignment = lim->discard_alignment >> SECTOR_SHIFT; ++ granularity = lim->discard_granularity >> SECTOR_SHIFT; + if (!granularity) + return 0; + +@@ -1340,7 +1360,7 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector + offset = (granularity + alignment - offset) % granularity; + + /* Turn it back into bytes, gaah */ +- return offset << 9; ++ return offset << SECTOR_SHIFT; + } + + static inline int bdev_discard_alignment(struct block_device *bdev) +diff --git a/include/linux/bvec.h b/include/linux/bvec.h +index 2425374a2493..ed855b6e20a8 100644 +--- a/include/linux/bvec.h ++++ b/include/linux/bvec.h +@@ -91,10 +91,17 @@ static inline void bvec_iter_advance(const struct bio_vec *bv, + } + } + ++static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter) ++{ ++ iter->bi_bvec_done = 0; ++ iter->bi_idx++; ++} ++ + #define for_each_bvec(bvl, bio_vec, iter, start) \ + for (iter = (start); \ + (iter).bi_size && \ + ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ +- bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) ++ (bvl).bv_len ? (void)bvec_iter_advance((bio_vec), &(iter), \ ++ (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter))) + + #endif /* __LINUX_BVEC_ITER_H */ +diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h +index 51bb6532785c..0e7350973e0e 100644 +--- a/include/linux/can/skb.h ++++ b/include/linux/can/skb.h +@@ -48,8 +48,12 @@ static inline void can_skb_reserve(struct sk_buff *skb) + + static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) + { +- if (sk) { +- sock_hold(sk); ++ /* If the socket has already been closed by user space, the ++ * refcount may already be 0 (and the socket will be freed ++ * after the last TX skb has been freed). So only increase ++ * socket refcount if the refcount is > 0. 
++ */ ++ if (sk && atomic_inc_not_zero(&sk->sk_refcnt)) { + skb->destructor = sock_efree; + skb->sk = sk; + } +@@ -60,21 +64,17 @@ static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) + */ + static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb) + { +- if (skb_shared(skb)) { +- struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); ++ struct sk_buff *nskb; + +- if (likely(nskb)) { +- can_skb_set_owner(nskb, skb->sk); +- consume_skb(skb); +- return nskb; +- } else { +- kfree_skb(skb); +- return NULL; +- } ++ nskb = skb_clone(skb, GFP_ATOMIC); ++ if (unlikely(!nskb)) { ++ kfree_skb(skb); ++ return NULL; + } + +- /* we can assume to have an unshared skb with proper owner */ +- return skb; ++ can_skb_set_owner(nskb, skb->sk); ++ consume_skb(skb); ++ return nskb; + } + + #endif /* !_CAN_SKB_H */ +diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h +index ce0f32d3c17c..27424c24a53d 100644 +--- a/include/linux/cgroup-defs.h ++++ b/include/linux/cgroup-defs.h +@@ -603,7 +603,9 @@ struct sock_cgroup_data { + union { + #ifdef __LITTLE_ENDIAN + struct { +- u8 is_data; ++ u8 is_data : 1; ++ u8 no_refcnt : 1; ++ u8 unused : 6; + u8 padding; + u16 prioidx; + u32 classid; +@@ -613,7 +615,9 @@ struct sock_cgroup_data { + u32 classid; + u16 prioidx; + u8 padding; +- u8 is_data; ++ u8 unused : 6; ++ u8 no_refcnt : 1; ++ u8 is_data : 1; + } __packed; + #endif + u64 val; +diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h +index f9a2c23a5363..b31872a749c5 100755 +--- a/include/linux/cgroup.h ++++ b/include/linux/cgroup.h +@@ -710,6 +710,7 @@ extern spinlock_t cgroup_sk_update_lock; + + void cgroup_sk_alloc_disable(void); + void cgroup_sk_alloc(struct sock_cgroup_data *skcd); ++void cgroup_sk_clone(struct sock_cgroup_data *skcd); + void cgroup_sk_free(struct sock_cgroup_data *skcd); + + static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) +@@ -723,7 +724,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) + */ + v = READ_ONCE(skcd->val); + +- if (v & 1) ++ if (v & 3) + return &cgrp_dfl_root.cgrp; + + return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp; +@@ -735,6 +736,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) + #else /* CONFIG_CGROUP_DATA */ + + static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {} ++static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {} + static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {} + + #endif /* CONFIG_CGROUP_DATA */ +diff --git a/include/linux/compat.h b/include/linux/compat.h +index fab35daf8759..6b9d38a7adca 100644 +--- a/include/linux/compat.h ++++ b/include/linux/compat.h +@@ -311,8 +311,6 @@ struct compat_kexec_segment; + struct compat_mq_attr; + struct compat_msgbuf; + +-extern void compat_exit_robust_list(struct task_struct *curr); +- + asmlinkage long + compat_sys_set_robust_list(struct compat_robust_list_head __user *head, + compat_size_t len); +diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h +index 61650c1830d4..d830eddacdc6 100644 +--- a/include/linux/compiler-gcc.h ++++ b/include/linux/compiler-gcc.h +@@ -149,6 +149,12 @@ + + #if GCC_VERSION < 30200 + # error Sorry, your compiler is too old - please upgrade it. 
++#elif defined(CONFIG_ARM64) && GCC_VERSION < 50100 && !defined(__clang__) ++/* ++ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293 ++ * https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk ++ */ ++# error Sorry, your version of GCC is too old - please use 5.1 or newer. + #endif + + #if GCC_VERSION < 30300 +diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h +index 6fd3c908a340..a30112ba06e4 100644 +--- a/include/linux/console_struct.h ++++ b/include/linux/console_struct.h +@@ -61,6 +61,7 @@ struct vc_data { + unsigned int vc_rows; + unsigned int vc_size_row; /* Bytes per row */ + unsigned int vc_scan_lines; /* # of scan lines */ ++ unsigned int vc_cell_height; /* CRTC character cell height */ + unsigned long vc_origin; /* [!] Start of real screen */ + unsigned long vc_scr_end; /* [!] End of real screen */ + unsigned long vc_visible_origin; /* [!] Top of visible window */ +diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h +index 2e75ae2d850b..a3e447ec56ea 100644 +--- a/include/linux/debugfs.h ++++ b/include/linux/debugfs.h +@@ -57,6 +57,8 @@ static const struct file_operations __fops = { \ + .llseek = generic_file_llseek, \ + } + ++typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); ++ + #if defined(CONFIG_DEBUG_FS) + + struct dentry *debugfs_create_file(const char *name, umode_t mode, +@@ -76,7 +78,6 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); + struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, + const char *dest); + +-typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); + struct dentry *debugfs_create_automount(const char *name, + struct dentry *parent, + debugfs_automount_t f, +@@ -191,7 +192,7 @@ static inline struct dentry *debugfs_create_symlink(const char *name, + + static inline struct dentry *debugfs_create_automount(const char *name, + struct dentry *parent, +- struct vfsmount *(*f)(void *), ++ debugfs_automount_t f, + void *data) + { + return ERR_PTR(-ENODEV); +diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h +index 2b37e89a85d2..715800a25a31 100644 +--- a/include/linux/device-mapper.h ++++ b/include/linux/device-mapper.h +@@ -589,8 +589,6 @@ extern struct ratelimit_state dm_ratelimit_state; + #define DMEMIT(x...) sz += ((sz >= maxlen) ? \ + 0 : scnprintf(result + sz, maxlen - sz, x)) + +-#define SECTOR_SHIFT 9 +- + /* + * Definitions of return values from target end_io function. + */ +diff --git a/include/linux/device.h b/include/linux/device.h +index 5ab43f42b0c0..d52c36f97e90 100644 +--- a/include/linux/device.h ++++ b/include/linux/device.h +@@ -814,6 +814,7 @@ struct device { + struct dev_pin_info *pins; + #endif + #ifdef CONFIG_GENERIC_MSI_IRQ ++ raw_spinlock_t msi_lock; + struct list_head msi_list; + #endif + +diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h +index 2ccbda2ea186..b1b47d7d723a 100644 +--- a/include/linux/diagchar.h ++++ b/include/linux/diagchar.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2008-2020, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2008-2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -147,8 +147,8 @@ + /* This needs to be modified manually now, when we add + * a new RANGE of SSIDs to the msg_mask_tbl. 
+ */ +-#define MSG_MASK_TBL_CNT 26 +-#define APPS_EVENT_LAST_ID 0xCC2 ++#define MSG_MASK_TBL_CNT 27 ++#define APPS_EVENT_LAST_ID 0xCFE + + #define MSG_SSID_0 0 + #define MSG_SSID_0_LAST 134 +@@ -200,8 +200,10 @@ + #define MSG_SSID_23_LAST 10416 + #define MSG_SSID_24 10500 + #define MSG_SSID_24_LAST 10505 +-#define MSG_SSID_25 0xC000 +-#define MSG_SSID_25_LAST 0xC063 ++#define MSG_SSID_25 10600 ++#define MSG_SSID_25_LAST 10620 ++#define MSG_SSID_26 0xC000 ++#define MSG_SSID_26_LAST 0xC063 + + static const uint32_t msg_bld_masks_0[] = { + MSG_LVL_LOW, +@@ -921,13 +923,39 @@ static const uint32_t msg_bld_masks_24[] = { + }; + + static const uint32_t msg_bld_masks_25[] = { ++ MSG_LVL_LOW, ++ MSG_LVL_LOW, ++ MSG_LVL_LOW, ++ MSG_LVL_LOW, ++ MSG_LVL_LOW, ++ MSG_LVL_LOW, ++ MSG_LVL_LOW, ++ MSG_LVL_LOW, ++ MSG_LVL_LOW, ++ MSG_LVL_LOW, ++ MSG_LVL_LOW, ++ MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | ++ MSG_MASK_8 | MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11, ++ MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | ++ MSG_MASK_8 | MSG_MASK_9 | MSG_MASK_10, ++ MSG_LVL_LOW, ++ MSG_LVL_LOW, ++ MSG_LVL_LOW, ++ MSG_LVL_LOW, ++ MSG_LVL_LOW, ++ MSG_LVL_LOW, ++ MSG_LVL_LOW, ++ MSG_LVL_LOW ++}; ++ ++static const uint32_t msg_bld_masks_26[] = { + MSG_LVL_LOW + }; + + /* LOG CODES */ + static const uint32_t log_code_last_tbl[] = { + 0x0, /* EQUIP ID 0 */ +- 0x1CDD, /* EQUIP ID 1 */ ++ 0x1D86, /* EQUIP ID 1 */ + 0x0, /* EQUIP ID 2 */ + 0x0, /* EQUIP ID 3 */ + 0x4910, /* EQUIP ID 4 */ +diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h +index 885f587a3555..affe4d1d7d4a 100644 +--- a/include/linux/eeprom_93xx46.h ++++ b/include/linux/eeprom_93xx46.h +@@ -16,6 +16,8 @@ struct eeprom_93xx46_platform_data { + #define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0) + /* Instructions such as EWEN are (addrlen + 2) in length. */ + #define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1) ++/* Add extra cycle after address during a read */ ++#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2) + + /* + * optional hooks to control additional logic +diff --git a/include/linux/efi.h b/include/linux/efi.h +index 02c4f16685b6..69605956beb8 100644 +--- a/include/linux/efi.h ++++ b/include/linux/efi.h +@@ -910,7 +910,11 @@ extern void *efi_get_pal_addr (void); + extern void efi_map_pal_code (void); + extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); + extern void efi_gettimeofday (struct timespec64 *ts); ++#ifdef CONFIG_EFI + extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ ++#else ++static inline void efi_enter_virtual_mode (void) {} ++#endif + #ifdef CONFIG_X86 + extern void efi_late_init(void); + extern void efi_free_boot_services(void); +diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h +index 698d51a0eea3..4adf7faeaeb5 100644 +--- a/include/linux/elfcore.h ++++ b/include/linux/elfcore.h +@@ -55,6 +55,7 @@ static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregse + } + #endif + ++#if defined(CONFIG_UM) || defined(CONFIG_IA64) + /* + * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out + * extra segments containing the gate DSO contents. 
Dumping its +@@ -69,5 +70,26 @@ elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset); + extern int + elf_core_write_extra_data(struct coredump_params *cprm); + extern size_t elf_core_extra_data_size(void); ++#else ++static inline Elf_Half elf_core_extra_phdrs(void) ++{ ++ return 0; ++} ++ ++static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) ++{ ++ return 1; ++} ++ ++static inline int elf_core_write_extra_data(struct coredump_params *cprm) ++{ ++ return 1; ++} ++ ++static inline size_t elf_core_extra_data_size(void) ++{ ++ return 0; ++} ++#endif + + #endif /* _LINUX_ELFCORE_H */ +diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h +index 278e3ef05336..56c6d9031663 100644 +--- a/include/linux/elfnote.h ++++ b/include/linux/elfnote.h +@@ -53,7 +53,7 @@ + .popsection ; + + #define ELFNOTE(name, type, desc) \ +- ELFNOTE_START(name, type, "") \ ++ ELFNOTE_START(name, type, "a") \ + desc ; \ + ELFNOTE_END + +diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h +index a0e03b13b449..2aa32075bca1 100644 +--- a/include/linux/extcon/extcon-adc-jack.h ++++ b/include/linux/extcon/extcon-adc-jack.h +@@ -59,7 +59,7 @@ struct adc_jack_pdata { + const char *name; + const char *consumer_channel; + +- const enum extcon *cable_names; ++ const unsigned int *cable_names; + + /* The last entry's state should be 0 */ + struct adc_jack_cond *adc_conditions; +diff --git a/include/linux/font.h b/include/linux/font.h +index d6821769dd1e..f85e70bd4793 100644 +--- a/include/linux/font.h ++++ b/include/linux/font.h +@@ -57,4 +57,17 @@ extern const struct font_desc *get_default_font(int xres, int yres, + /* Max. length for the name of a predefined font */ + #define MAX_FONT_NAME 32 + ++/* Extra word getters */ ++#define REFCOUNT(fd) (((int *)(fd))[-1]) ++#define FNTSIZE(fd) (((int *)(fd))[-2]) ++#define FNTCHARCNT(fd) (((int *)(fd))[-3]) ++#define FNTSUM(fd) (((int *)(fd))[-4]) ++ ++#define FONT_EXTRA_WORDS 4 ++ ++struct font_data { ++ unsigned int extra[FONT_EXTRA_WORDS]; ++ const unsigned char data[]; ++} __packed; ++ + #endif /* _VIDEO_FONT_H */ +diff --git a/include/linux/fs.h b/include/linux/fs.h +index c3d2a2a09967..86c97fee70e0 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -1989,6 +1989,10 @@ static inline bool HAS_UNMAPPED_ID(struct inode *inode) + * wb stat updates to grab mapping->tree_lock. See + * inode_switch_wb_work_fn() for details. + * ++ * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists. ++ * Used to detect that mark_inode_dirty() should not move ++ * inode between dirty lists. ++ * + * Q: What is the difference between I_WILL_FREE and I_FREEING? 
+ */ + #define I_DIRTY_SYNC (1 << 0) +@@ -2006,9 +2010,9 @@ static inline bool HAS_UNMAPPED_ID(struct inode *inode) + #define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP) + #define I_LINKABLE (1 << 10) + #define I_DIRTY_TIME (1 << 11) +-#define __I_DIRTY_TIME_EXPIRED 12 +-#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED) ++#define I_DIRTY_TIME_EXPIRED (1 << 12) + #define I_WB_SWITCH (1 << 13) ++#define I_SYNC_QUEUED (1 << 17) + + #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) + #define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME) +diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h +index ab7938a58ca0..941a0f1b8a54 100644 +--- a/include/linux/ftrace.h ++++ b/include/linux/ftrace.h +@@ -792,7 +792,9 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ + #ifdef CONFIG_FUNCTION_GRAPH_TRACER + + /* for init task */ +-#define INIT_FTRACE_GRAPH .ret_stack = NULL, ++#define INIT_FTRACE_GRAPH \ ++ .ret_stack = NULL, \ ++ .tracing_graph_pause = ATOMIC_INIT(0), + + /* + * Stack of return addresses for functions +diff --git a/include/linux/futex.h b/include/linux/futex.h +index c015fa91e7cc..0f294ae63c78 100644 +--- a/include/linux/futex.h ++++ b/include/linux/futex.h +@@ -1,6 +1,8 @@ + #ifndef _LINUX_FUTEX_H + #define _LINUX_FUTEX_H + ++#include ++ + #include + + struct inode; +@@ -11,9 +13,6 @@ union ktime; + long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout, + u32 __user *uaddr2, u32 val2, u32 val3); + +-extern int +-handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi); +- + /* + * Futexes are matched on equal values of this key. + * The key type depends on whether it's a shared or private mapping. +@@ -56,19 +55,34 @@ union futex_key { + #define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } } + + #ifdef CONFIG_FUTEX +-extern void exit_robust_list(struct task_struct *curr); +-extern void exit_pi_state_list(struct task_struct *curr); +-#ifdef CONFIG_HAVE_FUTEX_CMPXCHG +-#define futex_cmpxchg_enabled 1 +-#else +-extern int futex_cmpxchg_enabled; +-#endif +-#else +-static inline void exit_robust_list(struct task_struct *curr) +-{ +-} +-static inline void exit_pi_state_list(struct task_struct *curr) ++enum { ++ FUTEX_STATE_OK, ++ FUTEX_STATE_EXITING, ++ FUTEX_STATE_DEAD, ++}; ++ ++static inline void futex_init_task(struct task_struct *tsk) + { ++ tsk->robust_list = NULL; ++#ifdef CONFIG_COMPAT ++ tsk->compat_robust_list = NULL; ++#endif ++ INIT_LIST_HEAD(&tsk->pi_state_list); ++ tsk->pi_state_cache = NULL; ++ tsk->futex_state = FUTEX_STATE_OK; ++ mutex_init(&tsk->futex_exit_mutex); + } ++ ++void futex_exit_recursive(struct task_struct *tsk); ++void futex_exit_release(struct task_struct *tsk); ++void futex_exec_release(struct task_struct *tsk); ++ ++long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, ++ u32 __user *uaddr2, u32 val2, u32 val3); ++#else ++static inline void futex_init_task(struct task_struct *tsk) { } ++static inline void futex_exit_recursive(struct task_struct *tsk) { } ++static inline void futex_exit_release(struct task_struct *tsk) { } ++static inline void futex_exec_release(struct task_struct *tsk) { } + #endif + #endif +diff --git a/include/linux/genhd.h b/include/linux/genhd.h +index 3c99fb6727ca..12a2f5ac51c9 100644 +--- a/include/linux/genhd.h ++++ b/include/linux/genhd.h +@@ -716,9 +716,11 @@ static inline sector_t part_nr_sects_read(struct hd_struct *part) + static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) + { + #if BITS_PER_LONG==32 
&& defined(CONFIG_LBDAF) && defined(CONFIG_SMP) ++ preempt_disable(); + write_seqcount_begin(&part->nr_sects_seq); + part->nr_sects = size; + write_seqcount_end(&part->nr_sects_seq); ++ preempt_enable(); + #elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT) + preempt_disable(); + part->nr_sects = size; +diff --git a/include/linux/hid.h b/include/linux/hid.h +index 877bb9aaca18..57d4a65b0fda 100644 +--- a/include/linux/hid.h ++++ b/include/linux/hid.h +@@ -248,6 +248,8 @@ struct hid_item { + #define HID_CP_SELECTION 0x000c0080 + #define HID_CP_MEDIASELECTION 0x000c0087 + #define HID_CP_SELECTDISC 0x000c00ba ++#define HID_CP_VOLUMEUP 0x000c00e9 ++#define HID_CP_VOLUMEDOWN 0x000c00ea + #define HID_CP_PLAYBACKSPEED 0x000c00f1 + #define HID_CP_PROXIMITY 0x000c0109 + #define HID_CP_SPEAKERSYSTEM 0x000c0160 +@@ -877,34 +879,49 @@ static inline void hid_device_io_stop(struct hid_device *hid) { + * @max: maximal valid usage->code to consider later (out parameter) + * @type: input event type (EV_KEY, EV_REL, ...) + * @c: code which corresponds to this usage and type ++ * ++ * The value pointed to by @bit will be set to NULL if either @type is ++ * an unhandled event type, or if @c is out of range for @type. This ++ * can be used as an error condition. + */ + static inline void hid_map_usage(struct hid_input *hidinput, + struct hid_usage *usage, unsigned long **bit, int *max, +- __u8 type, __u16 c) ++ __u8 type, unsigned int c) + { + struct input_dev *input = hidinput->input; +- +- usage->type = type; +- usage->code = c; ++ unsigned long *bmap = NULL; ++ unsigned int limit = 0; + + switch (type) { + case EV_ABS: +- *bit = input->absbit; +- *max = ABS_MAX; ++ bmap = input->absbit; ++ limit = ABS_MAX; + break; + case EV_REL: +- *bit = input->relbit; +- *max = REL_MAX; ++ bmap = input->relbit; ++ limit = REL_MAX; + break; + case EV_KEY: +- *bit = input->keybit; +- *max = KEY_MAX; ++ bmap = input->keybit; ++ limit = KEY_MAX; + break; + case EV_LED: +- *bit = input->ledbit; +- *max = LED_MAX; ++ bmap = input->ledbit; ++ limit = LED_MAX; + break; + } ++ ++ if (unlikely(c > limit || !bmap)) { ++ pr_warn_ratelimited("%s: Invalid code %d type %d\n", ++ input->name, c, type); ++ *bit = NULL; ++ return; ++ } ++ ++ usage->type = type; ++ usage->code = c; ++ *max = limit; ++ *bit = bmap; + } + + /** +@@ -918,7 +935,8 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput, + __u8 type, __u16 c) + { + hid_map_usage(hidinput, usage, bit, max, type, c); +- clear_bit(c, *bit); ++ if (*bit) ++ clear_bit(usage->code, *bit); + } + + /** +@@ -1112,8 +1130,7 @@ static inline void hid_hw_wait(struct hid_device *hdev) + */ + static inline u32 hid_report_len(struct hid_report *report) + { +- /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */ +- return ((report->size - 1) >> 3) + 1 + (report->id > 0); ++ return DIV_ROUND_UP(report->size, 8) + (report->id > 0); + } + + int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, +diff --git a/include/linux/hil_mlc.h b/include/linux/hil_mlc.h +index 394a8405dd74..e0521a1d9325 100644 +--- a/include/linux/hil_mlc.h ++++ b/include/linux/hil_mlc.h +@@ -103,7 +103,7 @@ struct hilse_node { + + /* Methods for back-end drivers, e.g. 
hp_sdc_mlc */ + typedef int (hil_mlc_cts) (hil_mlc *mlc); +-typedef void (hil_mlc_out) (hil_mlc *mlc); ++typedef int (hil_mlc_out) (hil_mlc *mlc); + typedef int (hil_mlc_in) (hil_mlc *mlc, suseconds_t timeout); + + struct hil_mlc_devinfo { +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h +index 6b8a7b654771..6417bc845db5 100644 +--- a/include/linux/hugetlb.h ++++ b/include/linux/hugetlb.h +@@ -93,7 +93,7 @@ void free_huge_page(struct page *page); + void hugetlb_fix_reserve_counts(struct inode *inode); + extern struct mutex *hugetlb_fault_mutex_table; + u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, +- pgoff_t idx, unsigned long address); ++ pgoff_t idx); + + pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); + +@@ -451,17 +451,6 @@ static inline int hstate_index(struct hstate *h) + return h - hstates; + } + +-pgoff_t __basepage_index(struct page *page); +- +-/* Return page->index in PAGE_SIZE units */ +-static inline pgoff_t basepage_index(struct page *page) +-{ +- if (!PageCompound(page)) +- return page->index; +- +- return __basepage_index(page); +-} +- + extern int dissolve_free_huge_pages(unsigned long start_pfn, + unsigned long end_pfn); + static inline bool hugepage_migration_supported(struct hstate *h) +@@ -502,6 +491,9 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm) + { + atomic_long_sub(l, &mm->hugetlb_usage); + } ++ ++void set_page_huge_active(struct page *page); ++ + #else /* CONFIG_HUGETLB_PAGE */ + struct hstate {}; + #define alloc_huge_page(v, a, r) NULL +@@ -526,10 +518,6 @@ static inline unsigned int pages_per_huge_page(struct hstate *h) + #define hstate_index_to_shift(index) 0 + #define hstate_index(h) 0 + +-static inline pgoff_t basepage_index(struct page *page) +-{ +- return page->index; +-} + #define dissolve_free_huge_pages(s, e) 0 + #define hugepage_migration_supported(h) false + +diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h +index a3c3ecd59f08..7a43afd27365 100644 +--- a/include/linux/i2c-algo-pca.h ++++ b/include/linux/i2c-algo-pca.h +@@ -52,6 +52,20 @@ + #define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */ + #define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */ + ++/** ++ * struct pca_i2c_bus_settings - The configured PCA i2c bus settings ++ * @mode: Configured i2c bus mode ++ * @tlow: Configured SCL LOW period ++ * @thi: Configured SCL HIGH period ++ * @clock_freq: The configured clock frequency ++ */ ++struct pca_i2c_bus_settings { ++ int mode; ++ int tlow; ++ int thi; ++ int clock_freq; ++}; ++ + struct i2c_algo_pca_data { + void *data; /* private low level data */ + void (*write_byte) (void *data, int reg, int val); +@@ -63,6 +77,7 @@ struct i2c_algo_pca_data { + * For PCA9665, use the frequency you want here. 
*/ + unsigned int i2c_clock; + unsigned int chip; ++ struct pca_i2c_bus_settings bus_settings; + }; + + int i2c_pca_add_bus(struct i2c_adapter *); +diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h +index 57086e9fc64c..d87dabac04c1 100644 +--- a/include/linux/icmpv6.h ++++ b/include/linux/icmpv6.h +@@ -2,6 +2,7 @@ + #define _LINUX_ICMPV6_H + + #include ++#include + #include + + static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) +@@ -12,21 +13,64 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) + #include + + #if IS_ENABLED(CONFIG_IPV6) +-extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); + + typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info, +- const struct in6_addr *force_saddr); ++ const struct in6_addr *force_saddr, ++ const struct inet6_skb_parm *parm); ++void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, ++ const struct in6_addr *force_saddr, ++ const struct inet6_skb_parm *parm); ++#if IS_BUILTIN(CONFIG_IPV6) ++static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, ++ const struct inet6_skb_parm *parm) ++{ ++ icmp6_send(skb, type, code, info, NULL, parm); ++} ++static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn) ++{ ++ BUILD_BUG_ON(fn != icmp6_send); ++ return 0; ++} ++static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn) ++{ ++ BUILD_BUG_ON(fn != icmp6_send); ++ return 0; ++} ++#else ++extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, ++ const struct inet6_skb_parm *parm); + extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn); + extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn); ++#endif ++ ++static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) ++{ ++ __icmpv6_send(skb, type, code, info, IP6CB(skb)); ++} ++ + int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, + unsigned int data_len); + ++#if IS_ENABLED(CONFIG_NF_NAT) ++void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info); ++#else ++static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) ++{ ++ struct inet6_skb_parm parm = { 0 }; ++ __icmpv6_send(skb_in, type, code, info, &parm); ++} ++#endif ++ + #else + + static inline void icmpv6_send(struct sk_buff *skb, + u8 type, u8 code, __u32 info) + { ++} + ++static inline void icmpv6_ndo_send(struct sk_buff *skb, ++ u8 type, u8 code, __u32 info) ++{ + } + #endif + +diff --git a/include/linux/ide.h b/include/linux/ide.h +index a633898f36ac..eb2ac48c99db 100644 +--- a/include/linux/ide.h ++++ b/include/linux/ide.h +@@ -128,7 +128,6 @@ struct ide_io_ports { + */ + #define PARTN_BITS 6 /* number of minor dev bits for partitions */ + #define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */ +-#define SECTOR_SIZE 512 + + /* + * Timeouts for various operations: +diff --git a/include/linux/idr.h b/include/linux/idr.h +index 083d61e92706..3639a28188c9 100644 +--- a/include/linux/idr.h ++++ b/include/linux/idr.h +@@ -195,6 +195,11 @@ static inline int ida_get_new(struct ida *ida, int *p_id) + return ida_get_new_above(ida, 0, p_id); + } + ++static inline bool ida_is_empty(struct ida *ida) ++{ ++ return idr_is_empty(&ida->idr); ++} ++ + void __init idr_init_cache(void); + + #endif /* __IDR_H__ */ +diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h +index a4ccc3122f93..cfcbc49f4ddf 100644 +--- a/include/linux/if_macvlan.h ++++ b/include/linux/if_macvlan.h 
+@@ -70,13 +70,14 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan, + if (likely(success)) { + struct vlan_pcpu_stats *pcpu_stats; + +- pcpu_stats = this_cpu_ptr(vlan->pcpu_stats); ++ pcpu_stats = get_cpu_ptr(vlan->pcpu_stats); + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->rx_packets++; + pcpu_stats->rx_bytes += len; + if (multicast) + pcpu_stats->rx_multicast++; + u64_stats_update_end(&pcpu_stats->syncp); ++ put_cpu_ptr(vlan->pcpu_stats); + } else { + this_cpu_inc(vlan->pcpu_stats->rx_errors); + } +diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h +index 7e39719e27cb..27edc7f2de31 100644 +--- a/include/linux/if_vlan.h ++++ b/include/linux/if_vlan.h +@@ -30,6 +30,8 @@ + #define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */ + #define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */ + ++#define VLAN_MAX_DEPTH 8 /* Max. number of nested VLAN tags parsed */ ++ + /* + * struct vlan_hdr - vlan header + * @h_vlan_TCI: priority and VLAN ID +@@ -495,10 +497,10 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) + * Returns the EtherType of the packet, regardless of whether it is + * vlan encapsulated (normal or hardware accelerated) or not. + */ +-static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, ++static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, + int *depth) + { +- unsigned int vlan_depth = skb->mac_len; ++ unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH; + + /* if type is 802.1Q/AD then the header should already be + * present at mac_len - VLAN_HLEN (if mac_len > 0), or at +@@ -513,13 +515,12 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, + vlan_depth = ETH_HLEN; + } + do { +- struct vlan_hdr *vh; ++ struct vlan_hdr vhdr, *vh; + +- if (unlikely(!pskb_may_pull(skb, +- vlan_depth + VLAN_HLEN))) ++ vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr); ++ if (unlikely(!vh || !--parse_depth)) + return 0; + +- vh = (struct vlan_hdr *)(skb->data + vlan_depth); + type = vh->h_vlan_encapsulated_proto; + vlan_depth += VLAN_HLEN; + } while (eth_type_vlan(type)); +@@ -538,11 +539,25 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, + * Returns the EtherType of the packet, regardless of whether it is + * vlan encapsulated (normal or hardware accelerated) or not. + */ +-static inline __be16 vlan_get_protocol(struct sk_buff *skb) ++static inline __be16 vlan_get_protocol(const struct sk_buff *skb) + { + return __vlan_get_protocol(skb, skb->protocol, NULL); + } + ++/* A getter for the SKB protocol field which will handle VLAN tags consistently ++ * whether VLAN acceleration is enabled or not. ++ */ ++static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan) ++{ ++ if (!skip_vlan) ++ /* VLAN acceleration strips the VLAN header from the skb and ++ * moves it to skb->vlan_proto ++ */ ++ return skb_vlan_tag_present(skb) ? 
skb->vlan_proto : skb->protocol; ++ ++ return vlan_get_protocol(skb); ++} ++ + static inline void vlan_set_encap_proto(struct sk_buff *skb, + struct vlan_hdr *vhdr) + { +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h +index 27dbab59f034..188bd1768971 100644 +--- a/include/linux/intel-iommu.h ++++ b/include/linux/intel-iommu.h +@@ -317,8 +317,8 @@ enum { + + #define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) + #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) +-#define QI_DEV_EIOTLB_GLOB(g) ((u64)g) +-#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) ++#define QI_DEV_EIOTLB_GLOB(g) ((u64)(g) & 0x1) ++#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32) + #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) + #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) + #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ +@@ -447,6 +447,8 @@ struct intel_iommu { + struct device *iommu_dev; /* IOMMU-sysfs device */ + int node; + u32 flags; /* Software defined flags */ ++ ++ struct dmar_drhd_unit *drhd; + }; + + static inline void __iommu_flush_cache( +diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h +index 58df02bd93c9..fa46183b163b 100644 +--- a/include/linux/io-mapping.h ++++ b/include/linux/io-mapping.h +@@ -120,9 +120,12 @@ io_mapping_init_wc(struct io_mapping *iomap, + resource_size_t base, + unsigned long size) + { ++ iomap->iomem = ioremap_wc(base, size); ++ if (!iomap->iomem) ++ return NULL; ++ + iomap->base = base; + iomap->size = size; +- iomap->iomem = ioremap_wc(base, size); + #if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */ + iomap->prot = pgprot_noncached_wc(PAGE_KERNEL); + #elif defined(pgprot_writecombine) +diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h +index 4eee852f5bc7..342e622f84a4 100755 +--- a/include/linux/ipv6.h ++++ b/include/linux/ipv6.h +@@ -2,6 +2,7 @@ + #define _IPV6_H + + #include ++#include + + #define ipv6_optlen(p) (((p)->hdrlen+1) << 3) + #define ipv6_authlen(p) (((p)->hdrlen+2) << 2) +@@ -77,7 +78,6 @@ struct ipv6_params { + __s32 autoconf; + }; + extern struct ipv6_params ipv6_defaults; +-#include + #include + #include + +diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h +index 8e9e288b08c1..05d86addeaf1 100644 +--- a/include/linux/kdev_t.h ++++ b/include/linux/kdev_t.h +@@ -20,61 +20,61 @@ + }) + + /* acceptable for old filesystems */ +-static inline bool old_valid_dev(dev_t dev) ++static __always_inline bool old_valid_dev(dev_t dev) + { + return MAJOR(dev) < 256 && MINOR(dev) < 256; + } + +-static inline u16 old_encode_dev(dev_t dev) ++static __always_inline u16 old_encode_dev(dev_t dev) + { + return (MAJOR(dev) << 8) | MINOR(dev); + } + +-static inline dev_t old_decode_dev(u16 val) ++static __always_inline dev_t old_decode_dev(u16 val) + { + return MKDEV((val >> 8) & 255, val & 255); + } + +-static inline u32 new_encode_dev(dev_t dev) ++static __always_inline u32 new_encode_dev(dev_t dev) + { + unsigned major = MAJOR(dev); + unsigned minor = MINOR(dev); + return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12); + } + +-static inline dev_t new_decode_dev(u32 dev) ++static __always_inline dev_t new_decode_dev(u32 dev) + { + unsigned major = (dev & 0xfff00) >> 8; + unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00); + return MKDEV(major, minor); + } + +-static inline u64 huge_encode_dev(dev_t dev) ++static __always_inline u64 huge_encode_dev(dev_t dev) + { + return new_encode_dev(dev); + } + +-static inline dev_t huge_decode_dev(u64 
dev) ++static __always_inline dev_t huge_decode_dev(u64 dev) + { + return new_decode_dev(dev); + } + +-static inline int sysv_valid_dev(dev_t dev) ++static __always_inline int sysv_valid_dev(dev_t dev) + { + return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18); + } + +-static inline u32 sysv_encode_dev(dev_t dev) ++static __always_inline u32 sysv_encode_dev(dev_t dev) + { + return MINOR(dev) | (MAJOR(dev) << 18); + } + +-static inline unsigned sysv_major(u32 dev) ++static __always_inline unsigned sysv_major(u32 dev) + { + return (dev >> 18) & 0x3fff; + } + +-static inline unsigned sysv_minor(u32 dev) ++static __always_inline unsigned sysv_minor(u32 dev) + { + return dev & 0x3ffff; + } +diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h +index e465bb15912d..6be5545d3584 100644 +--- a/include/linux/kgdb.h ++++ b/include/linux/kgdb.h +@@ -317,7 +317,7 @@ extern void gdbstub_exit(int status); + extern int kgdb_single_step; + extern atomic_t kgdb_active; + #define in_dbg_master() \ +- (raw_smp_processor_id() == atomic_read(&kgdb_active)) ++ (irqs_disabled() && (smp_processor_id() == atomic_read(&kgdb_active))) + extern bool dbg_is_early; + extern void __init dbg_late_init(void); + #else /* ! CONFIG_KGDB */ +diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h +index 1e032a1ddb3e..60af12869ac7 100644 +--- a/include/linux/khugepaged.h ++++ b/include/linux/khugepaged.h +@@ -13,6 +13,7 @@ extern int __khugepaged_enter(struct mm_struct *mm); + extern void __khugepaged_exit(struct mm_struct *mm); + extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, + unsigned long vm_flags); ++extern void khugepaged_min_free_kbytes_update(void); + + #define khugepaged_enabled() \ + (transparent_hugepage_flags & \ +@@ -70,6 +71,10 @@ static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma, + { + return 0; + } ++ ++static inline void khugepaged_min_free_kbytes_update(void) ++{ ++} + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + + #endif /* _LINUX_KHUGEPAGED_H */ +diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h +index cb527c78de9f..4db62045f01a 100644 +--- a/include/linux/kprobes.h ++++ b/include/linux/kprobes.h +@@ -366,6 +366,10 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) + return this_cpu_ptr(&kprobe_ctlblk); + } + ++extern struct kprobe kprobe_busy; ++void kprobe_busy_begin(void); ++void kprobe_busy_end(void); ++ + int register_kprobe(struct kprobe *p); + void unregister_kprobe(struct kprobe *p); + int register_kprobes(struct kprobe **kps, int num); +diff --git a/include/linux/kref.h b/include/linux/kref.h +index e2df6d397ff0..7c88d865f82f 100644 +--- a/include/linux/kref.h ++++ b/include/linux/kref.h +@@ -24,6 +24,8 @@ struct kref { + atomic_t refcount; + }; + ++#define KREF_INIT(n) { .refcount = ATOMIC_INIT(n), } ++ + /** + * kref_init - initialize object. + * @kref: object in question. +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h +index bb4af1bfcaf4..05aa860daa5c 100644 +--- a/include/linux/kvm_host.h ++++ b/include/linux/kvm_host.h +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + #include + + #include +@@ -932,7 +933,15 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) + static inline unsigned long + __gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) + { +- return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; ++ /* ++ * The index was checked originally in search_memslots. To avoid ++ * that a malicious guest builds a Spectre gadget out of e.g. 
page ++ * table walks, do not let the processor speculate loads outside ++ * the guest's registered memslots. ++ */ ++ unsigned long offset = gfn - slot->base_gfn; ++ offset = array_index_nospec(offset, slot->npages); ++ return slot->userspace_addr + offset * PAGE_SIZE; + } + + static inline int memslot_id(struct kvm *kvm, gfn_t gfn) +diff --git a/include/linux/libata.h b/include/linux/libata.h +index cdfb67b22317..3fabf57fd6e0 100644 +--- a/include/linux/libata.h ++++ b/include/linux/libata.h +@@ -38,6 +38,7 @@ + #include + #include + #include ++#include + + /* + * Define if arch has non-standard setup. This is a _PCI_ standard +@@ -434,6 +435,7 @@ enum { + ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */ + ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ + ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ ++ ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */ + + /* DMA mask for user DMA control: User visible values; DO NOT + renumber */ +@@ -497,6 +499,7 @@ enum hsm_task_states { + }; + + enum ata_completion_errors { ++ AC_ERR_OK = 0, /* no error */ + AC_ERR_DEV = (1 << 0), /* device reported error */ + AC_ERR_HSM = (1 << 1), /* host state machine violation */ + AC_ERR_TIMEOUT = (1 << 2), /* timeout */ +@@ -878,6 +881,8 @@ struct ata_port { + struct timer_list fastdrain_timer; + unsigned long fastdrain_cnt; + ++ async_cookie_t cookie; ++ + int em_message_type; + void *private_data; + +@@ -899,9 +904,9 @@ struct ata_port_operations { + /* + * Command execution + */ +- int (*qc_defer)(struct ata_queued_cmd *qc); +- int (*check_atapi_dma)(struct ata_queued_cmd *qc); +- void (*qc_prep)(struct ata_queued_cmd *qc); ++ int (*qc_defer)(struct ata_queued_cmd *qc); ++ int (*check_atapi_dma)(struct ata_queued_cmd *qc); ++ enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc); + unsigned int (*qc_issue)(struct ata_queued_cmd *qc); + bool (*qc_fill_rtf)(struct ata_queued_cmd *qc); + +@@ -1164,7 +1169,7 @@ extern int ata_xfer_mode2shift(unsigned long xfer_mode); + extern const char *ata_mode_string(unsigned long xfer_mask); + extern unsigned long ata_id_xfermask(const u16 *id); + extern int ata_std_qc_defer(struct ata_queued_cmd *qc); +-extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); ++extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc); + extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, + unsigned int n_elem); + extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); +@@ -1877,9 +1882,9 @@ extern const struct ata_port_operations ata_bmdma_port_ops; + .sg_tablesize = LIBATA_MAX_PRD, \ + .dma_boundary = ATA_DMA_BOUNDARY + +-extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc); ++extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc); + extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc); +-extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); ++extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); + extern unsigned int ata_bmdma_port_intr(struct ata_port *ap, + struct ata_queued_cmd *qc); + extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance); +diff --git a/include/linux/log2.h b/include/linux/log2.h +index c373295f359f..cca606609e1b 100644 +--- a/include/linux/log2.h ++++ b/include/linux/log2.h +@@ -159,7 +159,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) + #define roundup_pow_of_two(n) \ + ( \ + __builtin_constant_p(n) ? ( \ +- (n == 1) ? 
1 : \ ++ ((n) == 1) ? 1 : \ + (1UL << (ilog2((n) - 1) + 1)) \ + ) : \ + __roundup_pow_of_two(n) \ +diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h +index 5fbbfa6ce1f2..4bd47d1cb78c 100644 +--- a/include/linux/memcontrol.h ++++ b/include/linux/memcontrol.h +@@ -494,6 +494,17 @@ struct mem_cgroup *lock_page_memcg(struct page *page); + void __unlock_page_memcg(struct mem_cgroup *memcg); + void unlock_page_memcg(struct page *page); + ++static inline void __mem_cgroup_update_page_stat(struct page *page, ++ struct mem_cgroup *memcg, ++ enum mem_cgroup_stat_index idx, ++ int val) ++{ ++ VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page))); ++ ++ if (memcg && memcg->stat) ++ this_cpu_add(memcg->stat->count[idx], val); ++} ++ + /** + * mem_cgroup_update_page_stat - update page state statistics + * @page: the page +@@ -509,13 +520,12 @@ void unlock_page_memcg(struct page *page); + * mem_cgroup_update_page_stat(page, state, -1); + * unlock_page(page) or unlock_page_memcg(page) + */ ++ + static inline void mem_cgroup_update_page_stat(struct page *page, + enum mem_cgroup_stat_index idx, int val) + { +- VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page))); + +- if (page->mem_cgroup) +- this_cpu_add(page->mem_cgroup->stat->count[idx], val); ++ __mem_cgroup_update_page_stat(page, page->mem_cgroup, idx, val); + } + + static inline void mem_cgroup_inc_page_stat(struct page *page, +@@ -772,6 +782,13 @@ static inline void mem_cgroup_update_page_stat(struct page *page, + { + } + ++static inline void __mem_cgroup_update_page_stat(struct page *page, ++ struct mem_cgroup *memcg, ++ enum mem_cgroup_stat_index idx, ++ int nr) ++{ ++} ++ + static inline void mem_cgroup_inc_page_stat(struct page *page, + enum mem_cgroup_stat_index idx) + { +diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h +index 67703f23e7ba..821a3b9bc16e 100644 +--- a/include/linux/mfd/abx500/ux500_chargalg.h ++++ b/include/linux/mfd/abx500/ux500_chargalg.h +@@ -15,7 +15,7 @@ + * - POWER_SUPPLY_TYPE_USB, + * because only them store as drv_data pointer to struct ux500_charger. 
+ */ +-#define psy_to_ux500_charger(x) power_supply_get_drvdata(psy) ++#define psy_to_ux500_charger(x) power_supply_get_drvdata(x) + + /* Forward declaration */ + struct ux500_charger; +diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h +index 1b63fc2f42d1..52d53d134f72 100644 +--- a/include/linux/mfd/rt5033-private.h ++++ b/include/linux/mfd/rt5033-private.h +@@ -203,13 +203,13 @@ enum rt5033_reg { + #define RT5033_REGULATOR_BUCK_VOLTAGE_MIN 1000000U + #define RT5033_REGULATOR_BUCK_VOLTAGE_MAX 3000000U + #define RT5033_REGULATOR_BUCK_VOLTAGE_STEP 100000U +-#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM 32 ++#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM 21 + + /* RT5033 regulator LDO output voltage uV */ + #define RT5033_REGULATOR_LDO_VOLTAGE_MIN 1200000U + #define RT5033_REGULATOR_LDO_VOLTAGE_MAX 3000000U + #define RT5033_REGULATOR_LDO_VOLTAGE_STEP 100000U +-#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM 32 ++#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM 19 + + /* RT5033 regulator SAFE LDO output voltage uV */ + #define RT5033_REGULATOR_SAFE_LDO_VOLTAGE 4900000U +diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h +index 451a811f48f2..d1fb3bbff37a 100644 +--- a/include/linux/mmdebug.h ++++ b/include/linux/mmdebug.h +@@ -36,10 +36,22 @@ void dump_mm(const struct mm_struct *mm); + BUG(); \ + } \ + } while (0) +-#define VM_WARN_ON(cond) WARN_ON(cond) +-#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) +-#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) +-#define VM_WARN(cond, format...) WARN(cond, format) ++#define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \ ++ static bool __section(".data.once") __warned; \ ++ int __ret_warn_once = !!(cond); \ ++ \ ++ if (unlikely(__ret_warn_once && !__warned)) { \ ++ dump_page(page, "VM_WARN_ON_ONCE_PAGE(" __stringify(cond)")");\ ++ __warned = true; \ ++ WARN_ON(1); \ ++ } \ ++ unlikely(__ret_warn_once); \ ++}) ++ ++#define VM_WARN_ON(cond) (void)WARN_ON(cond) ++#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond) ++#define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format) ++#define VM_WARN(cond, format...) (void)WARN(cond, format) + #else + #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) + #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) +@@ -47,6 +59,7 @@ void dump_mm(const struct mm_struct *mm); + #define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond) + #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) + #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) ++#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond) + #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) + #define VM_WARN(cond, format...) 
BUILD_BUG_ON_INVALID(cond) + #endif +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h +index e717b8db7e94..f146244e7b6a 100644 +--- a/include/linux/mmzone.h ++++ b/include/linux/mmzone.h +@@ -768,7 +768,8 @@ static inline bool is_dev_zone(const struct zone *zone) + #include + + extern struct mutex zonelists_mutex; +-void build_all_zonelists(pg_data_t *pgdat, struct zone *zone); ++void build_all_zonelists(pg_data_t *pgdat, struct zone *zone, ++ bool hotplug_context); + void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); + bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, + int classzone_idx, unsigned int alloc_flags, +diff --git a/include/linux/msi.h b/include/linux/msi.h +index debc8aa4ec19..601bff9fbbec 100644 +--- a/include/linux/msi.h ++++ b/include/linux/msi.h +@@ -133,7 +133,7 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); + void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg); + + u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); +-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); ++void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); + void pci_msi_mask_irq(struct irq_data *data); + void pci_msi_unmask_irq(struct irq_data *data); + +diff --git a/include/linux/msm-sps.h b/include/linux/msm-sps.h +index 8f2c2ee8a504..6b5c68184464 100644 +--- a/include/linux/msm-sps.h ++++ b/include/linux/msm-sps.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2011-2018, 2020, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -492,7 +492,7 @@ struct sps_bam_props { + * + */ + struct sps_mem_buffer { +- void *base; ++ void __iomem *base; + phys_addr_t phys_base; + unsigned long iova; + u32 size; +diff --git a/include/linux/msm_ep_pcie.h b/include/linux/msm_ep_pcie.h +index a1d2a17820e0..592b92de82af 100644 +--- a/include/linux/msm_ep_pcie.h ++++ b/include/linux/msm_ep_pcie.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2015, 2017, 2019-2020, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -31,6 +31,8 @@ enum ep_pcie_event { + EP_PCIE_EVENT_LINKUP = 0x20, + EP_PCIE_EVENT_MHI_A7 = 0x40, + EP_PCIE_EVENT_MMIO_WRITE = 0x80, ++ EP_PCIE_EVENT_L1SUB_TIMEOUT = 0x100, ++ EP_PCIE_EVENT_L1SUB_TIMEOUT_EXIT = 0x200, + }; + + enum ep_pcie_irq_event { +@@ -101,6 +103,11 @@ struct ep_pcie_db_config { + u32 tgt_addr; + }; + ++struct ep_pcie_inactivity { ++ bool enable; ++ uint32_t timer_us; ++}; ++ + struct ep_pcie_hw { + struct list_head node; + u32 device_id; +@@ -112,13 +119,14 @@ struct ep_pcie_hw { + u32 num_entries); + int (*get_msi_config)(struct ep_pcie_msi_config *cfg); + int (*trigger_msi)(u32 idx); +- int (*wakeup_host)(void); ++ int (*wakeup_host)(enum ep_pcie_event event); + int (*enable_endpoint)(enum ep_pcie_options opt); + int (*disable_endpoint)(void); + int (*config_db_routing)(struct ep_pcie_db_config chdb_cfg, + struct ep_pcie_db_config erdb_cfg); + int (*mask_irq_event)(enum ep_pcie_irq_event event, + bool enable); ++ int (*configure_inactivity_timer)(struct ep_pcie_inactivity *param); + }; + + /* +@@ -230,12 +238,13 @@ int ep_pcie_trigger_msi(struct ep_pcie_hw *phandle, u32 idx); + /* + * ep_pcie_wakeup_host - wake up the host. + * @phandle: PCIe endpoint HW driver handle ++ * @event: PCIe event of ep_pcie_event type + * + * This function asserts WAKE GPIO to wake up the host. + * + * Return: 0 on success, negative value on error + */ +-int ep_pcie_wakeup_host(struct ep_pcie_hw *phandle); ++int ep_pcie_wakeup_host(struct ep_pcie_hw *phandle, enum ep_pcie_event event); + + /* + * ep_pcie_enable_endpoint - enable PCIe endpoint. +@@ -287,4 +296,23 @@ int ep_pcie_config_db_routing(struct ep_pcie_hw *phandle, + int ep_pcie_mask_irq_event(struct ep_pcie_hw *phandle, + enum ep_pcie_irq_event event, + bool enable); ++ ++/* ++ * ep_pcie_configure_inactivity_timer - Configure timer to trigger ++ * upon link inactivity. ++ * @phandle: PCIe endpoint HW driver handle ++ * @param: structure member to program the timer and enable it. ++ * ++ * Return: 0 on success, negative value on error ++ */ ++int ep_pcie_configure_inactivity_timer(struct ep_pcie_hw *phandle, ++ struct ep_pcie_inactivity *param); ++ ++/* ++ * ep_pcie_core_l1ss_sleep_config_enable - Enable L1ss sleep configuration ++ * to gate the CLKREQ# and disable PCIe resources. 
++ * ++ * Return: 0 on success, negative value on error ++ */ ++int ep_pcie_core_l1ss_sleep_config_enable(void); + #endif +diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h +index b5b43f94f311..01b990e4b228 100644 +--- a/include/linux/mtd/map.h ++++ b/include/linux/mtd/map.h +@@ -312,7 +312,7 @@ void map_destroy(struct mtd_info *mtd); + ({ \ + int i, ret = 1; \ + for (i = 0; i < map_words(map); i++) { \ +- if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) { \ ++ if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \ + ret = 0; \ + break; \ + } \ +diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h +index d8905a229f34..573e744223a2 100644 +--- a/include/linux/mtd/nand.h ++++ b/include/linux/mtd/nand.h +@@ -24,6 +24,7 @@ + #include + #include + ++struct nand_chip; + struct mtd_info; + struct nand_flash_dev; + struct device_node; +@@ -39,7 +40,7 @@ int nand_scan_ident(struct mtd_info *mtd, int max_chips, + int nand_scan_tail(struct mtd_info *mtd); + + /* Unregister the MTD device and free resources held by the NAND device */ +-void nand_release(struct mtd_info *mtd); ++void nand_release(struct nand_chip *chip); + + /* Internal helper for board drivers which need to override command function */ + void nand_wait_ready(struct mtd_info *mtd); +@@ -219,9 +220,6 @@ enum nand_ecc_algo { + #define NAND_CI_CELLTYPE_MSK 0x0C + #define NAND_CI_CELLTYPE_SHIFT 2 + +-/* Keep gcc happy */ +-struct nand_chip; +- + /* ONFI features */ + #define ONFI_FEATURE_16_BIT_BUS (1 << 0) + #define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7) +diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h +index 42ff7ff09bf5..09404fb36b34 100644 +--- a/include/linux/mtd/pfow.h ++++ b/include/linux/mtd/pfow.h +@@ -127,7 +127,7 @@ static inline void print_drs_error(unsigned dsr) + + if (!(dsr & DSR_AVAILABLE)) + printk(KERN_NOTICE"DSR.15: (0) Device not Available\n"); +- if (prog_status & 0x03) ++ if ((prog_status & 0x03) == 0x03) + printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid " + "half with 41h command\n"); + else if (prog_status & 0x02) +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index a79980ab0506..b75f93d94ab8 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -2478,7 +2478,7 @@ void synchronize_net(void); + int init_dummy_netdev(struct net_device *dev); + + DECLARE_PER_CPU(int, xmit_recursion); +-#define XMIT_RECURSION_LIMIT 10 ++#define XMIT_RECURSION_LIMIT 8 + + static inline int dev_recursion_level(void) + { +@@ -3707,6 +3707,7 @@ static inline void netif_tx_disable(struct net_device *dev) + + local_bh_disable(); + cpu = smp_processor_id(); ++ spin_lock(&dev->tx_global_lock); + for (i = 0; i < dev->num_tx_queues; i++) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + +@@ -3714,6 +3715,7 @@ static inline void netif_tx_disable(struct net_device *dev) + netif_tx_stop_queue(txq); + __netif_tx_unlock(txq); + } ++ spin_unlock(&dev->tx_global_lock); + local_bh_enable(); + } + +diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h +index 69111fa2e578..0c106e541d75 100644 +--- a/include/linux/netfilter/x_tables.h ++++ b/include/linux/netfilter/x_tables.h +@@ -139,6 +139,7 @@ struct xt_match { + + const char *table; + unsigned int matchsize; ++ unsigned int usersize; + #ifdef CONFIG_COMPAT + unsigned int compatsize; + #endif +@@ -179,6 +180,7 @@ struct xt_target { + + const char *table; + unsigned int targetsize; ++ unsigned int usersize; + #ifdef CONFIG_COMPAT + unsigned int compatsize; + #endif +@@ 
-261,6 +263,13 @@ int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, + int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, + bool inv_proto); + ++int xt_match_to_user(const struct xt_entry_match *m, ++ struct xt_entry_match __user *u); ++int xt_target_to_user(const struct xt_entry_target *t, ++ struct xt_entry_target __user *u); ++int xt_data_to_user(void __user *dst, const void *src, ++ int usersize, int size); ++ + void *xt_copy_counters_from_user(const void __user *user, unsigned int len, + struct xt_counters_info *info, bool compat); + +@@ -334,7 +343,7 @@ static inline unsigned int xt_write_recseq_begin(void) + * since addend is most likely 1 + */ + __this_cpu_add(xt_recseq.sequence, addend); +- smp_wmb(); ++ smp_mb(); + + return addend; + } +diff --git a/include/linux/of.h b/include/linux/of.h +index b30c6d91d2c5..562d7cd4ac43 100644 +--- a/include/linux/of.h ++++ b/include/linux/of.h +@@ -1132,6 +1132,7 @@ static inline int of_get_available_child_count(const struct device_node *np) + #define _OF_DECLARE(table, name, compat, fn, fn_type) \ + static const struct of_device_id __of_table_##name \ + __used __section(__##table##_of_table) \ ++ __aligned(__alignof__(struct of_device_id)) \ + = { .compatible = compat, \ + .data = (fn == (fn_type)NULL) ? fn : fn } + #else +diff --git a/include/linux/overflow.h b/include/linux/overflow.h +old mode 100644 +new mode 100755 +index 980beb1d891d..5043904a90cc +--- a/include/linux/overflow.h ++++ b/include/linux/overflow.h +@@ -3,6 +3,7 @@ + #define __LINUX_OVERFLOW_H + + #include ++#include + + /* + * In the fallback code below, we need to compute the minimum and +diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h +index 9298c393ddaa..1ebb7e61ce4b 100644 +--- a/include/linux/page_ext.h ++++ b/include/linux/page_ext.h +@@ -53,8 +53,12 @@ static inline void page_ext_init_flatmem(void) + { + } + extern void page_ext_init(void); ++static inline void page_ext_init_flatmem_late(void) ++{ ++} + #else + extern void page_ext_init_flatmem(void); ++extern void page_ext_init_flatmem_late(void); + static inline void page_ext_init(void) + { + } +@@ -78,6 +82,10 @@ static inline void page_ext_init(void) + { + } + ++static inline void page_ext_init_flatmem_late(void) ++{ ++} ++ + static inline void page_ext_init_flatmem(void) + { + } +diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h +index 0dfc605b3c6a..c953fd345f43 100644 +--- a/include/linux/pagemap.h ++++ b/include/linux/pagemap.h +@@ -383,7 +383,7 @@ static inline struct page *read_mapping_page(struct address_space *mapping, + } + + /* +- * Get index of the page with in radix-tree ++ * Get index of the page within radix-tree (but not for hugetlb pages). + * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE) + */ + static inline pgoff_t page_to_index(struct page *page) +@@ -402,15 +402,16 @@ static inline pgoff_t page_to_index(struct page *page) + return pgoff; + } + ++extern pgoff_t hugetlb_basepage_index(struct page *page); ++ + /* +- * Get the offset in PAGE_SIZE. +- * (TODO: hugepage should have ->index in PAGE_SIZE) ++ * Get the offset in PAGE_SIZE (even for hugetlb pages). 
++ * (TODO: hugetlb pages should have ->index in PAGE_SIZE) + */ + static inline pgoff_t page_to_pgoff(struct page *page) + { +- if (unlikely(PageHeadHuge(page))) +- return page->index << compound_order(page); +- ++ if (unlikely(PageHuge(page))) ++ return hugetlb_basepage_index(page); + return page_to_index(page); + } + +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h +index 667f7e89b2b5..664558fff07a 100644 +--- a/include/linux/perf_event.h ++++ b/include/linux/perf_event.h +@@ -477,7 +477,7 @@ struct pmu { + */ + struct perf_addr_filter { + struct list_head entry; +- struct inode *inode; ++ struct path path; + unsigned long offset; + unsigned long size; + unsigned int range : 1, +diff --git a/include/linux/prandom.h b/include/linux/prandom.h +new file mode 100644 +index 000000000000..e20339c78a84 +--- /dev/null ++++ b/include/linux/prandom.h +@@ -0,0 +1,110 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * include/linux/prandom.h ++ * ++ * Include file for the fast pseudo-random 32-bit ++ * generation. ++ */ ++#ifndef _LINUX_PRANDOM_H ++#define _LINUX_PRANDOM_H ++ ++#include ++#include ++ ++u32 prandom_u32(void); ++void prandom_bytes(void *buf, size_t nbytes); ++void prandom_seed(u32 seed); ++void prandom_reseed_late(void); ++ ++#if BITS_PER_LONG == 64 ++/* ++ * The core SipHash round function. Each line can be executed in ++ * parallel given enough CPU resources. ++ */ ++#define PRND_SIPROUND(v0, v1, v2, v3) ( \ ++ v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \ ++ v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \ ++ v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \ ++ v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \ ++) ++ ++#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261) ++#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573) ++ ++#elif BITS_PER_LONG == 32 ++/* ++ * On 32-bit machines, we use HSipHash, a reduced-width version of SipHash. ++ * This is weaker, but 32-bit machines are not used for high-traffic ++ * applications, so there is less output for an attacker to analyze. ++ */ ++#define PRND_SIPROUND(v0, v1, v2, v3) ( \ ++ v0 += v1, v1 = rol32(v1, 5), v2 += v3, v3 = rol32(v3, 8), \ ++ v1 ^= v0, v0 = rol32(v0, 16), v3 ^= v2, \ ++ v0 += v3, v3 = rol32(v3, 7), v2 += v1, v1 = rol32(v1, 13), \ ++ v3 ^= v0, v1 ^= v2, v2 = rol32(v2, 16) \ ++) ++#define PRND_K0 0x6c796765 ++#define PRND_K1 0x74656462 ++ ++#else ++#error Unsupported BITS_PER_LONG ++#endif ++ ++struct rnd_state { ++ __u32 s1, s2, s3, s4; ++}; ++ ++u32 prandom_u32_state(struct rnd_state *state); ++void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); ++void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); ++ ++#define prandom_init_once(pcpu_state) \ ++ DO_ONCE(prandom_seed_full_state, (pcpu_state)) ++ ++/** ++ * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) ++ * @ep_ro: right open interval endpoint ++ * ++ * Returns a pseudo-random number that is in interval [0, ep_ro). Note ++ * that the result depends on PRNG being well distributed in [0, ~0U] ++ * u32 space. Here we use maximally equidistributed combined Tausworthe ++ * generator, that is, prandom_u32(). This is useful when requesting a ++ * random index of an array containing ep_ro elements, for example. 
++ * ++ * Returns: pseudo-random number in interval [0, ep_ro) ++ */ ++static inline u32 prandom_u32_max(u32 ep_ro) ++{ ++ return (u32)(((u64) prandom_u32() * ep_ro) >> 32); ++} ++ ++/* ++ * Handle minimum values for seeds ++ */ ++static inline u32 __seed(u32 x, u32 m) ++{ ++ return (x < m) ? x + m : x; ++} ++ ++/** ++ * prandom_seed_state - set seed for prandom_u32_state(). ++ * @state: pointer to state structure to receive the seed. ++ * @seed: arbitrary 64-bit value to use as a seed. ++ */ ++static inline void prandom_seed_state(struct rnd_state *state, u64 seed) ++{ ++ u32 i = ((seed >> 32) ^ (seed << 10) ^ seed) & 0xffffffffUL; ++ ++ state->s1 = __seed(i, 2U); ++ state->s2 = __seed(i, 8U); ++ state->s3 = __seed(i, 16U); ++ state->s4 = __seed(i, 128U); ++} ++ ++/* Pseudo random number generator from numerical recipes. */ ++static inline u32 next_pseudo_random32(u32 seed) ++{ ++ return seed * 1664525 + 1013904223; ++} ++ ++#endif +diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h +index 72d88cf3ca25..5a215da57b55 100644 +--- a/include/linux/qed/qed_chain.h ++++ b/include/linux/qed/qed_chain.h +@@ -155,28 +155,34 @@ static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain) + + static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) + { ++ u16 elem_per_page = p_chain->elem_per_page; ++ u32 prod = p_chain->u.chain16.prod_idx; ++ u32 cons = p_chain->u.chain16.cons_idx; + u16 used; + +- used = (u16) (((u32)0x10000 + +- (u32)p_chain->u.chain16.prod_idx) - +- (u32)p_chain->u.chain16.cons_idx); ++ if (prod < cons) ++ prod += (u32)U16_MAX + 1; ++ ++ used = (u16)(prod - cons); + if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) +- used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page - +- p_chain->u.chain16.cons_idx / p_chain->elem_per_page; ++ used -= prod / elem_per_page - cons / elem_per_page; + + return (u16)(p_chain->capacity - used); + } + + static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain) + { ++ u16 elem_per_page = p_chain->elem_per_page; ++ u64 prod = p_chain->u.chain32.prod_idx; ++ u64 cons = p_chain->u.chain32.cons_idx; + u32 used; + +- used = (u32) (((u64)0x100000000ULL + +- (u64)p_chain->u.chain32.prod_idx) - +- (u64)p_chain->u.chain32.cons_idx); ++ if (prod < cons) ++ prod += (u64)U32_MAX + 1; ++ ++ used = (u32)(prod - cons); + if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) +- used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page - +- p_chain->u.chain32.cons_idx / p_chain->elem_per_page; ++ used -= (u32)(prod / elem_per_page - cons / elem_per_page); + + return p_chain->capacity - used; + } +diff --git a/include/linux/random.h b/include/linux/random.h +index 1fa0dc880bd7..01a1fb1826ca 100644 +--- a/include/linux/random.h ++++ b/include/linux/random.h +@@ -80,61 +80,12 @@ static inline unsigned long get_random_canary(void) + + unsigned long randomize_page(unsigned long start, unsigned long range); + +-u32 prandom_u32(void); +-void prandom_bytes(void *buf, size_t nbytes); +-void prandom_seed(u32 seed); +-void prandom_reseed_late(void); +- +-struct rnd_state { +- __u32 s1, s2, s3, s4; +-}; +- +-u32 prandom_u32_state(struct rnd_state *state); +-void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); +-void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); +- +-#define prandom_init_once(pcpu_state) \ +- DO_ONCE(prandom_seed_full_state, (pcpu_state)) +- +-/** +- * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) +- * @ep_ro: right open interval 
endpoint +- * +- * Returns a pseudo-random number that is in interval [0, ep_ro). Note +- * that the result depends on PRNG being well distributed in [0, ~0U] +- * u32 space. Here we use maximally equidistributed combined Tausworthe +- * generator, that is, prandom_u32(). This is useful when requesting a +- * random index of an array containing ep_ro elements, for example. +- * +- * Returns: pseudo-random number in interval [0, ep_ro) +- */ +-static inline u32 prandom_u32_max(u32 ep_ro) +-{ +- return (u32)(((u64) prandom_u32() * ep_ro) >> 32); +-} +- + /* +- * Handle minimum values for seeds ++ * This is designed to be standalone for just prandom ++ * users, but for now we include it from ++ * for legacy reasons. + */ +-static inline u32 __seed(u32 x, u32 m) +-{ +- return (x < m) ? x + m : x; +-} +- +-/** +- * prandom_seed_state - set seed for prandom_u32_state(). +- * @state: pointer to state structure to receive the seed. +- * @seed: arbitrary 64-bit value to use as a seed. +- */ +-static inline void prandom_seed_state(struct rnd_state *state, u64 seed) +-{ +- u32 i = (seed >> 32) ^ (seed << 10) ^ seed; +- +- state->s1 = __seed(i, 2U); +- state->s2 = __seed(i, 8U); +- state->s3 = __seed(i, 16U); +- state->s4 = __seed(i, 128U); +-} ++#include + + #ifdef CONFIG_ARCH_RANDOM + # include +@@ -165,10 +116,4 @@ static inline bool arch_has_random_seed(void) + } + #endif + +-/* Pseudo random number generator from numerical recipes. */ +-static inline u32 next_pseudo_random32(u32 seed) +-{ +- return seed * 1664525 + 1013904223; +-} +- + #endif /* _LINUX_RANDOM_H */ +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 2794cd540218..8ee9240f612d 100755 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -2041,6 +2041,8 @@ struct task_struct { + #endif + struct list_head pi_state_list; + struct futex_pi_state *pi_state_cache; ++ struct mutex futex_exit_mutex; ++ unsigned int futex_state; + #endif + #ifdef CONFIG_PERF_EVENTS + struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; +@@ -2503,7 +2505,6 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, + */ + #define PF_WAKE_UP_IDLE 0x00000002 /* try to wake up on an idle CPU */ + #define PF_EXITING 0x00000004 /* getting shut down */ +-#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ + #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ + #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ + #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ +@@ -3258,8 +3259,10 @@ extern struct mm_struct *get_task_mm(struct task_struct *task); + * succeeds. 
+ */ + extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); +-/* Remove the current tasks stale references to the old mm_struct */ +-extern void mm_release(struct task_struct *, struct mm_struct *); ++/* Remove the current tasks stale references to the old mm_struct on exit() */ ++extern void exit_mm_release(struct task_struct *, struct mm_struct *); ++/* Remove the current tasks stale references to the old mm_struct on exec() */ ++extern void exec_mm_release(struct task_struct *, struct mm_struct *); + + #ifdef CONFIG_HAVE_COPY_THREAD_TLS + extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, +diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h +index 150aa102c861..cff944690e1e 100755 +--- a/include/linux/sched/sysctl.h ++++ b/include/linux/sched/sysctl.h +@@ -115,6 +115,7 @@ static inline unsigned int get_sysctl_sched_cfs_boost(void) + extern unsigned int sysctl_sched_autogroup_enabled; + #endif + ++extern int sysctl_sched_rr_timeslice; + extern int sched_rr_timeslice; + + extern int sched_rr_handler(struct ctl_table *table, int write, +diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h +index fb7eb9ccb1cd..d4c3c9bab582 100644 +--- a/include/linux/seq_buf.h ++++ b/include/linux/seq_buf.h +@@ -29,7 +29,7 @@ static inline void seq_buf_clear(struct seq_buf *s) + } + + static inline void +-seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size) ++seq_buf_init(struct seq_buf *s, char *buf, unsigned int size) + { + s->buffer = buf; + s->size = size; +diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h +index ead97654c4e9..1613fe5c668e 100644 +--- a/include/linux/seqlock.h ++++ b/include/linux/seqlock.h +@@ -242,6 +242,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s) + * usual consistency guarantee. It is one wmb cheaper, because we can + * collapse the two back-to-back wmb()s. + * ++ * Note that, writes surrounding the barrier should be declared atomic (e.g. ++ * via WRITE_ONCE): a) to ensure the writes become visible to other threads ++ * atomically, avoiding compiler optimizations; b) to document which writes are ++ * meant to propagate to the reader critical section. This is necessary because ++ * neither writes before and after the barrier are enclosed in a seq-writer ++ * critical section that would ensure readers are aware of ongoing writes. ++ * + * seqcount_t seq; + * bool X = true, Y = false; + * +@@ -261,11 +268,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s) + * + * void write(void) + * { +- * Y = true; ++ * WRITE_ONCE(Y, true); + * + * raw_write_seqcount_barrier(seq); + * +- * X = false; ++ * WRITE_ONCE(X, false); + * } + */ + static inline void raw_write_seqcount_barrier(seqcount_t *s) +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h +index fdde51a16c87..cc6d0d279ad4 100644 +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -1553,6 +1553,18 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_) + return list_->qlen; + } + ++/** ++ * skb_queue_len_lockless - get queue length ++ * @list_: list to measure ++ * ++ * Return the length of an &sk_buff queue. ++ * This variant can be used in lockless contexts. 
++ */ ++static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_) ++{ ++ return READ_ONCE(list_->qlen); ++} ++ + /** + * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head + * @list: queue to initialize +@@ -1756,7 +1768,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) + { + struct sk_buff *next, *prev; + +- list->qlen--; ++ WRITE_ONCE(list->qlen, list->qlen - 1); + next = skb->next; + prev = skb->prev; + skb->next = skb->prev = NULL; +@@ -2799,7 +2811,7 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. + */ +-static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) ++static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len) + { + unsigned int size = skb->len; + +diff --git a/include/linux/spi/qcom-spi.h b/include/linux/spi/qcom-spi.h +index 598481079383..137fc66be6e8 100644 +--- a/include/linux/spi/qcom-spi.h ++++ b/include/linux/spi/qcom-spi.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved. ++/* Copyright (c) 2014-2018, 2020 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -34,7 +34,10 @@ + * @bam_producer_pipe_index BAM producer pipe + * @rt_priority true if RT thread + * @use_pinctrl true if pinctrl library is used +- * @is_shared true when qup is shared between ee's ++ * @is_shared true when qup is shared between ee's and client driver is not ++ in control of spi pm_runtime_get_sync/put_sync. ++ * @shared_ee true when qup is shared between ee's and client driver is in ++ control of spi pm_runtime_get_sync/put_sync. 
+ */ + struct msm_spi_platform_data { + u32 max_clock_speed; +@@ -53,4 +56,5 @@ struct msm_spi_platform_data { + bool rt_priority; + bool use_pinctrl; + bool is_shared; ++ bool shared_ee; + }; +diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h +index 4b743ac35396..9c8445f1af0c 100644 +--- a/include/linux/spi/spi.h ++++ b/include/linux/spi/spi.h +@@ -443,6 +443,9 @@ struct spi_master { + #define SPI_MASTER_MUST_RX BIT(3) /* requires rx */ + #define SPI_MASTER_MUST_TX BIT(4) /* requires tx */ + ++ /* flag indicating this is a non-devres managed controller */ ++ bool devm_allocated; ++ + /* + * on some hardware transfer / message size may be constrained + * the limit may depend on device transfer settings +@@ -601,6 +604,8 @@ extern void spi_finalize_current_transfer(struct spi_master *master); + /* the spi driver core manages memory for the spi_master classdev */ + extern struct spi_master * + spi_alloc_master(struct device *host, unsigned size); ++extern struct spi_master * ++devm_spi_alloc_master(struct device *dev, unsigned int size); + + extern int spi_register_master(struct spi_master *master); + extern int devm_spi_register_master(struct device *dev, +diff --git a/include/linux/string.h b/include/linux/string.h +index c890bdfe296e..f2a3dedccfe8 100644 +--- a/include/linux/string.h ++++ b/include/linux/string.h +@@ -29,6 +29,10 @@ size_t strlcpy(char *, const char *, size_t); + #ifndef __HAVE_ARCH_STRSCPY + ssize_t strscpy(char *, const char *, size_t); + #endif ++ ++/* Wraps calls to strscpy()/memset(), no arch specific code required */ ++ssize_t strscpy_pad(char *dest, const char *src, size_t count); ++ + #ifndef __HAVE_ARCH_STRCAT + extern char * strcat(char *, const char *); + #endif +@@ -99,6 +103,36 @@ extern __kernel_size_t strcspn(const char *,const char *); + #ifndef __HAVE_ARCH_MEMSET + extern void * memset(void *,int,__kernel_size_t); + #endif ++ ++#ifndef __HAVE_ARCH_MEMSET16 ++extern void *memset16(uint16_t *, uint16_t, __kernel_size_t); ++#endif ++ ++#ifndef __HAVE_ARCH_MEMSET32 ++extern void *memset32(uint32_t *, uint32_t, __kernel_size_t); ++#endif ++ ++#ifndef __HAVE_ARCH_MEMSET64 ++extern void *memset64(uint64_t *, uint64_t, __kernel_size_t); ++#endif ++ ++static inline void *memset_l(unsigned long *p, unsigned long v, ++ __kernel_size_t n) ++{ ++ if (BITS_PER_LONG == 32) ++ return memset32((uint32_t *)p, v, n); ++ else ++ return memset64((uint64_t *)p, v, n); ++} ++ ++static inline void *memset_p(void **p, void *v, __kernel_size_t n) ++{ ++ if (BITS_PER_LONG == 32) ++ return memset32((uint32_t *)p, (uintptr_t)v, n); ++ else ++ return memset64((uint64_t *)p, (uintptr_t)v, n); ++} ++ + #ifndef __HAVE_ARCH_MEMCPY + extern void * memcpy(void *,const void *,__kernel_size_t); + #endif +diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h +index 68ec78c1aa48..bd8480ae82e9 100644 +--- a/include/linux/sunrpc/gss_api.h ++++ b/include/linux/sunrpc/gss_api.h +@@ -82,6 +82,7 @@ struct pf_desc { + u32 service; + char *name; + char *auth_domain_name; ++ struct auth_domain *domain; + bool datatouch; + }; + +diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h +index 726aff1a5201..213fa12f56fc 100644 +--- a/include/linux/sunrpc/svcauth_gss.h ++++ b/include/linux/sunrpc/svcauth_gss.h +@@ -20,7 +20,8 @@ int gss_svc_init(void); + void gss_svc_shutdown(void); + int gss_svc_init_net(struct net *net); + void gss_svc_shutdown_net(struct net *net); +-int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); 
++struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor, ++ char *name); + u32 svcauth_gss_flavor(struct auth_domain *dom); + + #endif /* __KERNEL__ */ +diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h +index 56c48c884a24..bf0db32b40aa 100644 +--- a/include/linux/sunrpc/xdr.h ++++ b/include/linux/sunrpc/xdr.h +@@ -23,8 +23,7 @@ + #define XDR_QUADLEN(l) (((l) + 3) >> 2) + + /* +- * Generic opaque `network object.' At the kernel level, this type +- * is used only by lockd. ++ * Generic opaque `network object.' + */ + #define XDR_MAX_NETOBJ 1024 + struct xdr_netobj { +diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h +index a5da60b24d83..15bab51a3aef 100644 +--- a/include/linux/sunrpc/xprt.h ++++ b/include/linux/sunrpc/xprt.h +@@ -310,6 +310,7 @@ struct xprt_class { + struct rpc_xprt * (*setup)(struct xprt_create *); + struct module *owner; + char name[32]; ++ const char * netid[]; + }; + + /* +diff --git a/include/linux/swapops.h b/include/linux/swapops.h +index 5c3a5f3e7eec..c5ff7b217ee6 100644 +--- a/include/linux/swapops.h ++++ b/include/linux/swapops.h +@@ -196,15 +196,6 @@ static inline void num_poisoned_pages_dec(void) + atomic_long_dec(&num_poisoned_pages); + } + +-static inline void num_poisoned_pages_add(long num) +-{ +- atomic_long_add(num, &num_poisoned_pages); +-} +- +-static inline void num_poisoned_pages_sub(long num) +-{ +- atomic_long_sub(num, &num_poisoned_pages); +-} + #else + + static inline swp_entry_t make_hwpoison_entry(struct page *page) +diff --git a/include/linux/tcp.h b/include/linux/tcp.h +index 4fd7dd9f26fe..95b4be603db2 100644 +--- a/include/linux/tcp.h ++++ b/include/linux/tcp.h +@@ -218,6 +218,8 @@ struct tcp_sock { + u8 reord; /* reordering detected */ + } rack; + u16 advmss; /* Advertised MSS */ ++ u8 tlp_retrans:1, /* TLP is a retransmission */ ++ unused_1:7; + u8 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */ + fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */ + is_sack_reneg:1, /* in recovery from loss with SACK reneg? */ +@@ -235,7 +237,7 @@ struct tcp_sock { + syn_data_acked:1,/* data in SYN is acked by SYN-ACK */ + save_syn:1, /* Save headers of SYN packet */ + is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */ +- u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. 
*/ ++ u32 tlp_high_seq; /* snd_nxt at the time of TLP */ + + /* RTT measurement */ + u32 srtt_us; /* smoothed round trip time << 3 in usecs */ +diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h +index ce98a8e7ef78..e6cede37bcd6 100644 +--- a/include/linux/thread_info.h ++++ b/include/linux/thread_info.h +@@ -10,6 +10,10 @@ + #include + #include + #include ++#include ++ ++struct timespec; ++struct compat_timespec; + + #ifdef CONFIG_THREAD_INFO_IN_TASK + /* +@@ -26,6 +30,18 @@ + + #ifdef __KERNEL__ + ++#ifndef arch_set_restart_data ++#define arch_set_restart_data(restart) do { } while (0) ++#endif ++ ++static inline long set_restart_fn(struct restart_block *restart, ++ long (*fn)(struct restart_block *)) ++{ ++ restart->fn = fn; ++ arch_set_restart_data(restart); ++ return -ERESTART_RESTARTBLOCK; ++} ++ + #define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO) + + /* +diff --git a/include/linux/time64.h b/include/linux/time64.h +index 980c71b3001a..2a45b8c87edb 100644 +--- a/include/linux/time64.h ++++ b/include/linux/time64.h +@@ -188,6 +188,10 @@ static inline bool timespec64_valid_strict(const struct timespec64 *ts) + */ + static inline s64 timespec64_to_ns(const struct timespec64 *ts) + { ++ /* Prevent multiplication overflow */ ++ if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) ++ return KTIME_MAX; ++ + return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; + } + +diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h +index cfaf5a1d4bad..f5be2716b01c 100644 +--- a/include/linux/trace_seq.h ++++ b/include/linux/trace_seq.h +@@ -11,7 +11,7 @@ + */ + + struct trace_seq { +- unsigned char buffer[PAGE_SIZE]; ++ char buffer[PAGE_SIZE]; + struct seq_buf seq; + int full; + }; +@@ -50,7 +50,7 @@ static inline int trace_seq_used(struct trace_seq *s) + * that is about to be written to and then return the result + * of that write. + */ +-static inline unsigned char * ++static inline char * + trace_seq_buffer_ptr(struct trace_seq *s) + { + return s->buffer + seq_buf_used(&s->seq); +diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h +index be586c632a0c..f261114e2758 100644 +--- a/include/linux/tracepoint.h ++++ b/include/linux/tracepoint.h +@@ -314,7 +314,7 @@ extern void syscall_unregfunc(void); + static const char *___tp_str __tracepoint_string = str; \ + ___tp_str; \ + }) +-#define __tracepoint_string __attribute__((section("__tracepoint_str"))) ++#define __tracepoint_string __attribute__((section("__tracepoint_str"), used)) + #else + /* + * tracepoint_string() is used to save the string address for userspace +diff --git a/include/linux/tty.h b/include/linux/tty.h +index 31c9bbce4778..c67ef6fa6e95 100644 +--- a/include/linux/tty.h ++++ b/include/linux/tty.h +@@ -293,6 +293,10 @@ struct tty_struct { + struct termiox *termiox; /* May be NULL for unsupported */ + char name[64]; + struct pid *pgrp; /* Protected by ctrl lock */ ++ /* ++ * Writes protected by both ctrl lock and legacy mutex, readers must use ++ * at least one of them. ++ */ + struct pid *session; + unsigned long flags; + int count; +diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h +index b742b5e47cc2..ae63c962fa22 100644 +--- a/include/linux/tty_driver.h ++++ b/include/linux/tty_driver.h +@@ -235,7 +235,7 @@ + * + * Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel + * structure to complete. This method is optional and will only be called +- * if provided (otherwise EINVAL will be returned). 
++ * if provided (otherwise ENOTTY will be returned). + */ + + #include +diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h +index 650f3dd6b800..f604a8fe9d2e 100644 +--- a/include/linux/u64_stats_sync.h ++++ b/include/linux/u64_stats_sync.h +@@ -68,12 +68,13 @@ struct u64_stats_sync { + }; + + ++#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) ++#define u64_stats_init(syncp) seqcount_init(&(syncp)->seq) ++#else + static inline void u64_stats_init(struct u64_stats_sync *syncp) + { +-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) +- seqcount_init(&syncp->seq); +-#endif + } ++#endif + + static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) + { +diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h +index 9442423979c1..cc5ba47062e8 100644 +--- a/include/linux/uaccess.h ++++ b/include/linux/uaccess.h +@@ -90,6 +90,17 @@ static inline unsigned long __copy_from_user_nocache(void *to, + extern long probe_kernel_read(void *dst, const void *src, size_t size); + extern long __probe_kernel_read(void *dst, const void *src, size_t size); + ++/* ++ * probe_user_read(): safely attempt to read from a location in user space ++ * @dst: pointer to the buffer that shall take the data ++ * @src: address to read from ++ * @size: size of the data chunk ++ * ++ * Safely read from address @src to the buffer at @dst. If a kernel fault ++ * happens, handle that and return -EFAULT. ++ */ ++extern long probe_user_read(void *dst, const void __user *src, size_t size); ++ + /* + * probe_kernel_write(): safely attempt to write to a location + * @dst: address to write to +@@ -102,7 +113,22 @@ extern long __probe_kernel_read(void *dst, const void *src, size_t size); + extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); + extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); + ++/* ++ * probe_user_write(): safely attempt to write to a location in user space ++ * @dst: address to write to ++ * @src: pointer to the data that shall be written ++ * @size: size of the data chunk ++ * ++ * Safely write to address @dst from the buffer at @src. If a kernel fault ++ * happens, handle that and return -EFAULT. 
++ */ ++extern long notrace probe_user_write(void __user *dst, const void *src, size_t size); ++extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size); ++ + extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); ++extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, ++ long count); ++extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count); + + /** + * probe_kernel_address(): safely attempt to read from a location +diff --git a/include/linux/usb.h b/include/linux/usb.h +index 16272f1e237c..7864b72b94bf 100644 +--- a/include/linux/usb.h ++++ b/include/linux/usb.h +@@ -99,6 +99,41 @@ enum usb_interface_condition { + USB_INTERFACE_UNBINDING, + }; + ++int __must_check ++usb_find_common_endpoints(struct usb_host_interface *alt, ++ struct usb_endpoint_descriptor **bulk_in, ++ struct usb_endpoint_descriptor **bulk_out, ++ struct usb_endpoint_descriptor **int_in, ++ struct usb_endpoint_descriptor **int_out); ++ ++static inline int __must_check ++usb_find_bulk_in_endpoint(struct usb_host_interface *alt, ++ struct usb_endpoint_descriptor **bulk_in) ++{ ++ return usb_find_common_endpoints(alt, bulk_in, NULL, NULL, NULL); ++} ++ ++static inline int __must_check ++usb_find_bulk_out_endpoint(struct usb_host_interface *alt, ++ struct usb_endpoint_descriptor **bulk_out) ++{ ++ return usb_find_common_endpoints(alt, NULL, bulk_out, NULL, NULL); ++} ++ ++static inline int __must_check ++usb_find_int_in_endpoint(struct usb_host_interface *alt, ++ struct usb_endpoint_descriptor **int_in) ++{ ++ return usb_find_common_endpoints(alt, NULL, NULL, int_in, NULL); ++} ++ ++static inline int __must_check ++usb_find_int_out_endpoint(struct usb_host_interface *alt, ++ struct usb_endpoint_descriptor **int_out) ++{ ++ return usb_find_common_endpoints(alt, NULL, NULL, NULL, int_out); ++} ++ + /** + * struct usb_interface - what usb device drivers talk to + * @altsetting: array of interface structures, one for each alternate +@@ -1680,6 +1715,8 @@ static inline int usb_urb_dir_out(struct urb *urb) + return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_OUT; + } + ++int usb_urb_ep_type_check(const struct urb *urb); ++ + void *usb_alloc_coherent(struct usb_device *dev, size_t size, + gfp_t mem_flags, dma_addr_t *dma); + void usb_free_coherent(struct usb_device *dev, size_t size, +diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h +index bd25e8133db6..94caa5f9efdc 100644 +--- a/include/linux/usb/composite.h ++++ b/include/linux/usb/composite.h +@@ -270,6 +270,9 @@ int usb_func_wakeup(struct usb_function *func); + + int usb_get_func_interface_id(struct usb_function *func); + ++int config_ep_by_speed_and_alt(struct usb_gadget *g, struct usb_function *f, ++ struct usb_ep *_ep, u8 alt); ++ + int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, + struct usb_ep *_ep); + +diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h +index 7a0350535cb1..603b298dc747 100644 +--- a/include/linux/usb/otg-fsm.h ++++ b/include/linux/usb/otg-fsm.h +@@ -210,6 +210,7 @@ struct otg_fsm { + struct mutex lock; + u8 *host_req_flag; + struct delayed_work hnp_polling_work; ++ bool hnp_work_inited; + bool state_changed; + }; + +diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h +index ea4f81c2a6d5..602dff213bae 100644 +--- a/include/linux/usb/quirks.h ++++ b/include/linux/usb/quirks.h +@@ -59,4 +59,7 @@ + /* Device needs a pause after every control message. 
*/ + #define USB_QUIRK_DELAY_CTRL_MSG BIT(13) + ++/* device has blacklisted endpoints */ ++#define USB_QUIRK_ENDPOINT_BLACKLIST BIT(15) ++ + #endif /* __LINUX_USB_QUIRKS_H */ +diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h +index fde7550754df..52f47c2944f8 100644 +--- a/include/linux/usb/usbnet.h ++++ b/include/linux/usb/usbnet.h +@@ -80,6 +80,8 @@ struct usbnet { + # define EVENT_LINK_CHANGE 11 + # define EVENT_SET_RX_MODE 12 + # define EVENT_NO_IP_ALIGN 13 ++ u32 rx_speed; /* in bps - NOT Mbps */ ++ u32 tx_speed; /* in bps - NOT Mbps */ + }; + + static inline struct usb_driver *driver_of(struct usb_interface *intf) +diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h +index 584f9a647ad4..6b6c1138f963 100644 +--- a/include/linux/virtio_vsock.h ++++ b/include/linux/virtio_vsock.h +@@ -11,7 +11,8 @@ + #define VIRTIO_VSOCK_DEFAULT_MAX_BUF_SIZE (1024 * 256) + #define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4) + #define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL +-#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64) ++#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE virtio_transport_max_vsock_pkt_buf_size ++extern uint virtio_transport_max_vsock_pkt_buf_size; + + enum { + VSOCK_VQ_RX = 0, /* for host to guest data */ +diff --git a/include/linux/xattr.h b/include/linux/xattr.h +index e77605a0c8da..1b1c9bfd24e9 100644 +--- a/include/linux/xattr.h ++++ b/include/linux/xattr.h +@@ -51,8 +51,10 @@ ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); + ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); + int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int); + int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int); ++int __vfs_setxattr_locked(struct dentry *, const char *, const void *, size_t, int, struct inode **); + int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); + int __vfs_removexattr(struct dentry *, const char *); ++int __vfs_removexattr_locked(struct dentry *, const char *, struct inode **); + int vfs_removexattr(struct dentry *, const char *); + + ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); +diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h +index 2219cce81ca4..4638dddc040b 100644 +--- a/include/linux/zsmalloc.h ++++ b/include/linux/zsmalloc.h +@@ -36,7 +36,7 @@ enum zs_mapmode { + + struct zs_pool_stats { + /* How many pages were migrated (freed) */ +- unsigned long pages_compacted; ++ atomic_long_t pages_compacted; + }; + + struct zs_pool; +diff --git a/include/net/addrconf.h b/include/net/addrconf.h +index 335b85d94182..8e2362c6855e 100644 +--- a/include/net/addrconf.h ++++ b/include/net/addrconf.h +@@ -255,6 +255,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, + const struct in6_addr *addr); + int ipv6_sock_ac_drop(struct sock *sk, int ifindex, + const struct in6_addr *addr); ++void __ipv6_sock_ac_close(struct sock *sk); + void ipv6_sock_ac_close(struct sock *sk); + + int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr); +diff --git a/include/net/af_unix.h b/include/net/af_unix.h +index fd60eccb59a6..79f2e1ccfcfb 100644 +--- a/include/net/af_unix.h ++++ b/include/net/af_unix.h +@@ -8,6 +8,7 @@ + + void unix_inflight(struct user_struct *user, struct file *fp); + void unix_notinflight(struct user_struct *user, struct file *fp); ++void unix_destruct_scm(struct sk_buff *skb); + void unix_gc(void); + void wait_for_unix_gc(void); + struct sock 
*unix_get_socket(struct file *filp); +diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h +index 57a7dba49d29..819796fb9d46 100644 +--- a/include/net/bluetooth/hci_core.h ++++ b/include/net/bluetooth/hci_core.h +@@ -512,6 +512,7 @@ struct hci_chan { + struct sk_buff_head data_q; + unsigned int sent; + __u8 state; ++ bool amp; + }; + + struct hci_conn_params { +@@ -1027,6 +1028,7 @@ struct hci_dev *hci_alloc_dev(void); + void hci_free_dev(struct hci_dev *hdev); + int hci_register_dev(struct hci_dev *hdev); + void hci_unregister_dev(struct hci_dev *hdev); ++void hci_cleanup_dev(struct hci_dev *hdev); + int hci_suspend_dev(struct hci_dev *hdev); + int hci_resume_dev(struct hci_dev *hdev); + int hci_reset_dev(struct hci_dev *hdev); +@@ -1250,16 +1252,34 @@ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) + conn->security_cfm_cb(conn, status); + } + +-static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, +- __u8 encrypt) ++static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) + { + struct hci_cb *cb; ++ __u8 encrypt; ++ ++ if (conn->state == BT_CONFIG) { ++ if (!status) ++ conn->state = BT_CONNECTED; + +- if (conn->sec_level == BT_SECURITY_SDP) +- conn->sec_level = BT_SECURITY_LOW; ++ hci_connect_cfm(conn, status); ++ hci_conn_drop(conn); ++ return; ++ } + +- if (conn->pending_sec_level > conn->sec_level) +- conn->sec_level = conn->pending_sec_level; ++ if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags)) ++ encrypt = 0x00; ++ else if (test_bit(HCI_CONN_AES_CCM, &conn->flags)) ++ encrypt = 0x02; ++ else ++ encrypt = 0x01; ++ ++ if (!status) { ++ if (conn->sec_level == BT_SECURITY_SDP) ++ conn->sec_level = BT_SECURITY_LOW; ++ ++ if (conn->pending_sec_level > conn->sec_level) ++ conn->sec_level = conn->pending_sec_level; ++ } + + mutex_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { +diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h +index 5ee3c689c863..9c50d458ee83 100644 +--- a/include/net/bluetooth/l2cap.h ++++ b/include/net/bluetooth/l2cap.h +@@ -619,6 +619,8 @@ struct l2cap_ops { + struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan, + unsigned long hdr_len, + unsigned long len, int nb); ++ int (*filter) (struct l2cap_chan * chan, ++ struct sk_buff *skb); + }; + + struct l2cap_conn { +diff --git a/include/net/bonding.h b/include/net/bonding.h +index 8750c2c4871a..bd29fd190bb1 100644 +--- a/include/net/bonding.h ++++ b/include/net/bonding.h +@@ -170,6 +170,11 @@ struct slave { + struct rtnl_link_stats64 slave_stats; + }; + ++static inline struct slave *to_slave(struct kobject *kobj) ++{ ++ return container_of(kobj, struct slave, kobj); ++} ++ + struct bond_up_slave { + unsigned int count; + struct rcu_head rcu; +@@ -694,6 +699,9 @@ extern struct bond_parm_tbl ad_select_tbl[]; + /* exported from bond_netlink.c */ + extern struct rtnl_link_ops bond_link_ops; + ++/* exported from bond_sysfs_slave.c */ ++extern const struct sysfs_ops slave_sysfs_ops; ++ + static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb) + { + atomic_long_inc(&dev->tx_dropped); +diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h +index 028b754ae9b1..0baf2e21a533 100644 +--- a/include/net/caif/caif_dev.h ++++ b/include/net/caif/caif_dev.h +@@ -119,7 +119,7 @@ void caif_free_client(struct cflayer *adap_layer); + * The link_support layer is used to add any Link Layer specific + * framing. 
+ */ +-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, ++int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, + struct cflayer *link_support, int head_room, + struct cflayer **layer, int (**rcv_func)( + struct sk_buff *, struct net_device *, +diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h +index 70bfd017581f..219094ace893 100644 +--- a/include/net/caif/cfcnfg.h ++++ b/include/net/caif/cfcnfg.h +@@ -62,7 +62,7 @@ void cfcnfg_remove(struct cfcnfg *cfg); + * @fcs: Specify if checksum is used in CAIF Framing Layer. + * @head_room: Head space needed by link specific protocol. + */ +-void ++int + cfcnfg_add_phy_layer(struct cfcnfg *cnfg, + struct net_device *dev, struct cflayer *phy_layer, + enum cfcnfg_phy_preference pref, +diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h +index b5b020f3c72e..bc3fae07a25f 100644 +--- a/include/net/caif/cfserl.h ++++ b/include/net/caif/cfserl.h +@@ -9,4 +9,5 @@ + #include + + struct cflayer *cfserl_create(int instance, bool use_stx); ++void cfserl_release(struct cflayer *layer); + #endif +diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h +index db31d9f3ee83..be83eae4399b 100644 +--- a/include/net/cfg80211.h ++++ b/include/net/cfg80211.h +@@ -4367,7 +4367,8 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr); + * Return: 0 on success. Non-zero on error. + */ + int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, +- const u8 *addr, enum nl80211_iftype iftype); ++ const u8 *addr, enum nl80211_iftype iftype, ++ bool is_amsdu); + + /** + * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3 +@@ -4379,7 +4380,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, + static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, + enum nl80211_iftype iftype) + { +- return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype); ++ return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, false); + } + + /** +diff --git a/include/net/dst.h b/include/net/dst.h +index e57e8fb9a43d..12247c034206 100644 +--- a/include/net/dst.h ++++ b/include/net/dst.h +@@ -479,7 +479,15 @@ static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, co + static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst, + struct sk_buff *skb) + { +- struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL); ++ struct neighbour *n = NULL; ++ ++ /* The packets from tunnel devices (eg bareudp) may have only ++ * metadata in the dst pointer of skb. Hence a pointer check of ++ * neigh_lookup is needed. ++ */ ++ if (dst->ops->neigh_lookup) ++ n = dst->ops->neigh_lookup(dst, skb, NULL); ++ + return IS_ERR(n) ? 
NULL : n; + } + +diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h +index 6965c8f68ade..5a23535a5018 100644 +--- a/include/net/dst_metadata.h ++++ b/include/net/dst_metadata.h +@@ -31,7 +31,9 @@ static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb) + return &md_dst->u.tun_info; + + dst = skb_dst(skb); +- if (dst && dst->lwtstate) ++ if (dst && dst->lwtstate && ++ (dst->lwtstate->type == LWTUNNEL_ENCAP_IP || ++ dst->lwtstate->type == LWTUNNEL_ENCAP_IP6)) + return lwt_tun_info(dst->lwtstate); + + return NULL; +diff --git a/include/net/genetlink.h b/include/net/genetlink.h +index 8d4608ce8716..facf9851ede6 100644 +--- a/include/net/genetlink.h ++++ b/include/net/genetlink.h +@@ -33,12 +33,6 @@ struct genl_info; + * do additional, common, filtering and return an error + * @post_doit: called after an operation's doit callback, it may + * undo operations done by pre_doit, for example release locks +- * @mcast_bind: a socket bound to the given multicast group (which +- * is given as the offset into the groups array) +- * @mcast_unbind: a socket was unbound from the given multicast group. +- * Note that unbind() will not be called symmetrically if the +- * generic netlink family is removed while there are still open +- * sockets. + * @attrbuf: buffer to store parsed attributes + * @family_list: family list + * @mcgrps: multicast groups used by this family (private) +@@ -61,8 +55,6 @@ struct genl_family { + void (*post_doit)(const struct genl_ops *ops, + struct sk_buff *skb, + struct genl_info *info); +- int (*mcast_bind)(struct net *net, int group); +- void (*mcast_unbind)(struct net *net, int group); + struct nlattr ** attrbuf; /* private */ + const struct genl_ops * ops; /* private */ + const struct genl_multicast_group *mcgrps; /* private */ +diff --git a/include/net/icmp.h b/include/net/icmp.h +index 8665bf24e3b7..ffe4a5d2bbe7 100644 +--- a/include/net/icmp.h ++++ b/include/net/icmp.h +@@ -47,6 +47,16 @@ static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 + __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt); + } + ++#if IS_ENABLED(CONFIG_NF_NAT) ++void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info); ++#else ++static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) ++{ ++ struct ip_options opts = { 0 }; ++ __icmp_send(skb_in, type, code, info, &opts); ++} ++#endif ++ + int icmp_rcv(struct sk_buff *skb); + void icmp_err(struct sk_buff *skb, u32 info); + int icmp_init(void); +diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h +index 146054ceea8e..5bb56ebf3c9f 100644 +--- a/include/net/inet_connection_sock.h ++++ b/include/net/inet_connection_sock.h +@@ -319,5 +319,9 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, + int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, + char __user *optval, unsigned int optlen); + ++/* update the fast reuse flag when adding a socket */ ++void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, ++ struct sock *sk); ++ + struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu); + #endif /* _INET_CONNECTION_SOCK_H */ +diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h +index dce2d586d9ce..245d999c0eac 100644 +--- a/include/net/inet_ecn.h ++++ b/include/net/inet_ecn.h +@@ -3,6 +3,7 @@ + + #include + #include ++#include + + #include + #include +diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h +index 96cc703abc4f..cfaa29c2d78d 
100644 +--- a/include/net/ip6_route.h ++++ b/include/net/ip6_route.h +@@ -203,7 +203,7 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst, + int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + int (*output)(struct net *, struct sock *, struct sk_buff *)); + +-static inline int ip6_skb_dst_mtu(struct sk_buff *skb) ++static inline unsigned int ip6_skb_dst_mtu(struct sk_buff *skb) + { + struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? + inet6_sk(skb->sk) : NULL; +diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h +index c0f0a13ed818..49aa79c7b278 100644 +--- a/include/net/llc_pdu.h ++++ b/include/net/llc_pdu.h +@@ -15,9 +15,11 @@ + #include + + /* Lengths of frame formats */ +-#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */ +-#define LLC_PDU_LEN_S 4 +-#define LLC_PDU_LEN_U 3 /* header and 1 control byte */ ++#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */ ++#define LLC_PDU_LEN_S 4 ++#define LLC_PDU_LEN_U 3 /* header and 1 control byte */ ++/* header and 1 control byte and XID info */ ++#define LLC_PDU_LEN_U_XID (LLC_PDU_LEN_U + sizeof(struct llc_xid_info)) + /* Known SAP addresses */ + #define LLC_GLOBAL_SAP 0xFF + #define LLC_NULL_SAP 0x00 /* not network-layer visible */ +@@ -50,9 +52,10 @@ + #define LLC_PDU_TYPE_U_MASK 0x03 /* 8-bit control field */ + #define LLC_PDU_TYPE_MASK 0x03 + +-#define LLC_PDU_TYPE_I 0 /* first bit */ +-#define LLC_PDU_TYPE_S 1 /* first two bits */ +-#define LLC_PDU_TYPE_U 3 /* first two bits */ ++#define LLC_PDU_TYPE_I 0 /* first bit */ ++#define LLC_PDU_TYPE_S 1 /* first two bits */ ++#define LLC_PDU_TYPE_U 3 /* first two bits */ ++#define LLC_PDU_TYPE_U_XID 4 /* private type for detecting XID commands */ + + #define LLC_PDU_TYPE_IS_I(pdu) \ + ((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 1 : 0) +@@ -230,9 +233,18 @@ static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) + static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type, + u8 ssap, u8 dsap, u8 cr) + { +- const int hlen = type == LLC_PDU_TYPE_U ? 
3 : 4; ++ int hlen = 4; /* default value for I and S types */ + struct llc_pdu_un *pdu; + ++ switch (type) { ++ case LLC_PDU_TYPE_U: ++ hlen = 3; ++ break; ++ case LLC_PDU_TYPE_U_XID: ++ hlen = 6; ++ break; ++ } ++ + skb_push(skb, hlen); + skb_reset_network_header(skb); + pdu = llc_pdu_un_hdr(skb); +@@ -374,7 +386,10 @@ static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb, + xid_info->fmt_id = LLC_XID_FMT_ID; /* 0x81 */ + xid_info->type = svcs_supported; + xid_info->rw = rx_window << 1; /* size of receive window */ +- skb_put(skb, sizeof(struct llc_xid_info)); ++ ++ /* no need to push/put since llc_pdu_header_init() has already ++ * pushed 3 + 3 bytes ++ */ + } + + /** +diff --git a/include/net/ndisc.h b/include/net/ndisc.h +index 96c475bbc57b..db78894e8886 100644 +--- a/include/net/ndisc.h ++++ b/include/net/ndisc.h +@@ -37,7 +37,7 @@ enum { + ND_OPT_DNSSL = 31, /* RFC6106 */ + ND_OPT_6CO = 34, /* RFC6775 */ + ND_OPT_CAPTIVE_PORTAL = 37, /* RFC7710 */ +- ND_OPT_PREF64 = 38, /* RFC-ietf-6man-ra-pref64-09 */ ++ ND_OPT_PREF64 = 38, /* RFC8781 */ + __ND_OPT_MAX + }; + +diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h +index 7ba9a624090f..91e395fd0a65 100644 +--- a/include/net/netfilter/nf_tables.h ++++ b/include/net/netfilter/nf_tables.h +@@ -119,6 +119,8 @@ static inline u8 nft_reg_load8(u32 *sreg) + static inline void nft_data_copy(u32 *dst, const struct nft_data *src, + unsigned int len) + { ++ if (len % NFT_REG32_SIZE) ++ dst[len / NFT_REG32_SIZE] = 0; + memcpy(dst, src, len); + } + +diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h +index af1c5e7c7e94..b0905b2be9ad 100644 +--- a/include/net/netns/ipv4.h ++++ b/include/net/netns/ipv4.h +@@ -112,6 +112,7 @@ struct netns_ipv4 { + int sysctl_tcp_orphan_retries; + int sysctl_tcp_fin_timeout; + unsigned int sysctl_tcp_notsent_lowat; ++ int sysctl_tcp_default_init_rwnd; + + int sysctl_igmp_max_memberships; + int sysctl_igmp_max_msf; +diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h +index 87499b6b35d6..2ba054fe14ac 100644 +--- a/include/net/nfc/nci_core.h ++++ b/include/net/nfc/nci_core.h +@@ -310,6 +310,7 @@ int nci_nfcc_loopback(struct nci_dev *ndev, void *data, size_t data_len, + struct sk_buff **resp); + + struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev); ++void nci_hci_deallocate(struct nci_dev *ndev); + int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event, + const u8 *param, size_t param_len); + int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, +diff --git a/include/net/red.h b/include/net/red.h +index 3618cdfec884..117a3654d319 100644 +--- a/include/net/red.h ++++ b/include/net/red.h +@@ -167,14 +167,24 @@ static inline void red_set_vars(struct red_vars *v) + v->qcount = -1; + } + +-static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog) ++static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, ++ u8 Scell_log, u8 *stab) + { +- if (fls(qth_min) + Wlog > 32) ++ if (fls(qth_min) + Wlog >= 32) + return false; +- if (fls(qth_max) + Wlog > 32) ++ if (fls(qth_max) + Wlog >= 32) ++ return false; ++ if (Scell_log >= 32) + return false; + if (qth_max < qth_min) + return false; ++ if (stab) { ++ int i; ++ ++ for (i = 0; i < RED_STAB_SIZE; i++) ++ if (stab[i] >= 32) ++ return false; ++ } + return true; + } + +diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h +index 4113916cc1bb..af0745f316fe 100644 +--- a/include/net/rtnetlink.h ++++ b/include/net/rtnetlink.h +@@ -28,6 +28,7 @@ static inline int 
rtnl_msg_family(const struct nlmsghdr *nlh) + * + * @list: Used internally + * @kind: Identifier ++ * @netns_refund: Physical device, move to init_net on netns exit + * @maxtype: Highest device specific netlink attribute number + * @policy: Netlink policy for device specific attribute validation + * @validate: Optional validation function for netlink/changelink parameters +@@ -84,6 +85,7 @@ struct rtnl_link_ops { + unsigned int (*get_num_tx_queues)(void); + unsigned int (*get_num_rx_queues)(void); + ++ bool netns_refund; + int slave_maxtype; + const struct nla_policy *slave_policy; + int (*slave_validate)(struct nlattr *tb[], +diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h +index 5b847e49f7e9..9799c300603a 100644 +--- a/include/net/sctp/constants.h ++++ b/include/net/sctp/constants.h +@@ -344,8 +344,7 @@ typedef enum { + } sctp_scope_policy_t; + + /* Based on IPv4 scoping , +- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24, +- * 192.88.99.0/24. ++ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24. + * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP + * addresses. + */ +@@ -353,15 +352,16 @@ typedef enum { + ((htonl(INADDR_BROADCAST) == a) || \ + ipv4_is_multicast(a) || \ + ipv4_is_zeronet(a) || \ +- ipv4_is_test_198(a) || \ + ipv4_is_anycast_6to4(a)) + + /* Flags used for the bind address copy functions. */ +-#define SCTP_ADDR6_ALLOWED 0x00000001 /* IPv6 address is allowed by ++#define SCTP_ADDR4_ALLOWED 0x00000001 /* IPv4 address is allowed by + local sock family */ +-#define SCTP_ADDR4_PEERSUPP 0x00000002 /* IPv4 address is supported by ++#define SCTP_ADDR6_ALLOWED 0x00000002 /* IPv6 address is allowed by ++ local sock family */ ++#define SCTP_ADDR4_PEERSUPP 0x00000004 /* IPv4 address is supported by + peer */ +-#define SCTP_ADDR6_PEERSUPP 0x00000004 /* IPv6 address is supported by ++#define SCTP_ADDR6_PEERSUPP 0x00000008 /* IPv6 address is supported by + peer */ + + /* Reasons to retransmit. 
*/ +diff --git a/include/net/sock.h b/include/net/sock.h +index 7a57d6f55f1f..83dab256b8b5 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -785,6 +785,8 @@ static inline int sk_memalloc_socks(void) + { + return static_key_false(&memalloc_socks); + } ++ ++void __receive_sock(struct file *file); + #else + + static inline int sk_memalloc_socks(void) +@@ -792,6 +794,8 @@ static inline int sk_memalloc_socks(void) + return 0; + } + ++static inline void __receive_sock(struct file *file) ++{ } + #endif + + static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask) +@@ -1632,7 +1636,6 @@ static inline int sk_tx_queue_get(const struct sock *sk) + + static inline void sk_set_socket(struct sock *sk, struct socket *sock) + { +- sk_tx_queue_clear(sk); + sk->sk_socket = sock; + } + +@@ -1685,7 +1688,8 @@ static inline u32 net_tx_rndhash(void) + + static inline void sk_set_txhash(struct sock *sk) + { +- sk->sk_txhash = net_tx_rndhash(); ++ /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */ ++ WRITE_ONCE(sk->sk_txhash, net_tx_rndhash()); + } + + static inline void sk_rethink_txhash(struct sock *sk) +@@ -1940,9 +1944,12 @@ static inline void sock_poll_wait(struct file *filp, + + static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk) + { +- if (sk->sk_txhash) { ++ /* This pairs with WRITE_ONCE() in sk_set_txhash() */ ++ u32 txhash = READ_ONCE(sk->sk_txhash); ++ ++ if (txhash) { + skb->l4_hash = 1; +- skb->hash = sk->sk_txhash; ++ skb->hash = txhash; + } + } + +diff --git a/include/net/tcp.h b/include/net/tcp.h +index 873501c90396..d34951719f96 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -276,7 +276,6 @@ extern int sysctl_tcp_autocorking; + extern int sysctl_tcp_invalid_ratelimit; + extern int sysctl_tcp_pacing_ss_ratio; + extern int sysctl_tcp_pacing_ca_ratio; +-extern int sysctl_tcp_default_init_rwnd; + + extern atomic_long_t tcp_memory_allocated; + +@@ -1269,7 +1268,7 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt) + rx_opt->num_sacks = 0; + } + +-u32 tcp_default_init_rwnd(u32 mss); ++u32 tcp_default_init_rwnd(struct net *net, u32 mss); + void tcp_cwnd_restart(struct sock *sk, s32 delta); + + static inline void tcp_slow_start_after_idle_check(struct sock *sk) +@@ -1285,7 +1284,8 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk) + } + + /* Determine a window scaling and initial window to offer. 
*/ +-void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd, ++void tcp_select_initial_window(struct net *net, ++ int __space, __u32 mss, __u32 *rcv_wnd, + __u32 *window_clamp, int wscale_ok, + __u8 *rcv_wscale, __u32 init_rcv_wnd); + +diff --git a/include/net/xfrm.h b/include/net/xfrm.h +index 3105a63f5cb9..0c87a8bda134 100755 +--- a/include/net/xfrm.h ++++ b/include/net/xfrm.h +@@ -1756,21 +1756,17 @@ static inline int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay + static inline int xfrm_replay_clone(struct xfrm_state *x, + struct xfrm_state *orig) + { +- x->replay_esn = kzalloc(xfrm_replay_state_esn_len(orig->replay_esn), ++ ++ x->replay_esn = kmemdup(orig->replay_esn, ++ xfrm_replay_state_esn_len(orig->replay_esn), + GFP_KERNEL); + if (!x->replay_esn) + return -ENOMEM; +- +- x->replay_esn->bmp_len = orig->replay_esn->bmp_len; +- x->replay_esn->replay_window = orig->replay_esn->replay_window; +- +- x->preplay_esn = kmemdup(x->replay_esn, +- xfrm_replay_state_esn_len(x->replay_esn), ++ x->preplay_esn = kmemdup(orig->preplay_esn, ++ xfrm_replay_state_esn_len(orig->preplay_esn), + GFP_KERNEL); +- if (!x->preplay_esn) { +- kfree(x->replay_esn); ++ if (!x->preplay_esn) + return -ENOMEM; +- } + + return 0; + } +@@ -1868,4 +1864,37 @@ static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x, + + return 0; + } ++ ++extern const int xfrm_msg_min[XFRM_NR_MSGTYPES]; ++extern const struct nla_policy xfrma_policy[XFRMA_MAX+1]; ++ ++struct xfrm_translator { ++ /* Allocate frag_list and put compat translation there */ ++ int (*alloc_compat)(struct sk_buff *skb, const struct nlmsghdr *src); ++ ++ /* Allocate nlmsg with 64-bit translaton of received 32-bit message */ ++ struct nlmsghdr *(*rcv_msg_compat)(const struct nlmsghdr *nlh, ++ int maxtype, const struct nla_policy *policy); ++ ++ /* Translate 32-bit user_policy from sockptr */ ++ int (*xlate_user_policy_sockptr)(u8 **pdata32, int optlen); ++ ++ struct module *owner; ++}; ++ ++#if IS_ENABLED(CONFIG_XFRM_USER_COMPAT) ++extern int xfrm_register_translator(struct xfrm_translator *xtr); ++extern int xfrm_unregister_translator(struct xfrm_translator *xtr); ++extern struct xfrm_translator *xfrm_get_translator(void); ++extern void xfrm_put_translator(struct xfrm_translator *xtr); ++#else ++static inline struct xfrm_translator *xfrm_get_translator(void) ++{ ++ return NULL; ++} ++static inline void xfrm_put_translator(struct xfrm_translator *xtr) ++{ ++} ++#endif ++ + #endif /* _NET_XFRM_H */ +diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h +index 6be92eede5c0..a911f993219d 100644 +--- a/include/scsi/libfcoe.h ++++ b/include/scsi/libfcoe.h +@@ -261,7 +261,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *, + struct fc_frame *); + + /* libfcoe funcs */ +-u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int); ++u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], unsigned int, unsigned int); + int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *, + const struct libfc_function_template *, int init_fcp); + u32 fcoe_fc_crc(struct fc_frame *fp); +diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h +index c7b1dc713cdd..9c7f4aad6db6 100644 +--- a/include/scsi/libiscsi.h ++++ b/include/scsi/libiscsi.h +@@ -144,6 +144,9 @@ struct iscsi_task { + void *dd_data; /* driver/transport data */ + }; + ++/* invalid scsi_task pointer */ ++#define INVALID_SCSI_TASK (struct iscsi_task *)-1l ++ + static inline int iscsi_task_has_unsol_data(struct iscsi_task 
*task) + { + return task->unsol_r2t.data_length > task->unsol_r2t.sent; +diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h +index 20bf7eaef05a..d699fdc78cbb 100644 +--- a/include/scsi/scsi_common.h ++++ b/include/scsi/scsi_common.h +@@ -24,6 +24,13 @@ scsi_command_size(const unsigned char *cmnd) + scsi_varlen_cdb_length(cmnd) : COMMAND_SIZE(cmnd[0]); + } + ++static inline unsigned char ++scsi_command_control(const unsigned char *cmnd) ++{ ++ return (cmnd[0] == VARIABLE_LENGTH_CMD) ? ++ cmnd[1] : cmnd[COMMAND_SIZE(cmnd[0]) - 1]; ++} ++ + /* Returns a human-readable name for the device */ + extern const char *scsi_device_type(unsigned type); + +diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h +index 6183d20a01fb..e673c7c9c5fb 100644 +--- a/include/scsi/scsi_transport_iscsi.h ++++ b/include/scsi/scsi_transport_iscsi.h +@@ -437,6 +437,8 @@ extern void iscsi_free_session(struct iscsi_cls_session *session); + extern int iscsi_destroy_session(struct iscsi_cls_session *session); + extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess, + int dd_size, uint32_t cid); ++extern void iscsi_put_conn(struct iscsi_cls_conn *conn); ++extern void iscsi_get_conn(struct iscsi_cls_conn *conn); + extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn); + extern void iscsi_unblock_session(struct iscsi_cls_session *session); + extern void iscsi_block_session(struct iscsi_cls_session *session); +diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h +index 9b1d43d671a3..8c18dc6d3fde 100644 +--- a/include/soc/nps/common.h ++++ b/include/soc/nps/common.h +@@ -45,6 +45,12 @@ + #define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60 + #define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422 + ++#ifndef AUX_IENABLE ++#define AUX_IENABLE 0x40c ++#endif ++ ++#define CTOP_AUX_IACK (0xFFFFF800 + 0x088) ++ + #ifndef __ASSEMBLY__ + + /* In order to increase compilation test coverage */ +diff --git a/include/soc/qcom/boot_stats.h b/include/soc/qcom/boot_stats.h +index 227d2435c662..e927d45ca83a 100644 +--- a/include/soc/qcom/boot_stats.h ++++ b/include/soc/qcom/boot_stats.h +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2013-2014,2019 The Linux Foundation. All rights reserved. ++/* Copyright (c) 2013-2014,2019-2020 The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -44,7 +44,9 @@ static inline phys_addr_t msm_timer_get_pa(void) { return 0; } + #ifdef CONFIG_MSM_BOOT_TIME_MARKER + static inline int boot_marker_enabled(void) { return 1; } + void place_marker(const char *name); ++void update_marker(const char *name); + #else + static inline void place_marker(char *name) { }; ++static inline void update_marker(const char *name) { }; + static inline int boot_marker_enabled(void) { return 0; } + #endif +diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h +index c2c6d28fde74..9a5978272f57 100644 +--- a/include/sound/compress_driver.h ++++ b/include/sound/compress_driver.h +@@ -72,6 +72,7 @@ struct snd_compr_runtime { + * @direction: stream direction, playback/recording + * @metadata_set: metadata set flag, true when set + * @next_track: has userspace signal next track transition, true when set ++ * @partial_drain: undergoing partial_drain for stream, true when set + * @private_data: pointer to DSP private data + */ + struct snd_compr_stream { +@@ -83,6 +84,7 @@ struct snd_compr_stream { + enum snd_compr_direction direction; + bool metadata_set; + bool next_track; ++ bool partial_drain; + void *private_data; + struct snd_soc_pcm_runtime *be; + }; +@@ -191,7 +193,13 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream) + if (snd_BUG_ON(!stream)) + return; + +- stream->runtime->state = SNDRV_PCM_STATE_SETUP; ++ /* for partial_drain case we are back to running state on success */ ++ if (stream->partial_drain) { ++ stream->runtime->state = SNDRV_PCM_STATE_RUNNING; ++ stream->partial_drain = false; /* clear this flag as well */ ++ } else { ++ stream->runtime->state = SNDRV_PCM_STATE_SETUP; ++ } + + wake_up(&stream->runtime->sleep); + } +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h +index 30f99ce4c6ce..8a70d38f1332 100644 +--- a/include/target/target_core_base.h ++++ b/include/target/target_core_base.h +@@ -178,6 +178,10 @@ enum tcm_sense_reason_table { + TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16), + TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17), + TCM_COPY_TARGET_DEVICE_NOT_REACHABLE = R(0x18), ++ TCM_TOO_MANY_TARGET_DESCS = R(0x19), ++ TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE = R(0x1a), ++ TCM_TOO_MANY_SEGMENT_DESCS = R(0x1b), ++ TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE = R(0x1c), + #undef R + }; + +diff --git a/include/trace/events/target.h b/include/trace/events/target.h +index 50fea660c0f8..d543e8b87e50 100644 +--- a/include/trace/events/target.h ++++ b/include/trace/events/target.h +@@ -139,6 +139,7 @@ TRACE_EVENT(target_sequencer_start, + __field( unsigned int, opcode ) + __field( unsigned int, data_length ) + __field( unsigned int, task_attribute ) ++ __field( unsigned char, control ) + __array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE ) + __string( initiator, cmd->se_sess->se_node_acl->initiatorname ) + ), +@@ -148,6 +149,7 @@ TRACE_EVENT(target_sequencer_start, + __entry->opcode = cmd->t_task_cdb[0]; + __entry->data_length = cmd->data_length; + __entry->task_attribute = cmd->sam_task_attr; ++ __entry->control = scsi_command_control(cmd->t_task_cdb); + memcpy(__entry->cdb, cmd->t_task_cdb, TCM_MAX_COMMAND_SIZE); + __assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname); + ), +@@ -157,9 +159,7 @@ TRACE_EVENT(target_sequencer_start, + show_opcode_name(__entry->opcode), + __entry->data_length, 
__print_hex(__entry->cdb, 16), + show_task_attribute_name(__entry->task_attribute), +- scsi_command_size(__entry->cdb) <= 16 ? +- __entry->cdb[scsi_command_size(__entry->cdb) - 1] : +- __entry->cdb[1] ++ __entry->control + ) + ); + +@@ -174,6 +174,7 @@ TRACE_EVENT(target_cmd_complete, + __field( unsigned int, opcode ) + __field( unsigned int, data_length ) + __field( unsigned int, task_attribute ) ++ __field( unsigned char, control ) + __field( unsigned char, scsi_status ) + __field( unsigned char, sense_length ) + __array( unsigned char, cdb, TCM_MAX_COMMAND_SIZE ) +@@ -186,6 +187,7 @@ TRACE_EVENT(target_cmd_complete, + __entry->opcode = cmd->t_task_cdb[0]; + __entry->data_length = cmd->data_length; + __entry->task_attribute = cmd->sam_task_attr; ++ __entry->control = scsi_command_control(cmd->t_task_cdb); + __entry->scsi_status = cmd->scsi_status; + __entry->sense_length = cmd->scsi_status == SAM_STAT_CHECK_CONDITION ? + min(18, ((u8 *) cmd->sense_buffer)[SPC_ADD_SENSE_LEN_OFFSET] + 8) : 0; +@@ -202,9 +204,7 @@ TRACE_EVENT(target_cmd_complete, + show_opcode_name(__entry->opcode), + __entry->data_length, __print_hex(__entry->cdb, 16), + show_task_attribute_name(__entry->task_attribute), +- scsi_command_size(__entry->cdb) <= 16 ? +- __entry->cdb[scsi_command_size(__entry->cdb) - 1] : +- __entry->cdb[1] ++ __entry->control + ) + ); + +diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h +index 7bd8783a590f..2cad62ed000c 100644 +--- a/include/trace/events/writeback.h ++++ b/include/trace/events/writeback.h +@@ -65,8 +65,9 @@ TRACE_EVENT(writeback_dirty_page, + ), + + TP_fast_assign( +- strncpy(__entry->name, +- mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32); ++ strscpy_pad(__entry->name, ++ bdi_dev_name(mapping ? inode_to_bdi(mapping->host) : ++ NULL), 32); + __entry->ino = mapping ? mapping->host->i_ino : 0; + __entry->index = page->index; + ), +@@ -95,8 +96,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template, + struct backing_dev_info *bdi = inode_to_bdi(inode); + + /* may be called for files on pseudo FSes w/ unregistered bdi */ +- strncpy(__entry->name, +- bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32); ++ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); + __entry->ino = inode->i_ino; + __entry->state = inode->i_state; + __entry->flags = flags; +@@ -175,8 +175,8 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template, + ), + + TP_fast_assign( +- strncpy(__entry->name, +- dev_name(inode_to_bdi(inode)->dev), 32); ++ strscpy_pad(__entry->name, ++ bdi_dev_name(inode_to_bdi(inode)), 32); + __entry->ino = inode->i_ino; + __entry->sync_mode = wbc->sync_mode; + __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); +@@ -219,8 +219,7 @@ DECLARE_EVENT_CLASS(writeback_work_class, + __field(unsigned int, cgroup_ino) + ), + TP_fast_assign( +- strncpy(__entry->name, +- wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32); ++ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); + __entry->nr_pages = work->nr_pages; + __entry->sb_dev = work->sb ? 
work->sb->s_dev : 0; + __entry->sync_mode = work->sync_mode; +@@ -273,7 +272,7 @@ DECLARE_EVENT_CLASS(writeback_class, + __field(unsigned int, cgroup_ino) + ), + TP_fast_assign( +- strncpy(__entry->name, dev_name(wb->bdi->dev), 32); ++ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); + __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); + ), + TP_printk("bdi %s: cgroup_ino=%u", +@@ -296,7 +295,7 @@ TRACE_EVENT(writeback_bdi_register, + __array(char, name, 32) + ), + TP_fast_assign( +- strncpy(__entry->name, dev_name(bdi->dev), 32); ++ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); + ), + TP_printk("bdi %s", + __entry->name +@@ -321,7 +320,7 @@ DECLARE_EVENT_CLASS(wbc_class, + ), + + TP_fast_assign( +- strncpy(__entry->name, dev_name(bdi->dev), 32); ++ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); + __entry->nr_to_write = wbc->nr_to_write; + __entry->pages_skipped = wbc->pages_skipped; + __entry->sync_mode = wbc->sync_mode; +@@ -360,8 +359,9 @@ DEFINE_WBC_EVENT(wbc_writepage); + TRACE_EVENT(writeback_queue_io, + TP_PROTO(struct bdi_writeback *wb, + struct wb_writeback_work *work, ++ unsigned long dirtied_before, + int moved), +- TP_ARGS(wb, work, moved), ++ TP_ARGS(wb, work, dirtied_before, moved), + TP_STRUCT__entry( + __array(char, name, 32) + __field(unsigned long, older) +@@ -371,19 +371,17 @@ TRACE_EVENT(writeback_queue_io, + __field(unsigned int, cgroup_ino) + ), + TP_fast_assign( +- unsigned long *older_than_this = work->older_than_this; +- strncpy(__entry->name, dev_name(wb->bdi->dev), 32); +- __entry->older = older_than_this ? *older_than_this : 0; +- __entry->age = older_than_this ? +- (jiffies - *older_than_this) * 1000 / HZ : -1; ++ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); ++ __entry->older = dirtied_before; ++ __entry->age = (jiffies - dirtied_before) * 1000 / HZ; + __entry->moved = moved; + __entry->reason = work->reason; + __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); + ), + TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u", + __entry->name, +- __entry->older, /* older_than_this in jiffies */ +- __entry->age, /* older_than_this in relative milliseconds */ ++ __entry->older, /* dirtied_before in jiffies */ ++ __entry->age, /* dirtied_before in relative milliseconds */ + __entry->moved, + __print_symbolic(__entry->reason, WB_WORK_REASON), + __entry->cgroup_ino +@@ -458,7 +456,7 @@ TRACE_EVENT(bdi_dirty_ratelimit, + ), + + TP_fast_assign( +- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32); ++ strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32); + __entry->write_bw = KBps(wb->write_bandwidth); + __entry->avg_write_bw = KBps(wb->avg_write_bandwidth); + __entry->dirty_rate = KBps(dirty_rate); +@@ -523,7 +521,7 @@ TRACE_EVENT(balance_dirty_pages, + + TP_fast_assign( + unsigned long freerun = (thresh + bg_thresh) / 2; +- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32); ++ strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32); + + __entry->limit = global_wb_domain.dirty_limit; + __entry->setpoint = (global_wb_domain.dirty_limit + +@@ -583,8 +581,8 @@ TRACE_EVENT(writeback_sb_inodes_requeue, + ), + + TP_fast_assign( +- strncpy(__entry->name, +- dev_name(inode_to_bdi(inode)->dev), 32); ++ strscpy_pad(__entry->name, ++ bdi_dev_name(inode_to_bdi(inode)), 32); + __entry->ino = inode->i_ino; + __entry->state = inode->i_state; + __entry->dirtied_when = inode->dirtied_when; +@@ -657,8 +655,8 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template, + ), + + TP_fast_assign( +- strncpy(__entry->name, +- 
dev_name(inode_to_bdi(inode)->dev), 32); ++ strscpy_pad(__entry->name, ++ bdi_dev_name(inode_to_bdi(inode)), 32); + __entry->ino = inode->i_ino; + __entry->state = inode->i_state; + __entry->dirtied_when = inode->dirtied_when; +diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h +index 8562b1cb776b..3b92610502ca 100644 +--- a/include/uapi/linux/bcache.h ++++ b/include/uapi/linux/bcache.h +@@ -5,7 +5,7 @@ + * Bcache on disk data structures + */ + +-#include ++#include + + #define BITMASK(name, type, field, offset, size) \ + static inline __u64 name(const type *k) \ +diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h +index a1ded2a1bf1d..2abf660e9f65 100644 +--- a/include/uapi/linux/btrfs_tree.h ++++ b/include/uapi/linux/btrfs_tree.h +@@ -1,6 +1,8 @@ + #ifndef _BTRFS_CTREE_H_ + #define _BTRFS_CTREE_H_ + ++#include ++ + /* + * This header contains the structure definitions and constants used + * by file system objects that can be retrieved using +diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h +index c872bfd25e13..03c3e1869be7 100644 +--- a/include/uapi/linux/const.h ++++ b/include/uapi/linux/const.h +@@ -24,4 +24,9 @@ + #define _BITUL(x) (_AC(1,UL) << (x)) + #define _BITULL(x) (_AC(1,ULL) << (x)) + ++#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) ++#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) ++ ++#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) ++ + #endif /* !(_LINUX_CONST_H) */ +diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h +index 79b5ded2001a..3d0aa2bc69f3 100644 +--- a/include/uapi/linux/cryptouser.h ++++ b/include/uapi/linux/cryptouser.h +@@ -18,6 +18,8 @@ + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + ++#include ++ + /* Netlink configuration messages. */ + enum { + CRYPTO_MSG_BASE = 0x10, +diff --git a/include/uapi/linux/dvb/frontend.h b/include/uapi/linux/dvb/frontend.h +index 00a20cd21ee2..b653754ee9cf 100644 +--- a/include/uapi/linux/dvb/frontend.h ++++ b/include/uapi/linux/dvb/frontend.h +@@ -28,13 +28,46 @@ + + #include + +-enum fe_type { +- FE_QPSK, +- FE_QAM, +- FE_OFDM, +- FE_ATSC +-}; +- ++/** ++ * enum fe_caps - Frontend capabilities ++ * ++ * @FE_IS_STUPID: There's something wrong at the ++ * frontend, and it can't report its ++ * capabilities. 
++ * @FE_CAN_INVERSION_AUTO: Can auto-detect frequency spectral ++ * band inversion ++ * @FE_CAN_FEC_1_2: Supports FEC 1/2 ++ * @FE_CAN_FEC_2_3: Supports FEC 2/3 ++ * @FE_CAN_FEC_3_4: Supports FEC 3/4 ++ * @FE_CAN_FEC_4_5: Supports FEC 4/5 ++ * @FE_CAN_FEC_5_6: Supports FEC 5/6 ++ * @FE_CAN_FEC_6_7: Supports FEC 6/7 ++ * @FE_CAN_FEC_7_8: Supports FEC 7/8 ++ * @FE_CAN_FEC_8_9: Supports FEC 8/9 ++ * @FE_CAN_FEC_AUTO: Can auto-detect FEC ++ * @FE_CAN_QPSK: Supports QPSK modulation ++ * @FE_CAN_QAM_16: Supports 16-QAM modulation ++ * @FE_CAN_QAM_32: Supports 32-QAM modulation ++ * @FE_CAN_QAM_64: Supports 64-QAM modulation ++ * @FE_CAN_QAM_128: Supports 128-QAM modulation ++ * @FE_CAN_QAM_256: Supports 256-QAM modulation ++ * @FE_CAN_QAM_AUTO: Can auto-detect QAM modulation ++ * @FE_CAN_TRANSMISSION_MODE_AUTO: Can auto-detect transmission mode ++ * @FE_CAN_BANDWIDTH_AUTO: Can auto-detect bandwidth ++ * @FE_CAN_GUARD_INTERVAL_AUTO: Can auto-detect guard interval ++ * @FE_CAN_HIERARCHY_AUTO: Can auto-detect hierarchy ++ * @FE_CAN_8VSB: Supports 8-VSB modulation ++ * @FE_CAN_16VSB: Supporta 16-VSB modulation ++ * @FE_HAS_EXTENDED_CAPS: Unused ++ * @FE_CAN_MULTISTREAM: Supports multistream filtering ++ * @FE_CAN_TURBO_FEC: Supports "turbo FEC" modulation ++ * @FE_CAN_2G_MODULATION: Supports "2nd generation" modulation, ++ * e. g. DVB-S2, DVB-T2, DVB-C2 ++ * @FE_NEEDS_BENDING: Unused ++ * @FE_CAN_RECOVER: Can recover from a cable unplug ++ * automatically ++ * @FE_CAN_MUTE_TS: Can stop spurious TS data output ++ */ + enum fe_caps { + FE_IS_STUPID = 0, + FE_CAN_INVERSION_AUTO = 0x1, +@@ -60,15 +93,55 @@ enum fe_caps { + FE_CAN_HIERARCHY_AUTO = 0x100000, + FE_CAN_8VSB = 0x200000, + FE_CAN_16VSB = 0x400000, +- FE_HAS_EXTENDED_CAPS = 0x800000, /* We need more bitspace for newer APIs, indicate this. */ +- FE_CAN_MULTISTREAM = 0x4000000, /* frontend supports multistream filtering */ +- FE_CAN_TURBO_FEC = 0x8000000, /* frontend supports "turbo fec modulation" */ +- FE_CAN_2G_MODULATION = 0x10000000, /* frontend supports "2nd generation modulation" (DVB-S2) */ +- FE_NEEDS_BENDING = 0x20000000, /* not supported anymore, don't use (frontend requires frequency bending) */ +- FE_CAN_RECOVER = 0x40000000, /* frontend can recover from a cable unplug automatically */ +- FE_CAN_MUTE_TS = 0x80000000 /* frontend can stop spurious TS data output */ ++ FE_HAS_EXTENDED_CAPS = 0x800000, ++ FE_CAN_MULTISTREAM = 0x4000000, ++ FE_CAN_TURBO_FEC = 0x8000000, ++ FE_CAN_2G_MODULATION = 0x10000000, ++ FE_NEEDS_BENDING = 0x20000000, ++ FE_CAN_RECOVER = 0x40000000, ++ FE_CAN_MUTE_TS = 0x80000000 ++}; ++ ++/* ++ * DEPRECATED: Should be kept just due to backward compatibility. ++ */ ++enum fe_type { ++ FE_QPSK, ++ FE_QAM, ++ FE_OFDM, ++ FE_ATSC + }; + ++/** ++ * struct dvb_frontend_info - Frontend properties and capabilities ++ * ++ * @name: Name of the frontend ++ * @type: **DEPRECATED**. ++ * Should not be used on modern programs, ++ * as a frontend may have more than one type. ++ * In order to get the support types of a given ++ * frontend, use :c:type:`DTV_ENUM_DELSYS` ++ * instead. ++ * @frequency_min: Minimal frequency supported by the frontend. ++ * @frequency_max: Minimal frequency supported by the frontend. ++ * @frequency_stepsize: All frequencies are multiple of this value. ++ * @frequency_tolerance: Frequency tolerance. ++ * @symbol_rate_min: Minimal symbol rate, in bauds ++ * (for Cable/Satellite systems). ++ * @symbol_rate_max: Maximal symbol rate, in bauds ++ * (for Cable/Satellite systems). 
++ * @symbol_rate_tolerance: Maximal symbol rate tolerance, in ppm ++ * (for Cable/Satellite systems). ++ * @notifier_delay: **DEPRECATED**. Not used by any driver. ++ * @caps: Capabilities supported by the frontend, ++ * as specified in &enum fe_caps. ++ * ++ * .. note: ++ * ++ * #. The frequencies are specified in Hz for Terrestrial and Cable ++ * systems. ++ * #. The frequencies are specified in kHz for Satellite systems. ++ */ + struct dvb_frontend_info { + char name[128]; + enum fe_type type; /* DEPRECATED. Use DTV_ENUM_DELSYS instead */ +@@ -78,55 +151,105 @@ struct dvb_frontend_info { + __u32 frequency_tolerance; + __u32 symbol_rate_min; + __u32 symbol_rate_max; +- __u32 symbol_rate_tolerance; /* ppm */ ++ __u32 symbol_rate_tolerance; + __u32 notifier_delay; /* DEPRECATED */ + enum fe_caps caps; + }; + +- + /** +- * Check out the DiSEqC bus spec available on http://www.eutelsat.org/ for +- * the meaning of this struct... ++ * struct dvb_diseqc_master_cmd - DiSEqC master command ++ * ++ * @msg: ++ * DiSEqC message to be sent. It contains a 3 bytes header with: ++ * framing + address + command, and an optional argument ++ * of up to 3 bytes of data. ++ * @msg_len: ++ * Length of the DiSEqC message. Valid values are 3 to 6. ++ * ++ * Check out the DiSEqC bus spec available on http://www.eutelsat.org/ for ++ * the possible messages that can be used. + */ + struct dvb_diseqc_master_cmd { +- __u8 msg [6]; /* { framing, address, command, data [3] } */ +- __u8 msg_len; /* valid values are 3...6 */ ++ __u8 msg[6]; ++ __u8 msg_len; + }; + ++/** ++ * struct dvb_diseqc_slave_reply - DiSEqC received data ++ * ++ * @msg: ++ * DiSEqC message buffer to store a message received via DiSEqC. ++ * It contains one byte header with: framing and ++ * an optional argument of up to 3 bytes of data. ++ * @msg_len: ++ * Length of the DiSEqC message. Valid values are 0 to 4, ++ * where 0 means no message. ++ * @timeout: ++ * Return from ioctl after timeout ms with errorcode when ++ * no message was received. ++ * ++ * Check out the DiSEqC bus spec available on http://www.eutelsat.org/ for ++ * the possible messages that can be used. ++ */ + struct dvb_diseqc_slave_reply { +- __u8 msg [4]; /* { framing, data [3] } */ +- __u8 msg_len; /* valid values are 0...4, 0 means no msg */ +- int timeout; /* return from ioctl after timeout ms with */ +-}; /* errorcode when no message was received */ ++ __u8 msg[4]; ++ __u8 msg_len; ++ int timeout; ++}; + ++/** ++ * enum fe_sec_voltage - DC Voltage used to feed the LNBf ++ * ++ * @SEC_VOLTAGE_13: Output 13V to the LNBf ++ * @SEC_VOLTAGE_18: Output 18V to the LNBf ++ * @SEC_VOLTAGE_OFF: Don't feed the LNBf with a DC voltage ++ */ + enum fe_sec_voltage { + SEC_VOLTAGE_13, + SEC_VOLTAGE_18, + SEC_VOLTAGE_OFF + }; + ++/** ++ * enum fe_sec_tone_mode - Type of tone to be send to the LNBf. ++ * @SEC_TONE_ON: Sends a 22kHz tone burst to the antenna. ++ * @SEC_TONE_OFF: Don't send a 22kHz tone to the antenna (except ++ * if the ``FE_DISEQC_*`` ioctls are called). 
++ */ + enum fe_sec_tone_mode { + SEC_TONE_ON, + SEC_TONE_OFF + }; + ++/** ++ * enum fe_sec_mini_cmd - Type of mini burst to be sent ++ * ++ * @SEC_MINI_A: Sends a mini-DiSEqC 22kHz '0' Tone Burst to select ++ * satellite-A ++ * @SEC_MINI_B: Sends a mini-DiSEqC 22kHz '1' Data Burst to select ++ * satellite-B ++ */ + enum fe_sec_mini_cmd { + SEC_MINI_A, + SEC_MINI_B + }; + + /** +- * enum fe_status - enumerates the possible frontend status +- * @FE_HAS_SIGNAL: found something above the noise level +- * @FE_HAS_CARRIER: found a DVB signal +- * @FE_HAS_VITERBI: FEC is stable +- * @FE_HAS_SYNC: found sync bytes +- * @FE_HAS_LOCK: everything's working +- * @FE_TIMEDOUT: no lock within the last ~2 seconds +- * @FE_REINIT: frontend was reinitialized, application is recommended +- * to reset DiSEqC, tone and parameters ++ * enum fe_status - Enumerates the possible frontend status. ++ * @FE_NONE: The frontend doesn't have any kind of lock. ++ * That's the initial frontend status ++ * @FE_HAS_SIGNAL: Has found something above the noise level. ++ * @FE_HAS_CARRIER: Has found a DVB signal. ++ * @FE_HAS_VITERBI: FEC inner coding (Viterbi, LDPC or other inner code). ++ * is stable. ++ * @FE_HAS_SYNC: Synchronization bytes was found. ++ * @FE_HAS_LOCK: DVB were locked and everything is working. ++ * @FE_TIMEDOUT: Fo lock within the last about 2 seconds. ++ * @FE_REINIT: Frontend was reinitialized, application is recommended ++ * to reset DiSEqC, tone and parameters. + */ + enum fe_status { ++ FE_NONE = 0x00, + FE_HAS_SIGNAL = 0x01, + FE_HAS_CARRIER = 0x02, + FE_HAS_VITERBI = 0x04, +@@ -136,12 +259,45 @@ enum fe_status { + FE_REINIT = 0x40, + }; + ++/** ++ * enum fe_spectral_inversion - Type of inversion band ++ * ++ * @INVERSION_OFF: Don't do spectral band inversion. ++ * @INVERSION_ON: Do spectral band inversion. ++ * @INVERSION_AUTO: Autodetect spectral band inversion. ++ * ++ * This parameter indicates if spectral inversion should be presumed or ++ * not. In the automatic setting (``INVERSION_AUTO``) the hardware will try ++ * to figure out the correct setting by itself. If the hardware doesn't ++ * support, the DVB core will try to lock at the carrier first with ++ * inversion off. If it fails, it will try to enable inversion. ++ */ + enum fe_spectral_inversion { + INVERSION_OFF, + INVERSION_ON, + INVERSION_AUTO + }; + ++/** ++ * enum fe_code_rate - Type of Forward Error Correction (FEC) ++ * ++ * ++ * @FEC_NONE: No Forward Error Correction Code ++ * @FEC_1_2: Forward Error Correction Code 1/2 ++ * @FEC_2_3: Forward Error Correction Code 2/3 ++ * @FEC_3_4: Forward Error Correction Code 3/4 ++ * @FEC_4_5: Forward Error Correction Code 4/5 ++ * @FEC_5_6: Forward Error Correction Code 5/6 ++ * @FEC_6_7: Forward Error Correction Code 6/7 ++ * @FEC_7_8: Forward Error Correction Code 7/8 ++ * @FEC_8_9: Forward Error Correction Code 8/9 ++ * @FEC_AUTO: Autodetect Error Correction Code ++ * @FEC_3_5: Forward Error Correction Code 3/5 ++ * @FEC_9_10: Forward Error Correction Code 9/10 ++ * @FEC_2_5: Forward Error Correction Code 2/5 ++ * ++ * Please note that not all FEC types are supported by a given standard. 
++ */ + enum fe_code_rate { + FEC_NONE = 0, + FEC_1_2, +@@ -158,6 +314,26 @@ enum fe_code_rate { + FEC_2_5, + }; + ++/** ++ * enum fe_modulation - Type of modulation/constellation ++ * @QPSK: QPSK modulation ++ * @QAM_16: 16-QAM modulation ++ * @QAM_32: 32-QAM modulation ++ * @QAM_64: 64-QAM modulation ++ * @QAM_128: 128-QAM modulation ++ * @QAM_256: 256-QAM modulation ++ * @QAM_AUTO: Autodetect QAM modulation ++ * @VSB_8: 8-VSB modulation ++ * @VSB_16: 16-VSB modulation ++ * @PSK_8: 8-PSK modulation ++ * @APSK_16: 16-APSK modulation ++ * @APSK_32: 32-APSK modulation ++ * @DQPSK: DQPSK modulation ++ * @QAM_4_NR: 4-QAM-NR modulation ++ * ++ * Please note that not all modulations are supported by a given standard. ++ * ++ */ + enum fe_modulation { + QPSK, + QAM_16, +@@ -175,6 +351,32 @@ enum fe_modulation { + QAM_4_NR, + }; + ++/** ++ * enum fe_transmit_mode - Transmission mode ++ * ++ * @TRANSMISSION_MODE_AUTO: ++ * Autodetect transmission mode. The hardware will try to find the ++ * correct FFT-size (if capable) to fill in the missing parameters. ++ * @TRANSMISSION_MODE_1K: ++ * Transmission mode 1K ++ * @TRANSMISSION_MODE_2K: ++ * Transmission mode 2K ++ * @TRANSMISSION_MODE_8K: ++ * Transmission mode 8K ++ * @TRANSMISSION_MODE_4K: ++ * Transmission mode 4K ++ * @TRANSMISSION_MODE_16K: ++ * Transmission mode 16K ++ * @TRANSMISSION_MODE_32K: ++ * Transmission mode 32K ++ * @TRANSMISSION_MODE_C1: ++ * Single Carrier (C=1) transmission mode (DTMB only) ++ * @TRANSMISSION_MODE_C3780: ++ * Multi Carrier (C=3780) transmission mode (DTMB only) ++ * ++ * Please note that not all transmission modes are supported by a given ++ * standard. ++ */ + enum fe_transmit_mode { + TRANSMISSION_MODE_2K, + TRANSMISSION_MODE_8K, +@@ -187,6 +389,23 @@ enum fe_transmit_mode { + TRANSMISSION_MODE_C3780, + }; + ++/** ++ * enum fe_guard_interval - Guard interval ++ * ++ * @GUARD_INTERVAL_AUTO: Autodetect the guard interval ++ * @GUARD_INTERVAL_1_128: Guard interval 1/128 ++ * @GUARD_INTERVAL_1_32: Guard interval 1/32 ++ * @GUARD_INTERVAL_1_16: Guard interval 1/16 ++ * @GUARD_INTERVAL_1_8: Guard interval 1/8 ++ * @GUARD_INTERVAL_1_4: Guard interval 1/4 ++ * @GUARD_INTERVAL_19_128: Guard interval 19/128 ++ * @GUARD_INTERVAL_19_256: Guard interval 19/256 ++ * @GUARD_INTERVAL_PN420: PN length 420 (1/4) ++ * @GUARD_INTERVAL_PN595: PN length 595 (1/6) ++ * @GUARD_INTERVAL_PN945: PN length 945 (1/9) ++ * ++ * Please note that not all guard intervals are supported by a given standard. ++ */ + enum fe_guard_interval { + GUARD_INTERVAL_1_32, + GUARD_INTERVAL_1_16, +@@ -201,6 +420,16 @@ enum fe_guard_interval { + GUARD_INTERVAL_PN945, + }; + ++/** ++ * enum fe_hierarchy - Hierarchy ++ * @HIERARCHY_NONE: No hierarchy ++ * @HIERARCHY_AUTO: Autodetect hierarchy (if supported) ++ * @HIERARCHY_1: Hierarchy 1 ++ * @HIERARCHY_2: Hierarchy 2 ++ * @HIERARCHY_4: Hierarchy 4 ++ * ++ * Please note that not all hierarchy types are supported by a given standard. ++ */ + enum fe_hierarchy { + HIERARCHY_NONE, + HIERARCHY_1, +@@ -209,6 +438,15 @@ enum fe_hierarchy { + HIERARCHY_AUTO + }; + ++/** ++ * enum fe_interleaving - Interleaving ++ * @INTERLEAVING_NONE: No interleaving. ++ * @INTERLEAVING_AUTO: Auto-detect interleaving. ++ * @INTERLEAVING_240: Interleaving of 240 symbols. ++ * @INTERLEAVING_720: Interleaving of 720 symbols. ++ * ++ * Please note that, currently, only DTMB uses it. 
++ */ + enum fe_interleaving { + INTERLEAVING_NONE, + INTERLEAVING_AUTO, +@@ -216,7 +454,8 @@ enum fe_interleaving { + INTERLEAVING_720, + }; + +-/* S2API Commands */ ++/* DVBv5 property Commands */ ++ + #define DTV_UNDEFINED 0 + #define DTV_TUNE 1 + #define DTV_CLEAR 2 +@@ -309,19 +548,79 @@ enum fe_interleaving { + + #define DTV_MAX_COMMAND DTV_STAT_TOTAL_BLOCK_COUNT + ++/** ++ * enum fe_pilot - Type of pilot tone ++ * ++ * @PILOT_ON: Pilot tones enabled ++ * @PILOT_OFF: Pilot tones disabled ++ * @PILOT_AUTO: Autodetect pilot tones ++ */ + enum fe_pilot { + PILOT_ON, + PILOT_OFF, + PILOT_AUTO, + }; + ++/** ++ * enum fe_rolloff - Rolloff factor (also known as alpha) ++ * @ROLLOFF_35: Roloff factor: 35% ++ * @ROLLOFF_20: Roloff factor: 20% ++ * @ROLLOFF_25: Roloff factor: 25% ++ * @ROLLOFF_AUTO: Auto-detect the roloff factor. ++ * ++ * .. note: ++ * ++ * Roloff factor of 35% is implied on DVB-S. On DVB-S2, it is default. ++ */ + enum fe_rolloff { +- ROLLOFF_35, /* Implied value in DVB-S, default for DVB-S2 */ ++ ROLLOFF_35, + ROLLOFF_20, + ROLLOFF_25, + ROLLOFF_AUTO, + }; + ++/** ++ * enum fe_delivery_system - Type of the delivery system ++ * ++ * @SYS_UNDEFINED: ++ * Undefined standard. Generally, indicates an error ++ * @SYS_DVBC_ANNEX_A: ++ * Cable TV: DVB-C following ITU-T J.83 Annex A spec ++ * @SYS_DVBC_ANNEX_B: ++ * Cable TV: DVB-C following ITU-T J.83 Annex B spec (ClearQAM) ++ * @SYS_DVBC_ANNEX_C: ++ * Cable TV: DVB-C following ITU-T J.83 Annex C spec ++ * @SYS_ISDBC: ++ * Cable TV: ISDB-C (no drivers yet) ++ * @SYS_DVBT: ++ * Terrestrial TV: DVB-T ++ * @SYS_DVBT2: ++ * Terrestrial TV: DVB-T2 ++ * @SYS_ISDBT: ++ * Terrestrial TV: ISDB-T ++ * @SYS_ATSC: ++ * Terrestrial TV: ATSC ++ * @SYS_ATSCMH: ++ * Terrestrial TV (mobile): ATSC-M/H ++ * @SYS_DTMB: ++ * Terrestrial TV: DTMB ++ * @SYS_DVBS: ++ * Satellite TV: DVB-S ++ * @SYS_DVBS2: ++ * Satellite TV: DVB-S2 ++ * @SYS_TURBO: ++ * Satellite TV: DVB-S Turbo ++ * @SYS_ISDBS: ++ * Satellite TV: ISDB-S ++ * @SYS_DAB: ++ * Digital audio: DAB (not fully supported) ++ * @SYS_DSS: ++ * Satellite TV: DSS (not fully supported) ++ * @SYS_CMMB: ++ * Terrestrial TV (mobile): CMMB (not fully supported) ++ * @SYS_DVBH: ++ * Terrestrial TV (mobile): DVB-H (standard deprecated) ++ */ + enum fe_delivery_system { + SYS_UNDEFINED, + SYS_DVBC_ANNEX_A, +@@ -344,35 +643,85 @@ enum fe_delivery_system { + SYS_DVBC_ANNEX_C, + }; + +-/* backward compatibility */ ++/* backward compatibility definitions for delivery systems */ + #define SYS_DVBC_ANNEX_AC SYS_DVBC_ANNEX_A +-#define SYS_DMBTH SYS_DTMB /* DMB-TH is legacy name, use DTMB instead */ ++#define SYS_DMBTH SYS_DTMB /* DMB-TH is legacy name, use DTMB */ + +-/* ATSC-MH */ ++/* ATSC-MH specific parameters */ + ++/** ++ * enum atscmh_sccc_block_mode - Type of Series Concatenated Convolutional ++ * Code Block Mode. ++ * ++ * @ATSCMH_SCCC_BLK_SEP: ++ * Separate SCCC: the SCCC outer code mode shall be set independently ++ * for each Group Region (A, B, C, D) ++ * @ATSCMH_SCCC_BLK_COMB: ++ * Combined SCCC: all four Regions shall have the same SCCC outer ++ * code mode. ++ * @ATSCMH_SCCC_BLK_RES: ++ * Reserved. Shouldn't be used. ++ */ + enum atscmh_sccc_block_mode { + ATSCMH_SCCC_BLK_SEP = 0, + ATSCMH_SCCC_BLK_COMB = 1, + ATSCMH_SCCC_BLK_RES = 2, + }; + ++/** ++ * enum atscmh_sccc_code_mode - Type of Series Concatenated Convolutional ++ * Code Rate. ++ * ++ * @ATSCMH_SCCC_CODE_HLF: ++ * The outer code rate of a SCCC Block is 1/2 rate. 
++ * @ATSCMH_SCCC_CODE_QTR: ++ * The outer code rate of a SCCC Block is 1/4 rate. ++ * @ATSCMH_SCCC_CODE_RES: ++ * Reserved. Should not be used. ++ */ + enum atscmh_sccc_code_mode { + ATSCMH_SCCC_CODE_HLF = 0, + ATSCMH_SCCC_CODE_QTR = 1, + ATSCMH_SCCC_CODE_RES = 2, + }; + ++/** ++ * enum atscmh_rs_frame_ensemble - Reed Solomon(RS) frame ensemble. ++ * ++ * @ATSCMH_RSFRAME_ENS_PRI: Primary Ensemble. ++ * @ATSCMH_RSFRAME_ENS_SEC: Secondary Ensemble. ++ */ + enum atscmh_rs_frame_ensemble { + ATSCMH_RSFRAME_ENS_PRI = 0, + ATSCMH_RSFRAME_ENS_SEC = 1, + }; + ++/** ++ * enum atscmh_rs_frame_mode - Reed Solomon (RS) frame mode. ++ * ++ * @ATSCMH_RSFRAME_PRI_ONLY: ++ * Single Frame: There is only a primary RS Frame for all Group ++ * Regions. ++ * @ATSCMH_RSFRAME_PRI_SEC: ++ * Dual Frame: There are two separate RS Frames: Primary RS Frame for ++ * Group Region A and B and Secondary RS Frame for Group Region C and ++ * D. ++ * @ATSCMH_RSFRAME_RES: ++ * Reserved. Shouldn't be used. ++ */ + enum atscmh_rs_frame_mode { + ATSCMH_RSFRAME_PRI_ONLY = 0, + ATSCMH_RSFRAME_PRI_SEC = 1, + ATSCMH_RSFRAME_RES = 2, + }; + ++/** ++ * enum atscmh_rs_code_mode ++ * @ATSCMH_RSCODE_211_187: Reed Solomon code (211,187). ++ * @ATSCMH_RSCODE_223_187: Reed Solomon code (223,187). ++ * @ATSCMH_RSCODE_235_187: Reed Solomon code (235,187). ++ * @ATSCMH_RSCODE_RES: Reserved. Shouldn't be used. ++ */ + enum atscmh_rs_code_mode { + ATSCMH_RSCODE_211_187 = 0, + ATSCMH_RSCODE_223_187 = 1, +@@ -383,28 +732,18 @@ enum atscmh_rs_code_mode { + #define NO_STREAM_ID_FILTER (~0U) + #define LNA_AUTO (~0U) + +-struct dtv_cmds_h { +- char *name; /* A display name for debugging purposes */ +- +- __u32 cmd; /* A unique ID */ +- +- /* Flags */ +- __u32 set:1; /* Either a set or get property */ +- __u32 buffer:1; /* Does this property use the buffer? */ +- __u32 reserved:30; /* Align */ +-}; +- + /** +- * Scale types for the quality parameters. ++ * enum fecap_scale_params - scale types for the quality parameters. ++ * + * @FE_SCALE_NOT_AVAILABLE: That QoS measure is not available. That + * could indicate a temporary or a permanent + * condition. + * @FE_SCALE_DECIBEL: The scale is measured in 0.001 dB steps, typically +- * used on signal measures. ++ * used on signal measures. + * @FE_SCALE_RELATIVE: The scale is a relative percentual measure, +- * ranging from 0 (0%) to 0xffff (100%). ++ * ranging from 0 (0%) to 0xffff (100%). + * @FE_SCALE_COUNTER: The scale counts the occurrence of an event, like +- * bit error, block error, lapsed time. ++ * bit error, block error, lapsed time. + */ + enum fecap_scale_params { + FE_SCALE_NOT_AVAILABLE = 0, +@@ -416,24 +755,38 @@ enum fecap_scale_params { + /** + * struct dtv_stats - Used for reading a DTV status property + * +- * @value: value of the measure. Should range from 0 to 0xffff; + * @scale: Filled with enum fecap_scale_params - the scale + * in usage for that parameter + * ++ * The ``{unnamed_union}`` may have either one of the values below: ++ * ++ * %svalue ++ * integer value of the measure, for %FE_SCALE_DECIBEL, ++ * used for dB measures. The unit is 0.001 dB. ++ * ++ * %uvalue ++ * unsigned integer value of the measure, used when @scale is ++ * either %FE_SCALE_RELATIVE or %FE_SCALE_COUNTER. ++ * + * For most delivery systems, this will return a single value for each + * parameter. ++ * + * It should be noticed, however, that new OFDM delivery systems like + * ISDB can use different modulation types for each group of carriers. 
+ * On such standards, up to 8 groups of statistics can be provided, one + * for each carrier group (called "layer" on ISDB). ++ * + * In order to be consistent with other delivery systems, the first + * value refers to the entire set of carriers ("global"). +- * dtv_status:scale should use the value FE_SCALE_NOT_AVAILABLE when ++ * ++ * @scale should use the value %FE_SCALE_NOT_AVAILABLE when + * the value for the entire group of carriers or from one specific layer + * is not provided by the hardware. +- * st.len should be filled with the latest filled status + 1. + * +- * In other words, for ISDB, those values should be filled like: ++ * @len should be filled with the latest filled status + 1. ++ * ++ * In other words, for ISDB, those values should be filled like:: ++ * + * u.st.stat.svalue[0] = global statistics; + * u.st.stat.scale[0] = FE_SCALE_DECIBEL; + * u.st.stat.value[1] = layer A statistics; +@@ -455,11 +808,39 @@ struct dtv_stats { + + #define MAX_DTV_STATS 4 + ++/** ++ * struct dtv_fe_stats - store Digital TV frontend statistics ++ * ++ * @len: length of the statistics - if zero, stats is disabled. ++ * @stat: array with digital TV statistics. ++ * ++ * On most standards, @len can either be 0 or 1. However, for ISDB, each ++ * layer is modulated in separate. So, each layer may have its own set ++ * of statistics. If so, stat[0] carries on a global value for the property. ++ * Indexes 1 to 3 means layer A to B. ++ */ + struct dtv_fe_stats { + __u8 len; + struct dtv_stats stat[MAX_DTV_STATS]; + } __attribute__ ((packed)); + ++/** ++ * struct dtv_property - store one of frontend command and its value ++ * ++ * @cmd: Digital TV command. ++ * @reserved: Not used. ++ * @u: Union with the values for the command. ++ * @result: Unused ++ * ++ * The @u union may have either one of the values below: ++ * ++ * %data ++ * an unsigned 32-bits number. ++ * %st ++ * a &struct dtv_fe_stats array of statistics. ++ * %buffer ++ * a buffer of up to 32 characters (currently unused). ++ */ + struct dtv_property { + __u32 cmd; + __u32 reserved[3]; +@@ -479,17 +860,70 @@ struct dtv_property { + /* num of properties cannot exceed DTV_IOCTL_MAX_MSGS per ioctl */ + #define DTV_IOCTL_MAX_MSGS 64 + ++/** ++ * struct dtv_properties - a set of command/value pairs. ++ * ++ * @num: amount of commands stored at the struct. ++ * @props: a pointer to &struct dtv_property. ++ */ + struct dtv_properties { + __u32 num; + struct dtv_property *props; + }; + ++/* ++ * When set, this flag will disable any zigzagging or other "normal" tuning ++ * behavior. Additionally, there will be no automatic monitoring of the lock ++ * status, and hence no frontend events will be generated. If a frontend device ++ * is closed, this flag will be automatically turned off when the device is ++ * reopened read-write. 
++ */ ++#define FE_TUNE_MODE_ONESHOT 0x01 ++ ++/* Digital TV Frontend API calls */ ++ ++#define FE_GET_INFO _IOR('o', 61, struct dvb_frontend_info) ++ ++#define FE_DISEQC_RESET_OVERLOAD _IO('o', 62) ++#define FE_DISEQC_SEND_MASTER_CMD _IOW('o', 63, struct dvb_diseqc_master_cmd) ++#define FE_DISEQC_RECV_SLAVE_REPLY _IOR('o', 64, struct dvb_diseqc_slave_reply) ++#define FE_DISEQC_SEND_BURST _IO('o', 65) /* fe_sec_mini_cmd_t */ ++ ++#define FE_SET_TONE _IO('o', 66) /* fe_sec_tone_mode_t */ ++#define FE_SET_VOLTAGE _IO('o', 67) /* fe_sec_voltage_t */ ++#define FE_ENABLE_HIGH_LNB_VOLTAGE _IO('o', 68) /* int */ ++ ++#define FE_READ_STATUS _IOR('o', 69, fe_status_t) ++#define FE_READ_BER _IOR('o', 70, __u32) ++#define FE_READ_SIGNAL_STRENGTH _IOR('o', 71, __u16) ++#define FE_READ_SNR _IOR('o', 72, __u16) ++#define FE_READ_UNCORRECTED_BLOCKS _IOR('o', 73, __u32) ++ ++#define FE_SET_FRONTEND_TUNE_MODE _IO('o', 81) /* unsigned int */ ++#define FE_GET_EVENT _IOR('o', 78, struct dvb_frontend_event) ++ ++#define FE_DISHNETWORK_SEND_LEGACY_CMD _IO('o', 80) /* unsigned int */ ++ ++#define FE_SET_PROPERTY _IOW('o', 82, struct dtv_properties) ++#define FE_GET_PROPERTY _IOR('o', 83, struct dtv_properties) ++ + #if defined(__DVB_CORE__) || !defined (__KERNEL__) + + /* +- * DEPRECATED: The DVBv3 ioctls, structs and enums should not be used on +- * newer programs, as it doesn't support the second generation of digital +- * TV standards, nor supports newer delivery systems. ++ * DEPRECATED: Everything below is deprecated in favor of DVBv5 API ++ * ++ * The DVBv3 only ioctls, structs and enums should not be used on ++ * newer programs, as it doesn't support the second generation of ++ * digital TV standards, nor supports newer delivery systems. ++ * They also don't support modern frontends with usually support multiple ++ * delivery systems. ++ * ++ * Drivers shouldn't use them. ++ * ++ * New applications should use DVBv5 delivery system instead ++ */ ++ ++/* + */ + + enum fe_bandwidth { +@@ -502,7 +936,7 @@ enum fe_bandwidth { + BANDWIDTH_1_712_MHZ, + }; + +-/* This is needed for legacy userspace support */ ++/* This is kept for legacy userspace support */ + typedef enum fe_sec_voltage fe_sec_voltage_t; + typedef enum fe_caps fe_caps_t; + typedef enum fe_type fe_type_t; +@@ -520,6 +954,8 @@ typedef enum fe_pilot fe_pilot_t; + typedef enum fe_rolloff fe_rolloff_t; + typedef enum fe_delivery_system fe_delivery_system_t; + ++/* DVBv3 structs */ ++ + struct dvb_qpsk_parameters { + __u32 symbol_rate; /* symbol rate in Symbols per second */ + fe_code_rate_t fec_inner; /* forward error correction (see above) */ +@@ -561,42 +997,12 @@ struct dvb_frontend_event { + fe_status_t status; + struct dvb_frontend_parameters parameters; + }; +-#endif + +-#define FE_SET_PROPERTY _IOW('o', 82, struct dtv_properties) +-#define FE_GET_PROPERTY _IOR('o', 83, struct dtv_properties) +- +-/** +- * When set, this flag will disable any zigzagging or other "normal" tuning +- * behaviour. Additionally, there will be no automatic monitoring of the lock +- * status, and hence no frontend events will be generated. If a frontend device +- * is closed, this flag will be automatically turned off when the device is +- * reopened read-write. 
+- */ +-#define FE_TUNE_MODE_ONESHOT 0x01 +- +-#define FE_GET_INFO _IOR('o', 61, struct dvb_frontend_info) +- +-#define FE_DISEQC_RESET_OVERLOAD _IO('o', 62) +-#define FE_DISEQC_SEND_MASTER_CMD _IOW('o', 63, struct dvb_diseqc_master_cmd) +-#define FE_DISEQC_RECV_SLAVE_REPLY _IOR('o', 64, struct dvb_diseqc_slave_reply) +-#define FE_DISEQC_SEND_BURST _IO('o', 65) /* fe_sec_mini_cmd_t */ +- +-#define FE_SET_TONE _IO('o', 66) /* fe_sec_tone_mode_t */ +-#define FE_SET_VOLTAGE _IO('o', 67) /* fe_sec_voltage_t */ +-#define FE_ENABLE_HIGH_LNB_VOLTAGE _IO('o', 68) /* int */ +- +-#define FE_READ_STATUS _IOR('o', 69, fe_status_t) +-#define FE_READ_BER _IOR('o', 70, __u32) +-#define FE_READ_SIGNAL_STRENGTH _IOR('o', 71, __u16) +-#define FE_READ_SNR _IOR('o', 72, __u16) +-#define FE_READ_UNCORRECTED_BLOCKS _IOR('o', 73, __u32) ++/* DVBv3 API calls */ + + #define FE_SET_FRONTEND _IOW('o', 76, struct dvb_frontend_parameters) + #define FE_GET_FRONTEND _IOR('o', 77, struct dvb_frontend_parameters) +-#define FE_SET_FRONTEND_TUNE_MODE _IO('o', 81) /* unsigned int */ +-#define FE_GET_EVENT _IOR('o', 78, struct dvb_frontend_event) + +-#define FE_DISHNETWORK_SEND_LEGACY_CMD _IO('o', 80) /* unsigned int */ ++#endif + + #endif /*_DVBFRONTEND_H_*/ +diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h +index 5dd3332ebc66..e7e4e672d9a8 100644 +--- a/include/uapi/linux/ethtool.h ++++ b/include/uapi/linux/ethtool.h +@@ -13,7 +13,7 @@ + #ifndef _UAPI_LINUX_ETHTOOL_H + #define _UAPI_LINUX_ETHTOOL_H + +-#include ++#include + #include + #include + +diff --git a/include/uapi/linux/kernel.h b/include/uapi/linux/kernel.h +index 466073f0ce46..6e8db547fbd0 100644 +--- a/include/uapi/linux/kernel.h ++++ b/include/uapi/linux/kernel.h +@@ -2,13 +2,6 @@ + #define _UAPI_LINUX_KERNEL_H + + #include +- +-/* +- * 'kernel.h' contains some often-used function prototypes etc +- */ +-#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) +-#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) +- +-#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) ++#include + + #endif /* _UAPI_LINUX_KERNEL_H */ +diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h +index a0a365cbf3c9..0c02441d2cc9 100644 +--- a/include/uapi/linux/kvm.h ++++ b/include/uapi/linux/kvm.h +@@ -159,9 +159,11 @@ struct kvm_hyperv_exit { + #define KVM_EXIT_HYPERV_SYNIC 1 + #define KVM_EXIT_HYPERV_HCALL 2 + __u32 type; ++ __u32 pad1; + union { + struct { + __u32 msr; ++ __u32 pad2; + __u64 control; + __u64 evt_page; + __u64 msg_page; +diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h +index 774a43128a7a..fd18dcf76ec6 100644 +--- a/include/uapi/linux/lightnvm.h ++++ b/include/uapi/linux/lightnvm.h +@@ -20,7 +20,7 @@ + #define _UAPI_LINUX_LIGHTNVM_H + + #ifdef __KERNEL__ +-#include ++#include + #include + #else /* __KERNEL__ */ + #include +diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h +index 23158dbe2424..2bb48ed86144 100644 +--- a/include/uapi/linux/loop.h ++++ b/include/uapi/linux/loop.h +@@ -24,6 +24,16 @@ enum { + LO_FLAGS_DIRECT_IO = 16, + }; + ++/* LO_FLAGS that can be set using LOOP_SET_STATUS(64) */ ++#define LOOP_SET_STATUS_SETTABLE_FLAGS (LO_FLAGS_AUTOCLEAR | LO_FLAGS_PARTSCAN) ++ ++/* LO_FLAGS that can be cleared using LOOP_SET_STATUS(64) */ ++#define LOOP_SET_STATUS_CLEARABLE_FLAGS (LO_FLAGS_AUTOCLEAR) ++ ++/* LO_FLAGS that can be set using LOOP_CONFIGURE */ ++#define LOOP_CONFIGURE_SETTABLE_FLAGS (LO_FLAGS_READ_ONLY | LO_FLAGS_AUTOCLEAR \ ++ | 
LO_FLAGS_PARTSCAN | LO_FLAGS_DIRECT_IO) ++ + #include /* for __kernel_old_dev_t */ + #include /* for __u64 */ + +@@ -36,7 +46,7 @@ struct loop_info { + int lo_offset; + int lo_encrypt_type; + int lo_encrypt_key_size; /* ioctl w/o */ +- int lo_flags; /* ioctl r/o */ ++ int lo_flags; + char lo_name[LO_NAME_SIZE]; + unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ + unsigned long lo_init[2]; +@@ -52,13 +62,29 @@ struct loop_info64 { + __u32 lo_number; /* ioctl r/o */ + __u32 lo_encrypt_type; + __u32 lo_encrypt_key_size; /* ioctl w/o */ +- __u32 lo_flags; /* ioctl r/o */ ++ __u32 lo_flags; + __u8 lo_file_name[LO_NAME_SIZE]; + __u8 lo_crypt_name[LO_NAME_SIZE]; + __u8 lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ + __u64 lo_init[2]; + }; + ++/** ++ * struct loop_config - Complete configuration for a loop device. ++ * @fd: fd of the file to be used as a backing file for the loop device. ++ * @block_size: block size to use; ignored if 0. ++ * @info: struct loop_info64 to configure the loop device with. ++ * ++ * This structure is used with the LOOP_CONFIGURE ioctl, and can be used to ++ * atomically setup and configure all loop device parameters at once. ++ */ ++struct loop_config { ++ __u32 fd; ++ __u32 block_size; ++ struct loop_info64 info; ++ __u64 __reserved[8]; ++}; ++ + /* + * Loop filter types + */ +@@ -89,6 +115,7 @@ struct loop_info64 { + #define LOOP_SET_CAPACITY 0x4C07 + #define LOOP_SET_DIRECT_IO 0x4C08 + #define LOOP_SET_BLOCK_SIZE 0x4C09 ++#define LOOP_CONFIGURE 0x4C0A + + /* /dev/loop-control interface */ + #define LOOP_CTL_ADD 0x4C80 +diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h +index ed5721148768..54543bca1b79 100644 +--- a/include/uapi/linux/mroute6.h ++++ b/include/uapi/linux/mroute6.h +@@ -1,7 +1,7 @@ + #ifndef _UAPI__LINUX_MROUTE6_H + #define _UAPI__LINUX_MROUTE6_H + +-#include ++#include + #include + #include + #include /* For struct sockaddr_in6. 
*/ +diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h +index e956704f5fb1..95b8a9395ec1 100644 +--- a/include/uapi/linux/msdos_fs.h ++++ b/include/uapi/linux/msdos_fs.h +@@ -9,7 +9,9 @@ + * The MS-DOS filesystem constants/structures + */ + ++#ifndef SECTOR_SIZE + #define SECTOR_SIZE 512 /* sector size (bytes) */ ++#endif + #define SECTOR_BITS 9 /* log2(SECTOR_SIZE) */ + #define MSDOS_DPB (MSDOS_DPS) /* dir entries per block */ + #define MSDOS_DPB_BITS 4 /* log2(MSDOS_DPB) */ +diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h +index 078672b09c56..26f0b70ce354 100644 +--- a/include/uapi/linux/msm_ipa.h ++++ b/include/uapi/linux/msm_ipa.h +@@ -1796,7 +1796,7 @@ struct ipa_ioc_nat_pdn_entry { + */ + struct ipa_ioc_vlan_iface_info { + char name[IPA_RESOURCE_NAME_MAX]; +- uint8_t vlan_id; ++ uint16_t vlan_id; + }; + + /** +@@ -2102,12 +2102,14 @@ struct ipa_ioc_get_vlan_mode { + * @vlan_id: vlan ID bridge is mapped to + * @bridge_ipv4: bridge interface ipv4 address + * @subnet_mask: bridge interface subnet mask ++ * @lan2lan_sw: indicate lan2lan traffic take sw-path or not + */ + struct ipa_ioc_bridge_vlan_mapping_info { + char bridge_name[IPA_RESOURCE_NAME_MAX]; + uint16_t vlan_id; + uint32_t bridge_ipv4; + uint32_t subnet_mask; ++ uint8_t lan2lan_sw; + }; + + /** +diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h +index c6c4477c136b..d121c22bf928 100644 +--- a/include/uapi/linux/netfilter/nf_tables.h ++++ b/include/uapi/linux/netfilter/nf_tables.h +@@ -114,7 +114,7 @@ enum nf_tables_msg_types { + * @NFTA_LIST_ELEM: list element (NLA_NESTED) + */ + enum nft_list_attributes { +- NFTA_LIST_UNPEC, ++ NFTA_LIST_UNSPEC, + NFTA_LIST_ELEM, + __NFTA_LIST_MAX + }; +diff --git a/include/uapi/linux/netfilter/nfnetlink_cthelper.h b/include/uapi/linux/netfilter/nfnetlink_cthelper.h +index 33659f6fad3e..30557bade935 100644 +--- a/include/uapi/linux/netfilter/nfnetlink_cthelper.h ++++ b/include/uapi/linux/netfilter/nfnetlink_cthelper.h +@@ -4,7 +4,7 @@ + #define NFCT_HELPER_STATUS_DISABLED 0 + #define NFCT_HELPER_STATUS_ENABLED 1 + +-enum nfnl_acct_msg_types { ++enum nfnl_cthelper_msg_types { + NFNL_MSG_CTHELPER_NEW, + NFNL_MSG_CTHELPER_GET, + NFNL_MSG_CTHELPER_DEL, +diff --git a/include/uapi/linux/netfilter/x_tables.h b/include/uapi/linux/netfilter/x_tables.h +index c36969b91533..8f40c2fe0ed4 100644 +--- a/include/uapi/linux/netfilter/x_tables.h ++++ b/include/uapi/linux/netfilter/x_tables.h +@@ -1,6 +1,6 @@ + #ifndef _UAPI_X_TABLES_H + #define _UAPI_X_TABLES_H +-#include ++#include + #include + + #define XT_FUNCTION_MAXNAMELEN 30 +diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h +index 2817ca1c1ee5..b0e61994ecf5 100644 +--- a/include/uapi/linux/netlink.h ++++ b/include/uapi/linux/netlink.h +@@ -1,7 +1,7 @@ + #ifndef _UAPI__LINUX_NETLINK_H + #define _UAPI__LINUX_NETLINK_H + +-#include ++#include + #include /* for __kernel_sa_family_t */ + #include + +diff --git a/include/uapi/linux/pr.h b/include/uapi/linux/pr.h +index 57d7c0f916b6..645ef3cf3dd0 100644 +--- a/include/uapi/linux/pr.h ++++ b/include/uapi/linux/pr.h +@@ -1,6 +1,8 @@ + #ifndef _UAPI_PR_H + #define _UAPI_PR_H + ++#include ++ + enum pr_type { + PR_WRITE_EXCLUSIVE = 1, + PR_EXCLUSIVE_ACCESS = 2, +diff --git a/include/uapi/linux/qrtr.h b/include/uapi/linux/qrtr.h +index 4b25d2fc65aa..fc557cccb638 100644 +--- a/include/uapi/linux/qrtr.h ++++ b/include/uapi/linux/qrtr.h +@@ -2,6 +2,7 @@ + #define _LINUX_QRTR_H + + #include ++#include 
+ + #define QRTR_NODE_BCAST 0xffffffffu + #define QRTR_PORT_CTRL 0xfffffffeu +diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h +index d67f47648427..d6b9cbb29d15 100644 +--- a/include/uapi/linux/sysctl.h ++++ b/include/uapi/linux/sysctl.h +@@ -22,7 +22,7 @@ + #ifndef _UAPI_LINUX_SYSCTL_H + #define _UAPI_LINUX_SYSCTL_H + +-#include ++#include + #include + #include + +diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h +index 66e4d8bcb16f..9f9572b2a596 100644 +--- a/include/uapi/linux/tty_flags.h ++++ b/include/uapi/linux/tty_flags.h +@@ -38,7 +38,7 @@ + * WARNING: These flags are no longer used and have been superceded by the + * TTY_PORT_ flags in the iflags field (and not userspace-visible) + */ +-#ifndef _KERNEL_ ++#ifndef __KERNEL__ + #define ASYNCB_INITIALIZED 31 /* Serial port was initialized */ + #define ASYNCB_SUSPENDED 30 /* Serial port is suspended */ + #define ASYNCB_NORMAL_ACTIVE 29 /* Normal device is active */ +@@ -80,7 +80,7 @@ + #define ASYNC_SPD_WARP (ASYNC_SPD_HI|ASYNC_SPD_SHI) + #define ASYNC_SPD_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI) + +-#ifndef _KERNEL_ ++#ifndef __KERNEL__ + /* These flags are no longer used (and were always masked from userspace) */ + #define ASYNC_INITIALIZED (1U << ASYNCB_INITIALIZED) + #define ASYNC_NORMAL_ACTIVE (1U << ASYNCB_NORMAL_ACTIVE) +diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h +index 0303a6fb2097..ce6cafd3b5f0 100644 +--- a/include/uapi/linux/usb/ch9.h ++++ b/include/uapi/linux/usb/ch9.h +@@ -358,6 +358,9 @@ struct usb_config_descriptor { + + /*-------------------------------------------------------------------------*/ + ++/* USB String descriptors can contain at most 126 characters. */ ++#define USB_MAX_STRING_LEN 126 ++ + /* USB_DT_STRING: String descriptor */ + struct usb_string_descriptor { + __u8 bLength; +diff --git a/include/xen/events.h b/include/xen/events.h +index 88da2abaf535..ad0c61cf399b 100644 +--- a/include/xen/events.h ++++ b/include/xen/events.h +@@ -12,11 +12,16 @@ + + unsigned xen_evtchn_nr_channels(void); + +-int bind_evtchn_to_irq(unsigned int evtchn); +-int bind_evtchn_to_irqhandler(unsigned int evtchn, ++int bind_evtchn_to_irq(evtchn_port_t evtchn); ++int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn); ++int bind_evtchn_to_irqhandler(evtchn_port_t evtchn, + irq_handler_t handler, + unsigned long irqflags, const char *devname, + void *dev_id); ++int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn, ++ irq_handler_t handler, ++ unsigned long irqflags, const char *devname, ++ void *dev_id); + int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu); + int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, + irq_handler_t handler, +@@ -29,13 +34,21 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, + const char *devname, + void *dev_id); + int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, +- unsigned int remote_port); ++ evtchn_port_t remote_port); ++int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain, ++ evtchn_port_t remote_port); + int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, +- unsigned int remote_port, ++ evtchn_port_t remote_port, + irq_handler_t handler, + unsigned long irqflags, + const char *devname, + void *dev_id); ++int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain, ++ evtchn_port_t remote_port, ++ irq_handler_t handler, ++ unsigned long irqflags, ++ const char *devname, ++ void *dev_id); + + /* + * Common unbind 
function for all event sources. Takes IRQ to unbind from. +@@ -44,6 +57,14 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, + */ + void unbind_from_irqhandler(unsigned int irq, void *dev_id); + ++/* ++ * Send late EOI for an IRQ bound to an event channel via one of the *_lateeoi ++ * functions above. ++ */ ++void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags); ++/* Signal an event was spurious, i.e. there was no action resulting from it. */ ++#define XEN_EOI_FLAG_SPURIOUS 0x00000001 ++ + #define XEN_IRQ_PRIORITY_MAX EVTCHN_FIFO_PRIORITY_MAX + #define XEN_IRQ_PRIORITY_DEFAULT EVTCHN_FIFO_PRIORITY_DEFAULT + #define XEN_IRQ_PRIORITY_MIN EVTCHN_FIFO_PRIORITY_MIN +diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h +index 34b1379f9777..f9d8aac170fb 100644 +--- a/include/xen/grant_table.h ++++ b/include/xen/grant_table.h +@@ -157,6 +157,7 @@ gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr, + map->flags = flags; + map->ref = ref; + map->dom = domid; ++ map->status = 1; /* arbitrary positive value */ + } + + static inline void +diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h +index 32b944b7cebd..ed9e7e3307b7 100644 +--- a/include/xen/xenbus.h ++++ b/include/xen/xenbus.h +@@ -58,6 +58,15 @@ struct xenbus_watch + /* Path being watched. */ + const char *node; + ++ unsigned int nr_pending; ++ ++ /* ++ * Called just before enqueing new event while a spinlock is held. ++ * The event will be discarded if this callback returns false. ++ */ ++ bool (*will_handle)(struct xenbus_watch *, ++ const char **vec, unsigned int len); ++ + /* Callback (executed in a process context with no locks held). */ + void (*callback)(struct xenbus_watch *, + const char **vec, unsigned int len); +@@ -194,10 +203,14 @@ void xenbus_suspend_cancel(void); + + int xenbus_watch_path(struct xenbus_device *dev, const char *path, + struct xenbus_watch *watch, ++ bool (*will_handle)(struct xenbus_watch *, ++ const char **, unsigned int), + void (*callback)(struct xenbus_watch *, + const char **, unsigned int)); +-__printf(4, 5) ++__printf(5, 6) + int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch, ++ bool (*will_handle)(struct xenbus_watch *, ++ const char **, unsigned int), + void (*callback)(struct xenbus_watch *, + const char **, unsigned int), + const char *pathfmt, ...); +diff --git a/init/Kconfig b/init/Kconfig +index 6b5e69a360aa..73cd3d53fdae 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -65,8 +65,7 @@ config CROSS_COMPILE + + config COMPILE_TEST + bool "Compile also drivers which will not load" +- depends on !UML +- default n ++ depends on HAS_IOMEM + help + Some drivers can be compiled on a different platform than they are + intended to be run on. 
Despite they cannot be loaded there (or even +@@ -871,7 +870,8 @@ config IKHEADERS + + config LOG_BUF_SHIFT + int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" +- range 12 25 ++ range 12 25 if !H8300 ++ range 12 19 if H8300 + default 17 + depends on PRINTK + help +diff --git a/init/main.c b/init/main.c +index 369f3df3bcf3..9b59b43ed420 100644 +--- a/init/main.c ++++ b/init/main.c +@@ -470,6 +470,8 @@ static void __init mm_init(void) + */ + page_ext_init_flatmem(); + mem_init(); ++ /* page_owner must be initialized after buddy is ready */ ++ page_ext_init_flatmem_late(); + kmem_cache_init(); + percpu_init_late(); + pgtable_init(); +@@ -511,7 +513,7 @@ asmlinkage __visible void __init start_kernel(void) + smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ + boot_cpu_hotplug_init(); + +- build_all_zonelists(NULL, NULL); ++ build_all_zonelists(NULL, NULL, false); + page_alloc_init(); + + pr_notice("Kernel command line: %s\n", boot_command_line); +@@ -1011,7 +1013,7 @@ static noinline void __init kernel_init_freeable(void) + */ + set_cpus_allowed_ptr(current, cpu_all_mask); + +- cad_pid = task_pid(current); ++ cad_pid = get_pid(task_pid(current)); + + smp_prepare_cpus(setup_max_cpus); + +diff --git a/init/version.c b/init/version.c +index fe41a63efed6..5606341e9efd 100644 +--- a/init/version.c ++++ b/init/version.c +@@ -23,9 +23,7 @@ int version_string(LINUX_VERSION_CODE); + #endif + + struct uts_namespace init_uts_ns = { +- .kref = { +- .refcount = ATOMIC_INIT(2), +- }, ++ .kref = KREF_INIT(2), + .name = { + .sysname = UTS_SYSNAME, + .nodename = UTS_NODENAME, +diff --git a/kernel/Makefile b/kernel/Makefile +index 29071ac3c66b..f3a91fa080bf 100644 +--- a/kernel/Makefile ++++ b/kernel/Makefile +@@ -50,9 +50,6 @@ obj-$(CONFIG_PROFILING) += profile.o + obj-$(CONFIG_STACKTRACE) += stacktrace.o + obj-y += time/ + obj-$(CONFIG_FUTEX) += futex.o +-ifeq ($(CONFIG_COMPAT),y) +-obj-$(CONFIG_FUTEX) += futex_compat.o +-endif + obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o + obj-$(CONFIG_SMP) += smp.o + ifneq ($(CONFIG_SMP),y) +@@ -97,7 +94,6 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o + obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o + obj-$(CONFIG_TRACEPOINTS) += tracepoint.o + obj-$(CONFIG_LATENCYTOP) += latencytop.o +-obj-$(CONFIG_ELFCORE) += elfcore.o + obj-$(CONFIG_FUNCTION_TRACER) += trace/ + obj-$(CONFIG_TRACING) += trace/ + obj-$(CONFIG_TRACE_CLOCK) += trace/ +diff --git a/kernel/audit.c b/kernel/audit.c +index 07225af01e30..8660712ef632 100644 +--- a/kernel/audit.c ++++ b/kernel/audit.c +@@ -540,7 +540,7 @@ static int kauditd_thread(void *dummy) + return 0; + } + +-int audit_send_list(void *_dest) ++int audit_send_list_thread(void *_dest) + { + struct audit_netlink_list *dest = _dest; + struct sk_buff *skb; +@@ -585,6 +585,18 @@ struct sk_buff *audit_make_reply(__u32 portid, int seq, int type, int done, + return NULL; + } + ++static void audit_free_reply(struct audit_reply *reply) ++{ ++ if (!reply) ++ return; ++ ++ if (reply->skb) ++ kfree_skb(reply->skb); ++ if (reply->net) ++ put_net(reply->net); ++ kfree(reply); ++} ++ + static int audit_send_reply_thread(void *arg) + { + struct audit_reply *reply = (struct audit_reply *)arg; +@@ -597,8 +609,8 @@ static int audit_send_reply_thread(void *arg) + /* Ignore failure. It'll only happen if the sender goes away, + because our timeout is set to infinite. 
*/ + netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0); +- put_net(net); +- kfree(reply); ++ reply->skb = NULL; ++ audit_free_reply(reply); + return 0; + } + /** +@@ -611,36 +623,34 @@ static int audit_send_reply_thread(void *arg) + * @payload: payload data + * @size: payload size + * +- * Allocates an skb, builds the netlink message, and sends it to the port id. +- * No failure notifications. ++ * Allocates a skb, builds the netlink message, and sends it to the port id. + */ + static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done, + int multi, const void *payload, int size) + { + u32 portid = NETLINK_CB(request_skb).portid; +- struct net *net = sock_net(NETLINK_CB(request_skb).sk); +- struct sk_buff *skb; + struct task_struct *tsk; +- struct audit_reply *reply = kmalloc(sizeof(struct audit_reply), +- GFP_KERNEL); ++ struct audit_reply *reply; + ++ reply = kzalloc(sizeof(*reply), GFP_KERNEL); + if (!reply) + return; + +- skb = audit_make_reply(portid, seq, type, done, multi, payload, size); +- if (!skb) +- goto out; ++ reply->skb = audit_make_reply(portid, seq, type, done, multi, payload, size); ++ if (!reply->skb) ++ goto err; + +- reply->net = get_net(net); ++ reply->net = get_net(sock_net(NETLINK_CB(request_skb).sk)); + reply->portid = portid; +- reply->skb = skb; + + tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); +- if (!IS_ERR(tsk)) +- return; +- kfree_skb(skb); +-out: +- kfree(reply); ++ if (IS_ERR(tsk)) ++ goto err; ++ ++ return; ++ ++err: ++ audit_free_reply(reply); + } + + /* +diff --git a/kernel/audit.h b/kernel/audit.h +index 431444c3708b..2eaf45018837 100644 +--- a/kernel/audit.h ++++ b/kernel/audit.h +@@ -245,7 +245,7 @@ struct audit_netlink_list { + struct sk_buff_head q; + }; + +-int audit_send_list(void *); ++int audit_send_list_thread(void *); + + struct audit_net { + struct sock *nlsk; +diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c +index 712469a3103a..54b30c9bd8b1 100644 +--- a/kernel/audit_watch.c ++++ b/kernel/audit_watch.c +@@ -316,8 +316,6 @@ static void audit_update_watch(struct audit_parent *parent, + if (oentry->rule.exe) + audit_remove_mark(oentry->rule.exe); + +- audit_watch_log_rule_change(r, owatch, "updated_rules"); +- + call_rcu(&oentry->rcu, audit_free_rule_rcu); + } + +diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c +index a71ff9965cba..cd12d79d1b17 100644 +--- a/kernel/auditfilter.c ++++ b/kernel/auditfilter.c +@@ -1139,10 +1139,8 @@ int audit_rule_change(int type, __u32 portid, int seq, void *data, + int audit_list_rules_send(struct sk_buff *request_skb, int seq) + { + u32 portid = NETLINK_CB(request_skb).portid; +- struct net *net = sock_net(NETLINK_CB(request_skb).sk); + struct task_struct *tsk; + struct audit_netlink_list *dest; +- int err = 0; + + /* We can't just spew out the rules here because we might fill + * the available socket buffer space and deadlock waiting for +@@ -1150,10 +1148,10 @@ int audit_list_rules_send(struct sk_buff *request_skb, int seq) + * happen if we're actually running in the context of auditctl + * trying to _send_ the stuff */ + +- dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL); ++ dest = kmalloc(sizeof(*dest), GFP_KERNEL); + if (!dest) + return -ENOMEM; +- dest->net = get_net(net); ++ dest->net = get_net(sock_net(NETLINK_CB(request_skb).sk)); + dest->portid = portid; + skb_queue_head_init(&dest->q); + +@@ -1161,14 +1159,15 @@ int audit_list_rules_send(struct sk_buff *request_skb, int seq) + audit_list_rules(portid, seq, 
&dest->q); + mutex_unlock(&audit_filter_mutex); + +- tsk = kthread_run(audit_send_list, dest, "audit_send_list"); ++ tsk = kthread_run(audit_send_list_thread, dest, "audit_send_list"); + if (IS_ERR(tsk)) { + skb_queue_purge(&dest->q); ++ put_net(dest->net); + kfree(dest); +- err = PTR_ERR(tsk); ++ return PTR_ERR(tsk); + } + +- return err; ++ return 0; + } + + int audit_comparator(u32 left, u32 op, u32 right) +diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c +index b6e2bfd3491e..998f7466e69d 100644 +--- a/kernel/bpf/hashtab.c ++++ b/kernel/bpf/hashtab.c +@@ -429,15 +429,7 @@ static void htab_elem_free_rcu(struct rcu_head *head) + struct htab_elem *l = container_of(head, struct htab_elem, rcu); + struct bpf_htab *htab = l->htab; + +- /* must increment bpf_prog_active to avoid kprobe+bpf triggering while +- * we're calling kfree, otherwise deadlock is possible if kprobes +- * are placed somewhere inside of slub +- */ +- preempt_disable(); +- __this_cpu_inc(bpf_prog_active); + htab_elem_free(htab, l); +- __this_cpu_dec(bpf_prog_active); +- preempt_enable(); + } + + static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) +diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c +index 6039db3dcf02..770779d3cd1b 100644 +--- a/kernel/bpf/stackmap.c ++++ b/kernel/bpf/stackmap.c +@@ -73,6 +73,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) + + /* hash table size must be power of 2 */ + n_buckets = roundup_pow_of_two(attr->max_entries); ++ if (!n_buckets) ++ return ERR_PTR(-E2BIG); + + cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap); + if (cost >= U32_MAX - PAGE_SIZE) +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 335c00209f74..78bdfbefd996 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -1732,12 +1732,11 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, + * unsigned shift, so make the appropriate casts. + */ + if (min_val < 0 || dst_reg->min_value < 0) +- dst_reg->min_value = BPF_REGISTER_MIN_RANGE; ++ reset_reg_range_values(regs, insn->dst_reg); + else +- dst_reg->min_value = +- (u64)(dst_reg->min_value) >> min_val; ++ dst_reg->min_value = (u64)(dst_reg->min_value) >> max_val; + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) +- dst_reg->max_value >>= max_val; ++ dst_reg->max_value >>= min_val; + break; + default: + reset_reg_range_values(regs, insn->dst_reg); +diff --git a/kernel/cgroup.c b/kernel/cgroup.c +index e3311fe8f526..464200954737 100644 +--- a/kernel/cgroup.c ++++ b/kernel/cgroup.c +@@ -3710,6 +3710,10 @@ static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent, + struct cgroup *cgrp = kn->priv; + int ret; + ++ /* do not accept '\n' to prevent making /proc//cgroup unparsable */ ++ if (strchr(new_name_str, '\n')) ++ return -EINVAL; ++ + if (kernfs_type(kn) != KERNFS_DIR) + return -ENOTDIR; + if (kn->parent != new_parent) +@@ -5789,8 +5793,6 @@ int __init cgroup_init_early(void) + return 0; + } + +-static u16 cgroup_disable_mask __initdata; +- + /** + * cgroup_init - cgroup initialization + * +@@ -5848,12 +5850,8 @@ int __init cgroup_init(void) + * disabled flag and cftype registration needs kmalloc, + * both of which aren't available during early_init. 
+ */ +- if (cgroup_disable_mask & (1 << ssid)) { +- static_branch_disable(cgroup_subsys_enabled_key[ssid]); +- printk(KERN_INFO "Disabling %s control group subsystem\n", +- ss->name); ++ if (!cgroup_ssid_enabled(ssid)) + continue; +- } + + if (cgroup_ssid_no_v1(ssid)) + printk(KERN_INFO "Disabling %s control group subsystem in v1 mounts\n", +@@ -6296,7 +6294,10 @@ static int __init cgroup_disable(char *str) + if (strcmp(token, ss->name) && + strcmp(token, ss->legacy_name)) + continue; +- cgroup_disable_mask |= 1 << i; ++ ++ static_branch_disable(cgroup_subsys_enabled_key[i]); ++ pr_info("Disabling %s control group subsystem\n", ++ ss->name); + } + } + return 1; +@@ -6479,12 +6480,8 @@ void cgroup_sk_alloc_disable(void) + + void cgroup_sk_alloc(struct sock_cgroup_data *skcd) + { +- if (cgroup_sk_alloc_disabled) +- return; +- +- /* Socket clone path */ +- if (skcd->val) { +- cgroup_get(sock_cgroup_ptr(skcd)); ++ if (cgroup_sk_alloc_disabled) { ++ skcd->no_refcnt = 1; + return; + } + +@@ -6508,8 +6505,26 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd) + rcu_read_unlock(); + } + ++void cgroup_sk_clone(struct sock_cgroup_data *skcd) ++{ ++ /* Socket clone path */ ++ if (skcd->val) { ++ if (skcd->no_refcnt) ++ return; ++ /* ++ * We might be cloning a socket which is left in an empty ++ * cgroup and the cgroup might have already been rmdir'd. ++ * Don't use cgroup_get_live(). ++ */ ++ cgroup_get(sock_cgroup_ptr(skcd)); ++ } ++} ++ + void cgroup_sk_free(struct sock_cgroup_data *skcd) + { ++ if (skcd->no_refcnt) ++ return; ++ + cgroup_put(sock_cgroup_ptr(skcd)); + } + +diff --git a/kernel/cpu.c b/kernel/cpu.c +index b4e8c9b4bfc1..d3e4290e36d9 100644 +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -823,6 +823,10 @@ void __unregister_cpu_notifier(struct notifier_block *nb) + EXPORT_SYMBOL(__unregister_cpu_notifier); + + #ifdef CONFIG_HOTPLUG_CPU ++#ifndef arch_clear_mm_cpumask_cpu ++#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm)) ++#endif ++ + /** + * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU + * @cpu: a CPU id +@@ -858,7 +862,7 @@ void clear_tasks_mm_cpumask(int cpu) + t = find_lock_task_mm(p); + if (!t) + continue; +- cpumask_clear_cpu(cpu, mm_cpumask(t->mm)); ++ arch_clear_mm_cpumask_cpu(cpu, t->mm); + task_unlock(t); + } + rcu_read_unlock(); +diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c +index f3c19d0bed60..9ab8a3d0f2b6 100644 +--- a/kernel/cpu_pm.c ++++ b/kernel/cpu_pm.c +@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier); + */ + int cpu_pm_enter(void) + { +- int nr_calls; ++ int nr_calls = 0; + int ret = 0; + + read_lock(&cpu_pm_notifier_lock); +@@ -159,7 +159,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit); + */ + int cpu_cluster_pm_enter(unsigned long aff_level) + { +- int nr_calls; ++ int nr_calls = 0; + int ret = 0; + + read_lock(&cpu_pm_notifier_lock); +diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c +index 79517e5549f1..bc791cec58e6 100644 +--- a/kernel/debug/debug_core.c ++++ b/kernel/debug/debug_core.c +@@ -94,14 +94,6 @@ int dbg_switch_cpu; + /* Use kdb or gdbserver mode */ + int dbg_kdb_mode = 1; + +-static int __init opt_kgdb_con(char *str) +-{ +- kgdb_use_con = 1; +- return 0; +-} +- +-early_param("kgdbcon", opt_kgdb_con); +- + module_param(kgdb_use_con, int, 0644); + module_param(kgdbreboot, int, 0644); + +@@ -443,6 +435,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks) + + if (exception_level > 1) { + dump_stack(); ++ kgdb_io_module_registered = false; + panic("Recursive entry to 
debugger"); + } + +@@ -487,6 +480,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, + arch_kgdb_ops.disable_hw_break(regs); + + acquirelock: ++ rcu_read_lock(); + /* + * Interrupts will be restored by the 'trap return' code, except when + * single stepping. +@@ -541,6 +535,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, + atomic_dec(&slaves_in_kgdb); + dbg_touch_watchdogs(); + local_irq_restore(flags); ++ rcu_read_unlock(); + return 0; + } + cpu_relax(); +@@ -559,6 +554,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, + raw_spin_unlock(&dbg_master_lock); + dbg_touch_watchdogs(); + local_irq_restore(flags); ++ rcu_read_unlock(); + + goto acquirelock; + } +@@ -676,6 +672,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, + raw_spin_unlock(&dbg_master_lock); + dbg_touch_watchdogs(); + local_irq_restore(flags); ++ rcu_read_unlock(); + + return kgdb_info[cpu].ret_state; + } +@@ -806,6 +803,20 @@ static struct console kgdbcons = { + .index = -1, + }; + ++static int __init opt_kgdb_con(char *str) ++{ ++ kgdb_use_con = 1; ++ ++ if (kgdb_io_module_registered && !kgdb_con_registered) { ++ register_console(&kgdbcons); ++ kgdb_con_registered = 1; ++ } ++ ++ return 0; ++} ++ ++early_param("kgdbcon", opt_kgdb_con); ++ + #ifdef CONFIG_MAGIC_SYSRQ + static void sysrq_handle_dbg(int key) + { +diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c +index d3c5b15c86c1..4b537520432a 100644 +--- a/kernel/debug/kdb/kdb_io.c ++++ b/kernel/debug/kdb/kdb_io.c +@@ -691,12 +691,16 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap) + size_avail = sizeof(kdb_buffer) - len; + goto kdb_print_out; + } +- if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH) ++ if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH) { + /* + * This was a interactive search (using '/' at more +- * prompt) and it has completed. Clear the flag. ++ * prompt) and it has completed. Replace the \0 with ++ * its original value to ensure multi-line strings ++ * are handled properly, and return to normal mode. + */ ++ *cphold = replaced_byte; + kdb_grepping_flag = 0; ++ } + /* + * at this point the string is a full line and + * should be printed, up to the null. +diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h +index 533e04e75a9c..f51b762d6886 100644 +--- a/kernel/debug/kdb/kdb_private.h ++++ b/kernel/debug/kdb/kdb_private.h +@@ -234,7 +234,7 @@ extern struct task_struct *kdb_curr_task(int); + #define kdb_do_each_thread(g, p) do_each_thread(g, p) + #define kdb_while_each_thread(g, p) while_each_thread(g, p) + +-#define GFP_KDB (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL) ++#define GFP_KDB (in_dbg_master() ? 
GFP_ATOMIC : GFP_KERNEL) + + extern void *debug_kmalloc(size_t size, gfp_t flags); + extern void debug_kfree(void *); +diff --git a/kernel/elfcore.c b/kernel/elfcore.c +deleted file mode 100644 +index a2b29b9bdfcb..000000000000 +--- a/kernel/elfcore.c ++++ /dev/null +@@ -1,25 +0,0 @@ +-#include +-#include +-#include +-#include +-#include +- +-Elf_Half __weak elf_core_extra_phdrs(void) +-{ +- return 0; +-} +- +-int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) +-{ +- return 1; +-} +- +-int __weak elf_core_write_extra_data(struct coredump_params *cprm) +-{ +- return 1; +-} +- +-size_t __weak elf_core_extra_data_size(void) +-{ +- return 0; +-} +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 2854afbbc440..7a2d3a61a938 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -90,11 +90,11 @@ static void remote_function(void *data) + * @info: the function call argument + * + * Calls the function @func when the task is currently running. This might +- * be on the current CPU, which just calls the function directly ++ * be on the current CPU, which just calls the function directly. This will ++ * retry due to any failures in smp_call_function_single(), such as if the ++ * task_cpu() goes offline concurrently. + * +- * returns: @func return value, or +- * -ESRCH - when the process isn't running +- * -EAGAIN - when the process moved away ++ * returns @func return value or -ESRCH or -ENXIO when the process isn't running + */ + static int + task_function_call(struct task_struct *p, remote_function_f func, void *info) +@@ -107,11 +107,17 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info) + }; + int ret; + +- do { +- ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1); ++ for (;;) { ++ ret = smp_call_function_single(task_cpu(p), remote_function, ++ &data, 1); + if (!ret) + ret = data.ret; +- } while (ret == -EAGAIN); ++ ++ if (ret != -EAGAIN) ++ break; ++ ++ cond_resched(); ++ } + + return ret; + } +@@ -3882,7 +3888,9 @@ find_get_context(struct pmu *pmu, struct task_struct *task, + cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); + ctx = &cpuctx->ctx; + get_ctx(ctx); ++ raw_spin_lock_irqsave(&ctx->lock, flags); + ++ctx->pin_count; ++ raw_spin_unlock_irqrestore(&ctx->lock, flags); + + return ctx; + } +@@ -5239,11 +5247,11 @@ static void perf_pmu_output_stop(struct perf_event *event); + static void perf_mmap_close(struct vm_area_struct *vma) + { + struct perf_event *event = vma->vm_file->private_data; +- + struct ring_buffer *rb = ring_buffer_get(event); + struct user_struct *mmap_user = rb->mmap_user; + int mmap_locked = rb->mmap_locked; + unsigned long size = perf_data_size(rb); ++ bool detach_rest = false; + + if (event->pmu->event_unmapped) + event->pmu->event_unmapped(event); +@@ -5274,7 +5282,8 @@ static void perf_mmap_close(struct vm_area_struct *vma) + mutex_unlock(&event->mmap_mutex); + } + +- atomic_dec(&rb->mmap_count); ++ if (atomic_dec_and_test(&rb->mmap_count)) ++ detach_rest = true; + + if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) + goto out_put; +@@ -5283,7 +5292,7 @@ static void perf_mmap_close(struct vm_area_struct *vma) + mutex_unlock(&event->mmap_mutex); + + /* If there's still other mmap()s of this buffer, we're done. 
*/ +- if (atomic_read(&rb->mmap_count)) ++ if (!detach_rest) + goto out_put; + + /* +@@ -6441,7 +6450,7 @@ static void perf_event_addr_filters_exec(struct perf_event *event, void *data) + + raw_spin_lock_irqsave(&ifh->lock, flags); + list_for_each_entry(filter, &ifh->list, entry) { +- if (filter->inode) { ++ if (filter->path.dentry) { + event->addr_filters_offs[count] = 0; + restart++; + } +@@ -6984,7 +6993,11 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter, + struct file *file, unsigned long offset, + unsigned long size) + { +- if (filter->inode != file->f_inode) ++ /* d_inode(NULL) won't be equal to any mapped user-space file */ ++ if (!filter->path.dentry) ++ return false; ++ ++ if (d_inode(filter->path.dentry) != file_inode(file)) + return false; + + if (filter->offset > offset + size) +@@ -8190,8 +8203,7 @@ static void free_filters_list(struct list_head *filters) + struct perf_addr_filter *filter, *iter; + + list_for_each_entry_safe(filter, iter, filters, entry) { +- if (filter->inode) +- iput(filter->inode); ++ path_put(&filter->path); + list_del(&filter->entry); + kfree(filter); + } +@@ -8285,7 +8297,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event) + * Adjust base offset if the filter is associated to a binary + * that needs to be mapped: + */ +- if (filter->inode) ++ if (filter->path.dentry) + event->addr_filters_offs[count] = + perf_addr_filter_apply(filter, mm); + +@@ -8358,7 +8370,6 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr, + { + struct perf_addr_filter *filter = NULL; + char *start, *orig, *filename = NULL; +- struct path path; + substring_t args[MAX_OPT_ARGS]; + int state = IF_STATE_ACTION, token; + unsigned int kernel = 0; +@@ -8421,6 +8432,7 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr, + if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) { + int fpos = filter->range ? 
2 : 1; + ++ kfree(filename); + filename = match_strdup(&args[fpos]); + if (!filename) { + ret = -ENOMEM; +@@ -8449,19 +8461,15 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr, + goto fail; + + /* look up the path and grab its inode */ +- ret = kern_path(filename, LOOKUP_FOLLOW, &path); ++ ret = kern_path(filename, LOOKUP_FOLLOW, ++ &filter->path); + if (ret) +- goto fail_free_name; +- +- filter->inode = igrab(d_inode(path.dentry)); +- path_put(&path); +- kfree(filename); +- filename = NULL; ++ goto fail; + + ret = -EINVAL; +- if (!filter->inode || +- !S_ISREG(filter->inode->i_mode)) +- /* free_filters_list() will iput() */ ++ if (!filter->path.dentry || ++ !S_ISREG(d_inode(filter->path.dentry) ++ ->i_mode)) + goto fail; + } + +@@ -8474,13 +8482,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr, + if (state != IF_STATE_ACTION) + goto fail; + ++ kfree(filename); + kfree(orig); + + return 0; + +-fail_free_name: +- kfree(filename); + fail: ++ kfree(filename); + free_filters_list(filters); + kfree(orig); + +diff --git a/kernel/events/internal.h b/kernel/events/internal.h +index 486fd78eb8d5..c8c1c3db5d06 100644 +--- a/kernel/events/internal.h ++++ b/kernel/events/internal.h +@@ -212,7 +212,7 @@ static inline int get_recursion_context(int *recursion) + rctx = 3; + else if (in_irq()) + rctx = 2; +- else if (in_softirq()) ++ else if (in_serving_softirq()) + rctx = 1; + else + rctx = 0; +diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c +index 1fcaa174ed32..3f1d008a7c28 100644 +--- a/kernel/events/uprobes.c ++++ b/kernel/events/uprobes.c +@@ -1885,7 +1885,7 @@ static void handle_swbp(struct pt_regs *regs) + if (!uprobe) { + if (is_swbp > 0) { + /* No matching uprobe; signal SIGTRAP. */ +- send_sig(SIGTRAP, current, 0); ++ force_sig(SIGTRAP, current); + } else { + /* + * Either we raced with uprobe_unregister() or we can't +diff --git a/kernel/exit.c b/kernel/exit.c +index c7b2cfc2aa13..14d111c3b371 100644 +--- a/kernel/exit.c ++++ b/kernel/exit.c +@@ -467,7 +467,7 @@ static void exit_mm(struct task_struct *tsk) + struct core_state *core_state; + int mm_released; + +- mm_release(tsk, mm); ++ exit_mm_release(tsk, mm); + if (!mm) + return; + sync_mm_rss(mm); +@@ -486,7 +486,10 @@ static void exit_mm(struct task_struct *tsk) + up_read(&mm->mmap_sem); + + self.task = tsk; +- self.next = xchg(&core_state->dumper.next, &self); ++ if (self.task->flags & PF_SIGNALED) ++ self.next = xchg(&core_state->dumper.next, &self); ++ else ++ self.task = NULL; + /* + * Implies mb(), the result of xchg() must be visible + * to core_state->dumper. +@@ -751,8 +754,12 @@ void __noreturn do_exit(long code) + int group_dead; + TASKS_RCU(int tasks_rcu_i); + +- profile_task_exit(tsk); +- kcov_task_exit(tsk); ++ /* ++ * We can get here from a kernel oops, sometimes with preemption off. ++ * Start by checking for critical errors. ++ * Then fix up important state like USER_DS and preemption. ++ * Then do everything else. 
++ */ + + WARN_ON(blk_needs_flush_plug(tsk)); + +@@ -770,6 +777,16 @@ void __noreturn do_exit(long code) + */ + set_fs(USER_DS); + ++ if (unlikely(in_atomic())) { ++ pr_info("note: %s[%d] exited with preempt_count %d\n", ++ current->comm, task_pid_nr(current), ++ preempt_count()); ++ preempt_count_set(PREEMPT_ENABLED); ++ } ++ ++ profile_task_exit(tsk); ++ kcov_task_exit(tsk); ++ + ptrace_event(PTRACE_EVENT_EXIT, code); + + validate_creds_for_do_exit(tsk); +@@ -784,16 +801,7 @@ void __noreturn do_exit(long code) + #else + pr_alert("Fixing recursive fault but reboot is needed!\n"); + #endif +- /* +- * We can do this unlocked here. The futex code uses +- * this flag just to verify whether the pi state +- * cleanup has been done or not. In the worst case it +- * loops once more. We pretend that the cleanup was +- * done as there is no way to return. Either the +- * OWNER_DIED bit is set by now or we push the blocked +- * task into the wait for ever nirwana as well. +- */ +- tsk->flags |= PF_EXITPIDONE; ++ futex_exit_recursive(tsk); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule(); + } +@@ -803,24 +811,6 @@ void __noreturn do_exit(long code) + sched_exit(tsk); + schedtune_exit_task(tsk); + +- /* +- * Ensure that all new tsk->pi_lock acquisitions must observe +- * PF_EXITING. Serializes against futex.c:attach_to_pi_owner(). +- */ +- smp_mb(); +- /* +- * Ensure that we must observe the pi_state in exit_mm() -> +- * mm_release() -> exit_pi_state_list(). +- */ +- raw_spin_unlock_wait(&tsk->pi_lock); +- +- if (unlikely(in_atomic())) { +- pr_info("note: %s[%d] exited with preempt_count %d\n", +- current->comm, task_pid_nr(current), +- preempt_count()); +- preempt_count_set(PREEMPT_ENABLED); +- } +- + /* sync mm's RSS info before statistics gathering */ + if (tsk->mm) + sync_mm_rss(tsk->mm); +@@ -886,12 +876,6 @@ void __noreturn do_exit(long code) + * Make sure we are holding no locks: + */ + debug_check_no_locks_held(); +- /* +- * We can do this unlocked here. The futex code uses this flag +- * just to verify whether the pi state cleanup has been done +- * or not. In the worst case it loops once more. +- */ +- tsk->flags |= PF_EXITPIDONE; + + if (tsk->io_context) + exit_io_context(tsk); +diff --git a/kernel/fork.c b/kernel/fork.c +index 04e239cdcfaa..33b60de1b6f1 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1094,24 +1094,8 @@ static int wait_for_vfork_done(struct task_struct *child, + * restoring the old one. . . 
+ * Eric Biederman 10 January 1998 + */ +-void mm_release(struct task_struct *tsk, struct mm_struct *mm) ++static void mm_release(struct task_struct *tsk, struct mm_struct *mm) + { +- /* Get rid of any futexes when releasing the mm */ +-#ifdef CONFIG_FUTEX +- if (unlikely(tsk->robust_list)) { +- exit_robust_list(tsk); +- tsk->robust_list = NULL; +- } +-#ifdef CONFIG_COMPAT +- if (unlikely(tsk->compat_robust_list)) { +- compat_exit_robust_list(tsk); +- tsk->compat_robust_list = NULL; +- } +-#endif +- if (unlikely(!list_empty(&tsk->pi_state_list))) +- exit_pi_state_list(tsk); +-#endif +- + uprobe_free_utask(tsk); + + /* Get rid of any cached register state */ +@@ -1144,6 +1128,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm) + complete_vfork_done(tsk); + } + ++void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm) ++{ ++ futex_exit_release(tsk); ++ mm_release(tsk, mm); ++} ++ ++void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm) ++{ ++ futex_exec_release(tsk); ++ mm_release(tsk, mm); ++} ++ + /* + * Allocate a new mm structure and copy contents from the + * mm structure of the passed in task structure. +@@ -1856,14 +1852,8 @@ static __latent_entropy struct task_struct *copy_process( + #ifdef CONFIG_BLOCK + p->plug = NULL; + #endif +-#ifdef CONFIG_FUTEX +- p->robust_list = NULL; +-#ifdef CONFIG_COMPAT +- p->compat_robust_list = NULL; +-#endif +- INIT_LIST_HEAD(&p->pi_state_list); +- p->pi_state_cache = NULL; +-#endif ++ futex_init_task(p); ++ + /* + * sigaltstack should be cleared when sharing the same VM + */ +@@ -1884,14 +1874,9 @@ static __latent_entropy struct task_struct *copy_process( + /* ok, now we should be set up.. */ + p->pid = pid_nr(pid); + if (clone_flags & CLONE_THREAD) { +- p->exit_signal = -1; + p->group_leader = current->group_leader; + p->tgid = current->tgid; + } else { +- if (clone_flags & CLONE_PARENT) +- p->exit_signal = current->group_leader->exit_signal; +- else +- p->exit_signal = (clone_flags & CSIGNAL); + p->group_leader = p; + p->tgid = p->pid; + } +@@ -1936,9 +1921,14 @@ static __latent_entropy struct task_struct *copy_process( + if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { + p->real_parent = current->real_parent; + p->parent_exec_id = current->parent_exec_id; ++ if (clone_flags & CLONE_THREAD) ++ p->exit_signal = -1; ++ else ++ p->exit_signal = current->group_leader->exit_signal; + } else { + p->real_parent = current; + p->parent_exec_id = current->self_exec_id; ++ p->exit_signal = (clone_flags & CSIGNAL); + } + + spin_lock(¤t->sighand->siglock); +diff --git a/kernel/futex.c b/kernel/futex.c +index 7123d9cab456..b3823736af6f 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -44,6 +44,7 @@ + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ ++#include + #include + #include + #include +@@ -171,8 +172,10 @@ + * double_lock_hb() and double_unlock_hb(), respectively. 
+ */ + +-#ifndef CONFIG_HAVE_FUTEX_CMPXCHG +-int __read_mostly futex_cmpxchg_enabled; ++#ifdef CONFIG_HAVE_FUTEX_CMPXCHG ++#define futex_cmpxchg_enabled 1 ++#else ++static int __read_mostly futex_cmpxchg_enabled; + #endif + + /* +@@ -336,6 +339,12 @@ static inline bool should_fail_futex(bool fshared) + } + #endif /* CONFIG_FAIL_FUTEX */ + ++#ifdef CONFIG_COMPAT ++static void compat_exit_robust_list(struct task_struct *curr); ++#else ++static inline void compat_exit_robust_list(struct task_struct *curr) { } ++#endif ++ + static inline void futex_get_mm(union futex_key *key) + { + atomic_inc(&key->private.mm->mm_count); +@@ -708,7 +717,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) + + key->both.offset |= FUT_OFF_INODE; /* inode-based key */ + key->shared.i_seq = get_inode_sequence_number(inode); +- key->shared.pgoff = basepage_index(tail); ++ key->shared.pgoff = page_to_pgoff(tail); + rcu_read_unlock(); + } + +@@ -818,7 +827,7 @@ static int refill_pi_state_cache(void) + return 0; + } + +-static struct futex_pi_state * alloc_pi_state(void) ++static struct futex_pi_state *alloc_pi_state(void) + { + struct futex_pi_state *pi_state = current->pi_state_cache; + +@@ -828,11 +837,37 @@ static struct futex_pi_state * alloc_pi_state(void) + return pi_state; + } + ++static void pi_state_update_owner(struct futex_pi_state *pi_state, ++ struct task_struct *new_owner) ++{ ++ struct task_struct *old_owner = pi_state->owner; ++ ++ lockdep_assert_held(&pi_state->pi_mutex.wait_lock); ++ ++ if (old_owner) { ++ raw_spin_lock(&old_owner->pi_lock); ++ WARN_ON(list_empty(&pi_state->list)); ++ list_del_init(&pi_state->list); ++ raw_spin_unlock(&old_owner->pi_lock); ++ } ++ ++ if (new_owner) { ++ raw_spin_lock(&new_owner->pi_lock); ++ WARN_ON(!list_empty(&pi_state->list)); ++ list_add(&pi_state->list, &new_owner->pi_state_list); ++ pi_state->owner = new_owner; ++ raw_spin_unlock(&new_owner->pi_lock); ++ } ++} ++ ++static void get_pi_state(struct futex_pi_state *pi_state) ++{ ++ WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount)); ++} ++ + /* + * Drops a reference to the pi_state object and frees or caches it + * when the last reference is gone. +- * +- * Must be called with the hb lock held. + */ + static void put_pi_state(struct futex_pi_state *pi_state) + { +@@ -847,16 +882,17 @@ static void put_pi_state(struct futex_pi_state *pi_state) + * and has cleaned up the pi_state already + */ + if (pi_state->owner) { +- raw_spin_lock_irq(&pi_state->owner->pi_lock); +- list_del_init(&pi_state->list); +- raw_spin_unlock_irq(&pi_state->owner->pi_lock); ++ unsigned long flags; + +- rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner); ++ raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags); ++ pi_state_update_owner(pi_state, NULL); ++ rt_mutex_proxy_unlock(&pi_state->pi_mutex); ++ raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags); + } + +- if (current->pi_state_cache) ++ if (current->pi_state_cache) { + kfree(pi_state); +- else { ++ } else { + /* + * pi_state->list is already empty. + * clear pi_state->owner. +@@ -872,7 +908,7 @@ static void put_pi_state(struct futex_pi_state *pi_state) + * Look up the task based on what TID userspace gave us. + * We dont trust it. + */ +-static struct task_struct * futex_find_get_task(pid_t pid) ++static struct task_struct *futex_find_get_task(pid_t pid) + { + struct task_struct *p; + +@@ -891,7 +927,7 @@ static struct task_struct * futex_find_get_task(pid_t pid) + * Kernel cleans up PI-state, but userspace is likely hosed. 
+ * (Robust-futex cleanup is separate and might save the day for userspace.) + */ +-void exit_pi_state_list(struct task_struct *curr) ++static void exit_pi_state_list(struct task_struct *curr) + { + struct list_head *next, *head = &curr->pi_state_list; + struct futex_pi_state *pi_state; +@@ -907,22 +943,41 @@ void exit_pi_state_list(struct task_struct *curr) + */ + raw_spin_lock_irq(&curr->pi_lock); + while (!list_empty(head)) { +- + next = head->next; + pi_state = list_entry(next, struct futex_pi_state, list); + key = pi_state->key; + hb = hash_futex(&key); ++ ++ /* ++ * We can race against put_pi_state() removing itself from the ++ * list (a waiter going away). put_pi_state() will first ++ * decrement the reference count and then modify the list, so ++ * its possible to see the list entry but fail this reference ++ * acquire. ++ * ++ * In that case; drop the locks to let put_pi_state() make ++ * progress and retry the loop. ++ */ ++ if (!atomic_inc_not_zero(&pi_state->refcount)) { ++ raw_spin_unlock_irq(&curr->pi_lock); ++ cpu_relax(); ++ raw_spin_lock_irq(&curr->pi_lock); ++ continue; ++ } + raw_spin_unlock_irq(&curr->pi_lock); + + spin_lock(&hb->lock); +- +- raw_spin_lock_irq(&curr->pi_lock); ++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); ++ raw_spin_lock(&curr->pi_lock); + /* + * We dropped the pi-lock, so re-check whether this + * task still owns the PI-state: + */ + if (head->next != next) { ++ /* retain curr->pi_lock for the loop invariant */ ++ raw_spin_unlock(&pi_state->pi_mutex.wait_lock); + spin_unlock(&hb->lock); ++ put_pi_state(pi_state); + continue; + } + +@@ -930,12 +985,14 @@ void exit_pi_state_list(struct task_struct *curr) + WARN_ON(list_empty(&pi_state->list)); + list_del_init(&pi_state->list); + pi_state->owner = NULL; +- raw_spin_unlock_irq(&curr->pi_lock); +- +- rt_mutex_unlock(&pi_state->pi_mutex); + ++ raw_spin_unlock(&curr->pi_lock); ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); + spin_unlock(&hb->lock); + ++ rt_mutex_futex_unlock(&pi_state->pi_mutex); ++ put_pi_state(pi_state); ++ + raw_spin_lock_irq(&curr->pi_lock); + } + raw_spin_unlock_irq(&curr->pi_lock); +@@ -988,7 +1045,41 @@ void exit_pi_state_list(struct task_struct *curr) + * FUTEX_OWNER_DIED bit. See [4] + * + * [10] There is no transient state which leaves owner and user space +- * TID out of sync. ++ * TID out of sync. Except one error case where the kernel is denied ++ * write access to the user address, see fixup_pi_state_owner(). ++ * ++ * ++ * Serialization and lifetime rules: ++ * ++ * hb->lock: ++ * ++ * hb -> futex_q, relation ++ * futex_q -> pi_state, relation ++ * ++ * (cannot be raw because hb can contain arbitrary amount ++ * of futex_q's) ++ * ++ * pi_mutex->wait_lock: ++ * ++ * {uval, pi_state} ++ * ++ * (and pi_mutex 'obviously') ++ * ++ * p->pi_lock: ++ * ++ * p->pi_state_list -> pi_state->list, relation ++ * ++ * pi_state->refcount: ++ * ++ * pi_state lifetime ++ * ++ * ++ * Lock order: ++ * ++ * hb->lock ++ * pi_mutex->wait_lock ++ * p->pi_lock ++ * + */ + + /* +@@ -996,10 +1087,12 @@ void exit_pi_state_list(struct task_struct *curr) + * the pi_state against the user space value. If correct, attach to + * it. 
+ */ +-static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state, ++static int attach_to_pi_state(u32 __user *uaddr, u32 uval, ++ struct futex_pi_state *pi_state, + struct futex_pi_state **ps) + { + pid_t pid = uval & FUTEX_TID_MASK; ++ int ret, uval2; + + /* + * Userspace might have messed up non-PI and PI futexes [3] +@@ -1007,8 +1100,38 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state, + if (unlikely(!pi_state)) + return -EINVAL; + ++ /* ++ * We get here with hb->lock held, and having found a ++ * futex_top_waiter(). This means that futex_lock_pi() of said futex_q ++ * has dropped the hb->lock in between queue_me() and unqueue_me_pi(), ++ * which in turn means that futex_lock_pi() still has a reference on ++ * our pi_state. ++ * ++ * The waiter holding a reference on @pi_state also protects against ++ * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi() ++ * and futex_wait_requeue_pi() as it cannot go to 0 and consequently ++ * free pi_state before we can take a reference ourselves. ++ */ + WARN_ON(!atomic_read(&pi_state->refcount)); + ++ /* ++ * Now that we have a pi_state, we can acquire wait_lock ++ * and do the state validation. ++ */ ++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); ++ ++ /* ++ * Since {uval, pi_state} is serialized by wait_lock, and our current ++ * uval was read without holding it, it can have changed. Verify it ++ * still is what we expect it to be, otherwise retry the entire ++ * operation. ++ */ ++ if (get_futex_value_locked(&uval2, uaddr)) ++ goto out_efault; ++ ++ if (uval != uval2) ++ goto out_eagain; ++ + /* + * Handle the owner died case: + */ +@@ -1024,11 +1147,11 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state, + * is not 0. Inconsistent state. [5] + */ + if (pid) +- return -EINVAL; ++ goto out_einval; + /* + * Take a ref on the state and return success. [4] + */ +- goto out_state; ++ goto out_attach; + } + + /* +@@ -1040,14 +1163,14 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state, + * Take a ref on the state and return success. [6] + */ + if (!pid) +- goto out_state; ++ goto out_attach; + } else { + /* + * If the owner died bit is not set, then the pi_state + * must have an owner. [7] + */ + if (!pi_state->owner) +- return -EINVAL; ++ goto out_einval; + } + + /* +@@ -1056,19 +1179,124 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state, + * user space TID. [9/10] + */ + if (pid != task_pid_vnr(pi_state->owner)) +- return -EINVAL; +-out_state: +- atomic_inc(&pi_state->refcount); ++ goto out_einval; ++ ++out_attach: ++ get_pi_state(pi_state); ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); + *ps = pi_state; + return 0; ++ ++out_einval: ++ ret = -EINVAL; ++ goto out_error; ++ ++out_eagain: ++ ret = -EAGAIN; ++ goto out_error; ++ ++out_efault: ++ ret = -EFAULT; ++ goto out_error; ++ ++out_error: ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); ++ return ret; ++} ++ ++/** ++ * wait_for_owner_exiting - Block until the owner has exited ++ * @exiting: Pointer to the exiting task ++ * ++ * Caller must hold a refcount on @exiting. ++ */ ++static void wait_for_owner_exiting(int ret, struct task_struct *exiting) ++{ ++ if (ret != -EBUSY) { ++ WARN_ON_ONCE(exiting); ++ return; ++ } ++ ++ if (WARN_ON_ONCE(ret == -EBUSY && !exiting)) ++ return; ++ ++ mutex_lock(&exiting->futex_exit_mutex); ++ /* ++ * No point in doing state checking here. 
If the waiter got here ++ * while the task was in exec()->exec_futex_release() then it can ++ * have any FUTEX_STATE_* value when the waiter has acquired the ++ * mutex. OK, if running, EXITING or DEAD if it reached exit() ++ * already. Highly unlikely and not a problem. Just one more round ++ * through the futex maze. ++ */ ++ mutex_unlock(&exiting->futex_exit_mutex); ++ ++ put_task_struct(exiting); ++} ++ ++static int handle_exit_race(u32 __user *uaddr, u32 uval, ++ struct task_struct *tsk) ++{ ++ u32 uval2; ++ ++ /* ++ * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the ++ * caller that the alleged owner is busy. ++ */ ++ if (tsk && tsk->futex_state != FUTEX_STATE_DEAD) ++ return -EBUSY; ++ ++ /* ++ * Reread the user space value to handle the following situation: ++ * ++ * CPU0 CPU1 ++ * ++ * sys_exit() sys_futex() ++ * do_exit() futex_lock_pi() ++ * futex_lock_pi_atomic() ++ * exit_signals(tsk) No waiters: ++ * tsk->flags |= PF_EXITING; *uaddr == 0x00000PID ++ * mm_release(tsk) Set waiter bit ++ * exit_robust_list(tsk) { *uaddr = 0x80000PID; ++ * Set owner died attach_to_pi_owner() { ++ * *uaddr = 0xC0000000; tsk = get_task(PID); ++ * } if (!tsk->flags & PF_EXITING) { ++ * ... attach(); ++ * tsk->futex_state = } else { ++ * FUTEX_STATE_DEAD; if (tsk->futex_state != ++ * FUTEX_STATE_DEAD) ++ * return -EAGAIN; ++ * return -ESRCH; <--- FAIL ++ * } ++ * ++ * Returning ESRCH unconditionally is wrong here because the ++ * user space value has been changed by the exiting task. ++ * ++ * The same logic applies to the case where the exiting task is ++ * already gone. ++ */ ++ if (get_futex_value_locked(&uval2, uaddr)) ++ return -EFAULT; ++ ++ /* If the user space value has changed, try again. */ ++ if (uval2 != uval) ++ return -EAGAIN; ++ ++ /* ++ * The exiting task did not have a robust list, the robust list was ++ * corrupted or the user space value in *uaddr is simply bogus. ++ * Give up and tell user space. ++ */ ++ return -ESRCH; + } + + /* + * Lookup the task for the TID provided from user space and attach to + * it after doing proper sanity checks. + */ +-static int attach_to_pi_owner(u32 uval, union futex_key *key, +- struct futex_pi_state **ps) ++static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key, ++ struct futex_pi_state **ps, ++ struct task_struct **exiting) + { + pid_t pid = uval & FUTEX_TID_MASK; + struct futex_pi_state *pi_state; +@@ -1077,12 +1305,15 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key, + /* + * We are the first waiter - try to look up the real owner and attach + * the new pi_state to it, but bail out when TID = 0 [1] ++ * ++ * The !pid check is paranoid. None of the call sites should end up ++ * with pid == 0, but better safe than sorry. Let the caller retry + */ + if (!pid) +- return -ESRCH; ++ return -EAGAIN; + p = futex_find_get_task(pid); + if (!p) +- return -ESRCH; ++ return handle_exit_race(uaddr, uval, NULL); + + if (unlikely(p->flags & PF_KTHREAD)) { + put_task_struct(p); +@@ -1090,27 +1321,41 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key, + } + + /* +- * We need to look at the task state flags to figure out, +- * whether the task is exiting. To protect against the do_exit +- * change of the task flags, we do this protected by +- * p->pi_lock: ++ * We need to look at the task state to figure out, whether the ++ * task is exiting. 
To protect against the change of the task state ++ * in futex_exit_release(), we do this protected by p->pi_lock: + */ + raw_spin_lock_irq(&p->pi_lock); +- if (unlikely(p->flags & PF_EXITING)) { ++ if (unlikely(p->futex_state != FUTEX_STATE_OK)) { + /* +- * The task is on the way out. When PF_EXITPIDONE is +- * set, we know that the task has finished the +- * cleanup: ++ * The task is on the way out. When the futex state is ++ * FUTEX_STATE_DEAD, we know that the task has finished ++ * the cleanup: + */ +- int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN; ++ int ret = handle_exit_race(uaddr, uval, p); + + raw_spin_unlock_irq(&p->pi_lock); +- put_task_struct(p); ++ /* ++ * If the owner task is between FUTEX_STATE_EXITING and ++ * FUTEX_STATE_DEAD then store the task pointer and keep ++ * the reference on the task struct. The calling code will ++ * drop all locks, wait for the task to reach ++ * FUTEX_STATE_DEAD and then drop the refcount. This is ++ * required to prevent a live lock when the current task ++ * preempted the exiting task between the two states. ++ */ ++ if (ret == -EBUSY) ++ *exiting = p; ++ else ++ put_task_struct(p); + return ret; + } + + /* + * No existing pi state. First waiter. [2] ++ * ++ * This creates pi_state, we have hb->lock held, this means nothing can ++ * observe this state, wait_lock is irrelevant. + */ + pi_state = alloc_pi_state(); + +@@ -1125,6 +1370,10 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key, + + WARN_ON(!list_empty(&pi_state->list)); + list_add(&pi_state->list, &p->pi_state_list); ++ /* ++ * Assignment without holding pi_state->pi_mutex.wait_lock is safe ++ * because there is no concurrency as the object is not published yet. ++ */ + pi_state->owner = p; + raw_spin_unlock_irq(&p->pi_lock); + +@@ -1135,36 +1384,40 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key, + return 0; + } + +-static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, +- union futex_key *key, struct futex_pi_state **ps) ++static int lookup_pi_state(u32 __user *uaddr, u32 uval, ++ struct futex_hash_bucket *hb, ++ union futex_key *key, struct futex_pi_state **ps, ++ struct task_struct **exiting) + { +- struct futex_q *match = futex_top_waiter(hb, key); ++ struct futex_q *top_waiter = futex_top_waiter(hb, key); + + /* + * If there is a waiter on that futex, validate it and + * attach to the pi_state when the validation succeeds. + */ +- if (match) +- return attach_to_pi_state(uval, match->pi_state, ps); ++ if (top_waiter) ++ return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps); + + /* + * We are the first waiter - try to look up the owner based on + * @uval and attach to it. + */ +- return attach_to_pi_owner(uval, key, ps); ++ return attach_to_pi_owner(uaddr, uval, key, ps, exiting); + } + + static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) + { ++ int err; + u32 uninitialized_var(curval); + + if (unlikely(should_fail_futex(true))) + return -EFAULT; + +- if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))) +- return -EFAULT; ++ err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval); ++ if (unlikely(err)) ++ return err; + +- /*If user space value changed, let the caller retry */ ++ /* If user space value changed, let the caller retry */ + return curval != uval ? -EAGAIN : 0; + } + +@@ -1177,6 +1430,8 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) + * lookup + * @task: the task to perform the atomic lock work for. 
This will + * be "current" except in the case of requeue pi. ++ * @exiting: Pointer to store the task pointer of the owner task ++ * which is in the middle of exiting + * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) + * + * Return: +@@ -1185,14 +1440,20 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) + * <0 - error + * + * The hb->lock and futex_key refs shall be held by the caller. ++ * ++ * @exiting is only set when the return value is -EBUSY. If so, this holds ++ * a refcount on the exiting task on return and the caller needs to drop it ++ * after waiting for the exit to complete. + */ + static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, + union futex_key *key, + struct futex_pi_state **ps, +- struct task_struct *task, int set_waiters) ++ struct task_struct *task, ++ struct task_struct **exiting, ++ int set_waiters) + { + u32 uval, newval, vpid = task_pid_vnr(task); +- struct futex_q *match; ++ struct futex_q *top_waiter; + int ret; + + /* +@@ -1218,9 +1479,9 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, + * Lookup existing state first. If it exists, try to attach to + * its pi_state. + */ +- match = futex_top_waiter(hb, key); +- if (match) +- return attach_to_pi_state(uval, match->pi_state, ps); ++ top_waiter = futex_top_waiter(hb, key); ++ if (top_waiter) ++ return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps); + + /* + * No waiter and user TID is 0. We are here because the +@@ -1259,7 +1520,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, + * attach to the owner. If that fails, no harm done, we only + * set the FUTEX_WAITERS bit in the user space variable. + */ +- return attach_to_pi_owner(uval, key, ps); ++ return attach_to_pi_owner(uaddr, newval, key, ps, exiting); + } + + /** +@@ -1294,11 +1555,7 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q) + if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n")) + return; + +- /* +- * Queue the task for later wakeup for after we've released +- * the hb->lock. wake_q_add() grabs reference to p. +- */ +- wake_q_add(wake_q, p); ++ get_task_struct(p); + __unqueue_futex(q); + /* + * The waiting task can free the futex_q as soon as +@@ -1306,54 +1563,55 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q) + * memory barrier is required here to prevent the following + * store to lock_ptr from getting ahead of the plist_del. + */ +- smp_wmb(); +- q->lock_ptr = NULL; ++ smp_store_release(&q->lock_ptr, NULL); ++ ++ /* ++ * Queue the task for later wakeup for after we've released ++ * the hb->lock. wake_q_add() grabs reference to p. ++ */ ++ wake_q_add(wake_q, p); ++ put_task_struct(p); + } + +-static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this, +- struct futex_hash_bucket *hb) ++/* ++ * Caller must hold a reference on @pi_state. ++ */ ++static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state) + { +- struct task_struct *new_owner; +- struct futex_pi_state *pi_state = this->pi_state; + u32 uninitialized_var(curval), newval; ++ struct task_struct *new_owner; ++ bool deboost = false; + WAKE_Q(wake_q); +- bool deboost; + int ret = 0; + +- if (!pi_state) +- return -EINVAL; +- +- /* +- * If current does not own the pi_state then the futex is +- * inconsistent and user space fiddled with the futex value. 
+- */ +- if (pi_state->owner != current) +- return -EINVAL; +- +- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); + new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); ++ if (WARN_ON_ONCE(!new_owner)) { ++ /* ++ * As per the comment in futex_unlock_pi() this should not happen. ++ * ++ * When this happens, give up our locks and try again, giving ++ * the futex_lock_pi() instance time to complete, either by ++ * waiting on the rtmutex or removing itself from the futex ++ * queue. ++ */ ++ ret = -EAGAIN; ++ goto out_unlock; ++ } + + /* +- * It is possible that the next waiter (the one that brought +- * this owner to the kernel) timed out and is no longer +- * waiting on the lock. +- */ +- if (!new_owner) +- new_owner = this->task; +- +- /* +- * We pass it to the next owner. The WAITERS bit is always +- * kept enabled while there is PI state around. We cleanup the +- * owner died bit, because we are the owner. ++ * We pass it to the next owner. The WAITERS bit is always kept ++ * enabled while there is PI state around. We cleanup the owner ++ * died bit, because we are the owner. + */ + newval = FUTEX_WAITERS | task_pid_vnr(new_owner); + +- if (unlikely(should_fail_futex(true))) ++ if (unlikely(should_fail_futex(true))) { + ret = -EFAULT; ++ goto out_unlock; ++ } + +- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) { +- ret = -EFAULT; +- } else if (curval != uval) { ++ ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval); ++ if (!ret && (curval != uval)) { + /* + * If a unconditional UNLOCK_PI operation (user space did not + * try the TID->0 transition) raced with a waiter setting the +@@ -1365,38 +1623,26 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this, + else + ret = -EINVAL; + } +- if (ret) { +- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); +- return ret; +- } +- +- raw_spin_lock(&pi_state->owner->pi_lock); +- WARN_ON(list_empty(&pi_state->list)); +- list_del_init(&pi_state->list); +- raw_spin_unlock(&pi_state->owner->pi_lock); + +- raw_spin_lock(&new_owner->pi_lock); +- WARN_ON(!list_empty(&pi_state->list)); +- list_add(&pi_state->list, &new_owner->pi_state_list); +- pi_state->owner = new_owner; +- raw_spin_unlock(&new_owner->pi_lock); ++ if (!ret) { ++ /* ++ * This is a point of no return; once we modified the uval ++ * there is no going back and subsequent operations must ++ * not fail. ++ */ ++ pi_state_update_owner(pi_state, new_owner); ++ deboost = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); ++ } + ++out_unlock: + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); + +- deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); +- +- /* +- * First unlock HB so the waiter does not spin on it once he got woken +- * up. Second wake up the waiter before the priority is adjusted. If we +- * deboost first (and lose our higher priority), then the task might get +- * scheduled away before the wake up can take place. 
+- */ +- spin_unlock(&hb->lock); +- wake_up_q(&wake_q); +- if (deboost) ++ if (deboost) { ++ wake_up_q(&wake_q); + rt_mutex_adjust_prio(current); ++ } + +- return 0; ++ return ret; + } + + /* +@@ -1551,32 +1797,32 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, + double_lock_hb(hb1, hb2); + op_ret = futex_atomic_op_inuser(op, uaddr2); + if (unlikely(op_ret < 0)) { +- + double_unlock_hb(hb1, hb2); + +-#ifndef CONFIG_MMU +- /* +- * we don't get EFAULT from MMU faults if we don't have an MMU, +- * but we might get them from range checking +- */ +- ret = op_ret; +- goto out_put_keys; +-#endif +- +- if (unlikely(op_ret != -EFAULT)) { ++ if (!IS_ENABLED(CONFIG_MMU) || ++ unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) { ++ /* ++ * we don't get EFAULT from MMU faults if we don't have ++ * an MMU, but we might get them from range checking ++ */ + ret = op_ret; + goto out_put_keys; + } + +- ret = fault_in_user_writeable(uaddr2); +- if (ret) +- goto out_put_keys; ++ if (op_ret == -EFAULT) { ++ ret = fault_in_user_writeable(uaddr2); ++ if (ret) ++ goto out_put_keys; ++ } + +- if (!(flags & FLAGS_SHARED)) ++ if (!(flags & FLAGS_SHARED)) { ++ cond_resched(); + goto retry_private; ++ } + + put_futex_key(&key2); + put_futex_key(&key1); ++ cond_resched(); + goto retry; + } + +@@ -1685,6 +1931,8 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, + * @key1: the from futex key + * @key2: the to futex key + * @ps: address to store the pi_state pointer ++ * @exiting: Pointer to store the task pointer of the owner task ++ * which is in the middle of exiting + * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) + * + * Try and get the lock on behalf of the top waiter if we can do it atomically. +@@ -1692,16 +1940,20 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, + * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit. + * hb1 and hb2 must be held by the caller. + * ++ * @exiting is only set when the return value is -EBUSY. If so, this holds ++ * a refcount on the exiting task on return and the caller needs to drop it ++ * after waiting for the exit to complete. ++ * + * Return: + * 0 - failed to acquire the lock atomically; + * >0 - acquired the lock, return value is vpid of the top_waiter + * <0 - error + */ +-static int futex_proxy_trylock_atomic(u32 __user *pifutex, +- struct futex_hash_bucket *hb1, +- struct futex_hash_bucket *hb2, +- union futex_key *key1, union futex_key *key2, +- struct futex_pi_state **ps, int set_waiters) ++static int ++futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, ++ struct futex_hash_bucket *hb2, union futex_key *key1, ++ union futex_key *key2, struct futex_pi_state **ps, ++ struct task_struct **exiting, int set_waiters) + { + struct futex_q *top_waiter = NULL; + u32 curval; +@@ -1738,7 +1990,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex, + */ + vpid = task_pid_vnr(top_waiter->task); + ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, +- set_waiters); ++ exiting, set_waiters); + if (ret == 1) { + requeue_pi_wake_futex(top_waiter, key2, hb2); + return vpid; +@@ -1858,6 +2110,8 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, + } + + if (requeue_pi && (task_count - nr_wake < nr_requeue)) { ++ struct task_struct *exiting = NULL; ++ + /* + * Attempt to acquire uaddr2 and wake the top waiter. 
If we + * intend to requeue waiters, force setting the FUTEX_WAITERS +@@ -1865,7 +2119,8 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, + * faults rather in the requeue loop below. + */ + ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1, +- &key2, &pi_state, nr_requeue); ++ &key2, &pi_state, ++ &exiting, nr_requeue); + + /* + * At this point the top_waiter has either taken uaddr2 or is +@@ -1892,7 +2147,8 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, + * If that call succeeds then we have pi_state and an + * initial refcount on it. + */ +- ret = lookup_pi_state(ret, hb2, &key2, &pi_state); ++ ret = lookup_pi_state(uaddr2, ret, hb2, &key2, ++ &pi_state, &exiting); + } + + switch (ret) { +@@ -1910,17 +2166,24 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, + if (!ret) + goto retry; + goto out; ++ case -EBUSY: + case -EAGAIN: + /* + * Two reasons for this: +- * - Owner is exiting and we just wait for the ++ * - EBUSY: Owner is exiting and we just wait for the + * exit to complete. +- * - The user space value changed. ++ * - EAGAIN: The user space value changed. + */ + double_unlock_hb(hb1, hb2); + hb_waiters_dec(hb2); + put_futex_key(&key2); + put_futex_key(&key1); ++ /* ++ * Handle the case where the owner is in the middle of ++ * exiting. Wait for the exit to complete otherwise ++ * this task might loop forever, aka. live lock. ++ */ ++ wait_for_owner_exiting(ret, exiting); + cond_resched(); + goto retry; + default: +@@ -1975,7 +2238,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, + * refcount on the pi_state and store the pointer in + * the futex_q object of the waiter. + */ +- atomic_inc(&pi_state->refcount); ++ get_pi_state(pi_state); + this->pi_state = pi_state; + ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex, + this->rt_waiter, +@@ -2075,20 +2338,7 @@ queue_unlock(struct futex_hash_bucket *hb) + hb_waiters_dec(hb); + } + +-/** +- * queue_me() - Enqueue the futex_q on the futex_hash_bucket +- * @q: The futex_q to enqueue +- * @hb: The destination hash bucket +- * +- * The hb->lock must be held by the caller, and is released here. A call to +- * queue_me() is typically paired with exactly one call to unqueue_me(). The +- * exceptions involve the PI related operations, which may use unqueue_me_pi() +- * or nothing if the unqueue is done as part of the wake process and the unqueue +- * state is implicit in the state of woken task (see futex_wait_requeue_pi() for +- * an example). +- */ +-static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) +- __releases(&hb->lock) ++static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb) + { + int prio; + +@@ -2105,6 +2355,24 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) + plist_node_init(&q->list, prio); + plist_add(&q->list, &hb->chain); + q->task = current; ++} ++ ++/** ++ * queue_me() - Enqueue the futex_q on the futex_hash_bucket ++ * @q: The futex_q to enqueue ++ * @hb: The destination hash bucket ++ * ++ * The hb->lock must be held by the caller, and is released here. A call to ++ * queue_me() is typically paired with exactly one call to unqueue_me(). The ++ * exceptions involve the PI related operations, which may use unqueue_me_pi() ++ * or nothing if the unqueue is done as part of the wake process and the unqueue ++ * state is implicit in the state of woken task (see futex_wait_requeue_pi() for ++ * an example). 
++ */ ++static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) ++ __releases(&hb->lock) ++{ ++ __queue_me(q, hb); + spin_unlock(&hb->lock); + } + +@@ -2180,102 +2448,190 @@ static void unqueue_me_pi(struct futex_q *q) + spin_unlock(q->lock_ptr); + } + +-/* +- * Fixup the pi_state owner with the new owner. +- * +- * Must be called with hash bucket lock held and mm->sem held for non +- * private futexes. +- */ +-static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, +- struct task_struct *newowner) ++static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, ++ struct task_struct *argowner) + { +- u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; + struct futex_pi_state *pi_state = q->pi_state; +- struct task_struct *oldowner = pi_state->owner; +- u32 uval, uninitialized_var(curval), newval; +- int ret; ++ struct task_struct *oldowner, *newowner; ++ u32 uval, curval, newval, newtid; ++ int err = 0; + +- /* Owner died? */ +- if (!pi_state->owner) +- newtid |= FUTEX_OWNER_DIED; ++ oldowner = pi_state->owner; + + /* +- * We are here either because we stole the rtmutex from the +- * previous highest priority waiter or we are the highest priority +- * waiter but failed to get the rtmutex the first time. +- * We have to replace the newowner TID in the user space variable. ++ * We are here because either: ++ * ++ * - we stole the lock and pi_state->owner needs updating to reflect ++ * that (@argowner == current), ++ * ++ * or: ++ * ++ * - someone stole our lock and we need to fix things to point to the ++ * new owner (@argowner == NULL). ++ * ++ * Either way, we have to replace the TID in the user space variable. + * This must be atomic as we have to preserve the owner died bit here. + * + * Note: We write the user space value _before_ changing the pi_state + * because we can fault here. Imagine swapped out pages or a fork + * that marked all the anonymous memory readonly for cow. + * +- * Modifying pi_state _before_ the user space value would +- * leave the pi_state in an inconsistent state when we fault +- * here, because we need to drop the hash bucket lock to +- * handle the fault. This might be observed in the PID check +- * in lookup_pi_state. ++ * Modifying pi_state _before_ the user space value would leave the ++ * pi_state in an inconsistent state when we fault here, because we ++ * need to drop the locks to handle the fault. This might be observed ++ * in the PID check in lookup_pi_state. + */ + retry: +- if (get_futex_value_locked(&uval, uaddr)) +- goto handle_fault; ++ if (!argowner) { ++ if (oldowner != current) { ++ /* ++ * We raced against a concurrent self; things are ++ * already fixed up. Nothing to do. ++ */ ++ return 0; ++ } + +- while (1) { +- newval = (uval & FUTEX_OWNER_DIED) | newtid; ++ if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) { ++ /* We got the lock. pi_state is correct. Tell caller. */ ++ return 1; ++ } + +- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) +- goto handle_fault; +- if (curval == uval) +- break; +- uval = curval; ++ /* ++ * The trylock just failed, so either there is an owner or ++ * there is a higher priority waiter than this one. ++ */ ++ newowner = rt_mutex_owner(&pi_state->pi_mutex); ++ /* ++ * If the higher priority waiter has not yet taken over the ++ * rtmutex then newowner is NULL. We can't return here with ++ * that state because it's inconsistent vs. the user space ++ * state. So drop the locks and try again. 
It's a valid ++ * situation and not any different from the other retry ++ * conditions. ++ */ ++ if (unlikely(!newowner)) { ++ err = -EAGAIN; ++ goto handle_err; ++ } ++ } else { ++ WARN_ON_ONCE(argowner != current); ++ if (oldowner == current) { ++ /* ++ * We raced against a concurrent self; things are ++ * already fixed up. Nothing to do. ++ */ ++ return 1; ++ } ++ newowner = argowner; + } + +- /* ++ newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; ++ /* Owner died? */ ++ if (!pi_state->owner) ++ newtid |= FUTEX_OWNER_DIED; ++ ++ err = get_futex_value_locked(&uval, uaddr); ++ if (err) ++ goto handle_err; ++ ++ for (;;) { ++ newval = (uval & FUTEX_OWNER_DIED) | newtid; ++ ++ err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval); ++ if (err) ++ goto handle_err; ++ ++ if (curval == uval) ++ break; ++ uval = curval; ++ } ++ ++ /* + * We fixed up user space. Now we need to fix the pi_state + * itself. + */ +- if (pi_state->owner != NULL) { +- raw_spin_lock_irq(&pi_state->owner->pi_lock); +- WARN_ON(list_empty(&pi_state->list)); +- list_del_init(&pi_state->list); +- raw_spin_unlock_irq(&pi_state->owner->pi_lock); +- } ++ pi_state_update_owner(pi_state, newowner); + +- pi_state->owner = newowner; +- +- raw_spin_lock_irq(&newowner->pi_lock); +- WARN_ON(!list_empty(&pi_state->list)); +- list_add(&pi_state->list, &newowner->pi_state_list); +- raw_spin_unlock_irq(&newowner->pi_lock); +- return 0; ++ return argowner == current; + + /* +- * To handle the page fault we need to drop the hash bucket +- * lock here. That gives the other task (either the highest priority +- * waiter itself or the task which stole the rtmutex) the +- * chance to try the fixup of the pi_state. So once we are +- * back from handling the fault we need to check the pi_state +- * after reacquiring the hash bucket lock and before trying to +- * do another fixup. When the fixup has been done already we +- * simply return. ++ * In order to reschedule or handle a page fault, we need to drop the ++ * locks here. In the case of a fault, this gives the other task ++ * (either the highest priority waiter itself or the task which stole ++ * the rtmutex) the chance to try the fixup of the pi_state. So once we ++ * are back from handling the fault we need to check the pi_state after ++ * reacquiring the locks and before trying to do another fixup. When ++ * the fixup has been done already we simply return. ++ * ++ * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely ++ * drop hb->lock since the caller owns the hb -> futex_q relation. ++ * Dropping the pi_mutex->wait_lock requires the state revalidate. + */ +-handle_fault: ++handle_err: ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); + spin_unlock(q->lock_ptr); + +- ret = fault_in_user_writeable(uaddr); ++ switch (err) { ++ case -EFAULT: ++ err = fault_in_user_writeable(uaddr); ++ break; ++ ++ case -EAGAIN: ++ cond_resched(); ++ err = 0; ++ break; ++ ++ default: ++ WARN_ON_ONCE(1); ++ break; ++ } + + spin_lock(q->lock_ptr); ++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); + + /* + * Check if someone else fixed it for us: + */ + if (pi_state->owner != oldowner) +- return 0; ++ return argowner == current; + +- if (ret) +- return ret; ++ /* Retry if err was -EAGAIN or the fault in succeeded */ ++ if (!err) ++ goto retry; + +- goto retry; ++ /* ++ * fault_in_user_writeable() failed so user state is immutable. 
At ++ * best we can make the kernel state consistent but user state will ++ * be most likely hosed and any subsequent unlock operation will be ++ * rejected due to PI futex rule [10]. ++ * ++ * Ensure that the rtmutex owner is also the pi_state owner despite ++ * the user space value claiming something different. There is no ++ * point in unlocking the rtmutex if current is the owner as it ++ * would need to wait until the next waiter has taken the rtmutex ++ * to guarantee consistent state. Keep it simple. Userspace asked ++ * for this wreckaged state. ++ * ++ * The rtmutex has an owner - either current or some other ++ * task. See the EAGAIN loop above. ++ */ ++ pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex)); ++ ++ return err; ++} ++ ++static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, ++ struct task_struct *argowner) ++{ ++ struct futex_pi_state *pi_state = q->pi_state; ++ int ret; ++ ++ lockdep_assert_held(q->lock_ptr); ++ ++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); ++ ret = __fixup_pi_state_owner(uaddr, q, argowner); ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); ++ return ret; + } + + static long futex_wait_restart(struct restart_block *restart); +@@ -2297,60 +2653,39 @@ static long futex_wait_restart(struct restart_block *restart); + */ + static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) + { +- struct task_struct *owner; +- int ret = 0; +- + if (locked) { + /* + * Got the lock. We might not be the anticipated owner if we + * did a lock-steal - fix up the PI-state in that case: ++ * ++ * Speculative pi_state->owner read (we don't hold wait_lock); ++ * since we own the lock pi_state->owner == current is the ++ * stable state, anything else needs more attention. + */ + if (q->pi_state->owner != current) +- ret = fixup_pi_state_owner(uaddr, q, current); +- goto out; ++ return fixup_pi_state_owner(uaddr, q, current); ++ return 1; + } + + /* +- * Catch the rare case, where the lock was released when we were on the +- * way back before we locked the hash bucket. ++ * If we didn't get the lock; check if anybody stole it from us. In ++ * that case, we need to fix up the uval to point to them instead of ++ * us, otherwise bad things happen. [10] ++ * ++ * Another speculative read; pi_state->owner == current is unstable ++ * but needs our attention. + */ +- if (q->pi_state->owner == current) { +- /* +- * Try to get the rt_mutex now. This might fail as some other +- * task acquired the rt_mutex after we removed ourself from the +- * rt_mutex waiters list. +- */ +- if (rt_mutex_trylock(&q->pi_state->pi_mutex)) { +- locked = 1; +- goto out; +- } +- +- /* +- * pi_state is incorrect, some other task did a lock steal and +- * we returned due to timeout or signal without taking the +- * rt_mutex. Too late. +- */ +- raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock); +- owner = rt_mutex_owner(&q->pi_state->pi_mutex); +- if (!owner) +- owner = rt_mutex_next_owner(&q->pi_state->pi_mutex); +- raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock); +- ret = fixup_pi_state_owner(uaddr, q, owner); +- goto out; +- } ++ if (q->pi_state->owner == current) ++ return fixup_pi_state_owner(uaddr, q, NULL); + + /* + * Paranoia check. If we did not take the lock, then we should not be +- * the owner of the rt_mutex. ++ * the owner of the rt_mutex. Warn and establish consistent state. 
+ */ +- if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) +- printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p " +- "pi-state %p\n", ret, +- q->pi_state->pi_mutex.owner, +- q->pi_state->owner); ++ if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current)) ++ return fixup_pi_state_owner(uaddr, q, current); + +-out: +- return ret ? ret : locked; ++ return 0; + } + + /** +@@ -2524,14 +2859,13 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, + goto out; + + restart = ¤t->restart_block; +- restart->fn = futex_wait_restart; + restart->futex.uaddr = uaddr; + restart->futex.val = val; + restart->futex.time = abs_time->tv64; + restart->futex.bitset = bitset; + restart->futex.flags = flags | FLAGS_HAS_TIMEOUT; + +- ret = -ERESTART_RESTARTBLOCK; ++ ret = set_restart_fn(restart, futex_wait_restart); + + out: + if (to) { +@@ -2571,6 +2905,8 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, + ktime_t *time, int trylock) + { + struct hrtimer_sleeper timeout, *to = NULL; ++ struct task_struct *exiting = NULL; ++ struct rt_mutex_waiter rt_waiter; + struct futex_hash_bucket *hb; + struct futex_q q = futex_q_init; + int res, ret; +@@ -2594,7 +2930,8 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, + retry_private: + hb = queue_lock(&q); + +- ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0); ++ ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, ++ &exiting, 0); + if (unlikely(ret)) { + /* + * Atomic work succeeded and we got the lock, +@@ -2607,15 +2944,22 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, + goto out_unlock_put_key; + case -EFAULT: + goto uaddr_faulted; ++ case -EBUSY: + case -EAGAIN: + /* + * Two reasons for this: +- * - Task is exiting and we just wait for the ++ * - EBUSY: Task is exiting and we just wait for the + * exit to complete. +- * - The user space value changed. ++ * - EAGAIN: The user space value changed. + */ + queue_unlock(hb); + put_futex_key(&q.key); ++ /* ++ * Handle the case where the owner is in the middle of ++ * exiting. Wait for the exit to complete otherwise ++ * this task might loop forever, aka. live lock. ++ */ ++ wait_for_owner_exiting(ret, exiting); + cond_resched(); + goto retry; + default: +@@ -2623,24 +2967,71 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, + } + } + ++ WARN_ON(!q.pi_state); ++ + /* + * Only actually queue now that the atomic ops are done: + */ +- queue_me(&q, hb); ++ __queue_me(&q, hb); + +- WARN_ON(!q.pi_state); +- /* +- * Block on the PI mutex: +- */ +- if (!trylock) { +- ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to); +- } else { +- ret = rt_mutex_trylock(&q.pi_state->pi_mutex); ++ if (trylock) { ++ ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex); + /* Fixup the trylock return value: */ + ret = ret ? 0 : -EWOULDBLOCK; ++ goto no_block; ++ } ++ ++ rt_mutex_init_waiter(&rt_waiter); ++ ++ /* ++ * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not ++ * hold it while doing rt_mutex_start_proxy(), because then it will ++ * include hb->lock in the blocking chain, even through we'll not in ++ * fact hold it while blocking. This will lead it to report -EDEADLK ++ * and BUG when futex_unlock_pi() interleaves with this. ++ * ++ * Therefore acquire wait_lock while holding hb->lock, but drop the ++ * latter before calling __rt_mutex_start_proxy_lock(). 
This ++ * interleaves with futex_unlock_pi() -- which does a similar lock ++ * handoff -- such that the latter can observe the futex_q::pi_state ++ * before __rt_mutex_start_proxy_lock() is done. ++ */ ++ raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); ++ spin_unlock(q.lock_ptr); ++ /* ++ * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter ++ * such that futex_unlock_pi() is guaranteed to observe the waiter when ++ * it sees the futex_q::pi_state. ++ */ ++ ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); ++ raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); ++ ++ if (ret) { ++ if (ret == 1) ++ ret = 0; ++ goto cleanup; + } + ++ if (unlikely(to)) ++ hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS); ++ ++ ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); ++ ++cleanup: + spin_lock(q.lock_ptr); ++ /* ++ * If we failed to acquire the lock (deadlock/signal/timeout), we must ++ * first acquire the hb->lock before removing the lock from the ++ * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait ++ * lists consistent. ++ * ++ * In particular; it is important that futex_unlock_pi() can not ++ * observe this inconsistency. ++ */ ++ if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter)) ++ ret = 0; ++ ++no_block: + /* + * Fixup the pi_state owner and possibly acquire the lock if we + * haven't already. +@@ -2653,13 +3044,6 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, + if (res) + ret = (res < 0) ? res : 0; + +- /* +- * If fixup_owner() faulted and was unable to handle the fault, unlock +- * it and return the fault to userspace. +- */ +- if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) +- rt_mutex_unlock(&q.pi_state->pi_mutex); +- + /* Unqueue and drop the lock */ + unqueue_me_pi(&q); + +@@ -2671,8 +3055,10 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, + out_put_key: + put_futex_key(&q.key); + out: +- if (to) ++ if (to) { ++ hrtimer_cancel(&to->timer); + destroy_hrtimer_on_stack(&to->timer); ++ } + return ret != -EINTR ? ret : -ERESTARTNOINTR; + + uaddr_faulted: +@@ -2699,7 +3085,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) + u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current); + union futex_key key = FUTEX_KEY_INIT; + struct futex_hash_bucket *hb; +- struct futex_q *match; ++ struct futex_q *top_waiter; + int ret; + + retry: +@@ -2723,12 +3109,42 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) + * all and we at least want to know if user space fiddled + * with the futex value instead of blindly unlocking. + */ +- match = futex_top_waiter(hb, &key); +- if (match) { +- ret = wake_futex_pi(uaddr, uval, match, hb); ++ top_waiter = futex_top_waiter(hb, &key); ++ if (top_waiter) { ++ struct futex_pi_state *pi_state = top_waiter->pi_state; ++ ++ ret = -EINVAL; ++ if (!pi_state) ++ goto out_unlock; ++ ++ /* ++ * If current does not own the pi_state then the futex is ++ * inconsistent and user space fiddled with the futex value. ++ */ ++ if (pi_state->owner != current) ++ goto out_unlock; ++ ++ get_pi_state(pi_state); ++ /* ++ * By taking wait_lock while still holding hb->lock, we ensure ++ * there is no point where we hold neither; and therefore ++ * wake_futex_pi() must observe a state consistent with what we ++ * observed. ++ * ++ * In particular; this forces __rt_mutex_start_proxy() to ++ * complete such that we're guaranteed to observe the ++ * rt_waiter. 
Also see the WARN in wake_futex_pi(). ++ */ ++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); ++ spin_unlock(&hb->lock); ++ ++ /* drops pi_state->pi_mutex.wait_lock */ ++ ret = wake_futex_pi(uaddr, uval, pi_state); ++ ++ put_pi_state(pi_state); ++ + /* +- * In case of success wake_futex_pi dropped the hash +- * bucket lock. ++ * Success, we're done! No tricky corner cases. + */ + if (!ret) + goto out_putkey; +@@ -2742,16 +3158,13 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) + * A unconditional UNLOCK_PI op raced against a waiter + * setting the FUTEX_WAITERS bit. Try again. + */ +- if (ret == -EAGAIN) { +- spin_unlock(&hb->lock); +- put_futex_key(&key); +- goto retry; +- } ++ if (ret == -EAGAIN) ++ goto pi_retry; + /* + * wake_futex_pi has detected invalid state. Tell user + * space. + */ +- goto out_unlock; ++ goto out_putkey; + } + + /* +@@ -2761,8 +3174,20 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) + * preserve the WAITERS bit not the OWNER_DIED one. We are the + * owner. + */ +- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) +- goto pi_faulted; ++ if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) { ++ spin_unlock(&hb->lock); ++ switch (ret) { ++ case -EFAULT: ++ goto pi_faulted; ++ ++ case -EAGAIN: ++ goto pi_retry; ++ ++ default: ++ WARN_ON_ONCE(1); ++ goto out_putkey; ++ } ++ } + + /* + * If uval has changed, let user space handle it. +@@ -2775,8 +3200,12 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) + put_futex_key(&key); + return ret; + ++pi_retry: ++ put_futex_key(&key); ++ cond_resched(); ++ goto retry; ++ + pi_faulted: +- spin_unlock(&hb->lock); + put_futex_key(&key); + + ret = fault_in_user_writeable(uaddr); +@@ -2906,10 +3335,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + * The waiter is allocated on our stack, manipulated by the requeue + * code while we sleep on uaddr. + */ +- debug_rt_mutex_init_waiter(&rt_waiter); +- RB_CLEAR_NODE(&rt_waiter.pi_tree_entry); +- RB_CLEAR_NODE(&rt_waiter.tree_entry); +- rt_waiter.task = NULL; ++ rt_mutex_init_waiter(&rt_waiter); + + ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); + if (unlikely(ret != 0)) +@@ -2964,14 +3390,17 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + if (q.pi_state && (q.pi_state->owner != current)) { + spin_lock(q.lock_ptr); + ret = fixup_pi_state_owner(uaddr2, &q, current); +- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) +- rt_mutex_unlock(&q.pi_state->pi_mutex); + /* + * Drop the reference to the pi state which + * the requeue_pi() code acquired for us. + */ + put_pi_state(q.pi_state); + spin_unlock(q.lock_ptr); ++ /* ++ * Adjust the return value. It's either -EFAULT or ++ * success (1) but the caller expects 0 for success. ++ */ ++ ret = ret < 0 ? ret : 0; + } + } else { + struct rt_mutex *pi_mutex; +@@ -3002,14 +3431,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, + if (res) + ret = (res < 0) ? res : 0; + +- /* +- * If fixup_pi_state_owner() faulted and was unable to handle +- * the fault, unlock the rt_mutex and return the fault to +- * userspace. +- */ +- if (ret && rt_mutex_owner(pi_mutex) == current) +- rt_mutex_unlock(pi_mutex); +- + /* Unqueue and drop the lock. 
*/ + unqueue_me_pi(&q); + } +@@ -3119,13 +3540,19 @@ SYSCALL_DEFINE3(get_robust_list, int, pid, + return ret; + } + ++/* Constants for the pending_op argument of handle_futex_death */ ++#define HANDLE_DEATH_PENDING true ++#define HANDLE_DEATH_LIST false ++ + /* + * Process a futex-list entry, check whether it's owned by the + * dying task, and do notification if so: + */ +-int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) ++static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, ++ bool pi, bool pending_op) + { + u32 uval, uninitialized_var(nval), mval; ++ int err; + + /* Futex address must be 32bit aligned */ + if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0) +@@ -3135,42 +3562,93 @@ int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) + if (get_user(uval, uaddr)) + return -1; + +- if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) { +- /* +- * Ok, this dying thread is truly holding a futex +- * of interest. Set the OWNER_DIED bit atomically +- * via cmpxchg, and if the value had FUTEX_WAITERS +- * set, wake up a waiter (if any). (We have to do a +- * futex_wake() even if OWNER_DIED is already set - +- * to handle the rare but possible case of recursive +- * thread-death.) The rest of the cleanup is done in +- * userspace. +- */ +- mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; +- /* +- * We are not holding a lock here, but we want to have +- * the pagefault_disable/enable() protection because +- * we want to handle the fault gracefully. If the +- * access fails we try to fault in the futex with R/W +- * verification via get_user_pages. get_user() above +- * does not guarantee R/W access. If that fails we +- * give up and leave the futex locked. +- */ +- if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) { ++ /* ++ * Special case for regular (non PI) futexes. The unlock path in ++ * user space has two race scenarios: ++ * ++ * 1. The unlock path releases the user space futex value and ++ * before it can execute the futex() syscall to wake up ++ * waiters it is killed. ++ * ++ * 2. A woken up waiter is killed before it can acquire the ++ * futex in user space. ++ * ++ * In both cases the TID validation below prevents a wakeup of ++ * potential waiters which can cause these waiters to block ++ * forever. ++ * ++ * In both cases the following conditions are met: ++ * ++ * 1) task->robust_list->list_op_pending != NULL ++ * @pending_op == true ++ * 2) User space futex value == 0 ++ * 3) Regular futex: @pi == false ++ * ++ * If these conditions are met, it is safe to attempt waking up a ++ * potential waiter without touching the user space futex value and ++ * trying to set the OWNER_DIED bit. The user space futex value is ++ * uncontended and the rest of the user space mutex state is ++ * consistent, so a woken waiter will just take over the ++ * uncontended futex. Setting the OWNER_DIED bit would create ++ * inconsistent state and malfunction of the user space owner died ++ * handling. ++ */ ++ if (pending_op && !pi && !uval) { ++ futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); ++ return 0; ++ } ++ ++ if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr)) ++ return 0; ++ ++ /* ++ * Ok, this dying thread is truly holding a futex ++ * of interest. Set the OWNER_DIED bit atomically ++ * via cmpxchg, and if the value had FUTEX_WAITERS ++ * set, wake up a waiter (if any). (We have to do a ++ * futex_wake() even if OWNER_DIED is already set - ++ * to handle the rare but possible case of recursive ++ * thread-death.) 
The rest of the cleanup is done in ++ * userspace. ++ */ ++ mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; ++ ++ /* ++ * We are not holding a lock here, but we want to have ++ * the pagefault_disable/enable() protection because ++ * we want to handle the fault gracefully. If the ++ * access fails we try to fault in the futex with R/W ++ * verification via get_user_pages. get_user() above ++ * does not guarantee R/W access. If that fails we ++ * give up and leave the futex locked. ++ */ ++ if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) { ++ switch (err) { ++ case -EFAULT: + if (fault_in_user_writeable(uaddr)) + return -1; + goto retry; +- } +- if (nval != uval) ++ ++ case -EAGAIN: ++ cond_resched(); + goto retry; + +- /* +- * Wake robust non-PI futexes here. The wakeup of +- * PI futexes happens in exit_pi_state(): +- */ +- if (!pi && (uval & FUTEX_WAITERS)) +- futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); ++ default: ++ WARN_ON_ONCE(1); ++ return err; ++ } + } ++ ++ if (nval != uval) ++ goto retry; ++ ++ /* ++ * Wake robust non-PI futexes here. The wakeup of ++ * PI futexes happens in exit_pi_state(): ++ */ ++ if (!pi && (uval & FUTEX_WAITERS)) ++ futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); ++ + return 0; + } + +@@ -3198,7 +3676,7 @@ static inline int fetch_robust_entry(struct robust_list __user **entry, + * + * We silently return on any sign of list-walking problem. + */ +-void exit_robust_list(struct task_struct *curr) ++static void exit_robust_list(struct task_struct *curr) + { + struct robust_list_head __user *head = curr->robust_list; + struct robust_list __user *entry, *next_entry, *pending; +@@ -3239,10 +3717,11 @@ void exit_robust_list(struct task_struct *curr) + * A pending lock might already be on the list, so + * don't process it twice: + */ +- if (entry != pending) ++ if (entry != pending) { + if (handle_futex_death((void __user *)entry + futex_offset, +- curr, pi)) ++ curr, pi, HANDLE_DEATH_LIST)) + return; ++ } + if (rc) + return; + entry = next_entry; +@@ -3256,9 +3735,118 @@ void exit_robust_list(struct task_struct *curr) + cond_resched(); + } + +- if (pending) ++ if (pending) { + handle_futex_death((void __user *)pending + futex_offset, +- curr, pip); ++ curr, pip, HANDLE_DEATH_PENDING); ++ } ++} ++ ++static void futex_cleanup(struct task_struct *tsk) ++{ ++ if (unlikely(tsk->robust_list)) { ++ exit_robust_list(tsk); ++ tsk->robust_list = NULL; ++ } ++ ++#ifdef CONFIG_COMPAT ++ if (unlikely(tsk->compat_robust_list)) { ++ compat_exit_robust_list(tsk); ++ tsk->compat_robust_list = NULL; ++ } ++#endif ++ ++ if (unlikely(!list_empty(&tsk->pi_state_list))) ++ exit_pi_state_list(tsk); ++} ++ ++/** ++ * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD ++ * @tsk: task to set the state on ++ * ++ * Set the futex exit state of the task lockless. The futex waiter code ++ * observes that state when a task is exiting and loops until the task has ++ * actually finished the futex cleanup. The worst case for this is that the ++ * waiter runs through the wait loop until the state becomes visible. ++ * ++ * This is called from the recursive fault handling path in do_exit(). ++ * ++ * This is best effort. Either the futex exit code has run already or ++ * not. If the OWNER_DIED bit has been set on the futex then the waiter can ++ * take it over. If not, the problem is pushed back to user space. 
If the ++ * futex exit code did not run yet, then an already queued waiter might ++ * block forever, but there is nothing which can be done about that. ++ */ ++void futex_exit_recursive(struct task_struct *tsk) ++{ ++ /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */ ++ if (tsk->futex_state == FUTEX_STATE_EXITING) ++ mutex_unlock(&tsk->futex_exit_mutex); ++ tsk->futex_state = FUTEX_STATE_DEAD; ++} ++ ++static void futex_cleanup_begin(struct task_struct *tsk) ++{ ++ /* ++ * Prevent various race issues against a concurrent incoming waiter ++ * including live locks by forcing the waiter to block on ++ * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in ++ * attach_to_pi_owner(). ++ */ ++ mutex_lock(&tsk->futex_exit_mutex); ++ ++ /* ++ * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock. ++ * ++ * This ensures that all subsequent checks of tsk->futex_state in ++ * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with ++ * tsk->pi_lock held. ++ * ++ * It guarantees also that a pi_state which was queued right before ++ * the state change under tsk->pi_lock by a concurrent waiter must ++ * be observed in exit_pi_state_list(). ++ */ ++ raw_spin_lock_irq(&tsk->pi_lock); ++ tsk->futex_state = FUTEX_STATE_EXITING; ++ raw_spin_unlock_irq(&tsk->pi_lock); ++} ++ ++static void futex_cleanup_end(struct task_struct *tsk, int state) ++{ ++ /* ++ * Lockless store. The only side effect is that an observer might ++ * take another loop until it becomes visible. ++ */ ++ tsk->futex_state = state; ++ /* ++ * Drop the exit protection. This unblocks waiters which observed ++ * FUTEX_STATE_EXITING to reevaluate the state. ++ */ ++ mutex_unlock(&tsk->futex_exit_mutex); ++} ++ ++void futex_exec_release(struct task_struct *tsk) ++{ ++ /* ++ * The state handling is done for consistency, but in the case of ++ * exec() there is no way to prevent futher damage as the PID stays ++ * the same. But for the unlikely and arguably buggy case that a ++ * futex is held on exec(), this provides at least as much state ++ * consistency protection which is possible. ++ */ ++ futex_cleanup_begin(tsk); ++ futex_cleanup(tsk); ++ /* ++ * Reset the state to FUTEX_STATE_OK. The task is alive and about ++ * exec a new binary. ++ */ ++ futex_cleanup_end(tsk, FUTEX_STATE_OK); ++} ++ ++void futex_exit_release(struct task_struct *tsk) ++{ ++ futex_cleanup_begin(tsk); ++ futex_cleanup(tsk); ++ futex_cleanup_end(tsk, FUTEX_STATE_DEAD); + } + + long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, +@@ -3272,8 +3860,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, + + if (op & FUTEX_CLOCK_REALTIME) { + flags |= FLAGS_CLOCKRT; +- if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET && \ +- cmd != FUTEX_WAIT_REQUEUE_PI) ++ if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI) + return -ENOSYS; + } + +@@ -3354,6 +3941,193 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, + return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); + } + ++#ifdef CONFIG_COMPAT ++/* ++ * Fetch a robust-list pointer. 
Bit 0 signals PI futexes: ++ */ ++static inline int ++compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, ++ compat_uptr_t __user *head, unsigned int *pi) ++{ ++ if (get_user(*uentry, head)) ++ return -EFAULT; ++ ++ *entry = compat_ptr((*uentry) & ~1); ++ *pi = (unsigned int)(*uentry) & 1; ++ ++ return 0; ++} ++ ++static void __user *futex_uaddr(struct robust_list __user *entry, ++ compat_long_t futex_offset) ++{ ++ compat_uptr_t base = ptr_to_compat(entry); ++ void __user *uaddr = compat_ptr(base + futex_offset); ++ ++ return uaddr; ++} ++ ++/* ++ * Walk curr->robust_list (very carefully, it's a userspace list!) ++ * and mark any locks found there dead, and notify any waiters. ++ * ++ * We silently return on any sign of list-walking problem. ++ */ ++void compat_exit_robust_list(struct task_struct *curr) ++{ ++ struct compat_robust_list_head __user *head = curr->compat_robust_list; ++ struct robust_list __user *entry, *next_entry, *pending; ++ unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; ++ unsigned int uninitialized_var(next_pi); ++ compat_uptr_t uentry, next_uentry, upending; ++ compat_long_t futex_offset; ++ int rc; ++ ++ if (!futex_cmpxchg_enabled) ++ return; ++ ++ /* ++ * Fetch the list head (which was registered earlier, via ++ * sys_set_robust_list()): ++ */ ++ if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) ++ return; ++ /* ++ * Fetch the relative futex offset: ++ */ ++ if (get_user(futex_offset, &head->futex_offset)) ++ return; ++ /* ++ * Fetch any possibly pending lock-add first, and handle it ++ * if it exists: ++ */ ++ if (compat_fetch_robust_entry(&upending, &pending, ++ &head->list_op_pending, &pip)) ++ return; ++ ++ next_entry = NULL; /* avoid warning with gcc */ ++ while (entry != (struct robust_list __user *) &head->list) { ++ /* ++ * Fetch the next entry in the list before calling ++ * handle_futex_death: ++ */ ++ rc = compat_fetch_robust_entry(&next_uentry, &next_entry, ++ (compat_uptr_t __user *)&entry->next, &next_pi); ++ /* ++ * A pending lock might already be on the list, so ++ * dont process it twice: ++ */ ++ if (entry != pending) { ++ void __user *uaddr = futex_uaddr(entry, futex_offset); ++ ++ if (handle_futex_death(uaddr, curr, pi, ++ HANDLE_DEATH_LIST)) ++ return; ++ } ++ if (rc) ++ return; ++ uentry = next_uentry; ++ entry = next_entry; ++ pi = next_pi; ++ /* ++ * Avoid excessively long or circular lists: ++ */ ++ if (!--limit) ++ break; ++ ++ cond_resched(); ++ } ++ if (pending) { ++ void __user *uaddr = futex_uaddr(pending, futex_offset); ++ ++ handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING); ++ } ++} ++ ++COMPAT_SYSCALL_DEFINE2(set_robust_list, ++ struct compat_robust_list_head __user *, head, ++ compat_size_t, len) ++{ ++ if (!futex_cmpxchg_enabled) ++ return -ENOSYS; ++ ++ if (unlikely(len != sizeof(*head))) ++ return -EINVAL; ++ ++ current->compat_robust_list = head; ++ ++ return 0; ++} ++ ++COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, ++ compat_uptr_t __user *, head_ptr, ++ compat_size_t __user *, len_ptr) ++{ ++ struct compat_robust_list_head __user *head; ++ unsigned long ret; ++ struct task_struct *p; ++ ++ if (!futex_cmpxchg_enabled) ++ return -ENOSYS; ++ ++ rcu_read_lock(); ++ ++ ret = -ESRCH; ++ if (!pid) ++ p = current; ++ else { ++ p = find_task_by_vpid(pid); ++ if (!p) ++ goto err_unlock; ++ } ++ ++ ret = -EPERM; ++ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) ++ goto err_unlock; ++ ++ head = p->compat_robust_list; ++ rcu_read_unlock(); ++ ++ if 
(put_user(sizeof(*head), len_ptr)) ++ return -EFAULT; ++ return put_user(ptr_to_compat(head), head_ptr); ++ ++err_unlock: ++ rcu_read_unlock(); ++ ++ return ret; ++} ++ ++COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, ++ struct compat_timespec __user *, utime, u32 __user *, uaddr2, ++ u32, val3) ++{ ++ struct timespec ts; ++ ktime_t t, *tp = NULL; ++ int val2 = 0; ++ int cmd = op & FUTEX_CMD_MASK; ++ ++ if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || ++ cmd == FUTEX_WAIT_BITSET || ++ cmd == FUTEX_WAIT_REQUEUE_PI)) { ++ if (compat_get_timespec(&ts, utime)) ++ return -EFAULT; ++ if (!timespec_valid(&ts)) ++ return -EINVAL; ++ ++ t = timespec_to_ktime(ts); ++ if (cmd == FUTEX_WAIT) ++ t = ktime_add_safe(ktime_get(), t); ++ tp = &t; ++ } ++ if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || ++ cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) ++ val2 = (int) (unsigned long) utime; ++ ++ return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); ++} ++#endif /* CONFIG_COMPAT */ ++ + static void __init futex_detect_cmpxchg(void) + { + #ifndef CONFIG_HAVE_FUTEX_CMPXCHG +diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c +deleted file mode 100644 +index 4ae3232e7a28..000000000000 +--- a/kernel/futex_compat.c ++++ /dev/null +@@ -1,201 +0,0 @@ +-/* +- * linux/kernel/futex_compat.c +- * +- * Futex compatibililty routines. +- * +- * Copyright 2006, Red Hat, Inc., Ingo Molnar +- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include +- +- +-/* +- * Fetch a robust-list pointer. Bit 0 signals PI futexes: +- */ +-static inline int +-fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, +- compat_uptr_t __user *head, unsigned int *pi) +-{ +- if (get_user(*uentry, head)) +- return -EFAULT; +- +- *entry = compat_ptr((*uentry) & ~1); +- *pi = (unsigned int)(*uentry) & 1; +- +- return 0; +-} +- +-static void __user *futex_uaddr(struct robust_list __user *entry, +- compat_long_t futex_offset) +-{ +- compat_uptr_t base = ptr_to_compat(entry); +- void __user *uaddr = compat_ptr(base + futex_offset); +- +- return uaddr; +-} +- +-/* +- * Walk curr->robust_list (very carefully, it's a userspace list!) +- * and mark any locks found there dead, and notify any waiters. +- * +- * We silently return on any sign of list-walking problem. 
+- */ +-void compat_exit_robust_list(struct task_struct *curr) +-{ +- struct compat_robust_list_head __user *head = curr->compat_robust_list; +- struct robust_list __user *entry, *next_entry, *pending; +- unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; +- unsigned int uninitialized_var(next_pi); +- compat_uptr_t uentry, next_uentry, upending; +- compat_long_t futex_offset; +- int rc; +- +- if (!futex_cmpxchg_enabled) +- return; +- +- /* +- * Fetch the list head (which was registered earlier, via +- * sys_set_robust_list()): +- */ +- if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) +- return; +- /* +- * Fetch the relative futex offset: +- */ +- if (get_user(futex_offset, &head->futex_offset)) +- return; +- /* +- * Fetch any possibly pending lock-add first, and handle it +- * if it exists: +- */ +- if (fetch_robust_entry(&upending, &pending, +- &head->list_op_pending, &pip)) +- return; +- +- next_entry = NULL; /* avoid warning with gcc */ +- while (entry != (struct robust_list __user *) &head->list) { +- /* +- * Fetch the next entry in the list before calling +- * handle_futex_death: +- */ +- rc = fetch_robust_entry(&next_uentry, &next_entry, +- (compat_uptr_t __user *)&entry->next, &next_pi); +- /* +- * A pending lock might already be on the list, so +- * dont process it twice: +- */ +- if (entry != pending) { +- void __user *uaddr = futex_uaddr(entry, futex_offset); +- +- if (handle_futex_death(uaddr, curr, pi)) +- return; +- } +- if (rc) +- return; +- uentry = next_uentry; +- entry = next_entry; +- pi = next_pi; +- /* +- * Avoid excessively long or circular lists: +- */ +- if (!--limit) +- break; +- +- cond_resched(); +- } +- if (pending) { +- void __user *uaddr = futex_uaddr(pending, futex_offset); +- +- handle_futex_death(uaddr, curr, pip); +- } +-} +- +-COMPAT_SYSCALL_DEFINE2(set_robust_list, +- struct compat_robust_list_head __user *, head, +- compat_size_t, len) +-{ +- if (!futex_cmpxchg_enabled) +- return -ENOSYS; +- +- if (unlikely(len != sizeof(*head))) +- return -EINVAL; +- +- current->compat_robust_list = head; +- +- return 0; +-} +- +-COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, +- compat_uptr_t __user *, head_ptr, +- compat_size_t __user *, len_ptr) +-{ +- struct compat_robust_list_head __user *head; +- unsigned long ret; +- struct task_struct *p; +- +- if (!futex_cmpxchg_enabled) +- return -ENOSYS; +- +- rcu_read_lock(); +- +- ret = -ESRCH; +- if (!pid) +- p = current; +- else { +- p = find_task_by_vpid(pid); +- if (!p) +- goto err_unlock; +- } +- +- ret = -EPERM; +- if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) +- goto err_unlock; +- +- head = p->compat_robust_list; +- rcu_read_unlock(); +- +- if (put_user(sizeof(*head), len_ptr)) +- return -EFAULT; +- return put_user(ptr_to_compat(head), head_ptr); +- +-err_unlock: +- rcu_read_unlock(); +- +- return ret; +-} +- +-COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, +- struct compat_timespec __user *, utime, u32 __user *, uaddr2, +- u32, val3) +-{ +- struct timespec ts; +- ktime_t t, *tp = NULL; +- int val2 = 0; +- int cmd = op & FUTEX_CMD_MASK; +- +- if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || +- cmd == FUTEX_WAIT_BITSET || +- cmd == FUTEX_WAIT_REQUEUE_PI)) { +- if (compat_get_timespec(&ts, utime)) +- return -EFAULT; +- if (!timespec_valid(&ts)) +- return -EINVAL; +- +- t = timespec_to_ktime(ts); +- if (cmd == FUTEX_WAIT) +- t = ktime_add_safe(ktime_get(), t); +- tp = &t; +- } +- if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || +- cmd == 
FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) +- val2 = (int) (unsigned long) utime; +- +- return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); +-} +diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c +index 00564f4d811b..27a97b4a81e2 100644 +--- a/kernel/gcov/gcc_4_7.c ++++ b/kernel/gcov/gcc_4_7.c +@@ -18,7 +18,9 @@ + #include + #include "gcov.h" + +-#if (__GNUC__ >= 7) ++#if (__GNUC__ >= 10) ++#define GCOV_COUNTERS 8 ++#elif (__GNUC__ >= 7) + #define GCOV_COUNTERS 9 + #elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) + #define GCOV_COUNTERS 10 +diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig +index 9b2d34f6887b..02e610acfd64 100644 +--- a/kernel/irq/Kconfig ++++ b/kernel/irq/Kconfig +@@ -72,6 +72,7 @@ config IRQ_DOMAIN_HIERARCHY + # Generic IRQ IPI support + config GENERIC_IRQ_IPI + bool ++ select IRQ_DOMAIN_HIERARCHY + + # Generic MSI interrupt support + config GENERIC_MSI_IRQ +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index b2fc2a581b86..33770ef64d21 100644 +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -886,11 +886,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) + irqreturn_t ret; + + local_bh_disable(); ++ if (!IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) ++ local_irq_disable(); + ret = action->thread_fn(action->irq, action->dev_id); + if (ret == IRQ_HANDLED) + atomic_inc(&desc->threads_handled); + + irq_finalize_oneshot(desc, action); ++ if (!IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) ++ local_irq_enable(); + local_bh_enable(); + return ret; + } +diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c +index 38f76653ed7a..20cdcccea16d 100644 +--- a/kernel/kexec_file.c ++++ b/kernel/kexec_file.c +@@ -521,8 +521,10 @@ static int kexec_calculate_store_digests(struct kimage *image) + + sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region); + sha_regions = vzalloc(sha_region_sz); +- if (!sha_regions) ++ if (!sha_regions) { ++ ret = -ENOMEM; + goto out_free_desc; ++ } + + desc->tfm = tfm; + desc->flags = 0; +diff --git a/kernel/kmod.c b/kernel/kmod.c +index e4e5e98002fe..3f3bbae4cec3 100644 +--- a/kernel/kmod.c ++++ b/kernel/kmod.c +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -222,6 +223,14 @@ static int call_usermodehelper_exec_async(void *data) + flush_signal_handlers(current, 1); + spin_unlock_irq(¤t->sighand->siglock); + ++ /* ++ * Initial kernel threads share ther FS with init, in order to ++ * get the init root directory. But we've now created a new ++ * thread that is going to execve a user process and has its own ++ * 'struct fs_struct'. Reset umask to the default. ++ */ ++ current->fs->umask = 0022; ++ + /* + * Our parent (unbound workqueue) runs with elevated scheduling + * priority. Avoid propagating that into the userspace child. 
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c +index 1b75fb8c7735..51867a2e537f 100644 +--- a/kernel/kprobes.c ++++ b/kernel/kprobes.c +@@ -561,11 +561,12 @@ static void kprobe_optimizer(struct work_struct *work) + do_free_cleaned_kprobes(); + + mutex_unlock(&module_mutex); +- mutex_unlock(&kprobe_mutex); + + /* Step 5: Kick optimizer again if needed */ + if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) + kick_kprobe_optimizer(); ++ ++ mutex_unlock(&kprobe_mutex); + } + + /* Wait for completing optimization and unoptimization */ +@@ -1149,6 +1150,26 @@ __releases(hlist_lock) + } + NOKPROBE_SYMBOL(kretprobe_table_unlock); + ++struct kprobe kprobe_busy = { ++ .addr = (void *) get_kprobe, ++}; ++ ++void kprobe_busy_begin(void) ++{ ++ struct kprobe_ctlblk *kcb; ++ ++ preempt_disable(); ++ __this_cpu_write(current_kprobe, &kprobe_busy); ++ kcb = get_kprobe_ctlblk(); ++ kcb->kprobe_status = KPROBE_HIT_ACTIVE; ++} ++ ++void kprobe_busy_end(void) ++{ ++ __this_cpu_write(current_kprobe, NULL); ++ preempt_enable(); ++} ++ + /* + * This function is called from finish_task_switch when task tk becomes dead, + * so that we can recycle any function-return probe instances associated +@@ -1166,6 +1187,8 @@ void kprobe_flush_task(struct task_struct *tk) + /* Early boot. kretprobe_table_locks not yet initialized. */ + return; + ++ kprobe_busy_begin(); ++ + INIT_HLIST_HEAD(&empty_rp); + hash = hash_ptr(tk, KPROBE_HASH_BITS); + head = &kretprobe_inst_table[hash]; +@@ -1179,6 +1202,8 @@ void kprobe_flush_task(struct task_struct *tk) + hlist_del(&ri->hlist); + kfree(ri); + } ++ ++ kprobe_busy_end(); + } + NOKPROBE_SYMBOL(kprobe_flush_task); + +@@ -1859,6 +1884,10 @@ int register_kretprobe(struct kretprobe *rp) + int i; + void *addr; + ++ /* If only rp->kp.addr is specified, check reregistering kprobes */ ++ if (rp->kp.addr && check_kprobe_rereg(&rp->kp)) ++ return -EINVAL; ++ + if (kretprobe_blacklist_size) { + addr = kprobe_addr(&rp->kp); + if (IS_ERR(addr)) +@@ -1987,6 +2016,9 @@ static void kill_kprobe(struct kprobe *p) + { + struct kprobe *kp; + ++ if (WARN_ON_ONCE(kprobe_gone(p))) ++ return; ++ + p->flags |= KPROBE_FLAG_GONE; + if (kprobe_aggrprobe(p)) { + /* +@@ -2004,6 +2036,14 @@ static void kill_kprobe(struct kprobe *p) + * the original probed function (which will be freed soon) any more. + */ + arch_remove_kprobe(p); ++ ++ /* ++ * The module is going away. We should disarm the kprobe which ++ * is using ftrace, because ftrace framework is still available at ++ * MODULE_STATE_GOING notification. 
++ */ ++ if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed) ++ disarm_kprobe_ftrace(p); + } + + /* Disable one kprobe */ +@@ -2122,7 +2162,10 @@ static int kprobes_module_callback(struct notifier_block *nb, + mutex_lock(&kprobe_mutex); + for (i = 0; i < KPROBE_TABLE_SIZE; i++) { + head = &kprobe_table[i]; +- hlist_for_each_entry_rcu(p, head, hlist) ++ hlist_for_each_entry_rcu(p, head, hlist) { ++ if (kprobe_gone(p)) ++ continue; ++ + if (within_module_init((unsigned long)p->addr, mod) || + (checkcore && + within_module_core((unsigned long)p->addr, mod))) { +@@ -2133,6 +2176,7 @@ static int kprobes_module_callback(struct notifier_block *nb, + */ + kill_kprobe(p); + } ++ } + } + mutex_unlock(&kprobe_mutex); + return NOTIFY_DONE; +diff --git a/kernel/kthread.c b/kernel/kthread.c +index 981ebe937264..2e709b5726bc 100644 +--- a/kernel/kthread.c ++++ b/kernel/kthread.c +@@ -828,7 +828,8 @@ void kthread_delayed_work_timer_fn(unsigned long __data) + /* Move the work from worker->delayed_work_list. */ + WARN_ON_ONCE(list_empty(&work->node)); + list_del_init(&work->node); +- kthread_insert_work(worker, work, &worker->work_list); ++ if (!work->canceling) ++ kthread_insert_work(worker, work, &worker->work_list); + + spin_unlock(&worker->lock); + } +@@ -950,8 +951,38 @@ void kthread_flush_work(struct kthread_work *work) + EXPORT_SYMBOL_GPL(kthread_flush_work); + + /* +- * This function removes the work from the worker queue. Also it makes sure +- * that it won't get queued later via the delayed work's timer. ++ * Make sure that the timer is neither set nor running and could ++ * not manipulate the work list_head any longer. ++ * ++ * The function is called under worker->lock. The lock is temporary ++ * released but the timer can't be set again in the meantime. ++ */ ++static void kthread_cancel_delayed_work_timer(struct kthread_work *work, ++ unsigned long *flags) ++{ ++ struct kthread_delayed_work *dwork = ++ container_of(work, struct kthread_delayed_work, work); ++ struct kthread_worker *worker = work->worker; ++ ++ /* ++ * del_timer_sync() must be called to make sure that the timer ++ * callback is not running. The lock must be temporary released ++ * to avoid a deadlock with the callback. In the meantime, ++ * any queuing is blocked by setting the canceling counter. ++ */ ++ work->canceling++; ++ spin_unlock_irqrestore(&worker->lock, *flags); ++ del_timer_sync(&dwork->timer); ++ spin_lock_irqsave(&worker->lock, *flags); ++ work->canceling--; ++} ++ ++/* ++ * This function removes the work from the worker queue. ++ * ++ * It is called under worker->lock. The caller must make sure that ++ * the timer used by delayed work is not running, e.g. by calling ++ * kthread_cancel_delayed_work_timer(). + * + * The work might still be in use when this function finishes. See the + * current_work proceed by the worker. +@@ -959,28 +990,8 @@ EXPORT_SYMBOL_GPL(kthread_flush_work); + * Return: %true if @work was pending and successfully canceled, + * %false if @work was not pending + */ +-static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork, +- unsigned long *flags) ++static bool __kthread_cancel_work(struct kthread_work *work) + { +- /* Try to cancel the timer if exists. */ +- if (is_dwork) { +- struct kthread_delayed_work *dwork = +- container_of(work, struct kthread_delayed_work, work); +- struct kthread_worker *worker = work->worker; +- +- /* +- * del_timer_sync() must be called to make sure that the timer +- * callback is not running. 
The lock must be temporary released +- * to avoid a deadlock with the callback. In the meantime, +- * any queuing is blocked by setting the canceling counter. +- */ +- work->canceling++; +- spin_unlock_irqrestore(&worker->lock, *flags); +- del_timer_sync(&dwork->timer); +- spin_lock_irqsave(&worker->lock, *flags); +- work->canceling--; +- } +- + /* + * Try to remove the work from a worker list. It might either + * be from worker->work_list or from worker->delayed_work_list. +@@ -1033,11 +1044,23 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker, + /* Work must not be used with >1 worker, see kthread_queue_work() */ + WARN_ON_ONCE(work->worker != worker); + +- /* Do not fight with another command that is canceling this work. */ ++ /* ++ * Temporary cancel the work but do not fight with another command ++ * that is canceling the work as well. ++ * ++ * It is a bit tricky because of possible races with another ++ * mod_delayed_work() and cancel_delayed_work() callers. ++ * ++ * The timer must be canceled first because worker->lock is released ++ * when doing so. But the work can be removed from the queue (list) ++ * only when it can be queued again so that the return value can ++ * be used for reference counting. ++ */ ++ kthread_cancel_delayed_work_timer(work, &flags); + if (work->canceling) + goto out; ++ ret = __kthread_cancel_work(work); + +- ret = __kthread_cancel_work(work, true, &flags); + fast_queue: + __kthread_queue_delayed_work(worker, dwork, delay); + out: +@@ -1059,7 +1082,10 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork) + /* Work must not be used with >1 worker, see kthread_queue_work(). */ + WARN_ON_ONCE(work->worker != worker); + +- ret = __kthread_cancel_work(work, is_dwork, &flags); ++ if (is_dwork) ++ kthread_cancel_delayed_work_timer(work, &flags); ++ ++ ret = __kthread_cancel_work(work); + + if (worker->current_work != work) + goto out_fast; +diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c +index 75d80809c48c..09bad6cbb95c 100644 +--- a/kernel/locking/lockdep_proc.c ++++ b/kernel/locking/lockdep_proc.c +@@ -425,7 +425,7 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt) + seq_time(m, lt->min); + seq_time(m, lt->max); + seq_time(m, lt->total); +- seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0); ++ seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0); + } + + static void seq_stats(struct seq_file *m, struct lock_stat_data *data) +diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c +index 62b6cee8ea7f..0613c4b1d059 100644 +--- a/kernel/locking/rtmutex-debug.c ++++ b/kernel/locking/rtmutex-debug.c +@@ -173,12 +173,3 @@ void debug_rt_mutex_init(struct rt_mutex *lock, const char *name) + lock->name = name; + } + +-void +-rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task) +-{ +-} +- +-void rt_mutex_deadlock_account_unlock(struct task_struct *task) +-{ +-} +- +diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h +index d0519c3432b6..b585af9a1b50 100644 +--- a/kernel/locking/rtmutex-debug.h ++++ b/kernel/locking/rtmutex-debug.h +@@ -9,9 +9,6 @@ + * This file contains macros used solely by rtmutex.c. Debug version. 
+ */ + +-extern void +-rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task); +-extern void rt_mutex_deadlock_account_unlock(struct task_struct *task); + extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); + extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter); + extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name); +diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c +index 7615e7722258..1589e131ee4b 100644 +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -956,8 +956,6 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, + */ + rt_mutex_set_owner(lock, task); + +- rt_mutex_deadlock_account_lock(lock, task); +- + return 1; + } + +@@ -1178,6 +1176,14 @@ void rt_mutex_adjust_pi(struct task_struct *task) + next_lock, NULL, task); + } + ++void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) ++{ ++ debug_rt_mutex_init_waiter(waiter); ++ RB_CLEAR_NODE(&waiter->pi_tree_entry); ++ RB_CLEAR_NODE(&waiter->tree_entry); ++ waiter->task = NULL; ++} ++ + /** + * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop + * @lock: the rt_mutex to take +@@ -1260,9 +1266,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, + unsigned long flags; + int ret = 0; + +- debug_rt_mutex_init_waiter(&waiter); +- RB_CLEAR_NODE(&waiter.pi_tree_entry); +- RB_CLEAR_NODE(&waiter.tree_entry); ++ rt_mutex_init_waiter(&waiter); + + /* + * Technically we could use raw_spin_[un]lock_irq() here, but this can +@@ -1316,6 +1320,19 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, + return ret; + } + ++static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock) ++{ ++ int ret = try_to_take_rt_mutex(lock, current, NULL); ++ ++ /* ++ * try_to_take_rt_mutex() sets the lock waiters bit ++ * unconditionally. Clean this up. ++ */ ++ fixup_rt_mutex_waiters(lock); ++ ++ return ret; ++} ++ + /* + * Slow path try-lock function: + */ +@@ -1338,13 +1355,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) + */ + raw_spin_lock_irqsave(&lock->wait_lock, flags); + +- ret = try_to_take_rt_mutex(lock, current, NULL); +- +- /* +- * try_to_take_rt_mutex() sets the lock waiters bit +- * unconditionally. Clean this up. +- */ +- fixup_rt_mutex_waiters(lock); ++ ret = __rt_mutex_slowtrylock(lock); + + raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + +@@ -1365,8 +1376,6 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, + + debug_rt_mutex_unlock(lock); + +- rt_mutex_deadlock_account_unlock(current); +- + /* + * We must be careful here if the fast path is enabled. 
If we + * have no waiters queued we cannot set owner to NULL here +@@ -1432,11 +1441,10 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, + enum rtmutex_chainwalk chwalk)) + { +- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { +- rt_mutex_deadlock_account_lock(lock, current); ++ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + return 0; +- } else +- return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); ++ ++ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); + } + + static inline int +@@ -1448,21 +1456,19 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, + enum rtmutex_chainwalk chwalk)) + { + if (chwalk == RT_MUTEX_MIN_CHAINWALK && +- likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { +- rt_mutex_deadlock_account_lock(lock, current); ++ likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + return 0; +- } else +- return slowfn(lock, state, timeout, chwalk); ++ ++ return slowfn(lock, state, timeout, chwalk); + } + + static inline int + rt_mutex_fasttrylock(struct rt_mutex *lock, + int (*slowfn)(struct rt_mutex *lock)) + { +- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { +- rt_mutex_deadlock_account_lock(lock, current); ++ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + return 1; +- } ++ + return slowfn(lock); + } + +@@ -1472,19 +1478,18 @@ rt_mutex_fastunlock(struct rt_mutex *lock, + struct wake_q_head *wqh)) + { + WAKE_Q(wake_q); ++ bool deboost; + +- if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { +- rt_mutex_deadlock_account_unlock(current); ++ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) ++ return; + +- } else { +- bool deboost = slowfn(lock, &wake_q); ++ deboost = slowfn(lock, &wake_q); + +- wake_up_q(&wake_q); ++ wake_up_q(&wake_q); + +- /* Undo pi boosting if necessary: */ +- if (deboost) +- rt_mutex_adjust_prio(current); +- } ++ /* Undo pi boosting if necessary: */ ++ if (deboost) ++ rt_mutex_adjust_prio(current); + } + + /** +@@ -1518,16 +1523,16 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) + EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); + + /* +- * Futex variant with full deadlock detection. ++ * Futex variant, must not use fastpath. + */ +-int rt_mutex_timed_futex_lock(struct rt_mutex *lock, +- struct hrtimer_sleeper *timeout) ++int __sched rt_mutex_futex_trylock(struct rt_mutex *lock) + { +- might_sleep(); ++ return rt_mutex_slowtrylock(lock); ++} + +- return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, +- RT_MUTEX_FULL_CHAINWALK, +- rt_mutex_slowlock); ++int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) ++{ ++ return __rt_mutex_slowtrylock(lock); + } + + /** +@@ -1586,20 +1591,38 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock) + EXPORT_SYMBOL_GPL(rt_mutex_unlock); + + /** +- * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock +- * @lock: the rt_mutex to be unlocked +- * +- * Returns: true/false indicating whether priority adjustment is +- * required or not. ++ * Futex variant, that since futex variants do not use the fast-path, can be ++ * simple and will not need to retry. 
+ */ +-bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock, +- struct wake_q_head *wqh) ++bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, ++ struct wake_q_head *wake_q) + { +- if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { +- rt_mutex_deadlock_account_unlock(current); +- return false; ++ lockdep_assert_held(&lock->wait_lock); ++ ++ debug_rt_mutex_unlock(lock); ++ ++ if (!rt_mutex_has_waiters(lock)) { ++ lock->owner = NULL; ++ return false; /* done */ ++ } ++ ++ mark_wakeup_next_waiter(wake_q, lock); ++ return true; /* deboost and wakeups */ ++} ++ ++void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) ++{ ++ WAKE_Q(wake_q); ++ bool deboost; ++ ++ raw_spin_lock_irq(&lock->wait_lock); ++ deboost = __rt_mutex_futex_unlock(lock, &wake_q); ++ raw_spin_unlock_irq(&lock->wait_lock); ++ ++ if (deboost) { ++ wake_up_q(&wake_q); ++ rt_mutex_adjust_prio(current); + } +- return rt_mutex_slowunlock(lock, wqh); + } + + /** +@@ -1656,7 +1679,6 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, + __rt_mutex_init(lock, NULL); + debug_rt_mutex_proxy_lock(lock, proxy_owner); + rt_mutex_set_owner(lock, proxy_owner); +- rt_mutex_deadlock_account_lock(lock, proxy_owner); + } + + /** +@@ -1667,39 +1689,41 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, + * No locking. Caller has to do serializing itself + * Special API call for PI-futex support + */ +-void rt_mutex_proxy_unlock(struct rt_mutex *lock, +- struct task_struct *proxy_owner) ++void rt_mutex_proxy_unlock(struct rt_mutex *lock) + { + debug_rt_mutex_proxy_unlock(lock); + rt_mutex_set_owner(lock, NULL); +- rt_mutex_deadlock_account_unlock(proxy_owner); + } + + /** +- * rt_mutex_start_proxy_lock() - Start lock acquisition for another task ++ * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task + * @lock: the rt_mutex to take + * @waiter: the pre-initialized rt_mutex_waiter + * @task: the task to prepare + * ++ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock ++ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that. ++ * ++ * NOTE: does _NOT_ remove the @waiter on failure; must either call ++ * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this. ++ * + * Returns: + * 0 - task blocked on lock + * 1 - acquired the lock for task, caller should wake it up + * <0 - error + * +- * Special API call for FUTEX_REQUEUE_PI support. ++ * Special API call for PI-futex support. + */ +-int rt_mutex_start_proxy_lock(struct rt_mutex *lock, ++int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, + struct task_struct *task) + { + int ret; + +- raw_spin_lock_irq(&lock->wait_lock); ++ lockdep_assert_held(&lock->wait_lock); + +- if (try_to_take_rt_mutex(lock, task, NULL)) { +- raw_spin_unlock_irq(&lock->wait_lock); ++ if (try_to_take_rt_mutex(lock, task, NULL)) + return 1; +- } + + /* We enforce deadlock detection for futexes */ + ret = task_blocks_on_rt_mutex(lock, waiter, task, +@@ -1715,13 +1739,42 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, + ret = 0; + } + ++ debug_rt_mutex_print_deadlock(waiter); ++ ++ return ret; ++} ++ ++/** ++ * rt_mutex_start_proxy_lock() - Start lock acquisition for another task ++ * @lock: the rt_mutex to take ++ * @waiter: the pre-initialized rt_mutex_waiter ++ * @task: the task to prepare ++ * ++ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock ++ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that. 
++ * ++ * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter ++ * on failure. ++ * ++ * Returns: ++ * 0 - task blocked on lock ++ * 1 - acquired the lock for task, caller should wake it up ++ * <0 - error ++ * ++ * Special API call for PI-futex support. ++ */ ++int rt_mutex_start_proxy_lock(struct rt_mutex *lock, ++ struct rt_mutex_waiter *waiter, ++ struct task_struct *task) ++{ ++ int ret; ++ ++ raw_spin_lock_irq(&lock->wait_lock); ++ ret = __rt_mutex_start_proxy_lock(lock, waiter, task); + if (unlikely(ret)) + remove_waiter(lock, waiter); +- + raw_spin_unlock_irq(&lock->wait_lock); + +- debug_rt_mutex_print_deadlock(waiter); +- + return ret; + } + +@@ -1769,18 +1822,14 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, + int ret; + + raw_spin_lock_irq(&lock->wait_lock); +- +- set_current_state(TASK_INTERRUPTIBLE); +- + /* sleep on the mutex */ ++ set_current_state(TASK_INTERRUPTIBLE); + ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); +- + /* + * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might + * have to fix that up. + */ + fixup_rt_mutex_waiters(lock); +- + raw_spin_unlock_irq(&lock->wait_lock); + + return ret; +@@ -1791,7 +1840,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, + * @lock: the rt_mutex we were woken on + * @waiter: the pre-initialized rt_mutex_waiter + * +- * Attempt to clean up after a failed rt_mutex_wait_proxy_lock(). ++ * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or ++ * rt_mutex_wait_proxy_lock(). + * + * Unless we acquired the lock; we're still enqueued on the wait-list and can + * in fact still be granted ownership until we're removed. Therefore we can +@@ -1811,15 +1861,32 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, + bool cleanup = false; + + raw_spin_lock_irq(&lock->wait_lock); ++ /* ++ * Do an unconditional try-lock, this deals with the lock stealing ++ * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter() ++ * sets a NULL owner. ++ * ++ * We're not interested in the return value, because the subsequent ++ * test on rt_mutex_owner() will infer that. If the trylock succeeded, ++ * we will own the lock and it will have removed the waiter. If we ++ * failed the trylock, we're still not owner and we need to remove ++ * ourselves. ++ */ ++ try_to_take_rt_mutex(lock, current, waiter); + /* + * Unless we're the owner; we're still enqueued on the wait_list. + * So check if we became owner, if not, take us off the wait_list. + */ + if (rt_mutex_owner(lock) != current) { + remove_waiter(lock, waiter); +- fixup_rt_mutex_waiters(lock); + cleanup = true; + } ++ /* ++ * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might ++ * have to fix that up. 
++ */ ++ fixup_rt_mutex_waiters(lock); ++ + raw_spin_unlock_irq(&lock->wait_lock); + + return cleanup; +diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h +index c4060584c407..6607802efa8b 100644 +--- a/kernel/locking/rtmutex.h ++++ b/kernel/locking/rtmutex.h +@@ -11,8 +11,6 @@ + */ + + #define rt_mutex_deadlock_check(l) (0) +-#define rt_mutex_deadlock_account_lock(m, t) do { } while (0) +-#define rt_mutex_deadlock_account_unlock(l) do { } while (0) + #define debug_rt_mutex_init_waiter(w) do { } while (0) + #define debug_rt_mutex_free_waiter(w) do { } while (0) + #define debug_rt_mutex_lock(l) do { } while (0) +diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h +index 14cbafed0014..c5d3f577b2a7 100644 +--- a/kernel/locking/rtmutex_common.h ++++ b/kernel/locking/rtmutex_common.h +@@ -102,8 +102,11 @@ enum rtmutex_chainwalk { + extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); + extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, + struct task_struct *proxy_owner); +-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock, +- struct task_struct *proxy_owner); ++extern void rt_mutex_proxy_unlock(struct rt_mutex *lock); ++extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); ++extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, ++ struct rt_mutex_waiter *waiter, ++ struct task_struct *task); + extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, + struct task_struct *task); +@@ -112,9 +115,13 @@ extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter); + extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter); +-extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to); +-extern bool rt_mutex_futex_unlock(struct rt_mutex *lock, +- struct wake_q_head *wqh); ++extern int rt_mutex_futex_trylock(struct rt_mutex *l); ++extern int __rt_mutex_futex_trylock(struct rt_mutex *l); ++ ++extern void rt_mutex_futex_unlock(struct rt_mutex *lock); ++extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock, ++ struct wake_q_head *wqh); ++ + extern void rt_mutex_adjust_prio(struct task_struct *task); + + #ifdef CONFIG_DEBUG_RT_MUTEXES +diff --git a/kernel/module.c b/kernel/module.c +index 5eb3bf76d54b..f42fbf18cb8a 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -1762,7 +1762,6 @@ static int mod_sysfs_init(struct module *mod) + if (err) + mod_kobject_put(mod); + +- /* delay uevent until full sysfs population */ + out: + return err; + } +@@ -1796,7 +1795,6 @@ static int mod_sysfs_setup(struct module *mod, + add_sect_attrs(mod, info); + add_notes_attrs(mod, info); + +- kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); + return 0; + + out_unreg_param: +@@ -2222,6 +2220,21 @@ static int verify_export_symbols(struct module *mod) + return 0; + } + ++static bool ignore_undef_symbol(Elf_Half emachine, const char *name) ++{ ++ /* ++ * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as ++ * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64. ++ * i386 has a similar problem but may not deserve a fix. ++ * ++ * If we ever have to ignore many symbols, consider refactoring the code to ++ * only warn if referenced by a relocation. ++ */ ++ if (emachine == EM_386 || emachine == EM_X86_64) ++ return !strcmp(name, "_GLOBAL_OFFSET_TABLE_"); ++ return false; ++} ++ + /* Change all symbols so that st_value encodes the pointer directly. 
*/ + static int simplify_symbols(struct module *mod, const struct load_info *info) + { +@@ -2267,8 +2280,10 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) + break; + } + +- /* Ok if weak. */ +- if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK) ++ /* Ok if weak or ignored. */ ++ if (!ksym && ++ (ELF_ST_BIND(sym[i].st_info) == STB_WEAK || ++ ignore_undef_symbol(info->hdr->e_machine, name))) + break; + + pr_warn("%s: Unknown symbol %s (err %li)\n", +@@ -3443,6 +3458,9 @@ static noinline int do_init_module(struct module *mod) + blocking_notifier_call_chain(&module_notify_list, + MODULE_STATE_LIVE, mod); + ++ /* Delay uevent until module has finished its init routine */ ++ kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); ++ + /* + * We need to finish all async code before the module init sequence + * is done. This has potential to deadlock. For example, a newly +@@ -3762,6 +3780,7 @@ static int load_module(struct load_info *info, const char __user *uargs, + MODULE_STATE_GOING, mod); + klp_module_going(mod); + bug_cleanup: ++ mod->state = MODULE_STATE_GOING; + /* module_bug_cleanup needs module_mutex protection */ + mutex_lock(&module_mutex); + module_bug_cleanup(mod); +diff --git a/kernel/pid.c b/kernel/pid.c +index 6e855d2011e7..2c19594da84c 100644 +--- a/kernel/pid.c ++++ b/kernel/pid.c +@@ -69,9 +69,7 @@ static inline int mk_pid(struct pid_namespace *pid_ns, + * the scheme scales to up to 4 million PIDs, runtime. + */ + struct pid_namespace init_pid_ns = { +- .kref = { +- .refcount = ATOMIC_INIT(2), +- }, ++ .kref = KREF_INIT(2), + .pidmap = { + [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } + }, +diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c +index 3c775d6b7317..7b393faf930f 100644 +--- a/kernel/power/hibernate.c ++++ b/kernel/power/hibernate.c +@@ -834,17 +834,6 @@ static int software_resume(void) + + /* Check if the device is there */ + swsusp_resume_device = name_to_dev_t(resume_file); +- +- /* +- * name_to_dev_t is ineffective to verify parition if resume_file is in +- * integer format. (e.g. major:minor) +- */ +- if (isdigit(resume_file[0]) && resume_wait) { +- int partno; +- while (!get_gendisk(swsusp_resume_device, &partno)) +- msleep(10); +- } +- + if (!swsusp_resume_device) { + /* + * Some device discovery might still be in progress; we need +diff --git a/kernel/printk/nmi.c b/kernel/printk/nmi.c +index 2c3e7f024c15..7a50b405ad28 100644 +--- a/kernel/printk/nmi.c ++++ b/kernel/printk/nmi.c +@@ -52,6 +52,8 @@ struct nmi_seq_buf { + }; + static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq); + ++static DEFINE_RAW_SPINLOCK(nmi_read_lock); ++ + /* + * Safe printk() for NMI context. It uses a per-CPU buffer to + * store the message. NMIs are not nested, so there is always only +@@ -134,8 +136,6 @@ static void printk_nmi_flush_seq_line(struct nmi_seq_buf *s, + */ + static void __printk_nmi_flush(struct irq_work *work) + { +- static raw_spinlock_t read_lock = +- __RAW_SPIN_LOCK_INITIALIZER(read_lock); + struct nmi_seq_buf *s = container_of(work, struct nmi_seq_buf, work); + unsigned long flags; + size_t len, size; +@@ -148,7 +148,7 @@ static void __printk_nmi_flush(struct irq_work *work) + * different CPUs. This is especially important when printing + * a backtrace. 
+ */ +- raw_spin_lock_irqsave(&read_lock, flags); ++ raw_spin_lock_irqsave(&nmi_read_lock, flags); + + i = 0; + more: +@@ -197,7 +197,7 @@ static void __printk_nmi_flush(struct irq_work *work) + goto more; + + out: +- raw_spin_unlock_irqrestore(&read_lock, flags); ++ raw_spin_unlock_irqrestore(&nmi_read_lock, flags); + } + + /** +@@ -239,6 +239,14 @@ void printk_nmi_flush_on_panic(void) + raw_spin_lock_init(&logbuf_lock); + } + ++ if (in_nmi() && raw_spin_is_locked(&nmi_read_lock)) { ++ if (num_online_cpus() > 1) ++ return; ++ ++ debug_locks_off(); ++ raw_spin_lock_init(&nmi_read_lock); ++ } ++ + printk_nmi_flush(); + } + +diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c +index 96592f1192b3..9c0a09a35289 100644 +--- a/kernel/printk/printk.c ++++ b/kernel/printk/printk.c +@@ -2044,6 +2044,9 @@ static int __init console_setup(char *str) + char *s, *options, *brl_options = NULL; + int idx; + ++ if (str[0] == 0) ++ return 1; ++ + if (_braille_console_setup(&str, &brl_options)) + return 1; + +diff --git a/kernel/ptrace.c b/kernel/ptrace.c +index ea3370e205fb..4f10223bc7b0 100644 +--- a/kernel/ptrace.c ++++ b/kernel/ptrace.c +@@ -159,6 +159,21 @@ void __ptrace_unlink(struct task_struct *child) + spin_unlock(&child->sighand->siglock); + } + ++static bool looks_like_a_spurious_pid(struct task_struct *task) ++{ ++ if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP)) ++ return false; ++ ++ if (task_pid_vnr(task) == task->ptrace_message) ++ return false; ++ /* ++ * The tracee changed its pid but the PTRACE_EVENT_EXEC event ++ * was not wait()'ed, most probably debugger targets the old ++ * leader which was destroyed in de_thread(). ++ */ ++ return true; ++} ++ + /* Ensure that nothing can wake it up, even SIGKILL */ + static bool ptrace_freeze_traced(struct task_struct *task) + { +@@ -169,7 +184,8 @@ static bool ptrace_freeze_traced(struct task_struct *task) + return ret; + + spin_lock_irq(&task->sighand->siglock); +- if (task_is_traced(task) && !__fatal_signal_pending(task)) { ++ if (task_is_traced(task) && !looks_like_a_spurious_pid(task) && ++ !__fatal_signal_pending(task)) { + task->state = __TASK_TRACED; + ret = true; + } +diff --git a/kernel/reboot.c b/kernel/reboot.c +index bd30a973fe94..2946ed1d99d4 100644 +--- a/kernel/reboot.c ++++ b/kernel/reboot.c +@@ -512,22 +512,22 @@ static int __init reboot_setup(char *str) + break; + + case 's': +- { +- int rc; +- +- if (isdigit(*(str+1))) { +- rc = kstrtoint(str+1, 0, &reboot_cpu); +- if (rc) +- return rc; +- } else if (str[1] == 'm' && str[2] == 'p' && +- isdigit(*(str+3))) { +- rc = kstrtoint(str+3, 0, &reboot_cpu); +- if (rc) +- return rc; +- } else ++ if (isdigit(*(str+1))) ++ reboot_cpu = simple_strtoul(str+1, NULL, 0); ++ else if (str[1] == 'm' && str[2] == 'p' && ++ isdigit(*(str+3))) ++ reboot_cpu = simple_strtoul(str+3, NULL, 0); ++ else + reboot_mode = REBOOT_SOFT; ++ if (reboot_cpu >= num_possible_cpus()) { ++ pr_err("Ignoring the CPU number in reboot= option. 
" ++ "CPU %d exceeds possible cpu number %d\n", ++ reboot_cpu, num_possible_cpus()); ++ reboot_cpu = 0; ++ break; ++ } + break; +- } ++ + case 'g': + reboot_mode = REBOOT_GPIO; + break; +diff --git a/kernel/relay.c b/kernel/relay.c +index 5034cb3a339f..3623ad9b529c 100644 +--- a/kernel/relay.c ++++ b/kernel/relay.c +@@ -196,6 +196,7 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan) + static void relay_destroy_channel(struct kref *kref) + { + struct rchan *chan = container_of(kref, struct rchan, kref); ++ free_percpu(chan->buf); + kfree(chan); + } + +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index e6bfa9af3570..02a799e2a707 100755 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -3933,7 +3933,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio) + if (dl_prio(prio)) { + struct task_struct *pi_task = rt_mutex_get_top_task(p); + if (!dl_prio(p->normal_prio) || +- (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { ++ (pi_task && dl_prio(pi_task->prio) && ++ dl_entity_preempt(&pi_task->dl, &p->dl))) { + p->dl.dl_boosted = 1; + queue_flag |= ENQUEUE_REPLENISH; + } else +@@ -9074,8 +9075,9 @@ int sched_rr_handler(struct ctl_table *table, int write, + /* make sure that internally we keep jiffies */ + /* also, writing zero resets timeslice to default */ + if (!ret && write) { +- sched_rr_timeslice = sched_rr_timeslice <= 0 ? +- RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice); ++ sched_rr_timeslice = ++ sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE : ++ msecs_to_jiffies(sysctl_sched_rr_timeslice); + } + mutex_unlock(&mutex); + return ret; +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index dc669e370ee5..73bb0233e6a7 100755 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -2629,7 +2629,7 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr) + /* + * We don't care about NUMA placement if we don't have memory. + */ +- if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work) ++ if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work) + return; + + /* +@@ -4553,7 +4553,7 @@ static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC; + static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) + { + struct hrtimer *refresh_timer = &cfs_b->period_timer; +- u64 remaining; ++ s64 remaining; + + /* if the call-back is running a quota refresh is already occurring */ + if (hrtimer_callback_running(refresh_timer)) +@@ -4561,7 +4561,7 @@ static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) + + /* is a quota refresh about to occur? */ + remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer)); +- if (remaining < min_expire) ++ if (remaining < (s64)min_expire) + return 1; + + return 0; +@@ -8680,7 +8680,15 @@ static int detach_tasks(struct lb_env *env) + if (!can_migrate_task(p, env)) + goto next; + +- load = task_h_load(p); ++ /* ++ * Depending of the number of CPUs and tasks and the ++ * cgroup hierarchy, task_h_load() can return a null ++ * value. Make sure that env->imbalance decreases ++ * otherwise detach_tasks() will stop only after ++ * detaching up to loop_max tasks. 
++ */ ++ load = max_t(unsigned long, task_h_load(p), 1); ++ + + if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) + goto next; +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c +index 4e56c0859a5e..026a471bf59a 100755 +--- a/kernel/sched/rt.c ++++ b/kernel/sched/rt.c +@@ -14,6 +14,7 @@ + #include "walt.h" + + int sched_rr_timeslice = RR_TIMESLICE; ++int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE; + + static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun); + +diff --git a/kernel/seccomp.c b/kernel/seccomp.c +index 3975856d476c..ac56ebffeb95 100644 +--- a/kernel/seccomp.c ++++ b/kernel/seccomp.c +@@ -669,6 +669,8 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd, + const bool recheck_after_trace) + { + BUG(); ++ ++ return -1; + } + #endif + +diff --git a/kernel/sys.c b/kernel/sys.c +index 15cf3eea6f2e..2f055b6d33c3 100644 +--- a/kernel/sys.c ++++ b/kernel/sys.c +@@ -1186,11 +1186,13 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name) + + SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name) + { +- struct oldold_utsname tmp = {}; ++ struct oldold_utsname tmp; + + if (!name) + return -EFAULT; + ++ memset(&tmp, 0, sizeof(tmp)); ++ + down_read(&uts_sem); + memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN); + memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN); +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index f3a22c903ddb..4b3f3f066711 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -543,7 +543,7 @@ static struct ctl_table kern_table[] = { + }, + { + .procname = "sched_rr_timeslice_ms", +- .data = &sched_rr_timeslice, ++ .data = &sysctl_sched_rr_timeslice, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = sched_rr_handler, +diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c +index 1c2dc98ba63d..4e728352af75 100644 +--- a/kernel/time/alarmtimer.c ++++ b/kernel/time/alarmtimer.c +@@ -838,10 +838,10 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags, + } + + restart = ¤t->restart_block; +- restart->fn = alarm_timer_nsleep_restart; + restart->nanosleep.clockid = type; + restart->nanosleep.expires = exp.tv64; + restart->nanosleep.rmtp = rmtp; ++ set_restart_fn(restart, alarm_timer_nsleep_restart); + ret = -ERESTART_RESTARTBLOCK; + + out: +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index 8be1bddf5973..470e97ce0076 100755 +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -1558,10 +1558,10 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, + } + + restart = ¤t->restart_block; +- restart->fn = hrtimer_nanosleep_restart; + restart->nanosleep.clockid = t.timer.base->clockid; + restart->nanosleep.rmtp = rmtp; + restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); ++ set_restart_fn(restart, hrtimer_nanosleep_restart); + + ret = -ERESTART_RESTARTBLOCK; + out: +diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c +index be1cc6124bb4..9294d6922c58 100644 +--- a/kernel/time/posix-cpu-timers.c ++++ b/kernel/time/posix-cpu-timers.c +@@ -1377,10 +1377,10 @@ static int posix_cpu_nsleep(const clockid_t which_clock, int flags, + if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp)) + return -EFAULT; + +- restart_block->fn = posix_cpu_nsleep_restart; + restart_block->nanosleep.clockid = which_clock; + restart_block->nanosleep.rmtp = rmtp; + restart_block->nanosleep.expires = timespec_to_ns(rqtp); ++ set_restart_fn(restart_block, 
posix_cpu_nsleep_restart); + } + return error; + } +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c +index 86e896981da0..ef0440514e60 100644 +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -988,9 +988,8 @@ static int scale64_check_overflow(u64 mult, u64 div, u64 *base) + ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem))) + return -EOVERFLOW; + tmp *= mult; +- rem *= mult; + +- do_div(rem, div); ++ rem = div64_u64(rem * mult, div); + *base = tmp + rem; + return 0; + } +diff --git a/kernel/time/timer.c b/kernel/time/timer.c +index 97da9247cb38..c012154e74ff 100644 +--- a/kernel/time/timer.c ++++ b/kernel/time/timer.c +@@ -42,6 +42,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -514,8 +515,8 @@ static int calc_wheel_index(unsigned long expires, unsigned long clk) + * Force expire obscene large timeouts to expire at the + * capacity limit of the wheel. + */ +- if (expires >= WHEEL_TIMEOUT_CUTOFF) +- expires = WHEEL_TIMEOUT_MAX; ++ if (delta >= WHEEL_TIMEOUT_CUTOFF) ++ expires = clk + WHEEL_TIMEOUT_MAX; + + idx = calc_index(expires, LVL_DEPTH - 1); + } +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c +index 6d3b432a748a..88eb9261c7b5 100644 +--- a/kernel/trace/blktrace.c ++++ b/kernel/trace/blktrace.c +@@ -15,6 +15,9 @@ + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ ++ ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ + #include + #include + #include +@@ -481,6 +484,16 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, + */ + strreplace(buts->name, '/', '_'); + ++ /* ++ * bdev can be NULL, as with scsi-generic, this is a helpful as ++ * we can be. ++ */ ++ if (q->blk_trace) { ++ pr_warn("Concurrent blktraces are not allowed on %s\n", ++ buts->name); ++ return -EBUSY; ++ } ++ + bt = kzalloc(sizeof(*bt), GFP_KERNEL); + if (!bt) + return -ENOMEM; +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c +index 3126dbc29dc7..5be2bfa57535 100644 +--- a/kernel/trace/ftrace.c ++++ b/kernel/trace/ftrace.c +@@ -1635,6 +1635,8 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec) + static struct ftrace_ops * + ftrace_find_tramp_ops_any(struct dyn_ftrace *rec); + static struct ftrace_ops * ++ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude); ++static struct ftrace_ops * + ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops); + + static bool __ftrace_hash_rec_update(struct ftrace_ops *ops, +@@ -1772,7 +1774,7 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops, + * to it. + */ + if (ftrace_rec_count(rec) == 1 && +- ftrace_find_tramp_ops_any(rec)) ++ ftrace_find_tramp_ops_any_other(rec, ops)) + rec->flags |= FTRACE_FL_TRAMP; + else + rec->flags &= ~FTRACE_FL_TRAMP; +@@ -1960,12 +1962,18 @@ static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, + + static void print_ip_ins(const char *fmt, const unsigned char *p) + { ++ char ins[MCOUNT_INSN_SIZE]; + int i; + ++ if (probe_kernel_read(ins, p, MCOUNT_INSN_SIZE)) { ++ printk(KERN_CONT "%s[FAULT] %px\n", fmt, p); ++ return; ++ } ++ + printk(KERN_CONT "%s", fmt); + + for (i = 0; i < MCOUNT_INSN_SIZE; i++) +- printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); ++ printk(KERN_CONT "%s%02x", i ? 
":" : "", ins[i]); + } + + enum ftrace_bug_type ftrace_bug_type; +@@ -2200,6 +2208,24 @@ ftrace_find_tramp_ops_any(struct dyn_ftrace *rec) + return NULL; + } + ++static struct ftrace_ops * ++ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude) ++{ ++ struct ftrace_ops *op; ++ unsigned long ip = rec->ip; ++ ++ do_for_each_ftrace_op(op, ftrace_ops_list) { ++ ++ if (op == op_exclude || !op->trampoline) ++ continue; ++ ++ if (hash_contains_ip(ip, op->func_hash)) ++ return op; ++ } while_for_each_ftrace_op(op); ++ ++ return NULL; ++} ++ + static struct ftrace_ops * + ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, + struct ftrace_ops *op) +@@ -4467,8 +4493,11 @@ int ftrace_regex_release(struct inode *inode, struct file *file) + + parser = &iter->parser; + if (trace_parser_loaded(parser)) { ++ int enable = !(iter->flags & FTRACE_ITER_NOTRACE); ++ + parser->buffer[parser->idx] = 0; +- ftrace_match_records(iter->hash, parser->buffer, parser->idx); ++ ftrace_process_regex(iter->hash, parser->buffer, ++ parser->idx, enable); + } + + trace_parser_put(parser); +@@ -4988,8 +5017,11 @@ static int referenced_filters(struct dyn_ftrace *rec) + int cnt = 0; + + for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { +- if (ops_references_rec(ops, rec)) +- cnt++; ++ if (ops_references_rec(ops, rec)) { ++ cnt++; ++ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) ++ rec->flags |= FTRACE_FL_REGS; ++ } + } + + return cnt; +@@ -5085,8 +5117,8 @@ void ftrace_module_enable(struct module *mod) + if (ftrace_start_up) + cnt += referenced_filters(rec); + +- /* This clears FTRACE_FL_DISABLED */ +- rec->flags = cnt; ++ rec->flags &= ~FTRACE_FL_DISABLED; ++ rec->flags += cnt; + + if (ftrace_start_up && cnt) { + int failed = __ftrace_replace_code(rec, 1); +@@ -5333,17 +5365,15 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, + { + int bit; + +- if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching()) +- return; +- + bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); + if (bit < 0) + return; + + preempt_disable_notrace(); + +- if (!(op->flags & FTRACE_OPS_FL_PER_CPU) || +- !ftrace_function_local_disabled(op)) { ++ if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && ++ (!(op->flags & FTRACE_OPS_FL_PER_CPU) || ++ !ftrace_function_local_disabled(op))) { + op->func(ip, parent_ip, op, regs); + } + +@@ -5787,7 +5817,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) + } + + if (t->ret_stack == NULL) { +- atomic_set(&t->tracing_graph_pause, 0); + atomic_set(&t->trace_overrun, 0); + t->curr_ret_stack = -1; + /* Make sure the tasks see the -1 first: */ +@@ -5999,7 +6028,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); + static void + graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) + { +- atomic_set(&t->tracing_graph_pause, 0); + atomic_set(&t->trace_overrun, 0); + t->ftrace_timestamp = 0; + /* make curr_ret_stack visible before we add the ret_stack */ +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index 2cfe11e1190b..c30a70ab0e7a 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -416,14 +416,16 @@ struct rb_event_info { + + /* + * Used for which event context the event is in. +- * NMI = 0 +- * IRQ = 1 +- * SOFTIRQ = 2 +- * NORMAL = 3 ++ * TRANSITION = 0 ++ * NMI = 1 ++ * IRQ = 2 ++ * SOFTIRQ = 3 ++ * NORMAL = 4 + * + * See trace_recursive_lock() comment below for more details. 
+ */ + enum { ++ RB_CTX_TRANSITION, + RB_CTX_NMI, + RB_CTX_IRQ, + RB_CTX_SOFTIRQ, +@@ -1650,18 +1652,18 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, + { + struct ring_buffer_per_cpu *cpu_buffer; + unsigned long nr_pages; +- int cpu, err = 0; ++ int cpu, err; + + /* + * Always succeed at resizing a non-existent buffer: + */ + if (!buffer) +- return size; ++ return 0; + + /* Make sure the requested buffer exists */ + if (cpu_id != RING_BUFFER_ALL_CPUS && + !cpumask_test_cpu(cpu_id, buffer->cpumask)) +- return size; ++ return 0; + + nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); + +@@ -1801,7 +1803,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, + } + + mutex_unlock(&buffer->mutex); +- return size; ++ return 0; + + out_err: + for_each_buffer_cpu(buffer, cpu) { +@@ -2579,10 +2581,10 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) + * a bit of overhead in something as critical as function tracing, + * we use a bitmask trick. + * +- * bit 0 = NMI context +- * bit 1 = IRQ context +- * bit 2 = SoftIRQ context +- * bit 3 = normal context. ++ * bit 1 = NMI context ++ * bit 2 = IRQ context ++ * bit 3 = SoftIRQ context ++ * bit 4 = normal context. + * + * This works because this is the order of contexts that can + * preempt other contexts. A SoftIRQ never preempts an IRQ +@@ -2605,6 +2607,30 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) + * The least significant bit can be cleared this way, and it + * just so happens that it is the same bit corresponding to + * the current context. ++ * ++ * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit ++ * is set when a recursion is detected at the current context, and if ++ * the TRANSITION bit is already set, it will fail the recursion. ++ * This is needed because there's a lag between the changing of ++ * interrupt context and updating the preempt count. In this case, ++ * a false positive will be found. To handle this, one extra recursion ++ * is allowed, and this is done by the TRANSITION bit. If the TRANSITION ++ * bit is already set, then it is considered a recursion and the function ++ * ends. Otherwise, the TRANSITION bit is set, and that bit is returned. ++ * ++ * On the trace_recursive_unlock(), the TRANSITION bit will be the first ++ * to be cleared. Even if it wasn't the context that set it. That is, ++ * if an interrupt comes in while NORMAL bit is set and the ring buffer ++ * is called before preempt_count() is updated, since the check will ++ * be on the NORMAL bit, the TRANSITION bit will then be set. If an ++ * NMI then comes in, it will set the NMI bit, but when the NMI code ++ * does the trace_recursive_unlock() it will clear the TRANSTION bit ++ * and leave the NMI bit set. But this is fine, because the interrupt ++ * code that set the TRANSITION bit will then clear the NMI bit when it ++ * calls trace_recursive_unlock(). If another NMI comes in, it will ++ * set the TRANSITION bit and continue. ++ * ++ * Note: The TRANSITION bit only handles a single transition between context. + */ + + static __always_inline int +@@ -2623,8 +2649,16 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) + } else + bit = RB_CTX_NORMAL; + +- if (unlikely(val & (1 << bit))) +- return 1; ++ if (unlikely(val & (1 << bit))) { ++ /* ++ * It is possible that this was called by transitioning ++ * between interrupt context, and preempt_count() has not ++ * been updated yet. In this case, use the TRANSITION bit. 
++ */ ++ bit = RB_CTX_TRANSITION; ++ if (val & (1 << bit)) ++ return 1; ++ } + + val |= (1 << bit); + cpu_buffer->current_context = val; +@@ -3047,10 +3081,30 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) + if (unlikely(!head)) + return true; + +- return reader->read == rb_page_commit(reader) && +- (commit == reader || +- (commit == head && +- head->read == rb_page_commit(commit))); ++ /* Reader should exhaust content in reader page */ ++ if (reader->read != rb_page_commit(reader)) ++ return false; ++ ++ /* ++ * If writers are committing on the reader page, knowing all ++ * committed content has been read, the ring buffer is empty. ++ */ ++ if (commit == reader) ++ return true; ++ ++ /* ++ * If writers are committing on a page other than reader page ++ * and head page, there should always be content to read. ++ */ ++ if (commit != head) ++ return false; ++ ++ /* ++ * Writers are committing on the head page, we just need ++ * to care about there're committed data, and the reader will ++ * swap reader page with head page when it is to read data. ++ */ ++ return rb_page_commit(commit) == 0; + } + + /** +@@ -4255,6 +4309,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) + + if (!cpumask_test_cpu(cpu, buffer->cpumask)) + return; ++ /* prevent another thread from changing buffer sizes */ ++ mutex_lock(&buffer->mutex); + + atomic_inc(&buffer->resize_disabled); + atomic_inc(&cpu_buffer->record_disabled); +@@ -4278,6 +4334,8 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) + + atomic_dec(&cpu_buffer->record_disabled); + atomic_dec(&buffer->resize_disabled); ++ ++ mutex_unlock(&buffer->mutex); + } + EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); + +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index aa1150d79f38..33f5755f6d15 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -1619,9 +1619,6 @@ struct saved_cmdlines_buffer { + }; + static struct saved_cmdlines_buffer *savedcmd; + +-/* temporary disable recording */ +-static atomic_t trace_record_cmdline_disabled __read_mostly; +- + static inline char *get_saved_cmdlines(int idx) + { + return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN]; +@@ -1823,10 +1820,13 @@ void trace_stop_cmdline_recording(void); + + static int trace_save_cmdline(struct task_struct *tsk) + { +- unsigned pid, idx; ++ unsigned tpid, idx; + +- if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT)) +- return 0; ++ /* treat recording of idle task as a success */ ++ if (!tsk->pid) ++ return 1; ++ ++ tpid = tsk->pid & (PID_MAX_DEFAULT - 1); + + preempt_disable(); + /* +@@ -1840,26 +1840,15 @@ static int trace_save_cmdline(struct task_struct *tsk) + return 0; + } + +- idx = savedcmd->map_pid_to_cmdline[tsk->pid]; ++ idx = savedcmd->map_pid_to_cmdline[tpid]; + if (idx == NO_CMDLINE_MAP) { + idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num; + +- /* +- * Check whether the cmdline buffer at idx has a pid +- * mapped. We are going to overwrite that entry so we +- * need to clear the map_pid_to_cmdline. Otherwise we +- * would read the new comm for the old pid. 
+- */ +- pid = savedcmd->map_cmdline_to_pid[idx]; +- if (pid != NO_CMDLINE_MAP) +- savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP; +- +- savedcmd->map_cmdline_to_pid[idx] = tsk->pid; +- savedcmd->map_pid_to_cmdline[tsk->pid] = idx; +- ++ savedcmd->map_pid_to_cmdline[tpid] = idx; + savedcmd->cmdline_idx = idx; + } + ++ savedcmd->map_cmdline_to_pid[idx] = tsk->pid; + set_cmdline(idx, tsk->comm); + savedcmd->map_cmdline_to_tgid[idx] = tsk->tgid; + arch_spin_unlock(&trace_cmdline_lock); +@@ -1871,6 +1860,7 @@ static int trace_save_cmdline(struct task_struct *tsk) + static void __trace_find_cmdline(int pid, char comm[]) + { + unsigned map; ++ int tpid; + + if (!pid) { + strcpy(comm, ""); +@@ -1882,16 +1872,16 @@ static void __trace_find_cmdline(int pid, char comm[]) + return; + } + +- if (pid > PID_MAX_DEFAULT) { +- strcpy(comm, "<...>"); +- return; ++ tpid = pid & (PID_MAX_DEFAULT - 1); ++ map = savedcmd->map_pid_to_cmdline[tpid]; ++ if (map != NO_CMDLINE_MAP) { ++ tpid = savedcmd->map_cmdline_to_pid[map]; ++ if (tpid == pid) { ++ strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN); ++ return; ++ } + } +- +- map = savedcmd->map_pid_to_cmdline[pid]; +- if (map != NO_CMDLINE_MAP) +- strcpy(comm, get_saved_cmdlines(map)); +- else +- strcpy(comm, "<...>"); ++ strcpy(comm, "<...>"); + } + + void trace_find_cmdline(int pid, char comm[]) +@@ -1936,9 +1926,6 @@ int trace_find_tgid(int pid) + + void tracing_record_cmdline(struct task_struct *tsk) + { +- if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on()) +- return; +- + if (!__this_cpu_read(trace_cmdline_save)) + return; + +@@ -2137,7 +2124,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, + (entry = this_cpu_read(trace_buffered_event))) { + /* Try to use the per cpu buffer first */ + val = this_cpu_inc_return(trace_buffered_event_cnt); +- if (val == 1) { ++ if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) { + trace_event_setup(entry, type, flags, pc); + entry->array[0] = len; + return entry; +@@ -2181,7 +2168,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr, + * two. They are that meaningful. + */ + ftrace_trace_stack(tr, buffer, flags, regs ? 
0 : 4, pc, regs); +- ftrace_trace_userstack(buffer, flags, pc); ++ ftrace_trace_userstack(tr, buffer, flags, pc); + } + + void +@@ -2272,7 +2259,8 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer, + size *= sizeof(unsigned long); + + event = trace_buffer_lock_reserve(buffer, TRACE_STACK, +- sizeof(*entry) + size, flags, pc); ++ (sizeof(*entry) - sizeof(entry->caller)) + size, ++ flags, pc); + if (!event) + goto out; + entry = ring_buffer_event_data(event); +@@ -2346,14 +2334,15 @@ void trace_dump_stack(int skip) + static DEFINE_PER_CPU(int, user_stack_count); + + void +-ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) ++ftrace_trace_userstack(struct trace_array *tr, ++ struct ring_buffer *buffer, unsigned long flags, int pc) + { + struct trace_event_call *call = &event_user_stack; + struct ring_buffer_event *event; + struct userstack_entry *entry; + struct stack_trace trace; + +- if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE)) ++ if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE)) + return; + + /* +@@ -2429,7 +2418,7 @@ static char *get_trace_buf(void) + + /* Interrupts must see nesting incremented before we use the buffer */ + barrier(); +- return &buffer->buffer[buffer->nesting][0]; ++ return &buffer->buffer[buffer->nesting - 1][0]; + } + + static void put_trace_buf(void) +@@ -2648,6 +2637,9 @@ int trace_array_printk(struct trace_array *tr, + if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) + return 0; + ++ if (!tr) ++ return -ENOENT; ++ + va_start(ap, fmt); + ret = trace_array_vprintk(tr, ip, fmt, ap); + va_end(ap); +@@ -2878,9 +2870,6 @@ static void *s_start(struct seq_file *m, loff_t *pos) + return ERR_PTR(-EBUSY); + #endif + +- if (!iter->snapshot) +- atomic_inc(&trace_record_cmdline_disabled); +- + if (*pos != iter->pos) { + iter->ent = NULL; + iter->cpu = 0; +@@ -2923,9 +2912,6 @@ static void s_stop(struct seq_file *m, void *p) + return; + #endif + +- if (!iter->snapshot) +- atomic_dec(&trace_record_cmdline_disabled); +- + trace_access_unlock(iter->cpu_file); + trace_event_read_unlock(); + } +@@ -7852,7 +7838,7 @@ __init static int tracer_alloc_buffers(void) + goto out_free_buffer_mask; + + /* Only allocate trace_printk buffers if a trace_printk exists */ +- if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt) ++ if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt) + /* Must be called before global_trace.buffer is allocated */ + trace_printk_init_buffers(); + +diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h +index 5078a317e284..994f47fdd9cf 100644 +--- a/kernel/trace/trace.h ++++ b/kernel/trace/trace.h +@@ -491,6 +491,12 @@ enum { + * can only be modified by current, we can reuse trace_recursion. + */ + TRACE_IRQ_BIT, ++ ++ /* ++ * When transitioning between context, the preempt_count() may ++ * not be correct. Allow for a single recursion to cover this case. ++ */ ++ TRACE_TRANSITION_BIT, + }; + + #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0) +@@ -535,14 +541,27 @@ static __always_inline int trace_test_and_set_recursion(int start, int max) + return 0; + + bit = trace_get_context_bit() + start; +- if (unlikely(val & (1 << bit))) +- return -1; ++ if (unlikely(val & (1 << bit))) { ++ /* ++ * It could be that preempt_count has not been updated during ++ * a switch between contexts. Allow for a single recursion. 
++ */ ++ bit = TRACE_TRANSITION_BIT; ++ if (trace_recursion_test(bit)) ++ return -1; ++ trace_recursion_set(bit); ++ barrier(); ++ return bit + 1; ++ } ++ ++ /* Normal check passed, clear the transition to allow it again */ ++ trace_recursion_clear(TRACE_TRANSITION_BIT); + + val |= 1 << bit; + current->trace_recursion = val; + barrier(); + +- return bit; ++ return bit + 1; + } + + static __always_inline void trace_clear_recursion(int bit) +@@ -552,6 +571,7 @@ static __always_inline void trace_clear_recursion(int bit) + if (!bit) + return; + ++ bit--; + bit = 1 << bit; + val &= ~bit; + +@@ -669,13 +689,15 @@ void update_max_tr_single(struct trace_array *tr, + #endif /* CONFIG_TRACER_MAX_TRACE */ + + #ifdef CONFIG_STACKTRACE +-void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, ++void ftrace_trace_userstack(struct trace_array *tr, ++ struct ring_buffer *buffer, unsigned long flags, + int pc); + + void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, + int pc); + #else +-static inline void ftrace_trace_userstack(struct ring_buffer *buffer, ++static inline void ftrace_trace_userstack(struct trace_array *tr, ++ struct ring_buffer *buffer, + unsigned long flags, int pc) + { + } +diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c +index 0f06532a755b..b70233a9563f 100644 +--- a/kernel/trace/trace_clock.c ++++ b/kernel/trace/trace_clock.c +@@ -93,33 +93,49 @@ u64 notrace trace_clock_global(void) + { + unsigned long flags; + int this_cpu; +- u64 now; ++ u64 now, prev_time; + + local_irq_save(flags); + + this_cpu = raw_smp_processor_id(); +- now = sched_clock_cpu(this_cpu); ++ + /* +- * If in an NMI context then dont risk lockups and return the +- * cpu_clock() time: ++ * The global clock "guarantees" that the events are ordered ++ * between CPUs. But if two events on two different CPUS call ++ * trace_clock_global at roughly the same time, it really does ++ * not matter which one gets the earlier time. Just make sure ++ * that the same CPU will always show a monotonic clock. ++ * ++ * Use a read memory barrier to get the latest written ++ * time that was recorded. + */ +- if (unlikely(in_nmi())) +- goto out; ++ smp_rmb(); ++ prev_time = READ_ONCE(trace_clock_struct.prev_time); ++ now = sched_clock_cpu(this_cpu); + +- arch_spin_lock(&trace_clock_struct.lock); ++ /* Make sure that now is always greater than or equal to prev_time */ ++ if ((s64)(now - prev_time) < 0) ++ now = prev_time; + + /* +- * TODO: if this happens often then maybe we should reset +- * my_scd->clock to prev_time+1, to make sure +- * we start ticking with the local clock from now on? ++ * If in an NMI context then dont risk lockups and simply return ++ * the current time. 
+ */ +- if ((s64)(now - trace_clock_struct.prev_time) < 0) +- now = trace_clock_struct.prev_time + 1; ++ if (unlikely(in_nmi())) ++ goto out; + +- trace_clock_struct.prev_time = now; ++ /* Tracing can cause strange recursion, always use a try lock */ ++ if (arch_spin_trylock(&trace_clock_struct.lock)) { ++ /* Reread prev_time in case it was already updated */ ++ prev_time = READ_ONCE(trace_clock_struct.prev_time); ++ if ((s64)(now - prev_time) < 0) ++ now = prev_time; + +- arch_spin_unlock(&trace_clock_struct.lock); ++ trace_clock_struct.prev_time = now; + ++ /* The unlock acts as the wmb for the above rmb */ ++ arch_spin_unlock(&trace_clock_struct.lock); ++ } + out: + local_irq_restore(flags); + +diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h +index d1cc37e78f99..1430f6bbb1a0 100644 +--- a/kernel/trace/trace_entries.h ++++ b/kernel/trace/trace_entries.h +@@ -178,7 +178,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry, + + F_STRUCT( + __field( int, size ) +- __dynamic_array(unsigned long, caller ) ++ __array( unsigned long, caller, FTRACE_STACK_ENTRIES ) + ), + + F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n" +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c +index ffa1a0b6509e..ffd570ae9830 100644 +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -791,6 +791,8 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) + char *event = NULL, *sub = NULL, *match; + int ret; + ++ if (!tr) ++ return -ENOENT; + /* + * The buf format can be : + * *: means any event by that name. +@@ -1104,7 +1106,8 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, + mutex_lock(&event_mutex); + list_for_each_entry(file, &tr->events, list) { + call = file->event_call; +- if (!trace_event_name(call) || !call->class || !call->class->reg) ++ if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) || ++ !trace_event_name(call) || !call->class || !call->class->reg) + continue; + + if (system && strcmp(call->class->system, system->name) != 0) +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c +index 766e5ccad60a..57a9341c67e4 100644 +--- a/kernel/trace/trace_events_hist.c ++++ b/kernel/trace/trace_events_hist.c +@@ -374,7 +374,9 @@ static struct hist_field *create_hist_field(struct ftrace_event_field *field, + if (WARN_ON_ONCE(!field)) + goto out; + +- if (is_string_field(field)) { ++ /* Pointers to strings are just pointers and dangerous to dereference */ ++ if (is_string_field(field) && ++ (field->filter_type != FILTER_PTR_STRING)) { + flags |= HIST_FIELD_FL_STRING; + + if (field->filter_type == FILTER_STATIC_STRING) +@@ -863,8 +865,6 @@ static inline void add_to_key(char *compound_key, void *key, + field = key_field->field; + if (field->filter_type == FILTER_DYN_STRING) + size = *(u32 *)(rec + field->offset) >> 16; +- else if (field->filter_type == FILTER_PTR_STRING) +- size = strlen(key); + else if (field->filter_type == FILTER_STATIC_STRING) + size = field->size; + +diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c +index c9ca2ed50c0e..a371c7def875 100644 +--- a/kernel/trace/trace_events_trigger.c ++++ b/kernel/trace/trace_events_trigger.c +@@ -222,11 +222,17 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file) + + static int trigger_process_regex(struct trace_event_file *file, char *buff) + { +- char *command, *next = buff; ++ char *command, *next; + struct event_command *p; + int ret = -EINVAL; + ++ 
next = buff = skip_spaces(buff); + command = strsep(&next, ": \t"); ++ if (next) { ++ next = skip_spaces(next); ++ if (!*next) ++ next = NULL; ++ } + command = (command[0] != '!') ? command : command + 1; + + mutex_lock(&trigger_cmd_mutex); +@@ -629,8 +635,14 @@ event_trigger_callback(struct event_command *cmd_ops, + int ret; + + /* separate the trigger from the filter (t:n [if filter]) */ +- if (param && isdigit(param[0])) ++ if (param && isdigit(param[0])) { + trigger = strsep(¶m, " \t"); ++ if (param) { ++ param = skip_spaces(param); ++ if (!*param) ++ param = NULL; ++ } ++ } + + trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger); + +@@ -1335,6 +1347,11 @@ int event_enable_trigger_func(struct event_command *cmd_ops, + trigger = strsep(¶m, " \t"); + if (!trigger) + return -EINVAL; ++ if (param) { ++ param = skip_spaces(param); ++ if (!*param) ++ param = NULL; ++ } + + system = strsep(&trigger, ":"); + if (!trigger) +diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c +index 5fe23f0ee7db..d1e007c72923 100644 +--- a/kernel/trace/trace_hwlat.c ++++ b/kernel/trace/trace_hwlat.c +@@ -268,24 +268,14 @@ static int get_sample(void) + static struct cpumask save_cpumask; + static bool disable_migrate; + +-static void move_to_next_cpu(bool initmask) ++static void move_to_next_cpu(void) + { +- static struct cpumask *current_mask; ++ struct cpumask *current_mask = &save_cpumask; ++ struct trace_array *tr = hwlat_trace; + int next_cpu; + + if (disable_migrate) + return; +- +- /* Just pick the first CPU on first iteration */ +- if (initmask) { +- current_mask = &save_cpumask; +- get_online_cpus(); +- cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); +- put_online_cpus(); +- next_cpu = cpumask_first(current_mask); +- goto set_affinity; +- } +- + /* + * If for some reason the user modifies the CPU affinity + * of this thread, than stop migrating for the duration +@@ -295,14 +285,13 @@ static void move_to_next_cpu(bool initmask) + goto disable; + + get_online_cpus(); +- cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); ++ cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); + next_cpu = cpumask_next(smp_processor_id(), current_mask); + put_online_cpus(); + + if (next_cpu >= nr_cpu_ids) + next_cpu = cpumask_first(current_mask); + +- set_affinity: + if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! 
*/ + goto disable; + +@@ -332,12 +321,10 @@ static void move_to_next_cpu(bool initmask) + static int kthread_fn(void *data) + { + u64 interval; +- bool initmask = true; + + while (!kthread_should_stop()) { + +- move_to_next_cpu(initmask); +- initmask = false; ++ move_to_next_cpu(); + + local_irq_disable(); + get_sample(); +@@ -368,13 +355,27 @@ static int kthread_fn(void *data) + */ + static int start_kthread(struct trace_array *tr) + { ++ struct cpumask *current_mask = &save_cpumask; + struct task_struct *kthread; ++ int next_cpu; ++ ++ /* Just pick the first CPU on first iteration */ ++ current_mask = &save_cpumask; ++ get_online_cpus(); ++ cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); ++ put_online_cpus(); ++ next_cpu = cpumask_first(current_mask); + + kthread = kthread_create(kthread_fn, NULL, "hwlatd"); + if (IS_ERR(kthread)) { + pr_err(BANNER "could not start sampling thread\n"); + return -ENOMEM; + } ++ ++ cpumask_clear(current_mask); ++ cpumask_set_cpu(next_cpu, current_mask); ++ sched_setaffinity(kthread->pid, current_mask); ++ + hwlat_kthread = kthread; + wake_up_process(kthread); + +diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c +index ca70d11b8aa7..f444f57f1338 100644 +--- a/kernel/trace/trace_selftest.c ++++ b/kernel/trace/trace_selftest.c +@@ -490,8 +490,13 @@ trace_selftest_function_recursion(void) + unregister_ftrace_function(&test_rec_probe); + + ret = -1; +- if (trace_selftest_recursion_cnt != 1) { +- pr_cont("*callback not called once (%d)* ", ++ /* ++ * Recursion allows for transitions between context, ++ * and may call the callback twice. ++ */ ++ if (trace_selftest_recursion_cnt != 1 && ++ trace_selftest_recursion_cnt != 2) { ++ pr_cont("*callback not called once (or twice) (%d)* ", + trace_selftest_recursion_cnt); + goto out; + } +diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c +index c8e7cc0e6ff6..88ae873ee6cf 100644 +--- a/kernel/tracepoint.c ++++ b/kernel/tracepoint.c +@@ -59,6 +59,12 @@ struct tp_probes { + struct tracepoint_func probes[0]; + }; + ++/* Called in removal of a func but failed to allocate a new tp_funcs */ ++static void tp_stub_func(void) ++{ ++ return; ++} ++ + static inline void *allocate_probes(int count) + { + struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func) +@@ -97,6 +103,7 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func, + { + struct tracepoint_func *old, *new; + int nr_probes = 0; ++ int stub_funcs = 0; + int pos = -1; + + if (WARN_ON(!tp_func->func)) +@@ -113,14 +120,34 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func, + if (old[nr_probes].func == tp_func->func && + old[nr_probes].data == tp_func->data) + return ERR_PTR(-EEXIST); ++ if (old[nr_probes].func == tp_stub_func) ++ stub_funcs++; + } + } +- /* + 2 : one for new probe, one for NULL func */ +- new = allocate_probes(nr_probes + 2); ++ /* + 2 : one for new probe, one for NULL func - stub functions */ ++ new = allocate_probes(nr_probes + 2 - stub_funcs); + if (new == NULL) + return ERR_PTR(-ENOMEM); + if (old) { +- if (pos < 0) { ++ if (stub_funcs) { ++ /* Need to copy one at a time to remove stubs */ ++ int probes = 0; ++ ++ pos = -1; ++ for (nr_probes = 0; old[nr_probes].func; nr_probes++) { ++ if (old[nr_probes].func == tp_stub_func) ++ continue; ++ if (pos < 0 && old[nr_probes].prio < prio) ++ pos = probes++; ++ new[probes++] = old[nr_probes]; ++ } ++ nr_probes = probes; ++ if (pos < 0) ++ pos = probes; ++ else ++ nr_probes--; /* Account for insertion */ 
++ ++ } else if (pos < 0) { + pos = nr_probes; + memcpy(new, old, nr_probes * sizeof(struct tracepoint_func)); + } else { +@@ -154,8 +181,9 @@ static void *func_remove(struct tracepoint_func **funcs, + /* (N -> M), (N > 1, M >= 0) probes */ + if (tp_func->func) { + for (nr_probes = 0; old[nr_probes].func; nr_probes++) { +- if (old[nr_probes].func == tp_func->func && +- old[nr_probes].data == tp_func->data) ++ if ((old[nr_probes].func == tp_func->func && ++ old[nr_probes].data == tp_func->data) || ++ old[nr_probes].func == tp_stub_func) + nr_del++; + } + } +@@ -174,14 +202,32 @@ static void *func_remove(struct tracepoint_func **funcs, + /* N -> M, (N > 1, M > 0) */ + /* + 1 for NULL */ + new = allocate_probes(nr_probes - nr_del + 1); +- if (new == NULL) +- return ERR_PTR(-ENOMEM); +- for (i = 0; old[i].func; i++) +- if (old[i].func != tp_func->func +- || old[i].data != tp_func->data) +- new[j++] = old[i]; +- new[nr_probes - nr_del].func = NULL; +- *funcs = new; ++ if (new) { ++ for (i = 0; old[i].func; i++) ++ if ((old[i].func != tp_func->func ++ || old[i].data != tp_func->data) ++ && old[i].func != tp_stub_func) ++ new[j++] = old[i]; ++ new[nr_probes - nr_del].func = NULL; ++ *funcs = new; ++ } else { ++ /* ++ * Failed to allocate, replace the old function ++ * with calls to tp_stub_func. ++ */ ++ for (i = 0; old[i].func; i++) ++ if (old[i].func == tp_func->func && ++ old[i].data == tp_func->data) { ++ old[i].func = tp_stub_func; ++ /* Set the prio to the next event. */ ++ if (old[i + 1].func) ++ old[i].prio = ++ old[i + 1].prio; ++ else ++ old[i].prio = -1; ++ } ++ *funcs = old; ++ } + } + debug_print_probes(*funcs); + return old; +@@ -234,10 +280,12 @@ static int tracepoint_remove_func(struct tracepoint *tp, + tp_funcs = rcu_dereference_protected(tp->funcs, + lockdep_is_held(&tracepoints_mutex)); + old = func_remove(&tp_funcs, func); +- if (IS_ERR(old)) { +- WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM); ++ if (WARN_ON_ONCE(IS_ERR(old))) + return PTR_ERR(old); +- } ++ ++ if (tp_funcs == old) ++ /* Failed allocating new tp_funcs, replaced func with stub */ ++ return 0; + + if (!tp_funcs) { + /* Removed last function */ +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index ac381bff69df..134200a12196 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -51,6 +51,7 @@ + #include + #include + #include ++#include + + #include "workqueue_internal.h" + +@@ -1407,7 +1408,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, + */ + WARN_ON_ONCE(!irqs_disabled()); + +- debug_work_activate(work); + + /* if draining, only works from the same workqueue are allowed */ + if (unlikely(wq->flags & __WQ_DRAINING) && +@@ -1490,6 +1490,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, + worklist = &pwq->delayed_works; + } + ++ debug_work_activate(work); + insert_work(pwq, work, worklist, work_flags); + + spin_unlock(&pwq->pool->lock); +@@ -3440,15 +3441,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work) + unbound_release_work); + struct workqueue_struct *wq = pwq->wq; + struct worker_pool *pool = pwq->pool; +- bool is_last; ++ bool is_last = false; + +- if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND))) +- return; ++ /* ++ * when @pwq is not linked, it doesn't hold any reference to the ++ * @wq, and @wq is invalid to access. 
++ */ ++ if (!list_empty(&pwq->pwqs_node)) { ++ if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND))) ++ return; + +- mutex_lock(&wq->mutex); +- list_del_rcu(&pwq->pwqs_node); +- is_last = list_empty(&wq->pwqs); +- mutex_unlock(&wq->mutex); ++ mutex_lock(&wq->mutex); ++ list_del_rcu(&pwq->pwqs_node); ++ is_last = list_empty(&wq->pwqs); ++ mutex_unlock(&wq->mutex); ++ } + + mutex_lock(&wq_pool_mutex); + put_unbound_pool(pool); +@@ -3494,17 +3501,24 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq) + * is updated and visible. + */ + if (!freezable || !workqueue_freezing) { ++ bool kick = false; ++ + pwq->max_active = wq->saved_max_active; + + while (!list_empty(&pwq->delayed_works) && +- pwq->nr_active < pwq->max_active) ++ pwq->nr_active < pwq->max_active) { + pwq_activate_first_delayed(pwq); ++ kick = true; ++ } + + /* + * Need to kick a worker after thawed or an unbound wq's +- * max_active is bumped. It's a slow path. Do it always. ++ * max_active is bumped. In realtime scenarios, always kicking a ++ * worker will cause interference on the isolated cpu cores, so ++ * let's kick iff work items were activated. + */ +- wake_up_worker(pwq->pool); ++ if (kick) ++ wake_up_worker(pwq->pool); + } else { + pwq->max_active = 0; + } +@@ -5423,6 +5437,7 @@ static void wq_watchdog_timer_fn(unsigned long data) + { + unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ; + bool lockup_detected = false; ++ unsigned long now = jiffies; + struct worker_pool *pool; + int pi; + +@@ -5437,6 +5452,12 @@ static void wq_watchdog_timer_fn(unsigned long data) + if (list_empty(&pool->worklist)) + continue; + ++ /* ++ * If a virtual machine is stopped by the host it can look to ++ * the watchdog like a stall. ++ */ ++ kvm_check_and_clear_guest_paused(); ++ + /* get the latest of pool and touched timestamps */ + pool_ts = READ_ONCE(pool->watchdog_ts); + touched = READ_ONCE(wq_watchdog_touched); +@@ -5455,12 +5476,12 @@ static void wq_watchdog_timer_fn(unsigned long data) + } + + /* did we stall? 
*/ +- if (time_after(jiffies, ts + thresh)) { ++ if (time_after(now, ts + thresh)) { + lockup_detected = true; + pr_emerg("BUG: workqueue lockup - pool"); + pr_cont_pool_info(pool); + pr_cont(" stuck for %us!\n", +- jiffies_to_msecs(jiffies - pool_ts) / 1000); ++ jiffies_to_msecs(now - pool_ts) / 1000); + } + } + +diff --git a/lib/crc32.c b/lib/crc32.c +index 7fbd1a112b9d..0d450462b0bd 100644 +--- a/lib/crc32.c ++++ b/lib/crc32.c +@@ -327,7 +327,7 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p, + return crc; + } + +-#if CRC_LE_BITS == 1 ++#if CRC_BE_BITS == 1 + u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) + { + return crc32_be_generic(crc, p, len, NULL, CRCPOLY_BE); +diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c +index 036fc882cd72..f1449244fdd4 100644 +--- a/lib/decompress_unlz4.c ++++ b/lib/decompress_unlz4.c +@@ -115,6 +115,9 @@ STATIC inline int INIT unlz4(u8 *input, long in_len, + error("data corrupted"); + goto exit_2; + } ++ } else if (size < 4) { ++ /* empty or end-of-file */ ++ goto exit_3; + } + + chunksize = get_unaligned_le32(inp); +@@ -128,6 +131,10 @@ STATIC inline int INIT unlz4(u8 *input, long in_len, + continue; + } + ++ if (!fill && chunksize == 0) { ++ /* empty or end-of-file */ ++ goto exit_3; ++ } + + if (posp) + *posp += 4; +@@ -184,6 +191,7 @@ STATIC inline int INIT unlz4(u8 *input, long in_len, + } + } + ++exit_3: + ret = 0; + exit_2: + if (!input) +diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c +index c7c96bc7654a..91c451e0f474 100644 +--- a/lib/dynamic_debug.c ++++ b/lib/dynamic_debug.c +@@ -85,22 +85,22 @@ static struct { unsigned flag:8; char opt_char; } opt_array[] = { + { _DPRINTK_FLAGS_NONE, '_' }, + }; + ++struct flagsbuf { char buf[ARRAY_SIZE(opt_array)+1]; }; ++ + /* format a string into buf[] which describes the _ddebug's flags */ +-static char *ddebug_describe_flags(struct _ddebug *dp, char *buf, +- size_t maxlen) ++static char *ddebug_describe_flags(unsigned int flags, struct flagsbuf *fb) + { +- char *p = buf; ++ char *p = fb->buf; + int i; + +- BUG_ON(maxlen < 6); + for (i = 0; i < ARRAY_SIZE(opt_array); ++i) +- if (dp->flags & opt_array[i].flag) ++ if (flags & opt_array[i].flag) + *p++ = opt_array[i].opt_char; +- if (p == buf) ++ if (p == fb->buf) + *p++ = '_'; + *p = '\0'; + +- return buf; ++ return fb->buf; + } + + #define vpr_info(fmt, ...) 
\ +@@ -142,7 +142,7 @@ static int ddebug_change(const struct ddebug_query *query, + struct ddebug_table *dt; + unsigned int newflags; + unsigned int nfound = 0; +- char flagbuf[10]; ++ struct flagsbuf fbuf; + + /* search for matching ddebugs */ + mutex_lock(&ddebug_lock); +@@ -199,8 +199,7 @@ static int ddebug_change(const struct ddebug_query *query, + vpr_info("changed %s:%d [%s]%s =%s\n", + trim_prefix(dp->filename), dp->lineno, + dt->mod_name, dp->function, +- ddebug_describe_flags(dp, flagbuf, +- sizeof(flagbuf))); ++ ddebug_describe_flags(dp->flags, &fbuf)); + } + } + mutex_unlock(&ddebug_lock); +@@ -779,7 +778,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p) + { + struct ddebug_iter *iter = m->private; + struct _ddebug *dp = p; +- char flagsbuf[10]; ++ struct flagsbuf flags; + + vpr_info("called m=%p p=%p\n", m, p); + +@@ -792,7 +791,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p) + seq_printf(m, "%s:%u [%s]%s =%s \"", + trim_prefix(dp->filename), dp->lineno, + iter->table->mod_name, dp->function, +- ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf))); ++ ddebug_describe_flags(dp->flags, &flags)); + seq_escape(m, dp->format, "\t\r\n\""); + seq_puts(m, "\"\n"); + +diff --git a/lib/fonts/font_10x18.c b/lib/fonts/font_10x18.c +index 6be72bb218ee..0ea39bfdc3cf 100644 +--- a/lib/fonts/font_10x18.c ++++ b/lib/fonts/font_10x18.c +@@ -7,8 +7,8 @@ + + #define FONTDATAMAX 9216 + +-static const unsigned char fontdata_10x18[FONTDATAMAX] = { +- ++static const struct font_data fontdata_10x18 = { ++ { 0, 0, FONTDATAMAX, 0 }, { + /* 0 0x00 '^@' */ + 0x00, 0x00, /* 0000000000 */ + 0x00, 0x00, /* 0000000000 */ +@@ -5128,8 +5128,7 @@ static const unsigned char fontdata_10x18[FONTDATAMAX] = { + 0x00, 0x00, /* 0000000000 */ + 0x00, 0x00, /* 0000000000 */ + 0x00, 0x00, /* 0000000000 */ +- +-}; ++} }; + + + const struct font_desc font_10x18 = { +@@ -5137,7 +5136,7 @@ const struct font_desc font_10x18 = { + .name = "10x18", + .width = 10, + .height = 18, +- .data = fontdata_10x18, ++ .data = fontdata_10x18.data, + #ifdef __sparc__ + .pref = 5, + #else +diff --git a/lib/fonts/font_6x10.c b/lib/fonts/font_6x10.c +index b20620904d31..ec243d7d2e0e 100644 +--- a/lib/fonts/font_6x10.c ++++ b/lib/fonts/font_6x10.c +@@ -1,7 +1,9 @@ + #include + +-static const unsigned char fontdata_6x10[] = { ++#define FONTDATAMAX 2560 + ++static const struct font_data fontdata_6x10 = { ++ { 0, 0, FONTDATAMAX, 0 }, { + /* 0 0x00 '^@' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ +@@ -3073,14 +3075,13 @@ static const unsigned char fontdata_6x10[] = { + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ +- +-}; ++} }; + + const struct font_desc font_6x10 = { + .idx = FONT6x10_IDX, + .name = "6x10", + .width = 6, + .height = 10, +- .data = fontdata_6x10, ++ .data = fontdata_6x10.data, + .pref = 0, + }; +diff --git a/lib/fonts/font_6x11.c b/lib/fonts/font_6x11.c +index 46e86e67aa6a..0010e213fbe2 100644 +--- a/lib/fonts/font_6x11.c ++++ b/lib/fonts/font_6x11.c +@@ -8,8 +8,8 @@ + + #define FONTDATAMAX (11*256) + +-static const unsigned char fontdata_6x11[FONTDATAMAX] = { +- ++static const struct font_data fontdata_6x11 = { ++ { 0, 0, FONTDATAMAX, 0 }, { + /* 0 0x00 '^@' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ +@@ -3337,8 +3337,7 @@ static const unsigned char fontdata_6x11[FONTDATAMAX] = { + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ +- +-}; ++} }; + + + const struct font_desc font_vga_6x11 = { +@@ -3346,7 +3345,7 @@ const struct font_desc font_vga_6x11 = { 
+ .name = "ProFont6x11", + .width = 6, + .height = 11, +- .data = fontdata_6x11, ++ .data = fontdata_6x11.data, + /* Try avoiding this font if possible unless on MAC */ + .pref = -2000, + }; +diff --git a/lib/fonts/font_7x14.c b/lib/fonts/font_7x14.c +index 3b7dbf9c060b..2900b59325e5 100644 +--- a/lib/fonts/font_7x14.c ++++ b/lib/fonts/font_7x14.c +@@ -7,8 +7,8 @@ + + #define FONTDATAMAX 3584 + +-static const unsigned char fontdata_7x14[FONTDATAMAX] = { +- ++static const struct font_data fontdata_7x14 = { ++ { 0, 0, FONTDATAMAX, 0 }, { + /* 0 0x00 '^@' */ + 0x00, /* 0000000 */ + 0x00, /* 0000000 */ +@@ -4104,8 +4104,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = { + 0x00, /* 0000000 */ + 0x00, /* 0000000 */ + 0x00, /* 0000000 */ +- +-}; ++} }; + + + const struct font_desc font_7x14 = { +@@ -4113,6 +4112,6 @@ const struct font_desc font_7x14 = { + .name = "7x14", + .width = 7, + .height = 14, +- .data = fontdata_7x14, ++ .data = fontdata_7x14.data, + .pref = 0, + }; +diff --git a/lib/fonts/font_8x16.c b/lib/fonts/font_8x16.c +index 00a0c67a5c7d..cc3fa17ff94d 100644 +--- a/lib/fonts/font_8x16.c ++++ b/lib/fonts/font_8x16.c +@@ -9,8 +9,8 @@ + + #define FONTDATAMAX 4096 + +-static const unsigned char fontdata_8x16[FONTDATAMAX] = { +- ++static const struct font_data fontdata_8x16 = { ++ { 0, 0, FONTDATAMAX, 0 }, { + /* 0 0x00 '^@' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ +@@ -4618,8 +4618,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = { + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ +- +-}; ++} }; + + + const struct font_desc font_vga_8x16 = { +@@ -4627,7 +4626,7 @@ const struct font_desc font_vga_8x16 = { + .name = "VGA8x16", + .width = 8, + .height = 16, +- .data = fontdata_8x16, ++ .data = fontdata_8x16.data, + .pref = 0, + }; + EXPORT_SYMBOL(font_vga_8x16); +diff --git a/lib/fonts/font_8x8.c b/lib/fonts/font_8x8.c +index 9f56efe2cee7..1519b7ce8827 100644 +--- a/lib/fonts/font_8x8.c ++++ b/lib/fonts/font_8x8.c +@@ -8,8 +8,8 @@ + + #define FONTDATAMAX 2048 + +-static const unsigned char fontdata_8x8[FONTDATAMAX] = { +- ++static const struct font_data fontdata_8x8 = { ++ { 0, 0, FONTDATAMAX, 0 }, { + /* 0 0x00 '^@' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ +@@ -2569,8 +2569,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = { + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ +- +-}; ++} }; + + + const struct font_desc font_vga_8x8 = { +@@ -2578,6 +2577,6 @@ const struct font_desc font_vga_8x8 = { + .name = "VGA8x8", + .width = 8, + .height = 8, +- .data = fontdata_8x8, ++ .data = fontdata_8x8.data, + .pref = 0, + }; +diff --git a/lib/fonts/font_acorn_8x8.c b/lib/fonts/font_acorn_8x8.c +index 639e31ae1100..c6367ed4c5bc 100644 +--- a/lib/fonts/font_acorn_8x8.c ++++ b/lib/fonts/font_acorn_8x8.c +@@ -2,7 +2,10 @@ + + #include + +-static const unsigned char acorndata_8x8[] = { ++#define FONTDATAMAX 2048 ++ ++static const struct font_data acorndata_8x8 = { ++{ 0, 0, FONTDATAMAX, 0 }, { + /* 00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */ + /* 01 */ 0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */ + /* 02 */ 0x7e, 0xff, 0xbd, 0xff, 0xc3, 0xe7, 0xff, 0x7e, /* ^B */ +@@ -259,14 +262,14 @@ static const unsigned char acorndata_8x8[] = { + /* FD */ 0x38, 0x04, 0x18, 0x20, 0x3c, 0x00, 0x00, 0x00, + /* FE */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, + /* FF */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +-}; ++} }; + + const struct font_desc font_acorn_8x8 = { + .idx = ACORN8x8_IDX, + 
.name = "Acorn8x8", + .width = 8, + .height = 8, +- .data = acorndata_8x8, ++ .data = acorndata_8x8.data, + #ifdef CONFIG_ARCH_ACORN + .pref = 20, + #else +diff --git a/lib/fonts/font_mini_4x6.c b/lib/fonts/font_mini_4x6.c +index 838caa1cfef7..592774a90917 100644 +--- a/lib/fonts/font_mini_4x6.c ++++ b/lib/fonts/font_mini_4x6.c +@@ -43,8 +43,8 @@ __END__; + + #define FONTDATAMAX 1536 + +-static const unsigned char fontdata_mini_4x6[FONTDATAMAX] = { +- ++static const struct font_data fontdata_mini_4x6 = { ++ { 0, 0, FONTDATAMAX, 0 }, { + /*{*/ + /* Char 0: ' ' */ + 0xee, /*= [*** ] */ +@@ -2145,14 +2145,14 @@ static const unsigned char fontdata_mini_4x6[FONTDATAMAX] = { + 0xee, /*= [*** ] */ + 0x00, /*= [ ] */ + /*}*/ +-}; ++} }; + + const struct font_desc font_mini_4x6 = { + .idx = MINI4x6_IDX, + .name = "MINI4x6", + .width = 4, + .height = 6, +- .data = fontdata_mini_4x6, ++ .data = fontdata_mini_4x6.data, + .pref = 3, + }; + +diff --git a/lib/fonts/font_pearl_8x8.c b/lib/fonts/font_pearl_8x8.c +index dc6ad539ca4e..6351b759ae70 100644 +--- a/lib/fonts/font_pearl_8x8.c ++++ b/lib/fonts/font_pearl_8x8.c +@@ -13,8 +13,8 @@ + + #define FONTDATAMAX 2048 + +-static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = { +- ++static const struct font_data fontdata_pearl8x8 = { ++ { 0, 0, FONTDATAMAX, 0 }, { + /* 0 0x00 '^@' */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ +@@ -2574,14 +2574,13 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = { + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ + 0x00, /* 00000000 */ +- +-}; ++} }; + + const struct font_desc font_pearl_8x8 = { + .idx = PEARL8x8_IDX, + .name = "PEARL8x8", + .width = 8, + .height = 8, +- .data = fontdata_pearl8x8, ++ .data = fontdata_pearl8x8.data, + .pref = 2, + }; +diff --git a/lib/fonts/font_sun12x22.c b/lib/fonts/font_sun12x22.c +index d3643853c33a..057b0bf368a2 100644 +--- a/lib/fonts/font_sun12x22.c ++++ b/lib/fonts/font_sun12x22.c +@@ -2,8 +2,8 @@ + + #define FONTDATAMAX 11264 + +-static const unsigned char fontdata_sun12x22[FONTDATAMAX] = { +- ++static const struct font_data fontdata_sun12x22 = { ++ { 0, 0, FONTDATAMAX, 0 }, { + /* 0 0x00 '^@' */ + 0x00, 0x00, /* 000000000000 */ + 0x00, 0x00, /* 000000000000 */ +@@ -6147,8 +6147,7 @@ static const unsigned char fontdata_sun12x22[FONTDATAMAX] = { + 0x00, 0x00, /* 000000000000 */ + 0x00, 0x00, /* 000000000000 */ + 0x00, 0x00, /* 000000000000 */ +- +-}; ++} }; + + + const struct font_desc font_sun_12x22 = { +@@ -6156,7 +6155,7 @@ const struct font_desc font_sun_12x22 = { + .name = "SUN12x22", + .width = 12, + .height = 22, +- .data = fontdata_sun12x22, ++ .data = fontdata_sun12x22.data, + #ifdef __sparc__ + .pref = 5, + #else +diff --git a/lib/fonts/font_sun8x16.c b/lib/fonts/font_sun8x16.c +index 268151325b83..84db7275e053 100644 +--- a/lib/fonts/font_sun8x16.c ++++ b/lib/fonts/font_sun8x16.c +@@ -2,7 +2,8 @@ + + #define FONTDATAMAX 4096 + +-static const unsigned char fontdata_sun8x16[FONTDATAMAX] = { ++static const struct font_data fontdata_sun8x16 = { ++{ 0, 0, FONTDATAMAX, 0 }, { + /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + /* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00, + /* */ 0x00,0x00,0x7e,0xff,0xdb,0xff,0xff,0xc3,0xe7,0xff,0xff,0x7e,0x00,0x00,0x00,0x00, +@@ -259,14 +260,14 @@ static const unsigned char fontdata_sun8x16[FONTDATAMAX] = { + /* */ 0x00,0x70,0xd8,0x30,0x60,0xc8,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + /* */ 
0x00,0x00,0x00,0x00,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x00,0x00,0x00,0x00,0x00, + /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +-}; ++} }; + + const struct font_desc font_sun_8x16 = { + .idx = SUN8x16_IDX, + .name = "SUN8x16", + .width = 8, + .height = 16, +- .data = fontdata_sun8x16, ++ .data = fontdata_sun8x16.data, + #ifdef __sparc__ + .pref = 10, + #else +diff --git a/lib/genalloc.c b/lib/genalloc.c +index 7e85d1e37a6e..0b8ee173cf3a 100644 +--- a/lib/genalloc.c ++++ b/lib/genalloc.c +@@ -83,14 +83,14 @@ static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear) + * users set the same bit, one user will return remain bits, otherwise + * return 0. + */ +-static int bitmap_set_ll(unsigned long *map, int start, int nr) ++static int bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr) + { + unsigned long *p = map + BIT_WORD(start); +- const int size = start + nr; ++ const unsigned long size = start + nr; + int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); + unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); + +- while (nr - bits_to_set >= 0) { ++ while (nr >= bits_to_set) { + if (set_bits_ll(p, mask_to_set)) + return nr; + nr -= bits_to_set; +@@ -118,14 +118,15 @@ static int bitmap_set_ll(unsigned long *map, int start, int nr) + * users clear the same bit, one user will return remain bits, + * otherwise return 0. + */ +-static int bitmap_clear_ll(unsigned long *map, int start, int nr) ++static unsigned long ++bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr) + { + unsigned long *p = map + BIT_WORD(start); +- const int size = start + nr; ++ const unsigned long size = start + nr; + int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); + unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); + +- while (nr - bits_to_clear >= 0) { ++ while (nr >= bits_to_clear) { + if (clear_bits_ll(p, mask_to_clear)) + return nr; + nr -= bits_to_clear; +@@ -184,8 +185,8 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy + size_t size, int nid) + { + struct gen_pool_chunk *chunk; +- int nbits = size >> pool->min_alloc_order; +- int nbytes = sizeof(struct gen_pool_chunk) + ++ unsigned long nbits = size >> pool->min_alloc_order; ++ unsigned long nbytes = sizeof(struct gen_pool_chunk) + + BITS_TO_LONGS(nbits) * sizeof(long); + + chunk = vzalloc_node(nbytes, nid); +@@ -242,7 +243,7 @@ void gen_pool_destroy(struct gen_pool *pool) + struct list_head *_chunk, *_next_chunk; + struct gen_pool_chunk *chunk; + int order = pool->min_alloc_order; +- int bit, end_bit; ++ unsigned long bit, end_bit; + + list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { + chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); +@@ -293,7 +294,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size, + struct gen_pool_chunk *chunk; + unsigned long addr = 0; + int order = pool->min_alloc_order; +- int nbits, start_bit, end_bit, remain; ++ unsigned long nbits, start_bit, end_bit, remain; + + #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG + BUG_ON(in_nmi()); +@@ -376,7 +377,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) + { + struct gen_pool_chunk *chunk; + int order = pool->min_alloc_order; +- int start_bit, nbits, remain; ++ unsigned long start_bit, nbits, remain; + + #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG + BUG_ON(in_nmi()); +@@ -638,7 +639,7 @@ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, + 
index = bitmap_find_next_zero_area(map, size, start, nr, 0); + + while (index < size) { +- int next_bit = find_next_bit(map, size, index + nr); ++ unsigned long next_bit = find_next_bit(map, size, index + nr); + if ((next_bit - index) < len) { + len = next_bit - index; + start_bit = index; +diff --git a/lib/iov_iter.c b/lib/iov_iter.c +index a75ea633b5c4..07d735b2eccf 100644 +--- a/lib/iov_iter.c ++++ b/lib/iov_iter.c +@@ -394,7 +394,7 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes) + int err; + struct iovec v; + +- if (!(i->type & (ITER_BVEC|ITER_KVEC))) { ++ if (iter_is_iovec(i)) { + iterate_iovec(i, bytes, v, iov, skip, ({ + err = fault_in_pages_readable(v.iov_base, v.iov_len); + if (unlikely(err)) +diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c +index f6c2c1e7779c..6104daf98ad9 100644 +--- a/lib/kobject_uevent.c ++++ b/lib/kobject_uevent.c +@@ -128,12 +128,13 @@ static int kobj_usermode_filter(struct kobject *kobj) + + static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem) + { ++ int buffer_size = sizeof(env->buf) - env->buflen; + int len; + +- len = strlcpy(&env->buf[env->buflen], subsystem, +- sizeof(env->buf) - env->buflen); +- if (len >= (sizeof(env->buf) - env->buflen)) { +- WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n"); ++ len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size); ++ if (len >= buffer_size) { ++ pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n", ++ buffer_size, len); + return -ENOMEM; + } + +diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h +index e01b705556aa..6c5229f98c9e 100644 +--- a/lib/mpi/longlong.h ++++ b/lib/mpi/longlong.h +@@ -671,7 +671,7 @@ do { \ + ************** MIPS/64 ************** + ***************************************/ + #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 +-#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 ++#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 && defined(CONFIG_CC_IS_GCC) + /* + * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C + * code below, so we special case MIPS64r6 until the compiler can do better. +diff --git a/lib/random32.c b/lib/random32.c +index fa594b1140e6..3c5b67b69cba 100644 +--- a/lib/random32.c ++++ b/lib/random32.c +@@ -39,16 +39,6 @@ + #include + #include + +-#ifdef CONFIG_RANDOM32_SELFTEST +-static void __init prandom_state_selftest(void); +-#else +-static inline void prandom_state_selftest(void) +-{ +-} +-#endif +- +-static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; +- + /** + * prandom_u32_state - seeded pseudo-random number generator. + * @state: pointer to state structure holding seeded state. +@@ -68,25 +58,6 @@ u32 prandom_u32_state(struct rnd_state *state) + } + EXPORT_SYMBOL(prandom_u32_state); + +-/** +- * prandom_u32 - pseudo random number generator +- * +- * A 32 bit pseudo-random number is generated using a fast +- * algorithm suitable for simulation. This algorithm is NOT +- * considered safe for cryptographic use. 
+- */ +-u32 prandom_u32(void) +-{ +- struct rnd_state *state = &get_cpu_var(net_rand_state); +- u32 res; +- +- res = prandom_u32_state(state); +- put_cpu_var(net_rand_state); +- +- return res; +-} +-EXPORT_SYMBOL(prandom_u32); +- + /** + * prandom_bytes_state - get the requested number of pseudo-random bytes + * +@@ -118,20 +89,6 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes) + } + EXPORT_SYMBOL(prandom_bytes_state); + +-/** +- * prandom_bytes - get the requested number of pseudo-random bytes +- * @buf: where to copy the pseudo-random bytes to +- * @bytes: the requested number of bytes +- */ +-void prandom_bytes(void *buf, size_t bytes) +-{ +- struct rnd_state *state = &get_cpu_var(net_rand_state); +- +- prandom_bytes_state(state, buf, bytes); +- put_cpu_var(net_rand_state); +-} +-EXPORT_SYMBOL(prandom_bytes); +- + static void prandom_warmup(struct rnd_state *state) + { + /* Calling RNG ten times to satisfy recurrence condition */ +@@ -147,96 +104,6 @@ static void prandom_warmup(struct rnd_state *state) + prandom_u32_state(state); + } + +-static u32 __extract_hwseed(void) +-{ +- unsigned int val = 0; +- +- (void)(arch_get_random_seed_int(&val) || +- arch_get_random_int(&val)); +- +- return val; +-} +- +-static void prandom_seed_early(struct rnd_state *state, u32 seed, +- bool mix_with_hwseed) +-{ +-#define LCG(x) ((x) * 69069U) /* super-duper LCG */ +-#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0) +- state->s1 = __seed(HWSEED() ^ LCG(seed), 2U); +- state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U); +- state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U); +- state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U); +-} +- +-/** +- * prandom_seed - add entropy to pseudo random number generator +- * @seed: seed value +- * +- * Add some additional seeding to the prandom pool. +- */ +-void prandom_seed(u32 entropy) +-{ +- int i; +- /* +- * No locking on the CPUs, but then somewhat random results are, well, +- * expected. +- */ +- for_each_possible_cpu(i) { +- struct rnd_state *state = &per_cpu(net_rand_state, i); +- +- state->s1 = __seed(state->s1 ^ entropy, 2U); +- prandom_warmup(state); +- } +-} +-EXPORT_SYMBOL(prandom_seed); +- +-/* +- * Generate some initially weak seeding values to allow +- * to start the prandom_u32() engine. +- */ +-static int __init prandom_init(void) +-{ +- int i; +- +- prandom_state_selftest(); +- +- for_each_possible_cpu(i) { +- struct rnd_state *state = &per_cpu(net_rand_state, i); +- u32 weak_seed = (i + jiffies) ^ random_get_entropy(); +- +- prandom_seed_early(state, weak_seed, true); +- prandom_warmup(state); +- } +- +- return 0; +-} +-core_initcall(prandom_init); +- +-static void __prandom_timer(unsigned long dontcare); +- +-static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0); +- +-static void __prandom_timer(unsigned long dontcare) +-{ +- u32 entropy; +- unsigned long expires; +- +- get_random_bytes(&entropy, sizeof(entropy)); +- prandom_seed(entropy); +- +- /* reseed every ~60 seconds, in [40 .. 
80) interval with slack */ +- expires = 40 + prandom_u32_max(40); +- seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC); +- +- add_timer(&seed_timer); +-} +- +-static void __init __prandom_start_seed_timer(void) +-{ +- seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC); +- add_timer(&seed_timer); +-} +- + void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state) + { + int i; +@@ -256,51 +123,6 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state) + } + EXPORT_SYMBOL(prandom_seed_full_state); + +-/* +- * Generate better values after random number generator +- * is fully initialized. +- */ +-static void __prandom_reseed(bool late) +-{ +- unsigned long flags; +- static bool latch = false; +- static DEFINE_SPINLOCK(lock); +- +- /* Asking for random bytes might result in bytes getting +- * moved into the nonblocking pool and thus marking it +- * as initialized. In this case we would double back into +- * this function and attempt to do a late reseed. +- * Ignore the pointless attempt to reseed again if we're +- * already waiting for bytes when the nonblocking pool +- * got initialized. +- */ +- +- /* only allow initial seeding (late == false) once */ +- if (!spin_trylock_irqsave(&lock, flags)) +- return; +- +- if (latch && !late) +- goto out; +- +- latch = true; +- prandom_seed_full_state(&net_rand_state); +-out: +- spin_unlock_irqrestore(&lock, flags); +-} +- +-void prandom_reseed_late(void) +-{ +- __prandom_reseed(true); +-} +- +-static int __init prandom_reseed(void) +-{ +- __prandom_reseed(false); +- __prandom_start_seed_timer(); +- return 0; +-} +-late_initcall(prandom_reseed); +- + #ifdef CONFIG_RANDOM32_SELFTEST + static struct prandom_test1 { + u32 seed; +@@ -420,7 +242,28 @@ static struct prandom_test2 { + { 407983964U, 921U, 728767059U }, + }; + +-static void __init prandom_state_selftest(void) ++static u32 __extract_hwseed(void) ++{ ++ unsigned int val = 0; ++ ++ (void)(arch_get_random_seed_int(&val) || ++ arch_get_random_int(&val)); ++ ++ return val; ++} ++ ++static void prandom_seed_early(struct rnd_state *state, u32 seed, ++ bool mix_with_hwseed) ++{ ++#define LCG(x) ((x) * 69069U) /* super-duper LCG */ ++#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0) ++ state->s1 = __seed(HWSEED() ^ LCG(seed), 2U); ++ state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U); ++ state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U); ++ state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U); ++} ++ ++static int __init prandom_state_selftest(void) + { + int i, j, errors = 0, runs = 0; + bool error = false; +@@ -460,5 +303,266 @@ static void __init prandom_state_selftest(void) + pr_warn("prandom: %d/%d self tests failed\n", errors, runs); + else + pr_info("prandom: %d self tests passed\n", runs); ++ return 0; + } ++core_initcall(prandom_state_selftest); + #endif ++ ++/* ++ * The prandom_u32() implementation is now completely separate from the ++ * prandom_state() functions, which are retained (for now) for compatibility. ++ * ++ * Because of (ab)use in the networking code for choosing random TCP/UDP port ++ * numbers, which open DoS possibilities if guessable, we want something ++ * stronger than a standard PRNG. But the performance requirements of ++ * the network code do not allow robust crypto for this application. ++ * ++ * So this is a homebrew Junior Spaceman implementation, based on the ++ * lowest-latency trustworthy crypto primitive available, SipHash. 
++ * (The authors of SipHash have not been consulted about this abuse of ++ * their work.) ++ * ++ * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to ++ * one word of output. This abbreviated version uses 2 rounds per word ++ * of output. ++ */ ++ ++struct siprand_state { ++ unsigned long v0; ++ unsigned long v1; ++ unsigned long v2; ++ unsigned long v3; ++}; ++ ++static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy; ++ ++/* ++ * This is the core CPRNG function. As "pseudorandom", this is not used ++ * for truly valuable things, just intended to be a PITA to guess. ++ * For maximum speed, we do just two SipHash rounds per word. This is ++ * the same rate as 4 rounds per 64 bits that SipHash normally uses, ++ * so hopefully it's reasonably secure. ++ * ++ * There are two changes from the official SipHash finalization: ++ * - We omit some constants XORed with v2 in the SipHash spec as irrelevant; ++ * they are there only to make the output rounds distinct from the input ++ * rounds, and this application has no input rounds. ++ * - Rather than returning v0^v1^v2^v3, return v1+v3. ++ * If you look at the SipHash round, the last operation on v3 is ++ * "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time. ++ * Likewise "v1 ^= v2". (The rotate of v2 makes a difference, but ++ * it still cancels out half of the bits in v2 for no benefit.) ++ * Second, since the last combining operation was xor, continue the ++ * pattern of alternating xor/add for a tiny bit of extra non-linearity. ++ */ ++static inline u32 siprand_u32(struct siprand_state *s) ++{ ++ unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3; ++ ++ PRND_SIPROUND(v0, v1, v2, v3); ++ PRND_SIPROUND(v0, v1, v2, v3); ++ s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3; ++ return v1 + v3; ++} ++ ++ ++/** ++ * prandom_u32 - pseudo random number generator ++ * ++ * A 32 bit pseudo-random number is generated using a fast ++ * algorithm suitable for simulation. This algorithm is NOT ++ * considered safe for cryptographic use. ++ */ ++u32 prandom_u32(void) ++{ ++ struct siprand_state *state = get_cpu_ptr(&net_rand_state); ++ u32 res = siprand_u32(state); ++ ++ put_cpu_ptr(&net_rand_state); ++ return res; ++} ++EXPORT_SYMBOL(prandom_u32); ++ ++/** ++ * prandom_bytes - get the requested number of pseudo-random bytes ++ * @buf: where to copy the pseudo-random bytes to ++ * @bytes: the requested number of bytes ++ */ ++void prandom_bytes(void *buf, size_t bytes) ++{ ++ struct siprand_state *state = get_cpu_ptr(&net_rand_state); ++ u8 *ptr = buf; ++ ++ while (bytes >= sizeof(u32)) { ++ put_unaligned(siprand_u32(state), (u32 *)ptr); ++ ptr += sizeof(u32); ++ bytes -= sizeof(u32); ++ } ++ ++ if (bytes > 0) { ++ u32 rem = siprand_u32(state); ++ ++ do { ++ *ptr++ = (u8)rem; ++ rem >>= BITS_PER_BYTE; ++ } while (--bytes > 0); ++ } ++ put_cpu_ptr(&net_rand_state); ++} ++EXPORT_SYMBOL(prandom_bytes); ++ ++/** ++ * prandom_seed - add entropy to pseudo random number generator ++ * @entropy: entropy value ++ * ++ * Add some additional seed material to the prandom pool. ++ * The "entropy" is actually our IP address (the only caller is ++ * the network code), not for unpredictability, but to ensure that ++ * different machines are initialized differently. 
++ */ ++void prandom_seed(u32 entropy) ++{ ++ int i; ++ ++ add_device_randomness(&entropy, sizeof(entropy)); ++ ++ for_each_possible_cpu(i) { ++ struct siprand_state *state = per_cpu_ptr(&net_rand_state, i); ++ unsigned long v0 = state->v0, v1 = state->v1; ++ unsigned long v2 = state->v2, v3 = state->v3; ++ ++ do { ++ v3 ^= entropy; ++ PRND_SIPROUND(v0, v1, v2, v3); ++ PRND_SIPROUND(v0, v1, v2, v3); ++ v0 ^= entropy; ++ } while (unlikely(!v0 || !v1 || !v2 || !v3)); ++ ++ WRITE_ONCE(state->v0, v0); ++ WRITE_ONCE(state->v1, v1); ++ WRITE_ONCE(state->v2, v2); ++ WRITE_ONCE(state->v3, v3); ++ } ++} ++EXPORT_SYMBOL(prandom_seed); ++ ++/* ++ * Generate some initially weak seeding values to allow ++ * the prandom_u32() engine to be started. ++ */ ++static int __init prandom_init_early(void) ++{ ++ int i; ++ unsigned long v0, v1, v2, v3; ++ ++ if (!arch_get_random_long(&v0)) ++ v0 = jiffies; ++ if (!arch_get_random_long(&v1)) ++ v1 = random_get_entropy(); ++ v2 = v0 ^ PRND_K0; ++ v3 = v1 ^ PRND_K1; ++ ++ for_each_possible_cpu(i) { ++ struct siprand_state *state; ++ ++ v3 ^= i; ++ PRND_SIPROUND(v0, v1, v2, v3); ++ PRND_SIPROUND(v0, v1, v2, v3); ++ v0 ^= i; ++ ++ state = per_cpu_ptr(&net_rand_state, i); ++ state->v0 = v0; state->v1 = v1; ++ state->v2 = v2; state->v3 = v3; ++ } ++ ++ return 0; ++} ++core_initcall(prandom_init_early); ++ ++ ++/* Stronger reseeding when available, and periodically thereafter. */ ++static void prandom_reseed(unsigned long dontcare); ++ ++static DEFINE_TIMER(seed_timer, prandom_reseed, 0, 0); ++ ++static void prandom_reseed(unsigned long dontcare) ++{ ++ unsigned long expires; ++ int i; ++ ++ /* ++ * Reinitialize each CPU's PRNG with 128 bits of key. ++ * No locking on the CPUs, but then somewhat random results are, ++ * well, expected. ++ */ ++ for_each_possible_cpu(i) { ++ struct siprand_state *state; ++ unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0; ++ unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1; ++#if BITS_PER_LONG == 32 ++ int j; ++ ++ /* ++ * On 32-bit machines, hash in two extra words to ++ * approximate 128-bit key length. Not that the hash ++ * has that much security, but this prevents a trivial ++ * 64-bit brute force. ++ */ ++ for (j = 0; j < 2; j++) { ++ unsigned long m = get_random_long(); ++ ++ v3 ^= m; ++ PRND_SIPROUND(v0, v1, v2, v3); ++ PRND_SIPROUND(v0, v1, v2, v3); ++ v0 ^= m; ++ } ++#endif ++ /* ++ * Probably impossible in practice, but there is a ++ * theoretical risk that a race between this reseeding ++ * and the target CPU writing its state back could ++ * create the all-zero SipHash fixed point. ++ * ++ * To ensure that never happens, ensure the state ++ * we write contains no zero words. ++ */ ++ state = per_cpu_ptr(&net_rand_state, i); ++ WRITE_ONCE(state->v0, v0 ? v0 : -1ul); ++ WRITE_ONCE(state->v1, v1 ? v1 : -1ul); ++ WRITE_ONCE(state->v2, v2 ? v2 : -1ul); ++ WRITE_ONCE(state->v3, v3 ? v3 : -1ul); ++ } ++ ++ /* reseed every ~60 seconds, in [40 .. 80) interval with slack */ ++ expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ)); ++ mod_timer(&seed_timer, expires); ++} ++ ++/* ++ * The random ready callback can be called from almost any interrupt. ++ * To avoid worrying about whether it's safe to delay that interrupt ++ * long enough to seed all CPUs, just schedule an immediate timer event. ++ */ ++static void prandom_timer_start(struct random_ready_callback *unused) ++{ ++ mod_timer(&seed_timer, jiffies); ++} ++ ++/* ++ * Start periodic full reseeding as soon as strong ++ * random numbers are available. 
++ */ ++static int __init prandom_init_late(void) ++{ ++ static struct random_ready_callback random_ready = { ++ .func = prandom_timer_start ++ }; ++ int ret = add_random_ready_callback(&random_ready); ++ ++ if (ret == -EALREADY) { ++ prandom_timer_start(&random_ready); ++ ret = 0; ++ } ++ return ret; ++} ++late_initcall(prandom_init_late); +diff --git a/lib/seq_buf.c b/lib/seq_buf.c +index 5954f9fb6675..b3ec487c9728 100644 +--- a/lib/seq_buf.c ++++ b/lib/seq_buf.c +@@ -227,8 +227,10 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, + + WARN_ON(s->size == 0); + ++ BUILD_BUG_ON(MAX_MEMHEX_BYTES * 2 >= HEX_CHARS); ++ + while (len) { +- start_len = min(len, HEX_CHARS - 1); ++ start_len = min(len, MAX_MEMHEX_BYTES); + #ifdef __BIG_ENDIAN + for (i = 0, j = 0; i < start_len; i++) { + #else +@@ -241,12 +243,14 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, + break; + + /* j increments twice per loop */ +- len -= j / 2; + hex[j++] = ' '; + + seq_buf_putmem(s, hex, j); + if (seq_buf_has_overflowed(s)) + return -1; ++ ++ len -= start_len; ++ data += start_len; + } + return 0; + } +diff --git a/lib/stackdepot.c b/lib/stackdepot.c +index 759ff419fe61..c519aa07d2e9 100644 +--- a/lib/stackdepot.c ++++ b/lib/stackdepot.c +@@ -78,7 +78,7 @@ static void *stack_slabs[STACK_ALLOC_MAX_SLABS]; + static int depot_index; + static int next_slab_inited; + static size_t depot_offset; +-static DEFINE_SPINLOCK(depot_lock); ++static DEFINE_RAW_SPINLOCK(depot_lock); + + static bool init_stack_slab(void **prealloc) + { +@@ -253,7 +253,7 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace, + prealloc = page_address(page); + } + +- spin_lock_irqsave(&depot_lock, flags); ++ raw_spin_lock_irqsave(&depot_lock, flags); + + found = find_stack(*bucket, trace->entries, trace->nr_entries, hash); + if (!found) { +@@ -277,7 +277,7 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace, + WARN_ON(!init_stack_slab(&prealloc)); + } + +- spin_unlock_irqrestore(&depot_lock, flags); ++ raw_spin_unlock_irqrestore(&depot_lock, flags); + exit: + if (prealloc) { + /* Nobody used this memory, ok to free it. */ +diff --git a/lib/string.c b/lib/string.c +index 9b2df9077a92..9f4a98cd47e1 100644 +--- a/lib/string.c ++++ b/lib/string.c +@@ -157,11 +157,9 @@ EXPORT_SYMBOL(strlcpy); + * @src: Where to copy the string from + * @count: Size of destination buffer + * +- * Copy the string, or as much of it as fits, into the dest buffer. +- * The routine returns the number of characters copied (not including +- * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough. +- * The behavior is undefined if the string buffers overlap. +- * The destination buffer is always NUL terminated, unless it's zero-sized. ++ * Copy the string, or as much of it as fits, into the dest buffer. The ++ * behavior is undefined if the string buffers overlap. The destination ++ * buffer is always NUL terminated, unless it's zero-sized. + * + * Preferred to strlcpy() since the API doesn't require reading memory + * from the src string beyond the specified "count" bytes, and since +@@ -171,8 +169,10 @@ EXPORT_SYMBOL(strlcpy); + * + * Preferred to strncpy() since it always returns a valid string, and + * doesn't unnecessarily force the tail of the destination buffer to be +- * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy() +- * with an overflow test, then just memset() the tail of the dest buffer. ++ * zeroed. If zeroing is desired please use strscpy_pad(). 
++ * ++ * Return: The number of characters copied (not including the trailing ++ * %NUL) or -E2BIG if the destination buffer wasn't big enough. + */ + ssize_t strscpy(char *dest, const char *src, size_t count) + { +@@ -259,6 +259,39 @@ char *stpcpy(char *__restrict__ dest, const char *__restrict__ src) + } + EXPORT_SYMBOL(stpcpy); + ++/** ++ * strscpy_pad() - Copy a C-string into a sized buffer ++ * @dest: Where to copy the string to ++ * @src: Where to copy the string from ++ * @count: Size of destination buffer ++ * ++ * Copy the string, or as much of it as fits, into the dest buffer. The ++ * behavior is undefined if the string buffers overlap. The destination ++ * buffer is always %NUL terminated, unless it's zero-sized. ++ * ++ * If the source string is shorter than the destination buffer, zeros ++ * the tail of the destination buffer. ++ * ++ * For full explanation of why you may want to consider using the ++ * 'strscpy' functions please see the function docstring for strscpy(). ++ * ++ * Return: The number of characters copied (not including the trailing ++ * %NUL) or -E2BIG if the destination buffer wasn't big enough. ++ */ ++ssize_t strscpy_pad(char *dest, const char *src, size_t count) ++{ ++ ssize_t written; ++ ++ written = strscpy(dest, src, count); ++ if (written < 0 || written == count - 1) ++ return written; ++ ++ memset(dest + written + 1, 0, count - written - 1); ++ ++ return written; ++} ++EXPORT_SYMBOL(strscpy_pad); ++ + #ifndef __HAVE_ARCH_STRCAT + /** + * strcat - Append one %NUL-terminated string to another +@@ -721,6 +754,72 @@ void memzero_explicit(void *s, size_t count) + } + EXPORT_SYMBOL(memzero_explicit); + ++#ifndef __HAVE_ARCH_MEMSET16 ++/** ++ * memset16() - Fill a memory area with a uint16_t ++ * @s: Pointer to the start of the area. ++ * @v: The value to fill the area with ++ * @count: The number of values to store ++ * ++ * Differs from memset() in that it fills with a uint16_t instead ++ * of a byte. Remember that @count is the number of uint16_ts to ++ * store, not the number of bytes. ++ */ ++void *memset16(uint16_t *s, uint16_t v, size_t count) ++{ ++ uint16_t *xs = s; ++ ++ while (count--) ++ *xs++ = v; ++ return s; ++} ++EXPORT_SYMBOL(memset16); ++#endif ++ ++#ifndef __HAVE_ARCH_MEMSET32 ++/** ++ * memset32() - Fill a memory area with a uint32_t ++ * @s: Pointer to the start of the area. ++ * @v: The value to fill the area with ++ * @count: The number of values to store ++ * ++ * Differs from memset() in that it fills with a uint32_t instead ++ * of a byte. Remember that @count is the number of uint32_ts to ++ * store, not the number of bytes. ++ */ ++void *memset32(uint32_t *s, uint32_t v, size_t count) ++{ ++ uint32_t *xs = s; ++ ++ while (count--) ++ *xs++ = v; ++ return s; ++} ++EXPORT_SYMBOL(memset32); ++#endif ++ ++#ifndef __HAVE_ARCH_MEMSET64 ++/** ++ * memset64() - Fill a memory area with a uint64_t ++ * @s: Pointer to the start of the area. ++ * @v: The value to fill the area with ++ * @count: The number of values to store ++ * ++ * Differs from memset() in that it fills with a uint64_t instead ++ * of a byte. Remember that @count is the number of uint64_ts to ++ * store, not the number of bytes. 
++ */ ++void *memset64(uint64_t *s, uint64_t v, size_t count) ++{ ++ uint64_t *xs = s; ++ ++ while (count--) ++ *xs++ = v; ++ return s; ++} ++EXPORT_SYMBOL(memset64); ++#endif ++ + #ifndef __HAVE_ARCH_MEMCPY + /** + * memcpy - Copy one area of memory to another +diff --git a/lib/swiotlb.c b/lib/swiotlb.c +index 7ff9dc36c2f8..74b5b8862198 100644 +--- a/lib/swiotlb.c ++++ b/lib/swiotlb.c +@@ -199,6 +199,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) + io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; + } + io_tlb_index = 0; ++ no_iotlb_memory = false; + + if (verbose) + swiotlb_print_info(); +@@ -229,9 +230,11 @@ swiotlb_init(int verbose) + if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) + return; + +- if (io_tlb_start) ++ if (io_tlb_start) { + memblock_free_early(io_tlb_start, + PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); ++ io_tlb_start = 0; ++ } + pr_warn("Cannot allocate buffer"); + no_iotlb_memory = true; + } +@@ -330,6 +333,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) + io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; + } + io_tlb_index = 0; ++ no_iotlb_memory = false; + + swiotlb_print_info(); + +diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c +index 2c13ecc5bb2c..ed1f3df27260 100644 +--- a/lib/zlib_inflate/inffast.c ++++ b/lib/zlib_inflate/inffast.c +@@ -10,17 +10,6 @@ + + #ifndef ASMINF + +-/* Allow machine dependent optimization for post-increment or pre-increment. +- Based on testing to date, +- Pre-increment preferred for: +- - PowerPC G3 (Adler) +- - MIPS R5000 (Randers-Pehrson) +- Post-increment preferred for: +- - none +- No measurable difference: +- - Pentium III (Anderson) +- - M68060 (Nikl) +- */ + union uu { + unsigned short us; + unsigned char b[2]; +@@ -38,16 +27,6 @@ get_unaligned16(const unsigned short *p) + return mm.us; + } + +-#ifdef POSTINC +-# define OFF 0 +-# define PUP(a) *(a)++ +-# define UP_UNALIGNED(a) get_unaligned16((a)++) +-#else +-# define OFF 1 +-# define PUP(a) *++(a) +-# define UP_UNALIGNED(a) get_unaligned16(++(a)) +-#endif +- + /* + Decode literal, length, and distance codes and write out the resulting + literal and match bytes until either not enough input or output is +@@ -115,9 +94,9 @@ void inflate_fast(z_streamp strm, unsigned start) + + /* copy state to local variables */ + state = (struct inflate_state *)strm->state; +- in = strm->next_in - OFF; ++ in = strm->next_in; + last = in + (strm->avail_in - 5); +- out = strm->next_out - OFF; ++ out = strm->next_out; + beg = out - (start - strm->avail_out); + end = out + (strm->avail_out - 257); + #ifdef INFLATE_STRICT +@@ -138,9 +117,9 @@ void inflate_fast(z_streamp strm, unsigned start) + input data or output space */ + do { + if (bits < 15) { +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; + } + this = lcode[hold & lmask]; +@@ -150,14 +129,14 @@ void inflate_fast(z_streamp strm, unsigned start) + bits -= op; + op = (unsigned)(this.op); + if (op == 0) { /* literal */ +- PUP(out) = (unsigned char)(this.val); ++ *out++ = (unsigned char)(this.val); + } + else if (op & 16) { /* length base */ + len = (unsigned)(this.val); + op &= 15; /* number of extra bits */ + if (op) { + if (bits < op) { +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; + } + len += (unsigned)hold & ((1U << op) - 1); +@@ -165,9 +144,9 @@ void 
inflate_fast(z_streamp strm, unsigned start) + bits -= op; + } + if (bits < 15) { +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; + } + this = dcode[hold & dmask]; +@@ -180,10 +159,10 @@ void inflate_fast(z_streamp strm, unsigned start) + dist = (unsigned)(this.val); + op &= 15; /* number of extra bits */ + if (bits < op) { +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; + if (bits < op) { +- hold += (unsigned long)(PUP(in)) << bits; ++ hold += (unsigned long)(*in++) << bits; + bits += 8; + } + } +@@ -205,13 +184,13 @@ void inflate_fast(z_streamp strm, unsigned start) + state->mode = BAD; + break; + } +- from = window - OFF; ++ from = window; + if (write == 0) { /* very common case */ + from += wsize - op; + if (op < len) { /* some from window */ + len -= op; + do { +- PUP(out) = PUP(from); ++ *out++ = *from++; + } while (--op); + from = out - dist; /* rest from output */ + } +@@ -222,14 +201,14 @@ void inflate_fast(z_streamp strm, unsigned start) + if (op < len) { /* some from end of window */ + len -= op; + do { +- PUP(out) = PUP(from); ++ *out++ = *from++; + } while (--op); +- from = window - OFF; ++ from = window; + if (write < len) { /* some from start of window */ + op = write; + len -= op; + do { +- PUP(out) = PUP(from); ++ *out++ = *from++; + } while (--op); + from = out - dist; /* rest from output */ + } +@@ -240,21 +219,21 @@ void inflate_fast(z_streamp strm, unsigned start) + if (op < len) { /* some from window */ + len -= op; + do { +- PUP(out) = PUP(from); ++ *out++ = *from++; + } while (--op); + from = out - dist; /* rest from output */ + } + } + while (len > 2) { +- PUP(out) = PUP(from); +- PUP(out) = PUP(from); +- PUP(out) = PUP(from); ++ *out++ = *from++; ++ *out++ = *from++; ++ *out++ = *from++; + len -= 3; + } + if (len) { +- PUP(out) = PUP(from); ++ *out++ = *from++; + if (len > 1) +- PUP(out) = PUP(from); ++ *out++ = *from++; + } + } + else { +@@ -264,29 +243,29 @@ void inflate_fast(z_streamp strm, unsigned start) + from = out - dist; /* copy direct from output */ + /* minimum length is three */ + /* Align out addr */ +- if (!((long)(out - 1 + OFF) & 1)) { +- PUP(out) = PUP(from); ++ if (!((long)(out - 1) & 1)) { ++ *out++ = *from++; + len--; + } +- sout = (unsigned short *)(out - OFF); ++ sout = (unsigned short *)(out); + if (dist > 2) { + unsigned short *sfrom; + +- sfrom = (unsigned short *)(from - OFF); ++ sfrom = (unsigned short *)(from); + loops = len >> 1; + do + #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +- PUP(sout) = PUP(sfrom); ++ *sout++ = *sfrom++; + #else +- PUP(sout) = UP_UNALIGNED(sfrom); ++ *sout++ = get_unaligned16(sfrom++); + #endif + while (--loops); +- out = (unsigned char *)sout + OFF; +- from = (unsigned char *)sfrom + OFF; ++ out = (unsigned char *)sout; ++ from = (unsigned char *)sfrom; + } else { /* dist == 1 or dist == 2 */ + unsigned short pat16; + +- pat16 = *(sout-1+OFF); ++ pat16 = *(sout-1); + if (dist == 1) { + union uu mm; + /* copy one char pattern to both bytes */ +@@ -296,12 +275,12 @@ void inflate_fast(z_streamp strm, unsigned start) + } + loops = len >> 1; + do +- PUP(sout) = pat16; ++ *sout++ = pat16; + while (--loops); +- out = (unsigned char *)sout + OFF; ++ out = (unsigned char *)sout; + } + if (len & 1) +- PUP(out) = PUP(from); ++ *out++ = *from++; + } + } + else if ((op & 64) == 0) { /* 2nd level distance code */ +@@ 
-336,8 +315,8 @@ void inflate_fast(z_streamp strm, unsigned start) + hold &= (1U << bits) - 1; + + /* update state and return */ +- strm->next_in = in + OFF; +- strm->next_out = out + OFF; ++ strm->next_in = in; ++ strm->next_out = out; + strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last)); + strm->avail_out = (unsigned)(out < end ? + 257 + (end - out) : 257 - (out - end)); +diff --git a/mm/backing-dev.c b/mm/backing-dev.c +index bdb7d168293b..269adc2de903 100755 +--- a/mm/backing-dev.c ++++ b/mm/backing-dev.c +@@ -21,6 +21,7 @@ struct backing_dev_info noop_backing_dev_info = { + EXPORT_SYMBOL_GPL(noop_backing_dev_info); + + static struct class *bdi_class; ++const char *bdi_unknown_name = "(unknown)"; + + /* + * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side +diff --git a/mm/filemap.c b/mm/filemap.c +index 1aaebed9d20d..4b50ece85394 100644 +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -2685,6 +2685,14 @@ static struct page *do_read_cache_page(struct address_space *mapping, + unlock_page(page); + goto out; + } ++ ++ /* ++ * A previous I/O error may have been due to temporary ++ * failures. ++ * Clear page error before actual read, PG_error will be ++ * set again if read page fails. ++ */ ++ ClearPageError(page); + goto filler; + + out: +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index fad4a0ca903c..d11aef19f184 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -661,7 +661,6 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe) + transparent_hugepage_use_zero_page()) { + pgtable_t pgtable; + struct page *zero_page; +- bool set; + int ret; + pgtable = pte_alloc_one(vma->vm_mm, haddr); + if (unlikely(!pgtable)) +@@ -674,22 +673,21 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe) + } + fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); + ret = 0; +- set = false; + if (pmd_none(*fe->pmd)) { + if (userfaultfd_missing(vma)) { + spin_unlock(fe->ptl); ++ pte_free(vma->vm_mm, pgtable); + ret = handle_userfault(fe, VM_UFFD_MISSING); + VM_BUG_ON(ret & VM_FAULT_FALLBACK); + } else { + set_huge_zero_page(pgtable, vma->vm_mm, vma, + haddr, fe->pmd, zero_page); + spin_unlock(fe->ptl); +- set = true; + } +- } else ++ } else { + spin_unlock(fe->ptl); +- if (!set) + pte_free(vma->vm_mm, pgtable); ++ } + return ret; + } + gfp = alloc_hugepage_direct_gfpmask(vma); +@@ -1024,6 +1022,19 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd) + * We can only reuse the page if nobody else maps the huge page or it's + * part. + */ ++ if (!trylock_page(page)) { ++ get_page(page); ++ spin_unlock(fe->ptl); ++ lock_page(page); ++ spin_lock(fe->ptl); ++ if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) { ++ unlock_page(page); ++ put_page(page); ++ goto out_unlock; ++ } ++ put_page(page); ++ } ++ + if (page_trans_huge_mapcount(page, NULL) == 1) { + pmd_t entry; + entry = pmd_mkyoung(orig_pmd); +@@ -1031,8 +1042,10 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd) + if (pmdp_set_access_flags(vma, haddr, fe->pmd, entry, 1)) + update_mmu_cache_pmd(vma, fe->address, fe->pmd); + ret |= VM_FAULT_WRITE; ++ unlock_page(page); + goto out_unlock; + } ++ unlock_page(page); + get_page(page); + spin_unlock(fe->ptl); + alloc: +@@ -1356,7 +1369,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, + * If other processes are mapping this page, we couldn't discard + * the page unless they all do MADV_FREE so let's skip the page. 
+ */ +- if (page_mapcount(page) != 1) ++ if (total_mapcount(page) != 1) + goto out; + + if (!trylock_page(page)) +@@ -1755,6 +1768,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + spinlock_t *ptl; + struct mm_struct *mm = vma->vm_mm; + unsigned long haddr = address & HPAGE_PMD_MASK; ++ bool do_unlock_page = false; ++ pmd_t _pmd; + + mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE); + ptl = pmd_lock(mm, pmd); +@@ -1764,11 +1779,41 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + * pmd against. Otherwise we can end up replacing wrong page. + */ + VM_BUG_ON(freeze && !page); +- if (page && page != pmd_page(*pmd)) +- goto out; ++ if (page) { ++ VM_WARN_ON_ONCE(!PageLocked(page)); ++ if (page != pmd_page(*pmd)) ++ goto out; ++ } + ++repeat: + if (pmd_trans_huge(*pmd)) { +- page = pmd_page(*pmd); ++ if (!page) { ++ page = pmd_page(*pmd); ++ /* ++ * An anonymous page must be locked, to ensure that a ++ * concurrent reuse_swap_page() sees stable mapcount; ++ * but reuse_swap_page() is not used on shmem or file, ++ * and page lock must not be taken when zap_pmd_range() ++ * calls __split_huge_pmd() while i_mmap_lock is held. ++ */ ++ if (PageAnon(page)) { ++ if (unlikely(!trylock_page(page))) { ++ get_page(page); ++ _pmd = *pmd; ++ spin_unlock(ptl); ++ lock_page(page); ++ spin_lock(ptl); ++ if (unlikely(!pmd_same(*pmd, _pmd))) { ++ unlock_page(page); ++ put_page(page); ++ page = NULL; ++ goto repeat; ++ } ++ put_page(page); ++ } ++ do_unlock_page = true; ++ } ++ } + if (PageMlocked(page)) + clear_page_mlock(page); + } else if (!pmd_devmap(*pmd)) +@@ -1776,6 +1821,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, + __split_huge_pmd_locked(vma, pmd, haddr, freeze); + out: + spin_unlock(ptl); ++ if (do_unlock_page) ++ unlock_page(page); + mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE); + } + +@@ -1844,7 +1891,7 @@ static void unmap_page(struct page *page) + { + enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS | + TTU_RMAP_LOCKED; +- int i, ret; ++ int i; + + VM_BUG_ON_PAGE(!PageHead(page), page); + +@@ -1852,15 +1899,16 @@ static void unmap_page(struct page *page) + ttu_flags |= TTU_MIGRATION; + + /* We only need TTU_SPLIT_HUGE_PMD once */ +- ret = try_to_unmap(page, ttu_flags | TTU_SPLIT_HUGE_PMD); +- for (i = 1; !ret && i < HPAGE_PMD_NR; i++) { ++ try_to_unmap(page, ttu_flags | TTU_SPLIT_HUGE_PMD); ++ for (i = 1; i < HPAGE_PMD_NR; i++) { + /* Cut short if the page is unmapped */ + if (page_count(page) == 1) + return; + +- ret = try_to_unmap(page + i, ttu_flags); ++ try_to_unmap(page + i, ttu_flags); + } +- VM_BUG_ON_PAGE(ret, page + i - 1); ++ ++ VM_WARN_ON_ONCE_PAGE(page_mapped(page), page); + } + + static void remap_page(struct page *page) +@@ -2091,7 +2139,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) + struct pglist_data *pgdata = NODE_DATA(page_to_nid(head)); + struct anon_vma *anon_vma = NULL; + struct address_space *mapping = NULL; +- int count, mapcount, extra_pins, ret; ++ int extra_pins, ret; + bool mlocked; + unsigned long flags; + pgoff_t end; +@@ -2154,7 +2202,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) + + mlocked = PageMlocked(page); + unmap_page(head); +- VM_BUG_ON_PAGE(compound_mapcount(head), head); + + /* Make sure the page is not on per-CPU pagevec as it takes pin */ + if (mlocked) +@@ -2180,9 +2227,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) + + /* Prevent 
deferred_split_scan() touching ->_refcount */ + spin_lock(&pgdata->split_queue_lock); +- count = page_count(head); +- mapcount = total_mapcount(head); +- if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) { ++ if (page_ref_freeze(head, 1 + extra_pins)) { + if (!list_empty(page_deferred_list(head))) { + pgdata->split_queue_len--; + list_del(page_deferred_list(head)); +@@ -2193,16 +2238,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) + __split_huge_page(page, list, end, flags); + ret = 0; + } else { +- if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) { +- pr_alert("total_mapcount: %u, page_count(): %u\n", +- mapcount, count); +- if (PageTail(page)) +- dump_page(head, NULL); +- dump_page(page, "total_mapcount(head) > 0"); +- BUG(); +- } + spin_unlock(&pgdata->split_queue_lock); +-fail: if (mapping) ++fail: ++ if (mapping) + spin_unlock(&mapping->tree_lock); + spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags); + remap_page(head); +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 9914da93069e..de89e9295f6c 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -66,6 +66,21 @@ DEFINE_SPINLOCK(hugetlb_lock); + static int num_fault_mutexes; + struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp; + ++static inline bool PageHugeFreed(struct page *head) ++{ ++ return page_private(head + 4) == -1UL; ++} ++ ++static inline void SetPageHugeFreed(struct page *head) ++{ ++ set_page_private(head + 4, -1UL); ++} ++ ++static inline void ClearPageHugeFreed(struct page *head) ++{ ++ set_page_private(head + 4, 0); ++} ++ + /* Forward declaration */ + static int hugetlb_acct_memory(struct hstate *h, long delta); + +@@ -571,13 +586,20 @@ void hugetlb_fix_reserve_counts(struct inode *inode) + { + struct hugepage_subpool *spool = subpool_inode(inode); + long rsv_adjust; ++ bool reserved = false; + + rsv_adjust = hugepage_subpool_get_pages(spool, 1); +- if (rsv_adjust) { ++ if (rsv_adjust > 0) { + struct hstate *h = hstate_inode(inode); + +- hugetlb_acct_memory(h, 1); ++ if (!hugetlb_acct_memory(h, 1)) ++ reserved = true; ++ } else if (!rsv_adjust) { ++ reserved = true; + } ++ ++ if (!reserved) ++ pr_warn("hugetlb: Huge Page Reserved count may go negative.\n"); + } + + /* +@@ -863,6 +885,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page) + list_move(&page->lru, &h->hugepage_freelists[nid]); + h->free_huge_pages++; + h->free_huge_pages_node[nid]++; ++ SetPageHugeFreed(page); + } + + static struct page *dequeue_huge_page_node(struct hstate *h, int nid) +@@ -880,6 +903,7 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid) + return NULL; + list_move(&page->lru, &h->hugepage_activelist); + set_page_refcounted(page); ++ ClearPageHugeFreed(page); + h->free_huge_pages--; + h->free_huge_pages_node[nid]--; + return page; +@@ -1168,14 +1192,16 @@ static inline int alloc_fresh_gigantic_page(struct hstate *h, + static void update_and_free_page(struct hstate *h, struct page *page) + { + int i; ++ struct page *subpage = page; + + if (hstate_is_gigantic(h) && !gigantic_page_supported()) + return; + + h->nr_huge_pages--; + h->nr_huge_pages_node[page_to_nid(page)]--; +- for (i = 0; i < pages_per_huge_page(h); i++) { +- page[i].flags &= ~(1 << PG_locked | 1 << PG_error | ++ for (i = 0; i < pages_per_huge_page(h); ++ i++, subpage = mem_map_next(subpage, page, i)) { ++ subpage->flags &= ~(1 << PG_locked | 1 << PG_error | + 1 << PG_referenced | 1 << PG_dirty | + 1 << PG_active | 1 << PG_private | + 1 << PG_writeback); +@@ -1210,12 +1236,11 @@ 
struct hstate *size_to_hstate(unsigned long size) + */ + bool page_huge_active(struct page *page) + { +- VM_BUG_ON_PAGE(!PageHuge(page), page); +- return PageHead(page) && PagePrivate(&page[1]); ++ return PageHeadHuge(page) && PagePrivate(&page[1]); + } + + /* never called for tail page */ +-static void set_page_huge_active(struct page *page) ++void set_page_huge_active(struct page *page) + { + VM_BUG_ON_PAGE(!PageHeadHuge(page), page); + SetPagePrivate(&page[1]); +@@ -1293,6 +1318,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) + set_hugetlb_cgroup(page, NULL); + h->nr_huge_pages++; + h->nr_huge_pages_node[nid]++; ++ ClearPageHugeFreed(page); + spin_unlock(&hugetlb_lock); + put_page(page); /* free it into the hugepage allocator */ + } +@@ -1354,15 +1380,12 @@ int PageHeadHuge(struct page *page_head) + return get_compound_page_dtor(page_head) == free_huge_page; + } + +-pgoff_t __basepage_index(struct page *page) ++pgoff_t hugetlb_basepage_index(struct page *page) + { + struct page *page_head = compound_head(page); + pgoff_t index = page_index(page_head); + unsigned long compound_idx; + +- if (!PageHuge(page_head)) +- return page_index(page); +- + if (compound_order(page_head) >= MAX_ORDER) + compound_idx = page_to_pfn(page) - page_to_pfn(page_head); + else +@@ -1456,6 +1479,7 @@ static int dissolve_free_huge_page(struct page *page) + { + int rc = 0; + ++retry: + spin_lock(&hugetlb_lock); + if (PageHuge(page) && !page_count(page)) { + struct page *head = compound_head(page); +@@ -1465,6 +1489,26 @@ static int dissolve_free_huge_page(struct page *page) + rc = -EBUSY; + goto out; + } ++ ++ /* ++ * We should make sure that the page is already on the free list ++ * when it is dissolved. ++ */ ++ if (unlikely(!PageHugeFreed(head))) { ++ spin_unlock(&hugetlb_lock); ++ cond_resched(); ++ ++ /* ++ * Theoretically, we should return -EBUSY when we ++ * encounter this race. In fact, we have a chance ++ * to successfully dissolve the page if we do a ++ * retry. Because the race window is quite small. ++ * If we seize this opportunity, it is an optimization ++ * for increasing the success rate of dissolving page. ++ */ ++ goto retry; ++ } ++ + list_del(&head->lru); + h->free_huge_pages--; + h->free_huge_pages_node[nid]--; +@@ -2619,8 +2663,10 @@ static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, + return -ENOMEM; + + retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); +- if (retval) ++ if (retval) { + kobject_put(hstate_kobjs[hi]); ++ hstate_kobjs[hi] = NULL; ++ } + + return retval; + } +@@ -2921,6 +2967,22 @@ static unsigned int cpuset_mems_nr(unsigned int *array) + } + + #ifdef CONFIG_SYSCTL ++static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write, ++ void *buffer, size_t *length, ++ loff_t *ppos, unsigned long *out) ++{ ++ struct ctl_table dup_table; ++ ++ /* ++ * In order to avoid races with __do_proc_doulongvec_minmax(), we ++ * can duplicate the @table and alter the duplicate of it. 
++ */ ++ dup_table = *table; ++ dup_table.data = out; ++ ++ return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); ++} ++ + static int hugetlb_sysctl_handler_common(bool obey_mempolicy, + struct ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos) +@@ -2932,9 +2994,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy, + if (!hugepages_supported()) + return -EOPNOTSUPP; + +- table->data = &tmp; +- table->maxlen = sizeof(unsigned long); +- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); ++ ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, ++ &tmp); + if (ret) + goto out; + +@@ -2978,9 +3039,8 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write, + if (write && hstate_is_gigantic(h)) + return -EINVAL; + +- table->data = &tmp; +- table->maxlen = sizeof(unsigned long); +- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); ++ ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, ++ &tmp); + if (ret) + goto out; + +@@ -3753,7 +3813,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, + * So we need to block hugepage fault by PG_hwpoison bit check. + */ + if (unlikely(PageHWPoison(page))) { +- ret = VM_FAULT_HWPOISON | ++ ret = VM_FAULT_HWPOISON_LARGE | + VM_FAULT_SET_HINDEX(hstate_index(h)); + goto backout_unlocked; + } +@@ -3824,7 +3884,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, + + #ifdef CONFIG_SMP + u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, +- pgoff_t idx, unsigned long address) ++ pgoff_t idx) + { + unsigned long key[2]; + u32 hash; +@@ -3832,7 +3892,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, + key[0] = (unsigned long) mapping; + key[1] = idx; + +- hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0); ++ hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0); + + return hash & (num_fault_mutexes - 1); + } +@@ -3842,7 +3902,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, + * return 0 and avoid the hashing overhead. + */ + u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, +- pgoff_t idx, unsigned long address) ++ pgoff_t idx) + { + return 0; + } +@@ -3887,7 +3947,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, + * get spurious allocation failures if two CPUs race to instantiate + * the same page in the page cache. + */ +- hash = hugetlb_fault_mutex_hash(h, mapping, idx, address); ++ hash = hugetlb_fault_mutex_hash(h, mapping, idx); + mutex_lock(&hugetlb_fault_mutex_table[hash]); + + entry = huge_ptep_get(ptep); +@@ -4380,25 +4440,23 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) + void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, + unsigned long *start, unsigned long *end) + { +- unsigned long check_addr = *start; ++ unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE), ++ v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); + +- if (!(vma->vm_flags & VM_MAYSHARE)) ++ /* ++ * vma need span at least one aligned PUD size and the start,end range ++ * must at least partialy within it. 
++ */ ++ if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) || ++ (*end <= v_start) || (*start >= v_end)) + return; + +- for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) { +- unsigned long a_start = check_addr & PUD_MASK; +- unsigned long a_end = a_start + PUD_SIZE; ++ /* Extend the range to be PUD aligned for a worst case scenario */ ++ if (*start > v_start) ++ *start = ALIGN_DOWN(*start, PUD_SIZE); + +- /* +- * If sharing is possible, adjust start/end if necessary. +- */ +- if (range_in_vma(vma, a_start, a_end)) { +- if (a_start < *start) +- *start = a_start; +- if (a_end > *end) +- *end = a_end; +- } +- } ++ if (*end < v_end) ++ *end = ALIGN(*end, PUD_SIZE); + } + + /* +@@ -4647,9 +4705,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list) + { + bool ret = true; + +- VM_BUG_ON_PAGE(!PageHead(page), page); + spin_lock(&hugetlb_lock); +- if (!page_huge_active(page) || !get_page_unless_zero(page)) { ++ if (!PageHeadHuge(page) || !page_huge_active(page) || ++ !get_page_unless_zero(page)) { + ret = false; + goto unlock; + } +diff --git a/mm/khugepaged.c b/mm/khugepaged.c +index f5d1c1535f8f..7cb00eac1a4a 100644 +--- a/mm/khugepaged.c ++++ b/mm/khugepaged.c +@@ -50,6 +50,9 @@ enum scan_result { + #define CREATE_TRACE_POINTS + #include + ++static struct task_struct *khugepaged_thread __read_mostly; ++static DEFINE_MUTEX(khugepaged_mutex); ++ + /* default scan 8*512 pte (or vmas) every 30 second */ + static unsigned int khugepaged_pages_to_scan __read_mostly; + static unsigned int khugepaged_pages_collapsed; +@@ -391,7 +394,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm, + + static inline int khugepaged_test_exit(struct mm_struct *mm) + { +- return atomic_read(&mm->mm_users) == 0; ++ return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm); + } + + int __khugepaged_enter(struct mm_struct *mm) +@@ -404,7 +407,7 @@ int __khugepaged_enter(struct mm_struct *mm) + return -ENOMEM; + + /* __khugepaged_exit() must not run from under us */ +- VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); ++ VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm); + if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { + free_mm_slot(mm_slot); + return 0; +@@ -593,17 +596,17 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, + mmu_notifier_test_young(vma->vm_mm, address)) + referenced++; + } +- if (likely(writable)) { +- if (likely(referenced)) { +- result = SCAN_SUCCEED; +- trace_mm_collapse_huge_page_isolate(page, none_or_zero, +- referenced, writable, result); +- return 1; +- } +- } else { ++ ++ if (unlikely(!writable)) { + result = SCAN_PAGE_RO; ++ } else if (unlikely(!referenced)) { ++ result = SCAN_LACK_REFERENCED_PAGE; ++ } else { ++ result = SCAN_SUCCEED; ++ trace_mm_collapse_huge_page_isolate(page, none_or_zero, ++ referenced, writable, result); ++ return 1; + } +- + out: + release_pte_pages(pte, _pte); + trace_mm_collapse_huge_page_isolate(page, none_or_zero, +@@ -802,6 +805,18 @@ static struct page *khugepaged_alloc_hugepage(bool *wait) + + static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) + { ++ /* ++ * If the hpage allocated earlier was briefly exposed in page cache ++ * before collapse_file() failed, it is possible that racing lookups ++ * have not yet completed, and would then be unpleasantly surprised by ++ * finding the hpage reused for the same mapping at a different offset. ++ * Just release the previous allocation if there is any danger of that. 
++ */ ++ if (*hpage && page_count(*hpage) > 1) { ++ put_page(*hpage); ++ *hpage = NULL; ++ } ++ + if (!*hpage) + *hpage = khugepaged_alloc_hugepage(wait); + +@@ -1006,9 +1021,6 @@ static void collapse_huge_page(struct mm_struct *mm, + * handled by the anon_vma lock + PG_lock. + */ + down_write(&mm->mmap_sem); +- result = SCAN_ANY_PROCESS; +- if (!mmget_still_valid(mm)) +- goto out; + result = hugepage_vma_revalidate(mm, address, &vma); + if (result) + goto out; +@@ -1255,6 +1267,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot) + static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) + { + struct vm_area_struct *vma; ++ struct mm_struct *mm; + unsigned long addr; + pmd_t *pmd, _pmd; + +@@ -1268,7 +1281,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) + continue; + if (vma->vm_end < addr + HPAGE_PMD_SIZE) + continue; +- pmd = mm_find_pmd(vma->vm_mm, addr); ++ mm = vma->vm_mm; ++ pmd = mm_find_pmd(mm, addr); + if (!pmd) + continue; + /* +@@ -1277,14 +1291,16 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) + * re-fault. Not ideal, but it's more important to not disturb + * the system too much. + */ +- if (down_write_trylock(&vma->vm_mm->mmap_sem)) { +- spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd); +- /* assume page table is clear */ +- _pmd = pmdp_collapse_flush(vma, addr, pmd); +- spin_unlock(ptl); +- up_write(&vma->vm_mm->mmap_sem); +- atomic_long_dec(&vma->vm_mm->nr_ptes); +- pte_free(vma->vm_mm, pmd_pgtable(_pmd)); ++ if (down_write_trylock(&mm->mmap_sem)) { ++ if (!khugepaged_test_exit(mm)) { ++ spinlock_t *ptl = pmd_lock(mm, pmd); ++ /* assume page table is clear */ ++ _pmd = pmdp_collapse_flush(vma, addr, pmd); ++ spin_unlock(ptl); ++ atomic_long_dec(&mm->nr_ptes); ++ pte_free(mm, pmd_pgtable(_pmd)); ++ } ++ up_write(&mm->mmap_sem); + } + } + i_mmap_unlock_write(mapping); +@@ -1940,8 +1956,6 @@ static void set_recommended_min_free_kbytes(void) + + int start_stop_khugepaged(void) + { +- static struct task_struct *khugepaged_thread __read_mostly; +- static DEFINE_MUTEX(khugepaged_mutex); + int err = 0; + + mutex_lock(&khugepaged_mutex); +@@ -1968,3 +1982,11 @@ int start_stop_khugepaged(void) + mutex_unlock(&khugepaged_mutex); + return err; + } ++ ++void khugepaged_min_free_kbytes_update(void) ++{ ++ mutex_lock(&khugepaged_mutex); ++ if (khugepaged_enabled() && khugepaged_thread) ++ set_recommended_min_free_kbytes(); ++ mutex_unlock(&khugepaged_mutex); ++} +diff --git a/mm/ksm.c b/mm/ksm.c +index 34b64fb48a8b..470e268a1d94 100644 +--- a/mm/ksm.c ++++ b/mm/ksm.c +@@ -647,6 +647,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item) + ksm_pages_shared--; + + put_anon_vma(rmap_item->anon_vma); ++ rmap_item->head = NULL; + rmap_item->address &= PAGE_MASK; + + } else if (rmap_item->address & UNSTABLE_FLAG) { +diff --git a/mm/maccess.c b/mm/maccess.c +index 78f9274dd49d..03ea550f5a74 100644 +--- a/mm/maccess.c ++++ b/mm/maccess.c +@@ -5,8 +5,32 @@ + #include + #include + ++static __always_inline long ++probe_read_common(void *dst, const void __user *src, size_t size) ++{ ++ long ret; ++ ++ pagefault_disable(); ++ ret = __copy_from_user_inatomic(dst, src, size); ++ pagefault_enable(); ++ ++ return ret ? -EFAULT : 0; ++} ++ ++static __always_inline long ++probe_write_common(void __user *dst, const void *src, size_t size) ++{ ++ long ret; ++ ++ pagefault_disable(); ++ ret = __copy_to_user_inatomic(dst, src, size); ++ pagefault_enable(); ++ ++ return ret ? 
-EFAULT : 0; ++} ++ + /** +- * probe_kernel_read(): safely attempt to read from a location ++ * probe_kernel_read(): safely attempt to read from a kernel-space location + * @dst: pointer to the buffer that shall take the data + * @src: address to read from + * @size: size of the data chunk +@@ -29,16 +53,40 @@ long __probe_kernel_read(void *dst, const void *src, size_t size) + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); +- pagefault_disable(); +- ret = __copy_from_user_inatomic(dst, +- (__force const void __user *)src, size); +- pagefault_enable(); ++ ret = probe_read_common(dst, (__force const void __user *)src, size); + set_fs(old_fs); + +- return ret ? -EFAULT : 0; ++ return ret; + } + EXPORT_SYMBOL_GPL(probe_kernel_read); + ++/** ++ * probe_user_read(): safely attempt to read from a user-space location ++ * @dst: pointer to the buffer that shall take the data ++ * @src: address to read from. This must be a user address. ++ * @size: size of the data chunk ++ * ++ * Safely read from user address @src to the buffer at @dst. If a kernel fault ++ * happens, handle that and return -EFAULT. ++ */ ++ ++long __weak probe_user_read(void *dst, const void __user *src, size_t size) ++ __attribute__((alias("__probe_user_read"))); ++ ++long __probe_user_read(void *dst, const void __user *src, size_t size) ++{ ++ long ret = -EFAULT; ++ mm_segment_t old_fs = get_fs(); ++ ++ set_fs(USER_DS); ++ if (access_ok(VERIFY_READ, src, size)) ++ ret = probe_read_common(dst, src, size); ++ set_fs(old_fs); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(probe_user_read); ++ + /** + * probe_kernel_write(): safely attempt to write to a location + * @dst: address to write to +@@ -48,6 +96,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read); + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. + */ ++ + long __weak probe_kernel_write(void *dst, const void *src, size_t size) + __attribute__((alias("__probe_kernel_write"))); + +@@ -57,15 +106,40 @@ long __probe_kernel_write(void *dst, const void *src, size_t size) + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); +- pagefault_disable(); +- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); +- pagefault_enable(); ++ ret = probe_write_common((__force void __user *)dst, src, size); + set_fs(old_fs); + +- return ret ? -EFAULT : 0; ++ return ret; + } + EXPORT_SYMBOL_GPL(probe_kernel_write); + ++/** ++ * probe_user_write(): safely attempt to write to a user-space location ++ * @dst: address to write to ++ * @src: pointer to the data that shall be written ++ * @size: size of the data chunk ++ * ++ * Safely write to address @dst from the buffer at @src. If a kernel fault ++ * happens, handle that and return -EFAULT. ++ */ ++ ++long __weak probe_user_write(void __user *dst, const void *src, size_t size) ++ __attribute__((alias("__probe_user_write"))); ++ ++long __probe_user_write(void __user *dst, const void *src, size_t size) ++{ ++ long ret = -EFAULT; ++ mm_segment_t old_fs = get_fs(); ++ ++ set_fs(USER_DS); ++ if (access_ok(VERIFY_WRITE, dst, size)) ++ ret = probe_write_common(dst, src, size); ++ set_fs(old_fs); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(probe_user_write); ++ + /** + * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address. + * @dst: Destination address, in kernel space. This buffer must be at +@@ -105,3 +179,76 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count) + + return ret ? 
-EFAULT : src - unsafe_addr; + } ++ ++/** ++ * strncpy_from_unsafe_user: - Copy a NUL terminated string from unsafe user ++ * address. ++ * @dst: Destination address, in kernel space. This buffer must be at ++ * least @count bytes long. ++ * @unsafe_addr: Unsafe user address. ++ * @count: Maximum number of bytes to copy, including the trailing NUL. ++ * ++ * Copies a NUL-terminated string from unsafe user address to kernel buffer. ++ * ++ * On success, returns the length of the string INCLUDING the trailing NUL. ++ * ++ * If access fails, returns -EFAULT (some data may have been copied ++ * and the trailing NUL added). ++ * ++ * If @count is smaller than the length of the string, copies @count-1 bytes, ++ * sets the last byte of @dst buffer to NUL and returns @count. ++ */ ++long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, ++ long count) ++{ ++ mm_segment_t old_fs = get_fs(); ++ long ret; ++ ++ if (unlikely(count <= 0)) ++ return 0; ++ ++ set_fs(USER_DS); ++ pagefault_disable(); ++ ret = strncpy_from_user(dst, unsafe_addr, count); ++ pagefault_enable(); ++ set_fs(old_fs); ++ ++ if (ret >= count) { ++ ret = count; ++ dst[ret - 1] = '\0'; ++ } else if (ret > 0) { ++ ret++; ++ } ++ ++ return ret; ++} ++ ++/** ++ * strnlen_unsafe_user: - Get the size of a user string INCLUDING final NUL. ++ * @unsafe_addr: The string to measure. ++ * @count: Maximum count (including NUL) ++ * ++ * Get the size of a NUL-terminated string in user space without pagefault. ++ * ++ * Returns the size of the string INCLUDING the terminating NUL. ++ * ++ * If the string is too long, returns a number larger than @count. User ++ * has to check the return value against "> count". ++ * On exception (or invalid count), returns 0. ++ * ++ * Unlike strnlen_user, this can be used from IRQ handler etc. because ++ * it disables pagefaults. ++ */ ++long strnlen_unsafe_user(const void __user *unsafe_addr, long count) ++{ ++ mm_segment_t old_fs = get_fs(); ++ int ret; ++ ++ set_fs(USER_DS); ++ pagefault_disable(); ++ ret = strnlen_user(unsafe_addr, count); ++ pagefault_enable(); ++ set_fs(old_fs); ++ ++ return ret; ++} +diff --git a/mm/memblock.c b/mm/memblock.c +index abaed7ae1597..019221a290d7 100644 +--- a/mm/memblock.c ++++ b/mm/memblock.c +@@ -190,14 +190,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, + * + * Find @size free area aligned to @align in the specified range and node. + * +- * When allocation direction is bottom-up, the @start should be greater +- * than the end of the kernel image. Otherwise, it will be trimmed. The +- * reason is that we want the bottom-up allocation just near the kernel +- * image so it is highly likely that the allocated memory and the kernel +- * will reside in the same node. +- * +- * If bottom-up allocation failed, will try to allocate memory top-down. +- * + * RETURNS: + * Found address on success, 0 on failure. 
+ */ +@@ -205,8 +197,6 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, + phys_addr_t align, phys_addr_t start, + phys_addr_t end, int nid, ulong flags) + { +- phys_addr_t kernel_end, ret; +- + /* pump up @end */ + if (end == MEMBLOCK_ALLOC_ACCESSIBLE) + end = memblock.current_limit; +@@ -214,39 +204,13 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, + /* avoid allocating the first page */ + start = max_t(phys_addr_t, start, PAGE_SIZE); + end = max(start, end); +- kernel_end = __pa_symbol(_end); +- +- /* +- * try bottom-up allocation only when bottom-up mode +- * is set and @end is above the kernel image. +- */ +- if (memblock_bottom_up() && end > kernel_end) { +- phys_addr_t bottom_up_start; +- +- /* make sure we will allocate above the kernel */ +- bottom_up_start = max(start, kernel_end); + +- /* ok, try bottom-up allocation first */ +- ret = __memblock_find_range_bottom_up(bottom_up_start, end, +- size, align, nid, flags); +- if (ret) +- return ret; +- +- /* +- * we always limit bottom-up allocation above the kernel, +- * but top-down allocation doesn't have the limit, so +- * retrying top-down allocation may succeed when bottom-up +- * allocation failed. +- * +- * bottom-up allocation is expected to be fail very rarely, +- * so we use WARN_ONCE() here to see the stack trace if +- * fail happens. +- */ +- WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n"); +- } +- +- return __memblock_find_range_top_down(start, end, size, align, nid, +- flags); ++ if (memblock_bottom_up()) ++ return __memblock_find_range_bottom_up(start, end, size, align, ++ nid, flags); ++ else ++ return __memblock_find_range_top_down(start, end, size, align, ++ nid, flags); + } + + /** +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index f8088c1a0083..319d01e7846c 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -4813,7 +4813,6 @@ static void __mem_cgroup_clear_mc(void) + if (!mem_cgroup_is_root(mc.to)) + page_counter_uncharge(&mc.to->memory, mc.moved_swap); + +- mem_cgroup_id_get_many(mc.to, mc.moved_swap); + css_put_many(&mc.to->css, mc.moved_swap); + + mc.moved_swap = 0; +@@ -4996,7 +4995,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, + ent = target.ent; + if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { + mc.precharge--; +- /* we fixup refcnts and charges later. */ ++ mem_cgroup_id_get_many(mc.to, 1); ++ /* we fixup other refcnts and charges later. */ + mc.moved_swap++; + } + break; +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index a17d9c0b98fb..14be367f6f6a 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -1010,22 +1010,6 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, + return ret; + } + +-static void set_page_hwpoison_huge_page(struct page *hpage) +-{ +- int i; +- int nr_pages = 1 << compound_order(hpage); +- for (i = 0; i < nr_pages; i++) +- SetPageHWPoison(hpage + i); +-} +- +-static void clear_page_hwpoison_huge_page(struct page *hpage) +-{ +- int i; +- int nr_pages = 1 << compound_order(hpage); +- for (i = 0; i < nr_pages; i++) +- ClearPageHWPoison(hpage + i); +-} +- + /** + * memory_failure - Handle memory failure of a page. 
+ * @pfn: Page Number of the corrupted page +@@ -1051,7 +1035,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags) + struct page *hpage; + struct page *orig_head; + int res; +- unsigned int nr_pages; + unsigned long page_flags; + + if (!sysctl_memory_failure_recovery) +@@ -1065,24 +1048,23 @@ int memory_failure(unsigned long pfn, int trapno, int flags) + + p = pfn_to_page(pfn); + orig_head = hpage = compound_head(p); ++ ++ /* tmporary check code, to be updated in later patches */ ++ if (PageHuge(p)) { ++ if (TestSetPageHWPoison(hpage)) { ++ pr_err("Memory failure: %#lx: already hardware poisoned\n", pfn); ++ return 0; ++ } ++ goto tmp; ++ } + if (TestSetPageHWPoison(p)) { + pr_err("Memory failure: %#lx: already hardware poisoned\n", + pfn); + return 0; + } + +- /* +- * Currently errors on hugetlbfs pages are measured in hugepage units, +- * so nr_pages should be 1 << compound_order. OTOH when errors are on +- * transparent hugepages, they are supposed to be split and error +- * measurement is done in normal page units. So nr_pages should be one +- * in this case. +- */ +- if (PageHuge(p)) +- nr_pages = 1 << compound_order(hpage); +- else /* normal page or thp */ +- nr_pages = 1; +- num_poisoned_pages_add(nr_pages); ++tmp: ++ num_poisoned_pages_inc(); + + /* + * We need/can do nothing about count=0 pages. +@@ -1110,12 +1092,11 @@ int memory_failure(unsigned long pfn, int trapno, int flags) + if (PageHWPoison(hpage)) { + if ((hwpoison_filter(p) && TestClearPageHWPoison(p)) + || (p != hpage && TestSetPageHWPoison(hpage))) { +- num_poisoned_pages_sub(nr_pages); ++ num_poisoned_pages_dec(); + unlock_page(hpage); + return 0; + } + } +- set_page_hwpoison_huge_page(hpage); + res = dequeue_hwpoisoned_huge_page(hpage); + action_result(pfn, MF_MSG_FREE_HUGE, + res ? MF_IGNORED : MF_DELAYED); +@@ -1138,7 +1119,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags) + pr_err("Memory failure: %#lx: thp split failed\n", + pfn); + if (TestClearPageHWPoison(p)) +- num_poisoned_pages_sub(nr_pages); ++ num_poisoned_pages_dec(); + put_hwpoison_page(p); + return -EBUSY; + } +@@ -1202,14 +1183,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags) + */ + if (!PageHWPoison(p)) { + pr_err("Memory failure: %#lx: just unpoisoned\n", pfn); +- num_poisoned_pages_sub(nr_pages); ++ num_poisoned_pages_dec(); + unlock_page(hpage); + put_hwpoison_page(hpage); + return 0; + } + if (hwpoison_filter(p)) { + if (TestClearPageHWPoison(p)) +- num_poisoned_pages_sub(nr_pages); ++ num_poisoned_pages_dec(); + unlock_page(hpage); + put_hwpoison_page(hpage); + return 0; +@@ -1228,14 +1209,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags) + put_hwpoison_page(hpage); + return 0; + } +- /* +- * Set PG_hwpoison on all pages in an error hugepage, +- * because containment is done in hugepage unit for now. +- * Since we have done TestSetPageHWPoison() for the head page with +- * page lock held, we can safely set PG_hwpoison bits on tail pages. 
+- */ +- if (PageHuge(p)) +- set_page_hwpoison_huge_page(hpage); + + /* + * It's very difficult to mess with pages currently under IO +@@ -1407,7 +1380,6 @@ int unpoison_memory(unsigned long pfn) + struct page *page; + struct page *p; + int freeit = 0; +- unsigned int nr_pages; + static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + +@@ -1452,8 +1424,6 @@ int unpoison_memory(unsigned long pfn) + return 0; + } + +- nr_pages = 1 << compound_order(page); +- + if (!get_hwpoison_page(p)) { + /* + * Since HWPoisoned hugepage should have non-zero refcount, +@@ -1483,10 +1453,8 @@ int unpoison_memory(unsigned long pfn) + if (TestClearPageHWPoison(page)) { + unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n", + pfn, &unpoison_rs); +- num_poisoned_pages_sub(nr_pages); ++ num_poisoned_pages_dec(); + freeit = 1; +- if (PageHuge(page)) +- clear_page_hwpoison_huge_page(page); + } + unlock_page(page); + +@@ -1612,14 +1580,10 @@ static int soft_offline_huge_page(struct page *page, int flags) + ret = -EIO; + } else { + /* overcommit hugetlb page will be freed to buddy */ +- if (PageHuge(page)) { +- set_page_hwpoison_huge_page(hpage); ++ SetPageHWPoison(page); ++ if (PageHuge(page)) + dequeue_hwpoisoned_huge_page(hpage); +- num_poisoned_pages_add(1 << compound_order(hpage)); +- } else { +- SetPageHWPoison(page); +- num_poisoned_pages_inc(); +- } ++ num_poisoned_pages_inc(); + } + return ret; + } +@@ -1728,15 +1692,12 @@ static int soft_offline_in_use_page(struct page *page, int flags) + + static void soft_offline_free_page(struct page *page) + { +- if (PageHuge(page)) { +- struct page *hpage = compound_head(page); ++ struct page *head = compound_head(page); + +- set_page_hwpoison_huge_page(hpage); +- if (!dequeue_hwpoisoned_huge_page(hpage)) +- num_poisoned_pages_add(1 << compound_order(hpage)); +- } else { +- if (!TestSetPageHWPoison(page)) +- num_poisoned_pages_inc(); ++ if (!TestSetPageHWPoison(head)) { ++ num_poisoned_pages_inc(); ++ if (PageHuge(head)) ++ dequeue_hwpoisoned_huge_page(head); + } + } + +diff --git a/mm/memory.c b/mm/memory.c +index cff4067b6edd..ea2a223754a2 100755 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -135,7 +135,7 @@ static int __init init_zero_pfn(void) + zero_pfn = page_to_pfn(ZERO_PAGE(0)); + return 0; + } +-core_initcall(init_zero_pfn); ++early_initcall(init_zero_pfn); + + + #if defined(SPLIT_RSS_COUNTING) +@@ -1712,11 +1712,11 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, unsigned long end, + unsigned long pfn, pgprot_t prot) + { +- pte_t *pte; ++ pte_t *pte, *mapped_pte; + spinlock_t *ptl; + int err = 0; + +- pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); ++ mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); + if (!pte) + return -ENOMEM; + arch_enter_lazy_mmu_mode(); +@@ -1730,7 +1730,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, + pfn++; + } while (pte++, addr += PAGE_SIZE, addr != end); + arch_leave_lazy_mmu_mode(); +- pte_unmap_unlock(pte - 1, ptl); ++ pte_unmap_unlock(mapped_pte, ptl); + return err; + } + +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c +index 68d009b90415..9bf78deba3e3 100644 +--- a/mm/memory_hotplug.c ++++ b/mm/memory_hotplug.c +@@ -1125,7 +1125,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ + mutex_lock(&zonelists_mutex); + if (!populated_zone(zone)) { + need_zonelists_rebuild = 1; +- build_all_zonelists(NULL, zone); ++ build_all_zonelists(NULL, zone, true); + } + + ret = 
walk_system_ram_range(pfn, nr_pages, &onlined_pages, +@@ -1146,7 +1146,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ + if (onlined_pages) { + node_states_set_node(nid, &arg); + if (need_zonelists_rebuild) +- build_all_zonelists(NULL, NULL); ++ build_all_zonelists(NULL, NULL, true); + else + zone_pcp_update(zone); + } +@@ -1224,7 +1224,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) + * to access not-initialized zonelist, build here. + */ + mutex_lock(&zonelists_mutex); +- build_all_zonelists(pgdat, NULL); ++ build_all_zonelists(pgdat, NULL, true); + mutex_unlock(&zonelists_mutex); + + /* +@@ -1280,7 +1280,7 @@ int try_online_node(int nid) + + if (pgdat->node_zonelists->_zonerefs->zone == NULL) { + mutex_lock(&zonelists_mutex); +- build_all_zonelists(NULL, NULL); ++ build_all_zonelists(NULL, NULL, true); + mutex_unlock(&zonelists_mutex); + } + +@@ -2020,7 +2020,7 @@ static int __ref __offline_pages(unsigned long start_pfn, + if (!populated_zone(zone)) { + zone_pcp_reset(zone); + mutex_lock(&zonelists_mutex); +- build_all_zonelists(NULL, NULL); ++ build_all_zonelists(NULL, NULL, true); + mutex_unlock(&zonelists_mutex); + } else + zone_pcp_update(zone); +diff --git a/mm/mempolicy.c b/mm/mempolicy.c +index b6821d28641e..fd0244f4c297 100644 +--- a/mm/mempolicy.c ++++ b/mm/mempolicy.c +@@ -490,7 +490,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, + struct queue_pages *qp = walk->private; + unsigned long flags = qp->flags; + int nid, ret; +- pte_t *pte; ++ pte_t *pte, *mapped_pte; + spinlock_t *ptl; + + if (pmd_trans_huge(*pmd)) { +@@ -518,7 +518,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, + if (pmd_trans_unstable(pmd)) + return 0; + retry: +- pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); ++ mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + for (; addr != end; pte++, addr += PAGE_SIZE) { + if (!pte_present(*pte)) + continue; +@@ -557,7 +557,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, + } else + break; + } +- pte_unmap_unlock(pte - 1, ptl); ++ pte_unmap_unlock(mapped_pte, ptl); + cond_resched(); + return addr != end ? -EIO : 0; + } +diff --git a/mm/mmap.c b/mm/mmap.c +index ddfff14bb63f..fa924527c8cf 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -2117,6 +2117,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + info.low_limit = mm->mmap_base; + info.high_limit = TASK_SIZE; + info.align_mask = 0; ++ info.align_offset = 0; + return vm_unmapped_area(&info); + } + #endif +@@ -2158,6 +2159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + info.low_limit = max(PAGE_SIZE, mmap_min_addr); + info.high_limit = mm->mmap_base; + info.align_mask = 0; ++ info.align_offset = 0; + addr = vm_unmapped_area(&info); + + /* +@@ -3148,6 +3150,7 @@ void exit_mmap(struct mm_struct *mm) + if (vma->vm_flags & VM_ACCOUNT) + nr_accounted += vma_pages(vma); + vma = remove_vma(vma); ++ cond_resched(); + } + vm_unacct_memory(nr_accounted); + } +diff --git a/mm/page-writeback.c b/mm/page-writeback.c +index 21a12577b287..a207bc406884 100644 +--- a/mm/page-writeback.c ++++ b/mm/page-writeback.c +@@ -2732,7 +2732,6 @@ int test_clear_page_writeback(struct page *page) + } else { + ret = TestClearPageWriteback(page); + } +- + /* + * NOTE: Page might be free now! 
Writeback doesn't hold a page + * reference on its own, it relies on truncation to wait for +@@ -2740,7 +2739,8 @@ int test_clear_page_writeback(struct page *page) + * page state that is static across allocation cycles. + */ + if (ret) { +- mem_cgroup_dec_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); ++ __mem_cgroup_update_page_stat(page, memcg, ++ MEM_CGROUP_STAT_WRITEBACK, -1); + dec_node_page_state(page, NR_WRITEBACK); + dec_zone_page_state(page, NR_ZONE_WRITE_PENDING); + inc_node_page_state(page, NR_WRITTEN); +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 3f30ba2d7a09..dc37518275d9 100755 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -65,6 +65,7 @@ + #include + #include + #include ++#include + #include + + #include +@@ -1126,6 +1127,11 @@ static void free_pcppages_bulk(struct zone *zone, int count, + spin_lock(&zone->lock); + isolated_pageblocks = has_isolate_pageblock(zone); + ++ /* ++ * Ensure proper count is passed which otherwise would stuck in the ++ * below while (list_empty(list)) loop. ++ */ ++ count = min(pcp->count, count); + while (count) { + struct page *page; + struct list_head *list; +@@ -4759,7 +4765,7 @@ int numa_zonelist_order_handler(struct ctl_table *table, int write, + user_zonelist_order = oldval; + } else if (oldval != user_zonelist_order) { + mutex_lock(&zonelists_mutex); +- build_all_zonelists(NULL, NULL); ++ build_all_zonelists(NULL, NULL, false); + mutex_unlock(&zonelists_mutex); + } + } +@@ -5139,11 +5145,12 @@ build_all_zonelists_init(void) + * (2) call of __init annotated helper build_all_zonelists_init + * [protected by SYSTEM_BOOTING]. + */ +-void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone) ++void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone, ++ bool hotplug_context) + { + set_zonelist_order(); + +- if (system_state == SYSTEM_BOOTING) { ++ if (system_state == SYSTEM_BOOTING && !hotplug_context) { + build_all_zonelists_init(); + } else { + #ifdef CONFIG_MEMORY_HOTPLUG +@@ -6936,9 +6943,11 @@ int __meminit init_per_zone_wmark_min(void) + setup_min_slab_ratio(); + #endif + ++ khugepaged_min_free_kbytes_update(); ++ + return 0; + } +-core_initcall(init_per_zone_wmark_min) ++postcore_initcall(init_per_zone_wmark_min) + + /* + * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so +diff --git a/mm/page_ext.c b/mm/page_ext.c +index 561ed6e16810..557d4d306df0 100644 +--- a/mm/page_ext.c ++++ b/mm/page_ext.c +@@ -99,6 +99,13 @@ static void __init invoke_init_callbacks(void) + } + } + ++#if !defined(CONFIG_SPARSEMEM) ++void __init page_ext_init_flatmem_late(void) ++{ ++ invoke_init_callbacks(); ++} ++#endif ++ + static unsigned long get_entry_size(void) + { + return sizeof(struct page_ext) + extra_mem; +@@ -187,7 +194,6 @@ void __init page_ext_init_flatmem(void) + goto fail; + } + pr_info("allocated %ld bytes of page_ext\n", total_usage); +- invoke_init_callbacks(); + return; + + fail: +diff --git a/mm/page_io.c b/mm/page_io.c +index 83e5613082cf..7a39aed0c3b7 100644 +--- a/mm/page_io.c ++++ b/mm/page_io.c +@@ -33,7 +33,6 @@ static struct bio *get_swap_bio(gfp_t gfp_flags, + bio = bio_alloc(gfp_flags, 1); + if (bio) { + bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev); +- bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9; + bio->bi_end_io = end_io; + + bio_add_page(bio, page, PAGE_SIZE, 0); +@@ -253,11 +252,6 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) + return ret; + } + +-static sector_t swap_page_sector(struct page *page) +-{ +- return (sector_t)__page_file_index(page) 
<< (PAGE_SHIFT - 9); +-} +- + int __swap_writepage(struct page *page, struct writeback_control *wbc, + bio_end_io_t end_write_func) + { +@@ -307,7 +301,8 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc, + return ret; + } + +- ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc); ++ ret = bdev_write_page(sis->bdev, map_swap_page(page, &sis->bdev), ++ page, wbc); + if (!ret) { + count_vm_event(PSWPOUT); + return 0; +@@ -367,7 +362,7 @@ int swap_readpage(struct page *page) + goto out; + } + +- ret = bdev_read_page(sis->bdev, swap_page_sector(page), page); ++ ret = bdev_read_page(sis->bdev, map_swap_page(page, &sis->bdev), page); + if (!ret) { + if (trylock_page(page)) { + swap_slot_free_notify(page); +diff --git a/mm/pagewalk.c b/mm/pagewalk.c +index d95341cffc2f..8d6290502631 100644 +--- a/mm/pagewalk.c ++++ b/mm/pagewalk.c +@@ -14,9 +14,9 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, + err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk); + if (err) + break; +- addr += PAGE_SIZE; +- if (addr == end) ++ if (addr >= end - PAGE_SIZE) + break; ++ addr += PAGE_SIZE; + pte++; + } + +diff --git a/mm/slab_common.c b/mm/slab_common.c +index 467cf11f6990..0e6e78b96267 100644 +--- a/mm/slab_common.c ++++ b/mm/slab_common.c +@@ -1347,7 +1347,7 @@ void kzfree(const void *p) + if (unlikely(ZERO_OR_NULL_PTR(mem))) + return; + ks = ksize(mem); +- memset(mem, 0, ks); ++ memzero_explicit(mem, ks); + kfree(mem); + } + EXPORT_SYMBOL(kzfree); +diff --git a/mm/slob.c b/mm/slob.c +index eac04d4357ec..e6e7137df116 100644 +--- a/mm/slob.c ++++ b/mm/slob.c +@@ -472,6 +472,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller) + { + return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller); + } ++EXPORT_SYMBOL(__kmalloc_track_caller); + + #ifdef CONFIG_NUMA + void *__kmalloc_node_track_caller(size_t size, gfp_t gfp, +@@ -479,6 +480,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfp, + { + return __do_kmalloc_node(size, gfp, node, caller); + } ++EXPORT_SYMBOL(__kmalloc_node_track_caller); + #endif + + void kfree(const void *block) +diff --git a/mm/slub.c b/mm/slub.c +index cae85b2db87b..dbefbbef0ace 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -624,6 +624,20 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...) + va_end(args); + } + ++static bool freelist_corrupted(struct kmem_cache *s, struct page *page, ++ void **freelist, void *nextfree) ++{ ++ if ((s->flags & SLAB_CONSISTENCY_CHECKS) && ++ !check_valid_pointer(s, page, nextfree) && freelist) { ++ object_err(s, page, *freelist, "Freechain corrupt"); ++ *freelist = NULL; ++ slab_fix(s, "Isolate corrupted freechain"); ++ return true; ++ } ++ ++ return false; ++} ++ + static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) + { + unsigned int off; /* Offset of last byte */ +@@ -1317,6 +1331,11 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, + static inline void dec_slabs_node(struct kmem_cache *s, int node, + int objects) {} + ++static bool freelist_corrupted(struct kmem_cache *s, struct page *page, ++ void **freelist, void *nextfree) ++{ ++ return false; ++} + #endif /* CONFIG_SLUB_DEBUG */ + + /* +@@ -2029,6 +2048,14 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, + void *prior; + unsigned long counters; + ++ /* ++ * If 'nextfree' is invalid, it is possible that the object at ++ * 'freelist' is already corrupted. So isolate all objects ++ * starting at 'freelist'. 
++ */ ++ if (freelist_corrupted(s, page, &freelist, nextfree)) ++ break; ++ + do { + prior = page->freelist; + counters = page->counters; +@@ -4269,6 +4296,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) + + return ret; + } ++EXPORT_SYMBOL(__kmalloc_track_caller); + + #ifdef CONFIG_NUMA + void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, +@@ -4299,6 +4327,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, + + return ret; + } ++EXPORT_SYMBOL(__kmalloc_node_track_caller); + #endif + + #ifdef CONFIG_SYSFS +@@ -5529,7 +5558,8 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s) + */ + if (buffer) + buf = buffer; +- else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf)) ++ else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf) && ++ !IS_ENABLED(CONFIG_SLUB_STATS)) + buf = mbuf; + else { + buffer = (char *) get_zeroed_page(GFP_KERNEL); +diff --git a/mm/swap_state.c b/mm/swap_state.c +index d4f3b2fec061..57c35b5e75fc 100644 +--- a/mm/swap_state.c ++++ b/mm/swap_state.c +@@ -20,6 +20,7 @@ + #include + + #include ++#include "internal.h" + + /* + * swapper_space is a fiction, retained to simplify the path through +@@ -327,7 +328,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, + /* + * call radix_tree_preload() while we can wait. + */ +- err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL); ++ err = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK); + if (err) + break; + +diff --git a/mm/swapfile.c b/mm/swapfile.c +index 7b996b669cf0..2c6c23deeb08 100644 +--- a/mm/swapfile.c ++++ b/mm/swapfile.c +@@ -1708,7 +1708,7 @@ sector_t map_swap_page(struct page *page, struct block_device **bdev) + { + swp_entry_t entry; + entry.val = page_private(page); +- return map_swap_entry(entry, bdev); ++ return map_swap_entry(entry, bdev) << (PAGE_SHIFT - 9); + } + + /* +diff --git a/mm/vmstat.c b/mm/vmstat.c +index 67c229324086..f272a74d9dcf 100644 +--- a/mm/vmstat.c ++++ b/mm/vmstat.c +@@ -1186,6 +1186,9 @@ static void pagetypeinfo_showfree_print(struct seq_file *m, + list_for_each(curr, &area->free_list[mtype]) + freecount++; + seq_printf(m, "%6lu ", freecount); ++ spin_unlock_irq(&zone->lock); ++ cond_resched(); ++ spin_lock_irq(&zone->lock); + } + seq_putc(m, '\n'); + } +diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c +index ffe8c4341ec9..ad5fc40281eb 100644 +--- a/mm/zsmalloc.c ++++ b/mm/zsmalloc.c +@@ -2352,11 +2352,13 @@ static unsigned long zs_can_compact(struct size_class *class) + return obj_wasted * class->pages_per_zspage; + } + +-static void __zs_compact(struct zs_pool *pool, struct size_class *class) ++static unsigned long __zs_compact(struct zs_pool *pool, ++ struct size_class *class) + { + struct zs_compact_control cc; + struct zspage *src_zspage; + struct zspage *dst_zspage = NULL; ++ unsigned long pages_freed = 0; + + spin_lock(&class->lock); + while ((src_zspage = isolate_zspage(class, true))) { +@@ -2386,7 +2388,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class) + putback_zspage(class, dst_zspage); + if (putback_zspage(class, src_zspage) == ZS_EMPTY) { + free_zspage(pool, class, src_zspage); +- pool->stats.pages_compacted += class->pages_per_zspage; ++ pages_freed += class->pages_per_zspage; + } + spin_unlock(&class->lock); + cond_resched(); +@@ -2397,12 +2399,15 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class) + putback_zspage(class, src_zspage); + + spin_unlock(&class->lock); ++ ++ return pages_freed; + } + + unsigned long 
zs_compact(struct zs_pool *pool) + { + int i; + struct size_class *class; ++ unsigned long pages_freed = 0; + + for (i = zs_size_classes - 1; i >= 0; i--) { + class = pool->size_class[i]; +@@ -2410,10 +2415,11 @@ unsigned long zs_compact(struct zs_pool *pool) + continue; + if (class->index != i) + continue; +- __zs_compact(pool, class); ++ pages_freed += __zs_compact(pool, class); + } ++ atomic_long_add(pages_freed, &pool->stats.pages_compacted); + +- return pool->stats.pages_compacted; ++ return pages_freed; + } + EXPORT_SYMBOL_GPL(zs_compact); + +@@ -2430,13 +2436,12 @@ static unsigned long zs_shrinker_scan(struct shrinker *shrinker, + struct zs_pool *pool = container_of(shrinker, struct zs_pool, + shrinker); + +- pages_freed = pool->stats.pages_compacted; + /* + * Compact classes and calculate compaction delta. + * Can run concurrently with a manually triggered + * (by user) compaction. + */ +- pages_freed = zs_compact(pool) - pages_freed; ++ pages_freed = zs_compact(pool); + + return pages_freed ? pages_freed : SHRINK_STOP; + } +diff --git a/net/802/garp.c b/net/802/garp.c +index b38ee6dcba45..5239b8f244e7 100644 +--- a/net/802/garp.c ++++ b/net/802/garp.c +@@ -206,6 +206,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr + kfree(attr); + } + ++static void garp_attr_destroy_all(struct garp_applicant *app) ++{ ++ struct rb_node *node, *next; ++ struct garp_attr *attr; ++ ++ for (node = rb_first(&app->gid); ++ next = node ? rb_next(node) : NULL, node != NULL; ++ node = next) { ++ attr = rb_entry(node, struct garp_attr, node); ++ garp_attr_destroy(app, attr); ++ } ++} ++ + static int garp_pdu_init(struct garp_applicant *app) + { + struct sk_buff *skb; +@@ -612,6 +625,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl + + spin_lock_bh(&app->lock); + garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU); ++ garp_attr_destroy_all(app); + garp_pdu_queue(app); + spin_unlock_bh(&app->lock); + +diff --git a/net/802/mrp.c b/net/802/mrp.c +index 72db2785ef2c..4ee3af3d400b 100644 +--- a/net/802/mrp.c ++++ b/net/802/mrp.c +@@ -295,6 +295,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr) + kfree(attr); + } + ++static void mrp_attr_destroy_all(struct mrp_applicant *app) ++{ ++ struct rb_node *node, *next; ++ struct mrp_attr *attr; ++ ++ for (node = rb_first(&app->mad); ++ next = node ? 
rb_next(node) : NULL, node != NULL; ++ node = next) { ++ attr = rb_entry(node, struct mrp_attr, node); ++ mrp_attr_destroy(app, attr); ++ } ++} ++ + static int mrp_pdu_init(struct mrp_applicant *app) + { + struct sk_buff *skb; +@@ -900,6 +913,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl) + + spin_lock_bh(&app->lock); + mrp_mad_event(app, MRP_EVENT_TX); ++ mrp_attr_destroy_all(app); + mrp_pdu_queue(app); + spin_unlock_bh(&app->lock); + +diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c +index aa4586672cee..33b317a25a2d 100644 +--- a/net/9p/trans_fd.c ++++ b/net/9p/trans_fd.c +@@ -295,7 +295,6 @@ static void p9_read_work(struct work_struct *work) + { + int n, err; + struct p9_conn *m; +- int status = REQ_STATUS_ERROR; + + m = container_of(work, struct p9_conn, rq); + +@@ -375,11 +374,21 @@ static void p9_read_work(struct work_struct *work) + if ((m->req) && (m->rc.offset == m->rc.capacity)) { + p9_debug(P9_DEBUG_TRANS, "got new packet\n"); + spin_lock(&m->client->lock); +- if (m->req->status != REQ_STATUS_ERROR) +- status = REQ_STATUS_RCVD; +- list_del(&m->req->req_list); +- /* update req->status while holding client->lock */ +- p9_client_cb(m->client, m->req, status); ++ if (m->req->status == REQ_STATUS_SENT) { ++ list_del(&m->req->req_list); ++ p9_client_cb(m->client, m->req, REQ_STATUS_RCVD); ++ } else if (m->req->status == REQ_STATUS_FLSHD) { ++ /* Ignore replies associated with a cancelled request. */ ++ p9_debug(P9_DEBUG_TRANS, ++ "Ignore replies associated with a cancelled request\n"); ++ } else { ++ spin_unlock(&m->client->lock); ++ p9_debug(P9_DEBUG_ERROR, ++ "Request tag %d errored out while we were reading the reply\n", ++ m->rc.tag); ++ err = -EIO; ++ goto error; ++ } + spin_unlock(&m->client->lock); + m->rc.sdata = NULL; + m->rc.offset = 0; +@@ -712,11 +721,20 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req) + { + p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req); + ++ spin_lock(&client->lock); ++ /* Ignore cancelled request if message has been received ++ * before lock. ++ */ ++ if (req->status == REQ_STATUS_RCVD) { ++ spin_unlock(&client->lock); ++ return 0; ++ } ++ + /* we haven't received a response for oldreq, + * remove it from the list. 
+ */ +- spin_lock(&client->lock); + list_del(&req->req_list); ++ req->status = REQ_STATUS_FLSHD; + spin_unlock(&client->lock); + + return 0; +@@ -797,20 +815,28 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd) + return -ENOMEM; + + ts->rd = fget(rfd); ++ if (!ts->rd) ++ goto out_free_ts; ++ if (!(ts->rd->f_mode & FMODE_READ)) ++ goto out_put_rd; + ts->wr = fget(wfd); +- if (!ts->rd || !ts->wr) { +- if (ts->rd) +- fput(ts->rd); +- if (ts->wr) +- fput(ts->wr); +- kfree(ts); +- return -EIO; +- } ++ if (!ts->wr) ++ goto out_put_rd; ++ if (!(ts->wr->f_mode & FMODE_WRITE)) ++ goto out_put_wr; + + client->trans = ts; + client->status = Connected; + + return 0; ++ ++out_put_wr: ++ fput(ts->wr); ++out_put_rd: ++ fput(ts->rd); ++out_free_ts: ++ kfree(ts); ++ return -EIO; + } + + static int p9_socket_open(struct p9_client *client, struct socket *csocket) +@@ -987,7 +1013,7 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args) + + csocket = NULL; + +- if (addr == NULL) ++ if (!addr || !strlen(addr)) + return -EINVAL; + + if (strlen(addr) >= UNIX_PATH_MAX) { +diff --git a/net/Makefile b/net/Makefile +index c84a3470ad8d..83e91dcafec0 100644 +--- a/net/Makefile ++++ b/net/Makefile +@@ -16,7 +16,7 @@ obj-$(CONFIG_NET) += ethernet/ 802/ sched/ netlink/ + obj-$(CONFIG_NETFILTER) += netfilter/ + obj-$(CONFIG_INET) += ipv4/ + obj-$(CONFIG_XFRM) += xfrm/ +-obj-$(CONFIG_UNIX) += unix/ ++obj-$(CONFIG_UNIX_SCM) += unix/ + obj-$(CONFIG_NET) += ipv6/ + obj-$(CONFIG_PACKET) += packet/ + obj-$(CONFIG_NET_KEY) += key/ +diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c +index 93209c009df5..a66de21671ac 100644 +--- a/net/appletalk/ddp.c ++++ b/net/appletalk/ddp.c +@@ -1575,8 +1575,8 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) + struct sk_buff *skb; + struct net_device *dev; + struct ddpehdr *ddp; +- int size; +- struct atalk_route *rt; ++ int size, hard_header_len; ++ struct atalk_route *rt, *rt_lo = NULL; + int err; + + if (flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) +@@ -1639,7 +1639,22 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) + SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n", + sk, size, dev->name); + +- size += dev->hard_header_len; ++ hard_header_len = dev->hard_header_len; ++ /* Leave room for loopback hardware header if necessary */ ++ if (usat->sat_addr.s_node == ATADDR_BCAST && ++ (dev->flags & IFF_LOOPBACK || !(rt->flags & RTF_GATEWAY))) { ++ struct atalk_addr at_lo; ++ ++ at_lo.s_node = 0; ++ at_lo.s_net = 0; ++ ++ rt_lo = atrtr_find(&at_lo); ++ ++ if (rt_lo && rt_lo->dev->hard_header_len > hard_header_len) ++ hard_header_len = rt_lo->dev->hard_header_len; ++ } ++ ++ size += hard_header_len; + release_sock(sk); + skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err); + lock_sock(sk); +@@ -1647,7 +1662,7 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) + goto out; + + skb_reserve(skb, ddp_dl->header_length); +- skb_reserve(skb, dev->hard_header_len); ++ skb_reserve(skb, hard_header_len); + skb->dev = dev; + + SOCK_DEBUG(sk, "SK %p: Begin build.\n", sk); +@@ -1698,18 +1713,12 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) + /* loop back */ + skb_orphan(skb); + if (ddp->deh_dnode == ATADDR_BCAST) { +- struct atalk_addr at_lo; +- +- at_lo.s_node = 0; +- at_lo.s_net = 0; +- +- rt = atrtr_find(&at_lo); +- if (!rt) { ++ if (!rt_lo) { + kfree_skb(skb); + err = -ENETUNREACH; + goto out; + } +- dev = rt->dev; ++ dev = 
rt_lo->dev; + skb->dev = dev; + } + ddp_dl->request(ddp_dl, skb, dev->dev_addr); +diff --git a/net/atm/lec.c b/net/atm/lec.c +index 704892d79bf1..756429c95e85 100644 +--- a/net/atm/lec.c ++++ b/net/atm/lec.c +@@ -1290,6 +1290,12 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry) + entry->vcc = NULL; + } + if (entry->recv_vcc) { ++ struct atm_vcc *vcc = entry->recv_vcc; ++ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc); ++ ++ kfree(vpriv); ++ vcc->user_back = NULL; ++ + entry->recv_vcc->push = entry->old_recv_push; + vcc_release_async(entry->recv_vcc, -EPIPE); + entry->recv_vcc = NULL; +diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c +index 02be8ee23271..64fede18aa33 100644 +--- a/net/ax25/af_ax25.c ++++ b/net/ax25/af_ax25.c +@@ -1191,7 +1191,10 @@ static int __must_check ax25_connect(struct socket *sock, + if (addr_len > sizeof(struct sockaddr_ax25) && + fsa->fsa_ax25.sax25_ndigis != 0) { + /* Valid number of digipeaters ? */ +- if (fsa->fsa_ax25.sax25_ndigis < 1 || fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) { ++ if (fsa->fsa_ax25.sax25_ndigis < 1 || ++ fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS || ++ addr_len < sizeof(struct sockaddr_ax25) + ++ sizeof(ax25_address) * fsa->fsa_ax25.sax25_ndigis) { + err = -EINVAL; + goto out_release; + } +@@ -1510,7 +1513,10 @@ static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) + struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax; + + /* Valid number of digipeaters ? */ +- if (usax->sax25_ndigis < 1 || usax->sax25_ndigis > AX25_MAX_DIGIS) { ++ if (usax->sax25_ndigis < 1 || ++ usax->sax25_ndigis > AX25_MAX_DIGIS || ++ addr_len < sizeof(struct sockaddr_ax25) + ++ sizeof(ax25_address) * usax->sax25_ndigis) { + err = -EINVAL; + goto out; + } +diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c +index 2b663622bdb4..f85e6a9ee5ea 100644 +--- a/net/batman-adv/bat_iv_ogm.c ++++ b/net/batman-adv/bat_iv_ogm.c +@@ -585,8 +585,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet) + if (WARN_ON(!forw_packet->if_outgoing)) + return; + +- if (WARN_ON(forw_packet->if_outgoing->soft_iface != soft_iface)) ++ if (forw_packet->if_outgoing->soft_iface != soft_iface) { ++ pr_warn("%s: soft interface switch for queued OGM\n", __func__); + return; ++ } + + if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE) + return; +diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c +index 00123064eb26..516c45771d59 100644 +--- a/net/batman-adv/bridge_loop_avoidance.c ++++ b/net/batman-adv/bridge_loop_avoidance.c +@@ -36,6 +36,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -95,11 +96,12 @@ static inline u32 batadv_choose_claim(const void *data, u32 size) + */ + static inline u32 batadv_choose_backbone_gw(const void *data, u32 size) + { +- const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data; ++ const struct batadv_bla_backbone_gw *gw; + u32 hash = 0; + +- hash = jhash(&claim->addr, sizeof(claim->addr), hash); +- hash = jhash(&claim->vid, sizeof(claim->vid), hash); ++ gw = (struct batadv_bla_backbone_gw *)data; ++ hash = jhash(&gw->orig, sizeof(gw->orig), hash); ++ hash = jhash(&gw->vid, sizeof(gw->vid), hash); + + return hash % size; + } +@@ -451,7 +453,10 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac, + skb->len + ETH_HLEN); + soft_iface->last_rx = jiffies; + +- netif_rx(skb); ++ if (in_interrupt()) ++ netif_rx(skb); ++ else ++ netif_rx_ni(skb); + out: + if 
(primary_if) + batadv_hardif_put(primary_if); +@@ -1817,7 +1822,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, + * @bat_priv: the bat priv with all the soft interface information + * @skb: the frame to be checked + * @vid: the VLAN ID of the frame +- * @is_bcast: the packet came in a broadcast packet type. ++ * @packet_type: the batman packet type this frame came in + * + * batadv_bla_rx avoidance checks if: + * * we have to race for a claim +@@ -1829,7 +1834,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, + * further process the skb. + */ + bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, +- unsigned short vid, bool is_bcast) ++ unsigned short vid, int packet_type) + { + struct batadv_bla_backbone_gw *backbone_gw; + struct ethhdr *ethhdr; +@@ -1851,9 +1856,24 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, + goto handled; + + if (unlikely(atomic_read(&bat_priv->bla.num_requests))) +- /* don't allow broadcasts while requests are in flight */ +- if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) +- goto handled; ++ /* don't allow multicast packets while requests are in flight */ ++ if (is_multicast_ether_addr(ethhdr->h_dest)) ++ /* Both broadcast flooding or multicast-via-unicasts ++ * delivery might send to multiple backbone gateways ++ * sharing the same LAN and therefore need to coordinate ++ * which backbone gateway forwards into the LAN, ++ * by claiming the payload source address. ++ * ++ * Broadcast flooding and multicast-via-unicasts ++ * delivery use the following two batman packet types. ++ * Note: explicitly exclude BATADV_UNICAST_4ADDR, ++ * as the DHCP gateway feature will send explicitly ++ * to only one BLA gateway, so the claiming process ++ * should be avoided there. ++ */ ++ if (packet_type == BATADV_BCAST || ++ packet_type == BATADV_UNICAST) ++ goto handled; + + ether_addr_copy(search_claim.addr, ethhdr->h_source); + search_claim.vid = vid; +@@ -1881,13 +1901,14 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, + goto allow; + } + +- /* if it is a broadcast ... */ +- if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) { ++ /* if it is a multicast ... */ ++ if (is_multicast_ether_addr(ethhdr->h_dest) && ++ (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) { + /* ... drop it. the responsible gateway is in charge. + * +- * We need to check is_bcast because with the gateway ++ * We need to check packet type because with the gateway + * feature, broadcasts (like DHCP requests) may be sent +- * using a unicast packet type. ++ * using a unicast 4 address packet type. See comment above. 
+ */ + goto handled; + } else { +diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h +index 1ae93e46fb98..40b8ec9d4b1b 100644 +--- a/net/batman-adv/bridge_loop_avoidance.h ++++ b/net/batman-adv/bridge_loop_avoidance.h +@@ -29,7 +29,7 @@ struct sk_buff; + + #ifdef CONFIG_BATMAN_ADV_BLA + bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, +- unsigned short vid, bool is_bcast); ++ unsigned short vid, int packet_type); + bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, + unsigned short vid); + bool batadv_bla_is_backbone_gw(struct sk_buff *skb, +@@ -56,7 +56,7 @@ int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb); + + static inline bool batadv_bla_rx(struct batadv_priv *bat_priv, + struct sk_buff *skb, unsigned short vid, +- bool is_bcast) ++ int packet_type) + { + return false; + } +diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c +index 3bd7ed6b6b3e..9727afc030d8 100644 +--- a/net/batman-adv/gateway_client.c ++++ b/net/batman-adv/gateway_client.c +@@ -673,8 +673,10 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len, + + chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET; + /* store the client address if the message is going to a client */ +- if (ret == BATADV_DHCP_TO_CLIENT && +- pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) { ++ if (ret == BATADV_DHCP_TO_CLIENT) { ++ if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) ++ return BATADV_DHCP_NO; ++ + /* check if the DHCP packet carries an Ethernet DHCP */ + p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET; + if (*p != BATADV_DHCP_HTYPE_ETHERNET) +diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c +index 56dc532f7a2c..b422a8b34b9f 100644 +--- a/net/batman-adv/log.c ++++ b/net/batman-adv/log.c +@@ -196,6 +196,7 @@ static const struct file_operations batadv_log_fops = { + .read = batadv_log_read, + .poll = batadv_log_poll, + .llseek = no_llseek, ++ .owner = THIS_MODULE, + }; + + int batadv_debug_log_setup(struct batadv_priv *bat_priv) +diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c +index 19059ae26e51..1ba205c3ea9f 100644 +--- a/net/batman-adv/routing.c ++++ b/net/batman-adv/routing.c +@@ -803,6 +803,10 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, + vid = batadv_get_vid(skb, hdr_len); + ethhdr = (struct ethhdr *)(skb->data + hdr_len); + ++ /* do not reroute multicast frames in a unicast header */ ++ if (is_multicast_ether_addr(ethhdr->h_dest)) ++ return true; ++ + /* check if the destination client was served by this node and it is now + * roaming. 
In this case, it means that the node has got a ROAM_ADV + * message and that it knows the new destination in the mesh to re-route +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c +index 99d2c453c872..af0a8439cf08 100644 +--- a/net/batman-adv/soft-interface.c ++++ b/net/batman-adv/soft-interface.c +@@ -415,10 +415,10 @@ void batadv_interface_rx(struct net_device *soft_iface, + struct vlan_ethhdr *vhdr; + struct ethhdr *ethhdr; + unsigned short vid; +- bool is_bcast; ++ int packet_type; + + batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data; +- is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST); ++ packet_type = batadv_bcast_packet->packet_type; + + skb_pull_rcsum(skb, hdr_size); + skb_reset_mac_header(skb); +@@ -463,7 +463,7 @@ void batadv_interface_rx(struct net_device *soft_iface, + /* Let the bridge loop avoidance check the packet. If will + * not handle it, we can safely push it up. + */ +- if (batadv_bla_rx(bat_priv, skb, vid, is_bcast)) ++ if (batadv_bla_rx(bat_priv, skb, vid, packet_type)) + goto out; + + if (orig_node) +diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c +index d40d83949b00..607d8bac8376 100644 +--- a/net/batman-adv/translation-table.c ++++ b/net/batman-adv/translation-table.c +@@ -897,6 +897,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node, + hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) { + tt_vlan->vid = htons(vlan->vid); + tt_vlan->crc = htonl(vlan->tt.crc); ++ tt_vlan->reserved = 0; + + tt_vlan++; + } +@@ -980,6 +981,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv, + + tt_vlan->vid = htons(vlan->vid); + tt_vlan->crc = htonl(vlan->tt.crc); ++ tt_vlan->reserved = 0; + + tt_vlan++; + } +diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c +index 21096c882223..3bfd747aa515 100644 +--- a/net/bluetooth/6lowpan.c ++++ b/net/bluetooth/6lowpan.c +@@ -57,6 +57,7 @@ static bool enable_6lowpan; + /* We are listening incoming connections via this channel + */ + static struct l2cap_chan *listen_chan; ++static DEFINE_MUTEX(set_lock); + + struct lowpan_peer { + struct list_head list; +@@ -1187,12 +1188,14 @@ static void do_enable_set(struct work_struct *work) + + enable_6lowpan = set_enable->flag; + ++ mutex_lock(&set_lock); + if (listen_chan) { + l2cap_chan_close(listen_chan, 0); + l2cap_chan_put(listen_chan); + } + + listen_chan = bt_6lowpan_listen(); ++ mutex_unlock(&set_lock); + + kfree(set_enable); + } +@@ -1244,11 +1247,13 @@ static ssize_t lowpan_control_write(struct file *fp, + if (ret == -EINVAL) + return ret; + ++ mutex_lock(&set_lock); + if (listen_chan) { + l2cap_chan_close(listen_chan, 0); + l2cap_chan_put(listen_chan); + listen_chan = NULL; + } ++ mutex_unlock(&set_lock); + + if (conn) { + struct lowpan_peer *peer; +diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c +index 5f123c3320a7..fcd819ffda10 100644 +--- a/net/bluetooth/a2mp.c ++++ b/net/bluetooth/a2mp.c +@@ -233,6 +233,9 @@ static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb, + struct a2mp_info_req req; + + found = true; ++ ++ memset(&req, 0, sizeof(req)); ++ + req.id = cl->id; + a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr), + sizeof(req), &req); +@@ -312,6 +315,8 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb, + if (!hdev || hdev->dev_type != HCI_AMP) { + struct a2mp_info_rsp rsp; + ++ memset(&rsp, 0, sizeof(rsp)); ++ + rsp.id = req->id; + rsp.status = A2MP_STATUS_INVALID_CTRL_ID; + +@@ -355,6 
+360,8 @@ static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb, + if (!ctrl) + return -ENOMEM; + ++ memset(&req, 0, sizeof(req)); ++ + req.id = rsp->id; + a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req), + &req); +@@ -381,6 +388,8 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb, + hdev = hci_dev_get(req->id); + if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) { + struct a2mp_amp_assoc_rsp rsp; ++ ++ memset(&rsp, 0, sizeof(rsp)); + rsp.id = req->id; + + if (tmp) { +@@ -471,7 +480,6 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, + struct a2mp_cmd *hdr) + { + struct a2mp_physlink_req *req = (void *) skb->data; +- + struct a2mp_physlink_rsp rsp; + struct hci_dev *hdev; + struct hci_conn *hcon; +@@ -482,6 +490,8 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, + + BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id); + ++ memset(&rsp, 0, sizeof(rsp)); ++ + rsp.local_id = req->remote_id; + rsp.remote_id = req->local_id; + +@@ -509,6 +519,7 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, + assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL); + if (!assoc) { + amp_ctrl_put(ctrl); ++ hci_dev_put(hdev); + return -ENOMEM; + } + +@@ -560,6 +571,8 @@ static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, + + BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id); + ++ memset(&rsp, 0, sizeof(rsp)); ++ + rsp.local_id = req->remote_id; + rsp.remote_id = req->local_id; + rsp.status = A2MP_STATUS_SUCCESS; +@@ -682,6 +695,8 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) + if (err) { + struct a2mp_cmd_rej rej; + ++ memset(&rej, 0, sizeof(rej)); ++ + rej.reason = cpu_to_le16(0); + hdr = (void *) skb->data; + +@@ -905,6 +920,8 @@ void a2mp_send_getinfo_rsp(struct hci_dev *hdev) + + BT_DBG("%s mgr %p", hdev->name, mgr); + ++ memset(&rsp, 0, sizeof(rsp)); ++ + rsp.id = hdev->id; + rsp.status = A2MP_STATUS_INVALID_CTRL_ID; + +@@ -1002,6 +1019,8 @@ void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status) + if (!mgr) + return; + ++ memset(&rsp, 0, sizeof(rsp)); ++ + hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT); + if (!hs_hcon) { + rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION; +@@ -1034,6 +1053,8 @@ void a2mp_discover_amp(struct l2cap_chan *chan) + + mgr->bredr_chan = chan; + ++ memset(&req, 0, sizeof(req)); ++ + req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU); + req.ext_feat = 0; + a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req); +diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c +index e32f34189007..b01b43ab6f83 100644 +--- a/net/bluetooth/amp.c ++++ b/net/bluetooth/amp.c +@@ -305,6 +305,9 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev, + struct hci_request req; + int err = 0; + ++ if (!mgr) ++ return; ++ + cp.phy_handle = hcon->handle; + cp.len_so_far = cpu_to_le16(0); + cp.max_len = cpu_to_le16(hdev->amp_assoc_size); +diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c +index 1152ce34dad4..0bb150e68c53 100644 +--- a/net/bluetooth/cmtp/core.c ++++ b/net/bluetooth/cmtp/core.c +@@ -391,6 +391,11 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) + if (!(session->flags & BIT(CMTP_LOOPBACK))) { + err = cmtp_attach_device(session); + if (err < 0) { ++ /* Caller will call fput in case of failure, and so ++ * will cmtp_session kthread. 
++ */ ++ get_file(session->sock->file); ++ + atomic_inc(&session->terminate); + wake_up_interruptible(sk_sleep(session->sock->sk)); + up_write(&cmtp_session_sem); +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c +index 1d085eed72d0..e3cd81ce2a7b 100644 +--- a/net/bluetooth/hci_conn.c ++++ b/net/bluetooth/hci_conn.c +@@ -1163,6 +1163,23 @@ int hci_conn_check_link_mode(struct hci_conn *conn) + return 0; + } + ++ /* AES encryption is required for Level 4: ++ * ++ * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C ++ * page 1319: ++ * ++ * 128-bit equivalent strength for link and encryption keys ++ * required using FIPS approved algorithms (E0 not allowed, ++ * SAFER+ not allowed, and P-192 not allowed; encryption key ++ * not shortened) ++ */ ++ if (conn->sec_level == BT_SECURITY_FIPS && ++ !test_bit(HCI_CONN_AES_CCM, &conn->flags)) { ++ bt_dev_err(conn->hdev, ++ "Invalid security: Missing AES-CCM usage"); ++ return 0; ++ } ++ + if (hci_conn_ssp_enabled(conn) && + !test_bit(HCI_CONN_ENCRYPT, &conn->flags)) + return 0; +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c +index a70b078ceb3c..8517da7f282e 100644 +--- a/net/bluetooth/hci_core.c ++++ b/net/bluetooth/hci_core.c +@@ -1243,8 +1243,10 @@ int hci_inquiry(void __user *arg) + * cleared). If it is interrupted by a signal, return -EINTR. + */ + if (wait_on_bit(&hdev->flags, HCI_INQUIRY, +- TASK_INTERRUPTIBLE)) +- return -EINTR; ++ TASK_INTERRUPTIBLE)) { ++ err = -EINTR; ++ goto done; ++ } + } + + /* for unlimited number of responses we will use buffer with +@@ -1420,8 +1422,13 @@ static int hci_dev_do_open(struct hci_dev *hdev) + } else { + /* Init failed, cleanup */ + flush_work(&hdev->tx_work); +- flush_work(&hdev->cmd_work); ++ ++ /* Since hci_rx_work() is possible to awake new cmd_work ++ * it should be flushed first to avoid unexpected call of ++ * hci_cmd_work() ++ */ + flush_work(&hdev->rx_work); ++ flush_work(&hdev->cmd_work); + + skb_queue_purge(&hdev->cmd_q); + skb_queue_purge(&hdev->rx_q); +@@ -3139,14 +3146,10 @@ EXPORT_SYMBOL(hci_register_dev); + /* Unregister HCI device */ + void hci_unregister_dev(struct hci_dev *hdev) + { +- int id; +- + BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); + + hci_dev_set_flag(hdev, HCI_UNREGISTER); + +- id = hdev->id; +- + write_lock(&hci_dev_list_lock); + list_del(&hdev->list); + write_unlock(&hci_dev_list_lock); +@@ -3175,7 +3178,14 @@ void hci_unregister_dev(struct hci_dev *hdev) + } + + device_del(&hdev->dev); ++ /* Actual cleanup is deferred until hci_cleanup_dev(). 
*/ ++ hci_dev_put(hdev); ++} ++EXPORT_SYMBOL(hci_unregister_dev); + ++/* Cleanup HCI device */ ++void hci_cleanup_dev(struct hci_dev *hdev) ++{ + debugfs_remove_recursive(hdev->debugfs); + kfree_const(hdev->hw_info); + kfree_const(hdev->fw_info); +@@ -3197,11 +3207,8 @@ void hci_unregister_dev(struct hci_dev *hdev) + hci_discovery_filter_clear(hdev); + hci_dev_unlock(hdev); + +- hci_dev_put(hdev); +- +- ida_simple_remove(&hci_index_ida, id); ++ ida_simple_remove(&hci_index_ida, hdev->id); + } +-EXPORT_SYMBOL(hci_unregister_dev); + + /* Suspend HCI device */ + int hci_suspend_dev(struct hci_dev *hdev) +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c +index 6f78489fdb13..44eeb27e341a 100644 +--- a/net/bluetooth/hci_event.c ++++ b/net/bluetooth/hci_event.c +@@ -41,12 +41,27 @@ + + /* Handle HCI Event packets */ + +-static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) ++static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb, ++ u8 *new_status) + { + __u8 status = *((__u8 *) skb->data); + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + ++ /* It is possible that we receive Inquiry Complete event right ++ * before we receive Inquiry Cancel Command Complete event, in ++ * which case the latter event should have status of Command ++ * Disallowed (0x0c). This should not be treated as error, since ++ * we actually achieve what Inquiry Cancel wants to achieve, ++ * which is to end the last Inquiry session. ++ */ ++ if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) { ++ bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command"); ++ status = 0x00; ++ } ++ ++ *new_status = status; ++ + if (status) + return; + +@@ -1118,6 +1133,9 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, + { + struct discovery_state *d = &hdev->discovery; + ++ if (len > HCI_MAX_AD_LENGTH) ++ return; ++ + bacpy(&d->last_adv_addr, bdaddr); + d->last_adv_addr_type = bdaddr_type; + d->last_adv_rssi = rssi; +@@ -2094,7 +2112,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) + + BT_DBG("%s num_rsp %d", hdev->name, num_rsp); + +- if (!num_rsp) ++ if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) + return; + + if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) +@@ -2475,7 +2493,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) + &cp); + } else { + clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); +- hci_encrypt_cfm(conn, ev->status, 0x00); ++ hci_encrypt_cfm(conn, ev->status); + } + } + +@@ -2561,22 +2579,7 @@ static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status, + conn->enc_key_size = rp->key_size; + } + +- if (conn->state == BT_CONFIG) { +- conn->state = BT_CONNECTED; +- hci_connect_cfm(conn, 0); +- hci_conn_drop(conn); +- } else { +- u8 encrypt; +- +- if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags)) +- encrypt = 0x00; +- else if (test_bit(HCI_CONN_AES_CCM, &conn->flags)) +- encrypt = 0x02; +- else +- encrypt = 0x01; +- +- hci_encrypt_cfm(conn, 0, encrypt); +- } ++ hci_encrypt_cfm(conn, 0); + + unlock: + hci_dev_unlock(hdev); +@@ -2623,27 +2626,23 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) + + clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); + ++ /* Check link security requirements are met */ ++ if (!hci_conn_check_link_mode(conn)) ++ ev->status = HCI_ERROR_AUTH_FAILURE; ++ + if (ev->status && conn->state == BT_CONNECTED) { + if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) + set_bit(HCI_CONN_AUTH_FAILURE, 
&conn->flags); + ++ /* Notify upper layers so they can cleanup before ++ * disconnecting. ++ */ ++ hci_encrypt_cfm(conn, ev->status); + hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); + hci_conn_drop(conn); + goto unlock; + } + +- /* In Secure Connections Only mode, do not allow any connections +- * that are not encrypted with AES-CCM using a P-256 authenticated +- * combination key. +- */ +- if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && +- (!test_bit(HCI_CONN_AES_CCM, &conn->flags) || +- conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) { +- hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE); +- hci_conn_drop(conn); +- goto unlock; +- } +- + /* Try reading the encryption key size for encrypted ACL links */ + if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { + struct hci_cp_read_enc_key_size cp; +@@ -2673,14 +2672,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) + } + + notify: +- if (conn->state == BT_CONFIG) { +- if (!ev->status) +- conn->state = BT_CONNECTED; +- +- hci_connect_cfm(conn, ev->status); +- hci_conn_drop(conn); +- } else +- hci_encrypt_cfm(conn, ev->status, ev->encrypt); ++ hci_encrypt_cfm(conn, ev->status); + + unlock: + hci_dev_unlock(hdev); +@@ -2772,7 +2764,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, + + switch (*opcode) { + case HCI_OP_INQUIRY_CANCEL: +- hci_cc_inquiry_cancel(hdev, skb); ++ hci_cc_inquiry_cancel(hdev, skb, status); + break; + + case HCI_OP_PERIODIC_INQ: +@@ -3623,6 +3615,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, + struct inquiry_info_with_rssi_and_pscan_mode *info; + info = (void *) (skb->data + 1); + ++ if (skb->len < num_rsp * sizeof(*info) + 1) ++ goto unlock; ++ + for (; num_rsp; num_rsp--, info++) { + u32 flags; + +@@ -3644,6 +3639,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, + } else { + struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); + ++ if (skb->len < num_rsp * sizeof(*info) + 1) ++ goto unlock; ++ + for (; num_rsp; num_rsp--, info++) { + u32 flags; + +@@ -3664,6 +3662,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, + } + } + ++unlock: + hci_dev_unlock(hdev); + } + +@@ -3775,6 +3774,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, + case 0x11: /* Unsupported Feature or Parameter Value */ + case 0x1c: /* SCO interval rejected */ + case 0x1a: /* Unsupported Remote Feature */ ++ case 0x1e: /* Invalid LMP Parameters */ + case 0x1f: /* Unspecified error */ + case 0x20: /* Unsupported LMP Parameter value */ + if (conn->out) { +@@ -3825,7 +3825,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, + + BT_DBG("%s num_rsp %d", hdev->name, num_rsp); + +- if (!num_rsp) ++ if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) + return; + + if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) +@@ -4350,6 +4350,11 @@ static void hci_phy_link_complete_evt(struct hci_dev *hdev, + return; + } + ++ if (!hcon->amp_mgr) { ++ hci_dev_unlock(hdev); ++ return; ++ } ++ + if (ev->status) { + hci_conn_del(hcon); + hci_dev_unlock(hdev); +@@ -4394,6 +4399,7 @@ static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) + return; + + hchan->handle = le16_to_cpu(ev->handle); ++ hchan->amp = true; + + BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan); + +@@ -4426,7 +4432,7 @@ static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, + hci_dev_lock(hdev); + + hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); +- if (!hchan) ++ if 
(!hchan || !hchan->amp) + goto unlock; + + amp_destroy_logical_link(hchan, ev->reason); +@@ -4756,6 +4762,11 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, + return; + } + ++ if (len > HCI_MAX_AD_LENGTH) { ++ pr_err_ratelimited("legacy adv larger than 31 bytes"); ++ return; ++ } ++ + /* Find the end of the data in case the report contains padded zero + * bytes at the end causing an invalid length value. + * +@@ -4816,7 +4827,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, + */ + conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type, + direct_addr); +- if (conn && type == LE_ADV_IND) { ++ if (conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { + /* Store report for later inclusion by + * mgmt_device_connected + */ +@@ -4941,10 +4952,14 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) + struct hci_ev_le_advertising_info *ev = ptr; + s8 rssi; + +- rssi = ev->data[ev->length]; +- process_adv_report(hdev, ev->evt_type, &ev->bdaddr, +- ev->bdaddr_type, NULL, 0, rssi, +- ev->data, ev->length); ++ if (ev->length <= HCI_MAX_AD_LENGTH) { ++ rssi = ev->data[ev->length]; ++ process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ++ ev->bdaddr_type, NULL, 0, rssi, ++ ev->data, ev->length); ++ } else { ++ bt_dev_err(hdev, "Dropping invalid advertising data"); ++ } + + ptr += sizeof(*ev) + ev->length + 1; + } +@@ -5132,20 +5147,18 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, + struct sk_buff *skb) + { + u8 num_reports = skb->data[0]; +- void *ptr = &skb->data[1]; ++ struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1]; + +- hci_dev_lock(hdev); ++ if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1) ++ return; + +- while (num_reports--) { +- struct hci_ev_le_direct_adv_info *ev = ptr; ++ hci_dev_lock(hdev); + ++ for (; num_reports; num_reports--, ev++) + process_adv_report(hdev, ev->evt_type, &ev->bdaddr, + ev->bdaddr_type, &ev->direct_addr, + ev->direct_addr_type, ev->rssi, NULL, 0); + +- ptr += sizeof(*ev); +- } +- + hci_dev_unlock(hdev); + } + +@@ -5249,6 +5262,11 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) + u8 status = 0, event = hdr->evt, req_evt = 0; + u16 opcode = HCI_OP_NOP; + ++ if (!event) { ++ bt_dev_warn(hdev, "Received unexpected HCI Event 00000000"); ++ goto done; ++ } ++ + if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) { + struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data; + opcode = __le16_to_cpu(cmd_hdr->opcode); +@@ -5460,6 +5478,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) + req_complete_skb(hdev, status, opcode, orig_skb); + } + ++done: + kfree_skb(orig_skb); + kfree_skb(skb); + hdev->stat.evt_rx++; +diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c +index 4a89e121d662..bfbfe1758978 100644 +--- a/net/bluetooth/hci_request.c ++++ b/net/bluetooth/hci_request.c +@@ -275,12 +275,16 @@ int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, + { + int ret; + +- if (!test_bit(HCI_UP, &hdev->flags)) +- return -ENETDOWN; +- + /* Serialize all requests */ + hci_req_sync_lock(hdev); +- ret = __hci_req_sync(hdev, req, opt, timeout, hci_status); ++ /* check the state after obtaing the lock to protect the HCI_UP ++ * against any races from hci_dev_do_close when the controller ++ * gets removed. 
++ */ ++ if (test_bit(HCI_UP, &hdev->flags)) ++ ret = __hci_req_sync(hdev, req, opt, timeout, hci_status); ++ else ++ ret = -ENETDOWN; + hci_req_sync_unlock(hdev); + + return ret; +diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c +index 44b3146c6117..d30163eb1643 100644 +--- a/net/bluetooth/hci_sock.c ++++ b/net/bluetooth/hci_sock.c +@@ -59,6 +59,17 @@ struct hci_pinfo { + char comm[TASK_COMM_LEN]; + }; + ++static struct hci_dev *hci_hdev_from_sock(struct sock *sk) ++{ ++ struct hci_dev *hdev = hci_pi(sk)->hdev; ++ ++ if (!hdev) ++ return ERR_PTR(-EBADFD); ++ if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) ++ return ERR_PTR(-EPIPE); ++ return hdev; ++} ++ + void hci_sock_set_flag(struct sock *sk, int nr) + { + set_bit(nr, &hci_pi(sk)->flags); +@@ -747,19 +758,13 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event) + if (event == HCI_DEV_UNREG) { + struct sock *sk; + +- /* Detach sockets from device */ ++ /* Wake up sockets using this dead device */ + read_lock(&hci_sk_list.lock); + sk_for_each(sk, &hci_sk_list.head) { +- bh_lock_sock_nested(sk); + if (hci_pi(sk)->hdev == hdev) { +- hci_pi(sk)->hdev = NULL; + sk->sk_err = EPIPE; +- sk->sk_state = BT_OPEN; + sk->sk_state_change(sk); +- +- hci_dev_put(hdev); + } +- bh_unlock_sock(sk); + } + read_unlock(&hci_sk_list.lock); + } +@@ -918,10 +923,10 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg) + static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, + unsigned long arg) + { +- struct hci_dev *hdev = hci_pi(sk)->hdev; ++ struct hci_dev *hdev = hci_hdev_from_sock(sk); + +- if (!hdev) +- return -EBADFD; ++ if (IS_ERR(hdev)) ++ return PTR_ERR(hdev); + + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) + return -EBUSY; +@@ -1075,6 +1080,18 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, + + lock_sock(sk); + ++ /* Allow detaching from dead device and attaching to alive device, if ++ * the caller wants to re-bind (instead of close) this socket in ++ * response to hci_sock_dev_event(HCI_DEV_UNREG) notification. 
++ */ ++ hdev = hci_pi(sk)->hdev; ++ if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) { ++ hci_pi(sk)->hdev = NULL; ++ sk->sk_state = BT_OPEN; ++ hci_dev_put(hdev); ++ } ++ hdev = NULL; ++ + if (sk->sk_state == BT_BOUND) { + err = -EALREADY; + goto done; +@@ -1351,9 +1368,9 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, + + lock_sock(sk); + +- hdev = hci_pi(sk)->hdev; +- if (!hdev) { +- err = -EBADFD; ++ hdev = hci_hdev_from_sock(sk); ++ if (IS_ERR(hdev)) { ++ err = PTR_ERR(hdev); + goto done; + } + +@@ -1713,9 +1730,9 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, + goto done; + } + +- hdev = hci_pi(sk)->hdev; +- if (!hdev) { +- err = -EBADFD; ++ hdev = hci_hdev_from_sock(sk); ++ if (IS_ERR(hdev)) { ++ err = PTR_ERR(hdev); + goto done; + } + +diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c +index ca7a35ebaefb..cb7d06bb0243 100644 +--- a/net/bluetooth/hci_sysfs.c ++++ b/net/bluetooth/hci_sysfs.c +@@ -82,6 +82,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn) + static void bt_host_release(struct device *dev) + { + struct hci_dev *hdev = to_hci_dev(dev); ++ ++ if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) ++ hci_cleanup_dev(hdev); + kfree(hdev); + module_put(THIS_MODULE); + } +diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c +index 552e00b07196..9ec37c6c8c4a 100644 +--- a/net/bluetooth/hidp/core.c ++++ b/net/bluetooth/hidp/core.c +@@ -1282,7 +1282,7 @@ static int hidp_session_thread(void *arg) + + /* cleanup runtime environment */ + remove_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait); +- remove_wait_queue(sk_sleep(session->intr_sock->sk), &ctrl_wait); ++ remove_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait); + wake_up_interruptible(&session->report_queue); + hidp_del_timer(session); + +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c +index 11012a509070..204b6ebd2a24 100644 +--- a/net/bluetooth/l2cap_core.c ++++ b/net/bluetooth/l2cap_core.c +@@ -414,6 +414,9 @@ static void l2cap_chan_timeout(struct work_struct *work) + BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); + + mutex_lock(&conn->chan_lock); ++ /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling ++ * this work. No need to call l2cap_chan_hold(chan) here again. 
++ */ + l2cap_chan_lock(chan); + + if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) +@@ -426,12 +429,12 @@ static void l2cap_chan_timeout(struct work_struct *work) + + l2cap_chan_close(chan, reason); + +- l2cap_chan_unlock(chan); +- + chan->ops->close(chan); +- mutex_unlock(&conn->chan_lock); + ++ l2cap_chan_unlock(chan); + l2cap_chan_put(chan); ++ ++ mutex_unlock(&conn->chan_lock); + } + + struct l2cap_chan *l2cap_chan_create(void) +@@ -442,6 +445,8 @@ struct l2cap_chan *l2cap_chan_create(void) + if (!chan) + return NULL; + ++ skb_queue_head_init(&chan->tx_q); ++ skb_queue_head_init(&chan->srej_q); + mutex_init(&chan->lock); + + /* Set default lock nesting level */ +@@ -507,7 +512,9 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan) + chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; + chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO; + chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; ++ + chan->conf_state = 0; ++ set_bit(CONF_NOT_COMPLETE, &chan->conf_state); + + set_bit(FLAG_FORCE_ACTIVE, &chan->flags); + } +@@ -1725,9 +1732,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) + + l2cap_chan_del(chan, err); + +- l2cap_chan_unlock(chan); +- + chan->ops->close(chan); ++ ++ l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + } + +@@ -4104,7 +4111,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, + return 0; + } + +- if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { ++ if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 && ++ chan->state != BT_CONNECTED) { + cmd_reject_invalid_cid(conn, cmd->ident, chan->scid, + chan->dcid); + goto unlock; +@@ -4327,6 +4335,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, + return 0; + } + ++ l2cap_chan_hold(chan); + l2cap_chan_lock(chan); + + rsp.dcid = cpu_to_le16(chan->scid); +@@ -4335,12 +4344,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, + + chan->ops->set_shutdown(chan); + +- l2cap_chan_hold(chan); + l2cap_chan_del(chan, ECONNRESET); + +- l2cap_chan_unlock(chan); +- + chan->ops->close(chan); ++ ++ l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + + mutex_unlock(&conn->chan_lock); +@@ -4372,20 +4380,21 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, + return 0; + } + ++ l2cap_chan_hold(chan); + l2cap_chan_lock(chan); + + if (chan->state != BT_DISCONN) { + l2cap_chan_unlock(chan); ++ l2cap_chan_put(chan); + mutex_unlock(&conn->chan_lock); + return 0; + } + +- l2cap_chan_hold(chan); + l2cap_chan_del(chan, 0); + +- l2cap_chan_unlock(chan); +- + chan->ops->close(chan); ++ ++ l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + + mutex_unlock(&conn->chan_lock); +@@ -6670,9 +6679,10 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) + goto drop; + } + +- if ((chan->mode == L2CAP_MODE_ERTM || +- chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb)) +- goto drop; ++ if (chan->ops->filter) { ++ if (chan->ops->filter(chan, skb)) ++ goto drop; ++ } + + if (!control->sframe) { + int err; +diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c +index a8ba752732c9..f46f59129bf3 100644 +--- a/net/bluetooth/l2cap_sock.c ++++ b/net/bluetooth/l2cap_sock.c +@@ -1038,7 +1038,7 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg, + } + + /* Kill socket (only if zapped and orphan) +- * Must be called on unlocked socket. ++ * Must be called on unlocked socket, with l2cap channel lock. 
+ */ + static void l2cap_sock_kill(struct sock *sk) + { +@@ -1189,6 +1189,7 @@ static int l2cap_sock_release(struct socket *sock) + { + struct sock *sk = sock->sk; + int err; ++ struct l2cap_chan *chan; + + BT_DBG("sock %p, sk %p", sock, sk); + +@@ -1198,9 +1199,17 @@ static int l2cap_sock_release(struct socket *sock) + bt_sock_unlink(&l2cap_sk_list, sk); + + err = l2cap_sock_shutdown(sock, 2); ++ chan = l2cap_pi(sk)->chan; ++ ++ l2cap_chan_hold(chan); ++ l2cap_chan_lock(chan); + + sock_orphan(sk); + l2cap_sock_kill(sk); ++ ++ l2cap_chan_unlock(chan); ++ l2cap_chan_put(chan); ++ + return err; + } + +@@ -1218,12 +1227,15 @@ static void l2cap_sock_cleanup_listen(struct sock *parent) + BT_DBG("child chan %p state %s", chan, + state_to_string(chan->state)); + ++ l2cap_chan_hold(chan); + l2cap_chan_lock(chan); ++ + __clear_chan_timer(chan); + l2cap_chan_close(chan, ECONNRESET); +- l2cap_chan_unlock(chan); +- + l2cap_sock_kill(sk); ++ ++ l2cap_chan_unlock(chan); ++ l2cap_chan_put(chan); + } + } + +@@ -1328,8 +1340,6 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err) + + parent = bt_sk(sk)->parent; + +- sock_set_flag(sk, SOCK_ZAPPED); +- + switch (chan->state) { + case BT_OPEN: + case BT_BOUND: +@@ -1356,8 +1366,11 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err) + + break; + } +- + release_sock(sk); ++ ++ /* Only zap after cleanup to avoid use after free race */ ++ sock_set_flag(sk, SOCK_ZAPPED); ++ + } + + static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state, +@@ -1463,6 +1476,19 @@ static void l2cap_sock_suspend_cb(struct l2cap_chan *chan) + sk->sk_state_change(sk); + } + ++static int l2cap_sock_filter(struct l2cap_chan *chan, struct sk_buff *skb) ++{ ++ struct sock *sk = chan->data; ++ ++ switch (chan->mode) { ++ case L2CAP_MODE_ERTM: ++ case L2CAP_MODE_STREAMING: ++ return sk_filter(sk, skb); ++ } ++ ++ return 0; ++} ++ + static const struct l2cap_ops l2cap_chan_ops = { + .name = "L2CAP Socket Interface", + .new_connection = l2cap_sock_new_connection_cb, +@@ -1477,6 +1503,7 @@ static const struct l2cap_ops l2cap_chan_ops = { + .set_shutdown = l2cap_sock_set_shutdown_cb, + .get_sndtimeo = l2cap_sock_get_sndtimeo_cb, + .alloc_skb = l2cap_sock_alloc_skb_cb, ++ .filter = l2cap_sock_filter, + }; + + static void l2cap_sock_destruct(struct sock *sk) +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c +index ba24f613c0fc..7aef6d23bc77 100644 +--- a/net/bluetooth/mgmt.c ++++ b/net/bluetooth/mgmt.c +@@ -219,12 +219,15 @@ static u8 mgmt_status_table[] = { + MGMT_STATUS_TIMEOUT, /* Instant Passed */ + MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */ + MGMT_STATUS_FAILED, /* Transaction Collision */ ++ MGMT_STATUS_FAILED, /* Reserved for future use */ + MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */ + MGMT_STATUS_REJECTED, /* QoS Rejected */ + MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */ + MGMT_STATUS_REJECTED, /* Insufficient Security */ + MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */ ++ MGMT_STATUS_FAILED, /* Reserved for future use */ + MGMT_STATUS_BUSY, /* Role Switch Pending */ ++ MGMT_STATUS_FAILED, /* Reserved for future use */ + MGMT_STATUS_FAILED, /* Slot Violation */ + MGMT_STATUS_FAILED, /* Role Switch Failed */ + MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */ +@@ -635,7 +638,8 @@ static u32 get_supported_settings(struct hci_dev *hdev) + + if (lmp_ssp_capable(hdev)) { + settings |= MGMT_SETTING_SSP; +- settings |= MGMT_SETTING_HS; ++ if (IS_ENABLED(CONFIG_BT_HS)) ++ settings |= 
MGMT_SETTING_HS; + } + + if (lmp_sc_capable(hdev)) +@@ -1645,6 +1649,10 @@ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) + + BT_DBG("request for %s", hdev->name); + ++ if (!IS_ENABLED(CONFIG_BT_HS)) ++ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, ++ MGMT_STATUS_NOT_SUPPORTED); ++ + status = mgmt_bredr_support(hdev); + if (status) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status); +@@ -6082,6 +6090,9 @@ static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data, + for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) { + cur_len = data[i]; + ++ if (!cur_len) ++ continue; ++ + if (data[i + 1] == EIR_FLAGS && + (!is_adv_data || flags_managed(adv_flags))) + return false; +diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c +index 6670b7ffc200..cedfce6f397e 100644 +--- a/net/bluetooth/smp.c ++++ b/net/bluetooth/smp.c +@@ -2636,6 +2636,15 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) + if (skb->len < sizeof(*key)) + return SMP_INVALID_PARAMS; + ++ /* Check if remote and local public keys are the same and debug key is ++ * not in use. ++ */ ++ if (!test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags) && ++ !crypto_memneq(key, smp->local_pk, 64)) { ++ bt_dev_err(hdev, "Remote and local public keys are identical"); ++ return SMP_UNSPECIFIED; ++ } ++ + memcpy(smp->remote_pk, key, 64); + + if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags)) { +diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c +index bef3cca4d6b4..be4f5e9e1ef5 100644 +--- a/net/bridge/br_device.c ++++ b/net/bridge/br_device.c +@@ -178,6 +178,7 @@ static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev, + sum.rx_packets += tmp.rx_packets; + } + ++ netdev_stats_to_stats64(stats, &dev->stats); + stats->tx_bytes = sum.tx_bytes; + stats->tx_packets = sum.tx_packets; + stats->rx_bytes = sum.rx_bytes; +diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c +index 2e05a7949354..95116ee1bb6b 100644 +--- a/net/bridge/br_if.c ++++ b/net/bridge/br_if.c +@@ -486,7 +486,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) + struct net_bridge_port *p; + int err = 0; + unsigned br_hr, dev_hr; +- bool changed_addr; ++ bool changed_addr, fdb_synced = false; + + /* Don't allow bridging non-ethernet like devices, or DSA-enabled + * master network devices since the bridge layer rx_handler prevents +@@ -520,6 +520,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) + + err = dev_set_allmulti(dev, 1); + if (err) { ++ br_multicast_del_port(p); + kfree(p); /* kobject not yet init'd, manually free */ + goto err1; + } +@@ -556,6 +557,19 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) + list_add_rcu(&p->list, &br->port_list); + + nbp_update_port_count(br); ++ if (!br_promisc_port(p) && (p->dev->priv_flags & IFF_UNICAST_FLT)) { ++ /* When updating the port count we also update all ports' ++ * promiscuous mode. ++ * A port leaving promiscuous mode normally gets the bridge's ++ * fdb synced to the unicast filter (if supported), however, ++ * `br_port_clear_promisc` does not distinguish between ++ * non-promiscuous ports and *new* ports, so we need to ++ * sync explicitly here. 
++ */ ++ fdb_synced = br_fdb_sync_static(br, p) == 0; ++ if (!fdb_synced) ++ netdev_err(dev, "failed to sync bridge static fdb addresses to this port\n"); ++ } + + netdev_update_features(br->dev); + +@@ -596,6 +610,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) + return 0; + + err7: ++ if (fdb_synced) ++ br_fdb_unsync_static(br, p); + list_del_rcu(&p->list); + br_fdb_delete_by_port(br, p, 0, 1); + nbp_update_port_count(br); +@@ -609,6 +625,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) + err3: + sysfs_remove_link(br->ifobj, p->dev->name); + err2: ++ br_multicast_del_port(p); + kobject_put(&p->kobj); + dev_set_allmulti(dev, -1); + err1: +diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c +index 62e045c9d452..7104d5e64abb 100644 +--- a/net/bridge/br_netfilter_hooks.c ++++ b/net/bridge/br_netfilter_hooks.c +@@ -716,6 +716,11 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff + mtu_reserved = nf_bridge_mtu_reduction(skb); + mtu = skb->dev->mtu; + ++ if (nf_bridge->pkt_otherhost) { ++ skb->pkt_type = PACKET_OTHERHOST; ++ nf_bridge->pkt_otherhost = false; ++ } ++ + if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu) + mtu = nf_bridge->frag_max_size; + +@@ -809,8 +814,6 @@ static unsigned int br_nf_post_routing(void *priv, + else + return NF_ACCEPT; + +- /* We assume any code from br_dev_queue_push_xmit onwards doesn't care +- * about the value of skb->pkt_type. */ + if (skb->pkt_type == PACKET_OTHERHOST) { + skb->pkt_type = PACKET_HOST; + nf_bridge->pkt_otherhost = true; +diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c +index 5172caac645c..6ac7ad4f3547 100644 +--- a/net/bridge/br_vlan.c ++++ b/net/bridge/br_vlan.c +@@ -238,8 +238,10 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags) + } + + masterv = br_vlan_get_master(br, v->vid); +- if (!masterv) ++ if (!masterv) { ++ err = -ENOMEM; + goto out_filt; ++ } + v->brvlan = masterv; + v->stats = masterv->stats; + } +diff --git a/net/bridge/netfilter/ebt_limit.c b/net/bridge/netfilter/ebt_limit.c +index 517e78befcb2..61a9f1be1263 100644 +--- a/net/bridge/netfilter/ebt_limit.c ++++ b/net/bridge/netfilter/ebt_limit.c +@@ -105,6 +105,7 @@ static struct xt_match ebt_limit_mt_reg __read_mostly = { + .match = ebt_limit_mt, + .checkentry = ebt_limit_mt_check, + .matchsize = sizeof(struct ebt_limit_info), ++ .usersize = offsetof(struct ebt_limit_info, prev), + #ifdef CONFIG_COMPAT + .compatsize = sizeof(struct ebt_compat_limit_info), + #endif +diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c +index a0443d40d677..a28ffbbf7450 100644 +--- a/net/caif/caif_dev.c ++++ b/net/caif/caif_dev.c +@@ -303,7 +303,7 @@ static void dev_flowctrl(struct net_device *dev, int on) + caifd_put(caifd); + } + +-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, ++int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, + struct cflayer *link_support, int head_room, + struct cflayer **layer, + int (**rcv_func)(struct sk_buff *, struct net_device *, +@@ -314,11 +314,12 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, + enum cfcnfg_phy_preference pref; + struct cfcnfg *cfg = get_cfcnfg(dev_net(dev)); + struct caif_device_entry_list *caifdevs; ++ int res; + + caifdevs = caif_device_list(dev_net(dev)); + caifd = caif_device_alloc(dev); + if (!caifd) +- return; ++ return -ENOMEM; + *layer = &caifd->layer; + spin_lock_init(&caifd->flow_lock); + +@@ -340,7 +341,7 @@ void 
caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, + sizeof(caifd->layer.name) - 1); + caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0; + caifd->layer.transmit = transmit; +- cfcnfg_add_phy_layer(cfg, ++ res = cfcnfg_add_phy_layer(cfg, + dev, + &caifd->layer, + pref, +@@ -350,6 +351,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, + mutex_unlock(&caifdevs->lock); + if (rcv_func) + *rcv_func = receive; ++ return res; + } + EXPORT_SYMBOL(caif_enroll_dev); + +@@ -364,6 +366,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, + struct cflayer *layer, *link_support; + int head_room = 0; + struct caif_device_entry_list *caifdevs; ++ int res; + + cfg = get_cfcnfg(dev_net(dev)); + caifdevs = caif_device_list(dev_net(dev)); +@@ -389,8 +392,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, + break; + } + } +- caif_enroll_dev(dev, caifdev, link_support, head_room, ++ res = caif_enroll_dev(dev, caifdev, link_support, head_room, + &layer, NULL); ++ if (res) ++ cfserl_release(link_support); + caifdev->flowctrl = dev_flowctrl; + break; + +diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c +index 92cbbd2afddb..9367f260afeb 100644 +--- a/net/caif/caif_socket.c ++++ b/net/caif/caif_socket.c +@@ -539,7 +539,8 @@ static int caif_seqpkt_sendmsg(struct socket *sock, struct msghdr *msg, + goto err; + + ret = -EINVAL; +- if (unlikely(msg->msg_iter.iov->iov_base == NULL)) ++ if (unlikely(msg->msg_iter.nr_segs == 0) || ++ unlikely(msg->msg_iter.iov->iov_base == NULL)) + goto err; + noblock = msg->msg_flags & MSG_DONTWAIT; + +diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c +index 5cd44f001f64..485dde566c1a 100644 +--- a/net/caif/caif_usb.c ++++ b/net/caif/caif_usb.c +@@ -116,6 +116,11 @@ static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN], + return (struct cflayer *) this; + } + ++static void cfusbl_release(struct cflayer *layer) ++{ ++ kfree(layer); ++} ++ + static struct packet_type caif_usb_type __read_mostly = { + .type = cpu_to_be16(ETH_P_802_EX1), + }; +@@ -128,6 +133,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, + struct cflayer *layer, *link_support; + struct usbnet *usbnet; + struct usb_device *usbdev; ++ int res; + + /* Check whether we have a NCM device, and find its VID/PID. 
*/ + if (!(dev->dev.parent && dev->dev.parent->driver && +@@ -170,8 +176,11 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, + if (dev->num_tx_queues > 1) + pr_warn("USB device uses more than one tx queue\n"); + +- caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN, ++ res = caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN, + &layer, &caif_usb_type.func); ++ if (res) ++ goto err; ++ + if (!pack_added) + dev_add_pack(&caif_usb_type); + pack_added = true; +@@ -181,6 +190,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, + layer->name[sizeof(layer->name) - 1] = 0; + + return 0; ++err: ++ cfusbl_release(link_support); ++ return res; + } + + static struct notifier_block caif_device_notifier = { +diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c +index fa39fc298708..c45b531a6cd5 100644 +--- a/net/caif/cfcnfg.c ++++ b/net/caif/cfcnfg.c +@@ -455,7 +455,7 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv, + rcu_read_unlock(); + } + +-void ++int + cfcnfg_add_phy_layer(struct cfcnfg *cnfg, + struct net_device *dev, struct cflayer *phy_layer, + enum cfcnfg_phy_preference pref, +@@ -464,7 +464,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, + { + struct cflayer *frml; + struct cfcnfg_phyinfo *phyinfo = NULL; +- int i; ++ int i, res = 0; + u8 phyid; + + mutex_lock(&cnfg->lock); +@@ -478,12 +478,15 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, + goto got_phyid; + } + pr_warn("Too many CAIF Link Layers (max 6)\n"); ++ res = -EEXIST; + goto out; + + got_phyid: + phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC); +- if (!phyinfo) ++ if (!phyinfo) { ++ res = -ENOMEM; + goto out_err; ++ } + + phy_layer->id = phyid; + phyinfo->pref = pref; +@@ -497,8 +500,10 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, + + frml = cffrml_create(phyid, fcs); + +- if (!frml) ++ if (!frml) { ++ res = -ENOMEM; + goto out_err; ++ } + phyinfo->frm_layer = frml; + layer_set_up(frml, cnfg->mux); + +@@ -516,11 +521,12 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, + list_add_rcu(&phyinfo->node, &cnfg->phys); + out: + mutex_unlock(&cnfg->lock); +- return; ++ return res; + + out_err: + kfree(phyinfo); + mutex_unlock(&cnfg->lock); ++ return res; + } + EXPORT_SYMBOL(cfcnfg_add_phy_layer); + +diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c +index ce60f06d76de..af1e1e36dc90 100644 +--- a/net/caif/cfserl.c ++++ b/net/caif/cfserl.c +@@ -31,6 +31,11 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt); + static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, + int phyid); + ++void cfserl_release(struct cflayer *layer) ++{ ++ kfree(layer); ++} ++ + struct cflayer *cfserl_create(int instance, bool use_stx) + { + struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC); +diff --git a/net/can/bcm.c b/net/can/bcm.c +index c99e7c75eeee..369326715b9c 100644 +--- a/net/can/bcm.c ++++ b/net/can/bcm.c +@@ -127,7 +127,7 @@ struct bcm_sock { + struct sock sk; + int bound; + int ifindex; +- struct notifier_block notifier; ++ struct list_head notifier; + struct list_head rx_ops; + struct list_head tx_ops; + unsigned long dropped_usr_msgs; +@@ -135,6 +135,10 @@ struct bcm_sock { + char procname [32]; /* inode number in decimal with \0 */ + }; + ++static LIST_HEAD(bcm_notifier_list); ++static DEFINE_SPINLOCK(bcm_notifier_lock); ++static struct bcm_sock *bcm_busy_notifier; ++ + static inline struct bcm_sock *bcm_sk(const struct sock *sk) + { + return (struct bcm_sock *)sk; +@@ -405,6 
+409,7 @@ static void bcm_tx_timeout_tsklet(unsigned long data) + if (!op->count && (op->flags & TX_COUNTEVT)) { + + /* create notification to user */ ++ memset(&msg_head, 0, sizeof(msg_head)); + msg_head.opcode = TX_EXPIRED; + msg_head.flags = op->flags; + msg_head.count = op->count; +@@ -452,6 +457,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data) + /* this element is not throttled anymore */ + data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV); + ++ memset(&head, 0, sizeof(head)); + head.opcode = RX_CHANGED; + head.flags = op->flags; + head.count = op->count; +@@ -566,6 +572,7 @@ static void bcm_rx_timeout_tsklet(unsigned long data) + struct bcm_msg_head msg_head; + + /* create notification to user */ ++ memset(&msg_head, 0, sizeof(msg_head)); + msg_head.opcode = RX_TIMEOUT; + msg_head.flags = op->flags; + msg_head.count = op->count; +@@ -832,6 +839,7 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh, + bcm_rx_handler, op); + + list_del(&op->list); ++ synchronize_rcu(); + bcm_remove_op(op); + return 1; /* done */ + } +@@ -1436,20 +1444,15 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) + /* + * notification handler for netdevice status changes + */ +-static int bcm_notifier(struct notifier_block *nb, unsigned long msg, +- void *ptr) ++static void bcm_notify(struct bcm_sock *bo, unsigned long msg, ++ struct net_device *dev) + { +- struct net_device *dev = netdev_notifier_info_to_dev(ptr); +- struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier); + struct sock *sk = &bo->sk; + struct bcm_op *op; + int notify_enodev = 0; + + if (!net_eq(dev_net(dev), &init_net)) +- return NOTIFY_DONE; +- +- if (dev->type != ARPHRD_CAN) +- return NOTIFY_DONE; ++ return; + + switch (msg) { + +@@ -1484,7 +1487,28 @@ static int bcm_notifier(struct notifier_block *nb, unsigned long msg, + sk->sk_error_report(sk); + } + } ++} ++ ++static int bcm_notifier(struct notifier_block *nb, unsigned long msg, ++ void *ptr) ++{ ++ struct net_device *dev = netdev_notifier_info_to_dev(ptr); + ++ if (dev->type != ARPHRD_CAN) ++ return NOTIFY_DONE; ++ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN) ++ return NOTIFY_DONE; ++ if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */ ++ return NOTIFY_DONE; ++ ++ spin_lock(&bcm_notifier_lock); ++ list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) { ++ spin_unlock(&bcm_notifier_lock); ++ bcm_notify(bcm_busy_notifier, msg, dev); ++ spin_lock(&bcm_notifier_lock); ++ } ++ bcm_busy_notifier = NULL; ++ spin_unlock(&bcm_notifier_lock); + return NOTIFY_DONE; + } + +@@ -1504,9 +1528,9 @@ static int bcm_init(struct sock *sk) + INIT_LIST_HEAD(&bo->rx_ops); + + /* set notifier */ +- bo->notifier.notifier_call = bcm_notifier; +- +- register_netdevice_notifier(&bo->notifier); ++ spin_lock(&bcm_notifier_lock); ++ list_add_tail(&bo->notifier, &bcm_notifier_list); ++ spin_unlock(&bcm_notifier_lock); + + return 0; + } +@@ -1527,7 +1551,14 @@ static int bcm_release(struct socket *sock) + + /* remove bcm_ops, timer, rx_unregister(), etc. 
*/ + +- unregister_netdevice_notifier(&bo->notifier); ++ spin_lock(&bcm_notifier_lock); ++ while (bcm_busy_notifier == bo) { ++ spin_unlock(&bcm_notifier_lock); ++ schedule_timeout_uninterruptible(1); ++ spin_lock(&bcm_notifier_lock); ++ } ++ list_del(&bo->notifier); ++ spin_unlock(&bcm_notifier_lock); + + lock_sock(sk); + +@@ -1559,9 +1590,13 @@ static int bcm_release(struct socket *sock) + REGMASK(op->can_id), + bcm_rx_handler, op); + +- bcm_remove_op(op); + } + ++ synchronize_rcu(); ++ ++ list_for_each_entry_safe(op, next, &bo->rx_ops, list) ++ bcm_remove_op(op); ++ + /* remove procfs entry */ + if (proc_dir && bo->bcm_proc_read) + remove_proc_entry(bo->procname, proc_dir); +@@ -1713,6 +1748,10 @@ static const struct can_proto bcm_can_proto = { + .prot = &bcm_proto, + }; + ++static struct notifier_block canbcm_notifier = { ++ .notifier_call = bcm_notifier ++}; ++ + static int __init bcm_module_init(void) + { + int err; +@@ -1727,6 +1766,8 @@ static int __init bcm_module_init(void) + + /* create /proc/net/can-bcm directory */ + proc_dir = proc_mkdir("can-bcm", init_net.proc_net); ++ register_netdevice_notifier(&canbcm_notifier); ++ + return 0; + } + +@@ -1736,6 +1777,8 @@ static void __exit bcm_module_exit(void) + + if (proc_dir) + remove_proc_entry("can-bcm", init_net.proc_net); ++ ++ unregister_netdevice_notifier(&canbcm_notifier); + } + + module_init(bcm_module_init); +diff --git a/net/can/gw.c b/net/can/gw.c +index 81650affa3fa..1867000f8a65 100644 +--- a/net/can/gw.c ++++ b/net/can/gw.c +@@ -497,6 +497,7 @@ static int cgw_notifier(struct notifier_block *nb, + if (gwj->src.dev == dev || gwj->dst.dev == dev) { + hlist_del(&gwj->list); + cgw_unregister_filter(gwj); ++ synchronize_rcu(); + kmem_cache_free(cgw_cache, gwj); + } + } +@@ -941,6 +942,7 @@ static void cgw_remove_all_jobs(void) + hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { + hlist_del(&gwj->list); + cgw_unregister_filter(gwj); ++ synchronize_rcu(); + kmem_cache_free(cgw_cache, gwj); + } + } +@@ -1008,6 +1010,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh) + + hlist_del(&gwj->list); + cgw_unregister_filter(gwj); ++ synchronize_rcu(); + kmem_cache_free(cgw_cache, gwj); + err = 0; + break; +diff --git a/net/can/raw.c b/net/can/raw.c +index 6dc546a06673..082965c8dcaf 100644 +--- a/net/can/raw.c ++++ b/net/can/raw.c +@@ -84,7 +84,7 @@ struct raw_sock { + struct sock sk; + int bound; + int ifindex; +- struct notifier_block notifier; ++ struct list_head notifier; + int loopback; + int recv_own_msgs; + int fd_frames; +@@ -96,6 +96,10 @@ struct raw_sock { + struct uniqframe __percpu *uniq; + }; + ++static LIST_HEAD(raw_notifier_list); ++static DEFINE_SPINLOCK(raw_notifier_lock); ++static struct raw_sock *raw_busy_notifier; ++ + /* + * Return pointer to store the extra msg flags for raw_recvmsg(). 
+ * We use the space of one unsigned int beyond the 'struct sockaddr_can' +@@ -260,21 +264,16 @@ static int raw_enable_allfilters(struct net_device *dev, struct sock *sk) + return err; + } + +-static int raw_notifier(struct notifier_block *nb, +- unsigned long msg, void *ptr) ++static void raw_notify(struct raw_sock *ro, unsigned long msg, ++ struct net_device *dev) + { +- struct net_device *dev = netdev_notifier_info_to_dev(ptr); +- struct raw_sock *ro = container_of(nb, struct raw_sock, notifier); + struct sock *sk = &ro->sk; + + if (!net_eq(dev_net(dev), &init_net)) +- return NOTIFY_DONE; +- +- if (dev->type != ARPHRD_CAN) +- return NOTIFY_DONE; ++ return; + + if (ro->ifindex != dev->ifindex) +- return NOTIFY_DONE; ++ return; + + switch (msg) { + +@@ -303,7 +302,28 @@ static int raw_notifier(struct notifier_block *nb, + sk->sk_error_report(sk); + break; + } ++} ++ ++static int raw_notifier(struct notifier_block *nb, unsigned long msg, ++ void *ptr) ++{ ++ struct net_device *dev = netdev_notifier_info_to_dev(ptr); ++ ++ if (dev->type != ARPHRD_CAN) ++ return NOTIFY_DONE; ++ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN) ++ return NOTIFY_DONE; ++ if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */ ++ return NOTIFY_DONE; + ++ spin_lock(&raw_notifier_lock); ++ list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) { ++ spin_unlock(&raw_notifier_lock); ++ raw_notify(raw_busy_notifier, msg, dev); ++ spin_lock(&raw_notifier_lock); ++ } ++ raw_busy_notifier = NULL; ++ spin_unlock(&raw_notifier_lock); + return NOTIFY_DONE; + } + +@@ -332,9 +352,9 @@ static int raw_init(struct sock *sk) + return -ENOMEM; + + /* set notifier */ +- ro->notifier.notifier_call = raw_notifier; +- +- register_netdevice_notifier(&ro->notifier); ++ spin_lock(&raw_notifier_lock); ++ list_add_tail(&ro->notifier, &raw_notifier_list); ++ spin_unlock(&raw_notifier_lock); + + return 0; + } +@@ -349,7 +369,14 @@ static int raw_release(struct socket *sock) + + ro = raw_sk(sk); + +- unregister_netdevice_notifier(&ro->notifier); ++ spin_lock(&raw_notifier_lock); ++ while (raw_busy_notifier == ro) { ++ spin_unlock(&raw_notifier_lock); ++ schedule_timeout_uninterruptible(1); ++ spin_lock(&raw_notifier_lock); ++ } ++ list_del(&ro->notifier); ++ spin_unlock(&raw_notifier_lock); + + lock_sock(sk); + +@@ -514,10 +541,18 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, + return -EFAULT; + } + ++ rtnl_lock(); + lock_sock(sk); + +- if (ro->bound && ro->ifindex) ++ if (ro->bound && ro->ifindex) { + dev = dev_get_by_index(&init_net, ro->ifindex); ++ if (!dev) { ++ if (count > 1) ++ kfree(filter); ++ err = -ENODEV; ++ goto out_fil; ++ } ++ } + + if (ro->bound) { + /* (try to) register the new filters */ +@@ -554,6 +589,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, + dev_put(dev); + + release_sock(sk); ++ rtnl_unlock(); + + break; + +@@ -566,10 +602,16 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, + + err_mask &= CAN_ERR_MASK; + ++ rtnl_lock(); + lock_sock(sk); + +- if (ro->bound && ro->ifindex) ++ if (ro->bound && ro->ifindex) { + dev = dev_get_by_index(&init_net, ro->ifindex); ++ if (!dev) { ++ err = -ENODEV; ++ goto out_err; ++ } ++ } + + /* remove current error mask */ + if (ro->bound) { +@@ -591,6 +633,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, + dev_put(dev); + + release_sock(sk); ++ rtnl_unlock(); + + break; + +@@ -857,6 +900,10 @@ static const struct can_proto raw_can_proto = { + .prot = 
&raw_proto, + }; + ++static struct notifier_block canraw_notifier = { ++ .notifier_call = raw_notifier ++}; ++ + static __init int raw_module_init(void) + { + int err; +@@ -866,6 +913,8 @@ static __init int raw_module_init(void) + err = can_proto_register(&raw_can_proto); + if (err < 0) + printk(KERN_ERR "can: registration of raw protocol failed\n"); ++ else ++ register_netdevice_notifier(&canraw_notifier); + + return err; + } +@@ -873,6 +922,7 @@ static __init int raw_module_init(void) + static __exit void raw_module_exit(void) + { + can_proto_unregister(&raw_can_proto); ++ unregister_netdevice_notifier(&canraw_notifier); + } + + module_init(raw_module_init); +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c +index 7e27cabb04ef..776d5ab24af6 100644 +--- a/net/ceph/messenger.c ++++ b/net/ceph/messenger.c +@@ -2983,6 +2983,11 @@ static void con_fault(struct ceph_connection *con) + ceph_msg_put(con->in_msg); + con->in_msg = NULL; + } ++ if (con->out_msg) { ++ BUG_ON(con->out_msg->con != con); ++ ceph_msg_put(con->out_msg); ++ con->out_msg = NULL; ++ } + + /* Requeue anything that hasn't been acked */ + list_splice_init(&con->out_sent, &con->out_queue); +diff --git a/net/compat.c b/net/compat.c +index 633fcf6ee369..1f08f0e49e07 100644 +--- a/net/compat.c ++++ b/net/compat.c +@@ -159,7 +159,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk, + if (kcmlen > stackbuf_size) + kcmsg_base = kcmsg = sock_kmalloc(sk, kcmlen, GFP_KERNEL); + if (kcmsg == NULL) +- return -ENOBUFS; ++ return -ENOMEM; + + /* Now copy them over neatly. */ + memset(kcmsg, 0, kcmlen); +@@ -284,6 +284,7 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm) + break; + } + /* Bump the usage count and install the file. */ ++ __receive_sock(fp[i]); + fd_install(new_fd, get_file(fp[i])); + } + +diff --git a/net/core/dev.c b/net/core/dev.c +index c0121d9b0985..c6ab36e553f8 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -82,6 +82,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -191,7 +192,7 @@ static DEFINE_SPINLOCK(napi_hash_lock); + static unsigned int napi_gen_id = NR_CPUS; + static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); + +-static seqcount_t devnet_rename_seq; ++static DECLARE_RWSEM(devnet_rename_sem); + + static inline void dev_base_seq_inc(struct net *net) + { +@@ -868,33 +869,28 @@ EXPORT_SYMBOL(dev_get_by_index); + * @net: network namespace + * @name: a pointer to the buffer where the name will be stored. + * @ifindex: the ifindex of the interface to get the name from. +- * +- * The use of raw_seqcount_begin() and cond_resched() before +- * retrying is required as we want to give the writers a chance +- * to complete when CONFIG_PREEMPT is not set. 
+ */ + int netdev_get_name(struct net *net, char *name, int ifindex) + { + struct net_device *dev; +- unsigned int seq; ++ int ret; + +-retry: +- seq = raw_seqcount_begin(&devnet_rename_seq); ++ down_read(&devnet_rename_sem); + rcu_read_lock(); ++ + dev = dev_get_by_index_rcu(net, ifindex); + if (!dev) { +- rcu_read_unlock(); +- return -ENODEV; ++ ret = -ENODEV; ++ goto out; + } + + strcpy(name, dev->name); +- rcu_read_unlock(); +- if (read_seqcount_retry(&devnet_rename_seq, seq)) { +- cond_resched(); +- goto retry; +- } + +- return 0; ++ ret = 0; ++out: ++ rcu_read_unlock(); ++ up_read(&devnet_rename_sem); ++ return ret; + } + + /** +@@ -1159,10 +1155,10 @@ int dev_change_name(struct net_device *dev, const char *newname) + if (dev->flags & IFF_UP) + return -EBUSY; + +- write_seqcount_begin(&devnet_rename_seq); ++ down_write(&devnet_rename_sem); + + if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { +- write_seqcount_end(&devnet_rename_seq); ++ up_write(&devnet_rename_sem); + return 0; + } + +@@ -1170,7 +1166,7 @@ int dev_change_name(struct net_device *dev, const char *newname) + + err = dev_get_valid_name(net, dev, newname); + if (err < 0) { +- write_seqcount_end(&devnet_rename_seq); ++ up_write(&devnet_rename_sem); + return err; + } + +@@ -1185,11 +1181,11 @@ int dev_change_name(struct net_device *dev, const char *newname) + if (ret) { + memcpy(dev->name, oldname, IFNAMSIZ); + dev->name_assign_type = old_assign_type; +- write_seqcount_end(&devnet_rename_seq); ++ up_write(&devnet_rename_sem); + return ret; + } + +- write_seqcount_end(&devnet_rename_seq); ++ up_write(&devnet_rename_sem); + + netdev_adjacent_rename_links(dev, oldname); + +@@ -1210,7 +1206,7 @@ int dev_change_name(struct net_device *dev, const char *newname) + /* err >= 0 after dev_alloc_name() or stores the first errno */ + if (err >= 0) { + err = ret; +- write_seqcount_begin(&devnet_rename_seq); ++ down_write(&devnet_rename_sem); + memcpy(dev->name, oldname, IFNAMSIZ); + memcpy(oldname, newname, IFNAMSIZ); + dev->name_assign_type = old_assign_type; +@@ -4499,7 +4495,7 @@ static void flush_backlog(struct work_struct *work) + skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { + if (skb->dev->reg_state == NETREG_UNREGISTERING) { + __skb_unlink(skb, &sd->input_pkt_queue); +- kfree_skb(skb); ++ dev_kfree_skb_irq(skb); + input_queue_head_incr(sd); + } + } +@@ -5120,11 +5116,18 @@ EXPORT_SYMBOL(__napi_schedule); + * __napi_schedule_irqoff - schedule for receive + * @n: entry to schedule + * +- * Variant of __napi_schedule() assuming hard irqs are masked ++ * Variant of __napi_schedule() assuming hard irqs are masked. ++ * ++ * On PREEMPT_RT enabled kernels this maps to __napi_schedule() ++ * because the interrupt disabled assumption might not be true ++ * due to force-threaded interrupts and spinlock substitution. 
+ */ + void __napi_schedule_irqoff(struct napi_struct *n) + { +- ____napi_schedule(this_cpu_ptr(&softnet_data), n); ++ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) ++ ____napi_schedule(this_cpu_ptr(&softnet_data), n); ++ else ++ __napi_schedule(n); + } + EXPORT_SYMBOL(__napi_schedule_irqoff); + +@@ -5313,13 +5316,14 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, + pr_err_once("netif_napi_add() called with weight %d on device %s\n", + weight, dev->name); + napi->weight = weight; +- list_add(&napi->dev_list, &dev->napi_list); + napi->dev = dev; + #ifdef CONFIG_NETPOLL + spin_lock_init(&napi->poll_lock); + napi->poll_owner = -1; + #endif + set_bit(NAPI_STATE_SCHED, &napi->state); ++ set_bit(NAPI_STATE_NPSVC, &napi->state); ++ list_add_rcu(&napi->dev_list, &dev->napi_list); + napi_hash_add(napi); + } + EXPORT_SYMBOL(netif_napi_add); +@@ -7491,6 +7495,13 @@ int register_netdevice(struct net_device *dev) + rcu_barrier(); + + dev->reg_state = NETREG_UNREGISTERED; ++ /* We should put the kobject that hold in ++ * netdev_unregister_kobject(), otherwise ++ * the net device cannot be freed when ++ * driver calls free_netdev(), because the ++ * kobject is being hold. ++ */ ++ kobject_put(&dev->dev.kobj); + } + /* + * Prevent userspace races by waiting until the network +@@ -8435,7 +8446,7 @@ static void __net_exit default_device_exit(struct net *net) + continue; + + /* Leave virtual devices for the generic cleanup */ +- if (dev->rtnl_link_ops) ++ if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund) + continue; + + /* Push remaining network devices to init_net */ +diff --git a/net/core/devlink.c b/net/core/devlink.c +index 1b5063088f1a..785fbaac047f 100644 +--- a/net/core/devlink.c ++++ b/net/core/devlink.c +@@ -986,7 +986,7 @@ static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg, + err = ops->sb_occ_port_pool_get(devlink_port, devlink_sb->index, + pool_index, &cur, &max); + if (err && err != -EOPNOTSUPP) +- return err; ++ goto sb_occ_get_failure; + if (!err) { + if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur)) + goto nla_put_failure; +@@ -999,8 +999,10 @@ static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg, + return 0; + + nla_put_failure: ++ err = -EMSGSIZE; ++sb_occ_get_failure: + genlmsg_cancel(msg, hdr); +- return -EMSGSIZE; ++ return err; + } + + static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb, +diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c +index 955fc01a030a..5473af8cb4c9 100644 +--- a/net/core/fib_rules.c ++++ b/net/core/fib_rules.c +@@ -843,7 +843,7 @@ static void notify_rule_change(int event, struct fib_rule *rule, + { + struct net *net; + struct sk_buff *skb; +- int err = -ENOBUFS; ++ int err = -ENOMEM; + + net = ops->fro_net; + skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL); +diff --git a/net/core/filter.c b/net/core/filter.c +index 768688bfc7df..2b3a03f04cc1 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -1999,8 +1999,6 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb) + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; + } + +- /* Due to IPv6 header, MSS needs to be downgraded. */ +- skb_shinfo(skb)->gso_size -= len_diff; + /* Header must be checked, and gso_segs recomputed. */ + skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; + skb_shinfo(skb)->gso_segs = 0; +@@ -2035,8 +2033,6 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb) + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; + } + +- /* Due to IPv4 header, MSS can be upgraded. 
*/ +- skb_shinfo(skb)->gso_size += len_diff; + /* Header must be checked, and gso_segs recomputed. */ + skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; + skb_shinfo(skb)->gso_segs = 0; +@@ -2133,10 +2129,7 @@ static u32 __bpf_skb_min_len(const struct sk_buff *skb) + return min_len; + } + +-static u32 __bpf_skb_max_len(const struct sk_buff *skb) +-{ +- return skb->dev->mtu + skb->dev->hard_header_len; +-} ++#define BPF_SKB_MAX_LEN SKB_MAX_ALLOC + + static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len) + { +@@ -2157,7 +2150,7 @@ static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len) + BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, + u64, flags) + { +- u32 max_len = __bpf_skb_max_len(skb); ++ u32 max_len = BPF_SKB_MAX_LEN; + u32 min_len = __bpf_skb_min_len(skb); + int ret; + +diff --git a/net/core/neighbour.c b/net/core/neighbour.c +index 0e831671634d..92da79e7fd43 100644 +--- a/net/core/neighbour.c ++++ b/net/core/neighbour.c +@@ -1239,7 +1239,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, + * we can reinject the packet there. + */ + n2 = NULL; +- if (dst) { ++ if (dst && dst->obsolete != DST_OBSOLETE_DEAD) { + n2 = dst_neigh_lookup_skb(dst, skb); + if (n2) + n1 = n2; +@@ -2818,6 +2818,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) + *pos = cpu+1; + return per_cpu_ptr(tbl->stats, cpu); + } ++ (*pos)++; + return NULL; + } + +diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c +index 3333693d8052..3fb5d8ecc849 100644 +--- a/net/core/net-sysfs.c ++++ b/net/core/net-sysfs.c +@@ -1018,7 +1018,7 @@ static ssize_t show_trans_timeout(struct netdev_queue *queue, + trans_timeout = queue->trans_timeout; + spin_unlock_irq(&queue->_xmit_lock); + +- return sprintf(buf, "%lu", trans_timeout); ++ return sprintf(buf, fmt_ulong, trans_timeout); + } + + #ifdef CONFIG_XPS +diff --git a/net/core/netpoll.c b/net/core/netpoll.c +index 5de180a9b7f5..0fa6269c3f20 100644 +--- a/net/core/netpoll.c ++++ b/net/core/netpoll.c +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -178,7 +179,7 @@ static void poll_napi(struct net_device *dev) + { + struct napi_struct *napi; + +- list_for_each_entry(napi, &dev->napi_list, dev_list) { ++ list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) { + if (napi->poll_owner != smp_processor_id() && + spin_trylock(&napi->poll_lock)) { + poll_one_napi(napi); +@@ -661,15 +662,15 @@ EXPORT_SYMBOL_GPL(__netpoll_setup); + + int netpoll_setup(struct netpoll *np) + { +- struct net_device *ndev = NULL; ++ struct net_device *ndev = NULL, *dev = NULL; ++ struct net *net = current->nsproxy->net_ns; + struct in_device *in_dev; + int err; + + rtnl_lock(); +- if (np->dev_name[0]) { +- struct net *net = current->nsproxy->net_ns; ++ if (np->dev_name[0]) + ndev = __dev_get_by_name(net, np->dev_name); +- } ++ + if (!ndev) { + np_err(np, "%s doesn't exist, aborting\n", np->dev_name); + err = -ENODEV; +@@ -677,6 +678,19 @@ int netpoll_setup(struct netpoll *np) + } + dev_hold(ndev); + ++ /* bring up DSA management network devices up first */ ++ for_each_netdev(net, dev) { ++ if (!netdev_uses_dsa(dev)) ++ continue; ++ ++ err = dev_change_flags(dev, dev->flags | IFF_UP); ++ if (err < 0) { ++ np_err(np, "%s failed to open %s\n", ++ np->dev_name, dev->name); ++ goto put; ++ } ++ } ++ + if (netdev_master_upper_dev_get(ndev)) { + np_err(np, "%s is a slave device, aborting\n", np->dev_name); + err = -EBUSY; +diff --git a/net/core/pktgen.c 
b/net/core/pktgen.c +index 3708301772f9..a9d8c8964b4a 100644 +--- a/net/core/pktgen.c ++++ b/net/core/pktgen.c +@@ -3555,7 +3555,7 @@ static int pktgen_thread_worker(void *arg) + struct pktgen_dev *pkt_dev = NULL; + int cpu = t->cpu; + +- BUG_ON(smp_processor_id() != cpu); ++ WARN_ON(smp_processor_id() != cpu); + + init_waitqueue_head(&t->queue); + complete(&t->start_done); +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index e652e376fb30..911752e8a3e6 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -3530,6 +3530,10 @@ static int rtnl_bridge_notify(struct net_device *dev) + if (err < 0) + goto errout; + ++ /* Notification info is only filled for bridge ports, not the bridge ++ * device itself. Therefore, a zero notification length is valid and ++ * should not result in an error. ++ */ + if (!skb->len) + goto errout; + +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index 3d91d4388274..21f501fac1dd 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -434,7 +434,11 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, + + len += NET_SKB_PAD; + +- if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) || ++ /* If requested length is either too small or too big, ++ * we use kmalloc() for skb->head allocation. ++ */ ++ if (len <= SKB_WITH_OVERHEAD(1024) || ++ len > SKB_WITH_OVERHEAD(PAGE_SIZE) || + (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { + skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); + if (!skb) +@@ -495,13 +499,17 @@ EXPORT_SYMBOL(__netdev_alloc_skb); + struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, + gfp_t gfp_mask) + { +- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); ++ struct napi_alloc_cache *nc; + struct sk_buff *skb; + void *data; + + len += NET_SKB_PAD + NET_IP_ALIGN; + +- if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) || ++ /* If requested length is either too small or too big, ++ * we use kmalloc() for skb->head allocation. ++ */ ++ if (len <= SKB_WITH_OVERHEAD(1024) || ++ len > SKB_WITH_OVERHEAD(PAGE_SIZE) || + (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { + skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); + if (!skb) +@@ -509,6 +517,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, + goto skb_success; + } + ++ nc = this_cpu_ptr(&napi_alloc_cache); + len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + len = SKB_DATA_ALIGN(len); + +@@ -1598,6 +1607,12 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) + skb->csum = csum_block_sub(skb->csum, + skb_checksum(skb, len, delta, 0), + len); ++ } else if (skb->ip_summed == CHECKSUM_PARTIAL) { ++ int hdlen = (len > skb_headlen(skb)) ? 
skb_headlen(skb) : len; ++ int offset = skb_checksum_start_offset(skb) + skb->csum_offset; ++ ++ if (offset + sizeof(__sum16) > hdlen) ++ return -EINVAL; + } + return __pskb_trim(skb, len); + } +@@ -2279,8 +2294,11 @@ skb_zerocopy_headlen(const struct sk_buff *from) + + if (!from->head_frag || + skb_headlen(from) < L1_CACHE_BYTES || +- skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) ++ skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { + hlen = skb_headlen(from); ++ if (!hlen) ++ hlen = from->len; ++ } + + if (skb_has_frag_list(from)) + hlen = from->len; +@@ -2664,7 +2682,19 @@ EXPORT_SYMBOL(skb_split); + */ + static int skb_prepare_for_shift(struct sk_buff *skb) + { +- return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); ++ int ret = 0; ++ ++ if (skb_cloned(skb)) { ++ /* Save and restore truesize: pskb_expand_head() may reallocate ++ * memory where ksize(kmalloc(S)) != ksize(kmalloc(S)), but we ++ * cannot change truesize at this point. ++ */ ++ unsigned int save_truesize = skb->truesize; ++ ++ ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); ++ skb->truesize = save_truesize; ++ } ++ return ret; + } + + /** +@@ -4597,8 +4627,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb) + skb = skb_share_check(skb, GFP_ATOMIC); + if (unlikely(!skb)) + goto err_free; +- +- if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) ++ /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */ ++ if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) + goto err_free; + + vhdr = (struct vlan_hdr *)skb->data; +@@ -4996,9 +5026,13 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, + if (skb_has_frag_list(skb)) + skb_clone_fraglist(skb); + +- if (k == 0) { +- /* split line is in frag list */ +- pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask); ++ /* split line is in frag list */ ++ if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { ++ /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ ++ if (skb_has_frag_list(skb)) ++ kfree_skb_list(skb_shinfo(skb)->frag_list); ++ kfree(data); ++ return -ENOMEM; + } + skb_release_data(skb); + +diff --git a/net/core/sock.c b/net/core/sock.c +index 3e12e5059a71..123f3d7d2e5d 100644 +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -1411,6 +1411,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, + cgroup_sk_alloc(&sk->sk_cgrp_data); + sock_update_classid(&sk->sk_cgrp_data); + sock_update_netprioidx(&sk->sk_cgrp_data); ++ sk_tx_queue_clear(sk); + } + + return sk; +@@ -1540,7 +1541,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) + newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; + + sock_reset_flag(newsk, SOCK_DONE); +- cgroup_sk_alloc(&newsk->sk_cgrp_data); ++ cgroup_sk_clone(&newsk->sk_cgrp_data); + skb_queue_head_init(&newsk->sk_error_queue); + + filter = rcu_dereference_protected(newsk->sk_filter, 1); +@@ -1595,6 +1596,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) + */ + sk_refcnt_debug_inc(newsk); + sk_set_socket(newsk, NULL); ++ sk_tx_queue_clear(newsk); + newsk->sk_wq = NULL; + + if (newsk->sk_prot->sockets_allocated) +@@ -2329,6 +2331,27 @@ int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct * + } + EXPORT_SYMBOL(sock_no_mmap); + ++/* ++ * When a file is received (via SCM_RIGHTS, etc), we must bump the ++ * various sock-based usage counts. 
++ */ ++void __receive_sock(struct file *file) ++{ ++ struct socket *sock; ++ int error; ++ ++ /* ++ * The resulting value of "error" is ignored here since we only ++ * need to take action when the file is a socket and testing ++ * "sock" for NULL is sufficient. ++ */ ++ sock = sock_from_file(file, &error); ++ if (sock) { ++ sock_update_netprioidx(&sock->sk->sk_cgrp_data); ++ sock_update_classid(&sock->sk->sk_cgrp_data); ++ } ++} ++ + ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) + { + ssize_t res; +diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c +index a1116701287f..2dbf5a0faad3 100644 +--- a/net/dcb/dcbnl.c ++++ b/net/dcb/dcbnl.c +@@ -1726,6 +1726,8 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh) + fn = &reply_funcs[dcb->cmd]; + if (!fn->cb) + return -EOPNOTSUPP; ++ if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN)) ++ return -EPERM; + + if (!tb[DCB_ATTR_IFNAME]) + return -EINVAL; +diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h +index 0c55ffb859bf..121aa71fcb5c 100644 +--- a/net/dccp/dccp.h ++++ b/net/dccp/dccp.h +@@ -44,9 +44,9 @@ extern bool dccp_debug; + #define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a) + #define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a) + #else +-#define dccp_pr_debug(format, a...) +-#define dccp_pr_debug_cat(format, a...) +-#define dccp_debug(format, a...) ++#define dccp_pr_debug(format, a...) do {} while (0) ++#define dccp_pr_debug_cat(format, a...) do {} while (0) ++#define dccp_debug(format, a...) do {} while (0) + #endif + + extern struct inet_hashinfo dccp_hashinfo; +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c +index 9438873fc3c8..ae62c2947278 100644 +--- a/net/dccp/ipv6.c ++++ b/net/dccp/ipv6.c +@@ -317,6 +317,11 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) + if (!ipv6_unicast_destination(skb)) + return 0; /* discard, don't send a reset here */ + ++ if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) { ++ __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS); ++ return 0; ++ } ++ + if (dccp_bad_service_code(sk, service)) { + dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; + goto drop; +diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c +index 9d8fcdefefc0..ee297964fcd2 100644 +--- a/net/decnet/af_decnet.c ++++ b/net/decnet/af_decnet.c +@@ -823,7 +823,7 @@ static int dn_auto_bind(struct socket *sock) + static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) + { + struct dn_scp *scp = DN_SK(sk); +- DEFINE_WAIT(wait); ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); + int err; + + if (scp->state != DN_CR) +@@ -833,11 +833,11 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) + scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk)); + dn_send_conn_conf(sk, allocation); + +- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); ++ add_wait_queue(sk_sleep(sk), &wait); + for(;;) { + release_sock(sk); + if (scp->state == DN_CC) +- *timeo = schedule_timeout(*timeo); ++ *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo); + lock_sock(sk); + err = 0; + if (scp->state == DN_RUN) +@@ -851,9 +851,8 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) + err = -EAGAIN; + if (!*timeo) + break; +- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + } +- finish_wait(sk_sleep(sk), &wait); ++ remove_wait_queue(sk_sleep(sk), &wait); + if (err == 0) { + sk->sk_socket->state = SS_CONNECTED; + } else if 
(scp->state != DN_CC) { +@@ -865,7 +864,7 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) + static int dn_wait_run(struct sock *sk, long *timeo) + { + struct dn_scp *scp = DN_SK(sk); +- DEFINE_WAIT(wait); ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); + int err = 0; + + if (scp->state == DN_RUN) +@@ -874,11 +873,11 @@ static int dn_wait_run(struct sock *sk, long *timeo) + if (!*timeo) + return -EALREADY; + +- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); ++ add_wait_queue(sk_sleep(sk), &wait); + for(;;) { + release_sock(sk); + if (scp->state == DN_CI || scp->state == DN_CC) +- *timeo = schedule_timeout(*timeo); ++ *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo); + lock_sock(sk); + err = 0; + if (scp->state == DN_RUN) +@@ -892,9 +891,8 @@ static int dn_wait_run(struct sock *sk, long *timeo) + err = -ETIMEDOUT; + if (!*timeo) + break; +- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + } +- finish_wait(sk_sleep(sk), &wait); ++ remove_wait_queue(sk_sleep(sk), &wait); + out: + if (err == 0) { + sk->sk_socket->state = SS_CONNECTED; +@@ -1039,16 +1037,16 @@ static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt) + + static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) + { +- DEFINE_WAIT(wait); ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); + struct sk_buff *skb = NULL; + int err = 0; + +- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); ++ add_wait_queue(sk_sleep(sk), &wait); + for(;;) { + release_sock(sk); + skb = skb_dequeue(&sk->sk_receive_queue); + if (skb == NULL) { +- *timeo = schedule_timeout(*timeo); ++ *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo); + skb = skb_dequeue(&sk->sk_receive_queue); + } + lock_sock(sk); +@@ -1063,9 +1061,8 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) + err = -EAGAIN; + if (!*timeo) + break; +- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + } +- finish_wait(sk_sleep(sk), &wait); ++ remove_wait_queue(sk_sleep(sk), &wait); + + return skb == NULL ? 
ERR_PTR(err) : skb; + } +diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c +index aead5ac4dbf6..ea07c91ab348 100644 +--- a/net/hsr/hsr_device.c ++++ b/net/hsr/hsr_device.c +@@ -315,7 +315,8 @@ static void send_hsr_supervision_frame(struct hsr_port *master, + hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(struct hsr_sup_payload)); + ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr); + +- skb_put_padto(skb, ETH_ZLEN + HSR_HLEN); ++ if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN)) ++ return; + + hsr_forward_skb(skb, master); + return; +diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c +index d7206581145d..336cffd93293 100644 +--- a/net/hsr/hsr_framereg.c ++++ b/net/hsr/hsr_framereg.c +@@ -310,7 +310,8 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb, + + node_dst = find_node_by_AddrA(&port->hsr->node_db, eth_hdr(skb)->h_dest); + if (!node_dst) { +- WARN_ONCE(1, "%s: Unknown node\n", __func__); ++ if (net_ratelimit()) ++ netdev_err(skb->dev, "%s: Unknown node\n", __func__); + return; + } + if (port->type != node_dst->AddrB_port) +diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c +index d3cbb3258718..7531cb1665d2 100644 +--- a/net/ieee802154/nl-mac.c ++++ b/net/ieee802154/nl-mac.c +@@ -559,9 +559,7 @@ ieee802154_llsec_parse_key_id(struct genl_info *info, + desc->mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]); + + if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) { +- if (!info->attrs[IEEE802154_ATTR_PAN_ID] && +- !(info->attrs[IEEE802154_ATTR_SHORT_ADDR] || +- info->attrs[IEEE802154_ATTR_HW_ADDR])) ++ if (!info->attrs[IEEE802154_ATTR_PAN_ID]) + return -EINVAL; + + desc->device_addr.pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]); +@@ -570,6 +568,9 @@ ieee802154_llsec_parse_key_id(struct genl_info *info, + desc->device_addr.mode = IEEE802154_ADDR_SHORT; + desc->device_addr.short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]); + } else { ++ if (!info->attrs[IEEE802154_ATTR_HW_ADDR]) ++ return -EINVAL; ++ + desc->device_addr.mode = IEEE802154_ADDR_LONG; + desc->device_addr.extended_addr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]); + } +@@ -687,8 +688,10 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info) + nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) || + nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER, + be32_to_cpu(params.frame_counter)) || +- ieee802154_llsec_fill_key_id(msg, ¶ms.out_key)) ++ ieee802154_llsec_fill_key_id(msg, ¶ms.out_key)) { ++ rc = -ENOBUFS; + goto out_free; ++ } + + dev_put(dev); + +diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c +index 77d73014bde3..11f53dc0c1c0 100644 +--- a/net/ieee802154/nl-phy.c ++++ b/net/ieee802154/nl-phy.c +@@ -249,8 +249,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info) + } + + if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || +- nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) ++ nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) { ++ rc = -EMSGSIZE; + goto nla_put_failure; ++ } + dev_put(dev); + + wpan_phy_put(phy); +diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c +index d90a4ed5b8a0..936371340dc3 100644 +--- a/net/ieee802154/nl802154.c ++++ b/net/ieee802154/nl802154.c +@@ -851,8 +851,13 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, + goto nla_put_failure; + + #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL ++ if (wpan_dev->iftype == 
NL802154_IFTYPE_MONITOR) ++ goto out; ++ + if (nl802154_get_llsec_params(msg, rdev, wpan_dev) < 0) + goto nla_put_failure; ++ ++out: + #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ + + genlmsg_end(msg, hdr); +@@ -1325,19 +1330,20 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla, + nl802154_dev_addr_policy)) + return -EINVAL; + +- if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] || +- !attrs[NL802154_DEV_ADDR_ATTR_MODE] || +- !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] || +- attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])) ++ if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] || !attrs[NL802154_DEV_ADDR_ATTR_MODE]) + return -EINVAL; + + addr->pan_id = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_PAN_ID]); + addr->mode = nla_get_u32(attrs[NL802154_DEV_ADDR_ATTR_MODE]); + switch (addr->mode) { + case NL802154_DEV_ADDR_SHORT: ++ if (!attrs[NL802154_DEV_ADDR_ATTR_SHORT]) ++ return -EINVAL; + addr->short_addr = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_SHORT]); + break; + case NL802154_DEV_ADDR_EXTENDED: ++ if (!attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]) ++ return -EINVAL; + addr->extended_addr = nla_get_le64(attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]); + break; + default: +@@ -1417,6 +1423,9 @@ static int nl802154_set_llsec_params(struct sk_buff *skb, + u32 changed = 0; + int ret; + ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) ++ return -EOPNOTSUPP; ++ + if (info->attrs[NL802154_ATTR_SEC_ENABLED]) { + u8 enabled; + +@@ -1523,6 +1532,11 @@ nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb) + if (err) + return err; + ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) { ++ err = skb->len; ++ goto out_err; ++ } ++ + if (!wpan_dev->netdev) { + err = -EINVAL; + goto out_err; +@@ -1577,7 +1591,8 @@ static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info) + struct ieee802154_llsec_key_id id = { }; + u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { }; + +- if (nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX, ++ if (!info->attrs[NL802154_ATTR_SEC_KEY] || ++ nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX, + info->attrs[NL802154_ATTR_SEC_KEY], + nl802154_key_policy)) + return -EINVAL; +@@ -1627,7 +1642,8 @@ static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info) + struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1]; + struct ieee802154_llsec_key_id id; + +- if (nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX, ++ if (!info->attrs[NL802154_ATTR_SEC_KEY] || ++ nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX, + info->attrs[NL802154_ATTR_SEC_KEY], + nl802154_key_policy)) + return -EINVAL; +@@ -1693,6 +1709,11 @@ nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb) + if (err) + return err; + ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) { ++ err = skb->len; ++ goto out_err; ++ } ++ + if (!wpan_dev->netdev) { + err = -EINVAL; + goto out_err; +@@ -1780,6 +1801,9 @@ static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info) + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; + struct ieee802154_llsec_device dev_desc; + ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) ++ return -EOPNOTSUPP; ++ + if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE], + &dev_desc) < 0) + return -EINVAL; +@@ -1795,7 +1819,8 @@ static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info) + struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1]; + __le64 extended_addr; + +- if (nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX, ++ if (!info->attrs[NL802154_ATTR_SEC_DEVICE] || ++ nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX, + 
info->attrs[NL802154_ATTR_SEC_DEVICE], + nl802154_dev_policy)) + return -EINVAL; +@@ -1865,6 +1890,11 @@ nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb) + if (err) + return err; + ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) { ++ err = skb->len; ++ goto out_err; ++ } ++ + if (!wpan_dev->netdev) { + err = -EINVAL; + goto out_err; +@@ -1922,6 +1952,9 @@ static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info + struct ieee802154_llsec_device_key key; + __le64 extended_addr; + ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) ++ return -EOPNOTSUPP; ++ + if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] || + nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX, + info->attrs[NL802154_ATTR_SEC_DEVKEY], +@@ -1955,7 +1988,8 @@ static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info + struct ieee802154_llsec_device_key key; + __le64 extended_addr; + +- if (nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX, ++ if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] || ++ nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX, + info->attrs[NL802154_ATTR_SEC_DEVKEY], + nl802154_devkey_policy)) + return -EINVAL; +@@ -2030,6 +2064,11 @@ nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb) + if (err) + return err; + ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) { ++ err = skb->len; ++ goto out_err; ++ } ++ + if (!wpan_dev->netdev) { + err = -EINVAL; + goto out_err; +@@ -2115,6 +2154,9 @@ static int nl802154_add_llsec_seclevel(struct sk_buff *skb, + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; + struct ieee802154_llsec_seclevel sl; + ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) ++ return -EOPNOTSUPP; ++ + if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL], + &sl) < 0) + return -EINVAL; +@@ -2130,6 +2172,9 @@ static int nl802154_del_llsec_seclevel(struct sk_buff *skb, + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; + struct ieee802154_llsec_seclevel sl; + ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) ++ return -EOPNOTSUPP; ++ + if (!info->attrs[NL802154_ATTR_SEC_LEVEL] || + llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL], + &sl) < 0) +diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c +index f66e4afb978a..6383627b783e 100644 +--- a/net/ieee802154/socket.c ++++ b/net/ieee802154/socket.c +@@ -987,6 +987,11 @@ static const struct proto_ops ieee802154_dgram_ops = { + #endif + }; + ++static void ieee802154_sock_destruct(struct sock *sk) ++{ ++ skb_queue_purge(&sk->sk_receive_queue); ++} ++ + /* Create a socket. Initialise the socket, blank the addresses + * set the state. + */ +@@ -1027,7 +1032,7 @@ static int ieee802154_create(struct net *net, struct socket *sock, + sock->ops = ops; + + sock_init_data(sock, sk); +- /* FIXME: sk->sk_destruct */ ++ sk->sk_destruct = ieee802154_sock_destruct; + sk->sk_family = PF_IEEE802154; + + /* Checksums on by default */ +diff --git a/net/ipc_router/Kconfig b/net/ipc_router/Kconfig +index 20f94aa3a976..1ad62e212fe5 100644 +--- a/net/ipc_router/Kconfig ++++ b/net/ipc_router/Kconfig +@@ -34,6 +34,17 @@ config IPC_ROUTER_NODE_ID + core and publish the same NODE ID to other NODES present in the + network. 
+ ++config IPC_ROUTER_WAKEUP_MS ++ int "IPC router Wakeup timeout" ++ default 0 ++ help ++ This option is used to configure the wakesource timeout that IPC ++ router should take when a packet is received.The IPC router can ++ guarantee that the packet gets queued to the socket but cannot ++ guarantee the client process will get time to run if auto sleep is ++ enabled. This config will help mitigate missed packets on systems ++ where auto sleep is aggressive. ++ + config IPC_ROUTER_FIFO_XPRT + depends on IPC_ROUTER + bool "IPC Router FIFO Transport" +diff --git a/net/ipc_router/ipc_router_core.c b/net/ipc_router/ipc_router_core.c +index beca506bdae0..595dfe8ec37c 100644 +--- a/net/ipc_router/ipc_router_core.c ++++ b/net/ipc_router/ipc_router_core.c +@@ -1,4 +1,4 @@ +-/* Copyright (c) 2011-2020, The Linux Foundation. All rights reserved. ++/* Copyright (c) 2011-2021, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and +@@ -221,6 +221,7 @@ enum { + }; + + static bool is_wakeup_source_allowed; ++static unsigned int ipc_router_wakeup_ms = CONFIG_IPC_ROUTER_WAKEUP_MS; + + void msm_ipc_router_set_ws_allowed(bool flag) + { +@@ -3383,8 +3384,11 @@ int msm_ipc_router_read(struct msm_ipc_port *port_ptr, + return -ETOOSMALL; + } + list_del(&pkt->list); +- if (list_empty(&port_ptr->port_rx_q)) ++ if (list_empty(&port_ptr->port_rx_q)) { + __pm_relax(port_ptr->port_rx_ws); ++ /* keep the system wakeup for configured duration */ ++ __pm_wakeup_event(port_ptr->port_rx_ws, ipc_router_wakeup_ms); ++ } + *read_pkt = pkt; + mutex_unlock(&port_ptr->port_rx_q_lock_lhc3); + if (pkt->hdr.control_flag & CONTROL_FLAG_CONFIRM_RX) +diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c +index 71409928763b..553cda6f887a 100644 +--- a/net/ipv4/cipso_ipv4.c ++++ b/net/ipv4/cipso_ipv4.c +@@ -486,6 +486,7 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) + kfree(doi_def->map.std->lvl.local); + kfree(doi_def->map.std->cat.cipso); + kfree(doi_def->map.std->cat.local); ++ kfree(doi_def->map.std); + break; + } + kfree(doi_def); +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c +index 6c975101be1f..d612d403793e 100644 +--- a/net/ipv4/fib_frontend.c ++++ b/net/ipv4/fib_frontend.c +@@ -292,7 +292,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb) + .flowi4_iif = LOOPBACK_IFINDEX, + .flowi4_oif = l3mdev_master_ifindex_rcu(dev), + .daddr = ip_hdr(skb)->saddr, +- .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), ++ .flowi4_tos = ip_hdr(skb)->tos & IPTOS_RT_MASK, + .flowi4_scope = scope, + .flowi4_mark = vmark ? 
skb->mark : 0, + }; +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c +index 6aec95e1fc13..305104d116d6 100644 +--- a/net/ipv4/fib_semantics.c ++++ b/net/ipv4/fib_semantics.c +@@ -776,7 +776,7 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi, + if (fl4.flowi4_scope < RT_SCOPE_LINK) + fl4.flowi4_scope = RT_SCOPE_LINK; + +- if (cfg->fc_table) ++ if (cfg->fc_table && cfg->fc_table != RT_TABLE_MAIN) + tbl = fib_get_table(net, cfg->fc_table); + + if (tbl) +diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c +index a1a7ed6fc8dd..88e357b5fc0f 100644 +--- a/net/ipv4/fib_trie.c ++++ b/net/ipv4/fib_trie.c +@@ -1719,7 +1719,7 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb) + while ((l = leaf_walk_rcu(&tp, key)) != NULL) { + struct key_vector *local_l = NULL, *local_tp; + +- hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { ++ hlist_for_each_entry(fa, &l->leaf, fa_list) { + struct fib_alias *new_fa; + + if (local_tb->tb_id != fa->tb_id) +diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c +index 6b66f059a15b..88138fb965bb 100644 +--- a/net/ipv4/icmp.c ++++ b/net/ipv4/icmp.c +@@ -740,6 +740,40 @@ out:; + } + EXPORT_SYMBOL(__icmp_send); + ++#if IS_ENABLED(CONFIG_NF_NAT) ++#include ++void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) ++{ ++ struct sk_buff *cloned_skb = NULL; ++ struct ip_options opts = { 0 }; ++ enum ip_conntrack_info ctinfo; ++ struct nf_conn *ct; ++ __be32 orig_ip; ++ ++ ct = nf_ct_get(skb_in, &ctinfo); ++ if (!ct || !(ct->status & IPS_SRC_NAT)) { ++ __icmp_send(skb_in, type, code, info, &opts); ++ return; ++ } ++ ++ if (skb_shared(skb_in)) ++ skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC); ++ ++ if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head || ++ (skb_network_header(skb_in) + sizeof(struct iphdr)) > ++ skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in, ++ skb_network_offset(skb_in) + sizeof(struct iphdr)))) ++ goto out; ++ ++ orig_ip = ip_hdr(skb_in)->saddr; ++ ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip; ++ __icmp_send(skb_in, type, code, info, &opts); ++ ip_hdr(skb_in)->saddr = orig_ip; ++out: ++ consume_skb(cloned_skb); ++} ++EXPORT_SYMBOL(icmp_ndo_send); ++#endif + + static void icmp_socket_deliver(struct sk_buff *skb, u32 info) + { +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c +index 02c1736c0b89..f4a827964b68 100644 +--- a/net/ipv4/igmp.c ++++ b/net/ipv4/igmp.c +@@ -1782,6 +1782,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev) + while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) { + in_dev->mc_list = i->next_rcu; + in_dev->mc_count--; ++ ip_mc_clear_src(i); + ip_ma_put(i); + } + } +diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c +index 80a2d99b569f..2b31c4ba4722 100644 +--- a/net/ipv4/inet_connection_sock.c ++++ b/net/ipv4/inet_connection_sock.c +@@ -89,6 +89,28 @@ int inet_csk_bind_conflict(const struct sock *sk, + } + EXPORT_SYMBOL_GPL(inet_csk_bind_conflict); + ++void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, ++ struct sock *sk) ++{ ++ kuid_t uid = sock_i_uid(sk); ++ bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; ++ ++ if (!hlist_empty(&tb->owners)) { ++ if (!reuse) ++ tb->fastreuse = 0; ++ if (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid)) ++ tb->fastreuseport = 0; ++ } else { ++ tb->fastreuse = reuse; ++ if (sk->sk_reuseport) { ++ tb->fastreuseport = 1; ++ tb->fastuid = uid; ++ } else { ++ tb->fastreuseport = 0; ++ } ++ } ++} ++ + /* Obtain a reference to a local port for the given sock, + * 
if snum is zero it means select any available local port. + * We try to allocate an odd port (and leave even ports for connect()) +@@ -225,19 +247,10 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) + } + goto fail_unlock; + } +- if (!reuse) +- tb->fastreuse = 0; +- if (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid)) +- tb->fastreuseport = 0; +- } else { +- tb->fastreuse = reuse; +- if (sk->sk_reuseport) { +- tb->fastreuseport = 1; +- tb->fastuid = uid; +- } else { +- tb->fastreuseport = 0; +- } + } ++ ++ inet_csk_update_fastreuse(tb, sk); ++ + success: + if (!inet_csk(sk)->icsk_bind_hash) + inet_bind_hash(sk, tb, port); +diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c +index 4273576e1475..9b25809b2d45 100644 +--- a/net/ipv4/inet_diag.c ++++ b/net/ipv4/inet_diag.c +@@ -355,8 +355,10 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb, + r->idiag_inode = 0; + + if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, +- inet_rsk(reqsk)->ir_mark)) ++ inet_rsk(reqsk)->ir_mark)) { ++ nlmsg_cancel(skb, nlh); + return -EMSGSIZE; ++ } + + nlmsg_end(skb, nlh); + return 0; +diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c +index 4bf542f4d980..887633870763 100644 +--- a/net/ipv4/inet_hashtables.c ++++ b/net/ipv4/inet_hashtables.c +@@ -163,6 +163,7 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child) + return -ENOMEM; + } + } ++ inet_csk_update_fastreuse(tb, child); + } + inet_bind_hash(child, tb, port); + spin_unlock(&head->lock); +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c +index 9609ad71dd26..fe1801d9f059 100644 +--- a/net/ipv4/ip_gre.c ++++ b/net/ipv4/ip_gre.c +@@ -353,6 +353,8 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, + + static int gre_handle_offloads(struct sk_buff *skb, bool csum) + { ++ if (csum && skb_checksum_start(skb) < skb->data) ++ return -EINVAL; + return iptunnel_handle_offloads(skb, csum ? 
SKB_GSO_GRE_CSUM : SKB_GSO_GRE); + } + +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c +index 8eaf20a6046d..e812bac0735f 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -73,6 +73,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -307,7 +308,7 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk + if (skb_is_gso(skb)) + return ip_finish_output_gso(net, sk, skb, mtu); + +- if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU)) ++ if (skb->len > mtu || IPCB(skb)->frag_max_size) + return ip_fragment(net, sk, skb, mtu, ip_finish_output2); + + return ip_finish_output2(net, sk, skb); +@@ -1634,7 +1635,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, + if (IS_ERR(rt)) + return; + +- inet_sk(sk)->tos = arg->tos; ++ inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK; + + sk->sk_priority = skb->priority; + sk->sk_protocol = ip_hdr(skb)->protocol; +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c +index dd5db4cc7d06..26e1dbc95818 100644 +--- a/net/ipv4/ip_tunnel.c ++++ b/net/ipv4/ip_tunnel.c +@@ -98,9 +98,10 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, + __be32 remote, __be32 local, + __be32 key) + { +- unsigned int hash; + struct ip_tunnel *t, *cand = NULL; + struct hlist_head *head; ++ struct net_device *ndev; ++ unsigned int hash; + + hash = ip_tunnel_hash(key, remote); + head = &itn->tunnels[hash]; +@@ -175,8 +176,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, + if (t) + return t; + +- if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP) +- return netdev_priv(itn->fb_tunnel_dev); ++ ndev = READ_ONCE(itn->fb_tunnel_dev); ++ if (ndev && ndev->flags & IFF_UP) ++ return netdev_priv(ndev); + + return NULL; + } +@@ -741,7 +743,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, + goto tx_error; + } + +- if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) { ++ df = tnl_params->frag_off; ++ if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df) ++ df |= (inner_iph->frag_off & htons(IP_DF)); ++ ++ if (tnl_update_pmtu(dev, skb, rt, df, inner_iph)) { + ip_rt_put(rt); + goto tx_error; + } +@@ -769,10 +775,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, + ttl = ip4_dst_hoplimit(&rt->dst); + } + +- df = tnl_params->frag_off; +- if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df) +- df |= (inner_iph->frag_off&htons(IP_DF)); +- + max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) + + rt->dst.header_len + ip_encap_hlen(&tunnel->encap); + if (max_headroom > dev->needed_headroom) +@@ -1193,9 +1195,9 @@ void ip_tunnel_uninit(struct net_device *dev) + struct ip_tunnel_net *itn; + + itn = net_generic(net, tunnel->ip_tnl_net_id); +- /* fb_tunnel_dev will be unregisted in net-exit call. */ +- if (itn->fb_tunnel_dev != dev) +- ip_tunnel_del(itn, netdev_priv(dev)); ++ ip_tunnel_del(itn, netdev_priv(dev)); ++ if (itn->fb_tunnel_dev == dev) ++ WRITE_ONCE(itn->fb_tunnel_dev, NULL); + + dst_cache_reset(&tunnel->dst_cache); + } +diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c +index d278b06459ac..79d8ea98a5b1 100644 +--- a/net/ipv4/ipconfig.c ++++ b/net/ipv4/ipconfig.c +@@ -880,7 +880,7 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d + + + /* +- * Copy BOOTP-supplied string if not already set. 
++ * Copy BOOTP-supplied string + */ + static int __init ic_bootp_string(char *dest, char *src, int len, int max) + { +@@ -929,12 +929,15 @@ static void __init ic_do_bootp_ext(u8 *ext) + } + break; + case 12: /* Host name */ +- ic_bootp_string(utsname()->nodename, ext+1, *ext, +- __NEW_UTS_LEN); +- ic_host_name_set = 1; ++ if (!ic_host_name_set) { ++ ic_bootp_string(utsname()->nodename, ext+1, *ext, ++ __NEW_UTS_LEN); ++ ic_host_name_set = 1; ++ } + break; + case 15: /* Domain name (DNS) */ +- ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain)); ++ if (!ic_domain[0]) ++ ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain)); + break; + case 17: /* Root path */ + if (!root_server_path[0]) +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c +index e02b86265194..5cb5a2957cb2 100644 +--- a/net/ipv4/netfilter/arp_tables.c ++++ b/net/ipv4/netfilter/arp_tables.c +@@ -1209,6 +1209,8 @@ static int translate_compat_table(struct net *net, + if (!newinfo) + goto out_unlock; + ++ memset(newinfo->entries, 0, size); ++ + newinfo->number = compatr->num_entries; + for (i = 0; i < NF_ARP_NUMHOOKS; i++) { + newinfo->hook_entry[i] = compatr->hook_entry[i]; +diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c +index 4822459e8f42..3096468a115d 100644 +--- a/net/ipv4/netfilter/ip_tables.c ++++ b/net/ipv4/netfilter/ip_tables.c +@@ -836,10 +836,6 @@ copy_entries_to_user(unsigned int total_size, + return PTR_ERR(counters); + + loc_cpu_entry = private->entries; +- if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { +- ret = -EFAULT; +- goto free_counters; +- } + + /* FIXME: use iterator macros --RR */ + /* ... then go back and fix counters and names */ +@@ -849,6 +845,10 @@ copy_entries_to_user(unsigned int total_size, + const struct xt_entry_target *t; + + e = (struct ipt_entry *)(loc_cpu_entry + off); ++ if (copy_to_user(userptr + off, e, sizeof(*e))) { ++ ret = -EFAULT; ++ goto free_counters; ++ } + if (copy_to_user(userptr + off + + offsetof(struct ipt_entry, counters), + &counters[num], +@@ -862,23 +862,14 @@ copy_entries_to_user(unsigned int total_size, + i += m->u.match_size) { + m = (void *)e + i; + +- if (copy_to_user(userptr + off + i +- + offsetof(struct xt_entry_match, +- u.user.name), +- m->u.kernel.match->name, +- strlen(m->u.kernel.match->name)+1) +- != 0) { ++ if (xt_match_to_user(m, userptr + off + i)) { + ret = -EFAULT; + goto free_counters; + } + } + + t = ipt_get_target_c(e); +- if (copy_to_user(userptr + off + e->target_offset +- + offsetof(struct xt_entry_target, +- u.user.name), +- t->u.kernel.target->name, +- strlen(t->u.kernel.target->name)+1) != 0) { ++ if (xt_target_to_user(t, userptr + off + e->target_offset)) { + ret = -EFAULT; + goto free_counters; + } +@@ -1451,6 +1442,8 @@ translate_compat_table(struct net *net, + if (!newinfo) + goto out_unlock; + ++ memset(newinfo->entries, 0, size); ++ + newinfo->number = compatr->num_entries; + for (i = 0; i < NF_INET_NUMHOOKS; i++) { + newinfo->hook_entry[i] = compatr->hook_entry[i]; +diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c +index 16599bae11dd..28bcde0a2749 100644 +--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c ++++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c +@@ -478,6 +478,7 @@ static struct xt_target clusterip_tg_reg __read_mostly = { + .checkentry = clusterip_tg_check, + .destroy = clusterip_tg_destroy, + .targetsize = sizeof(struct ipt_clusterip_tgt_info), ++ .usersize = offsetof(struct ipt_clusterip_tgt_info, config), + #ifdef 
CONFIG_COMPAT + .compatsize = sizeof(struct compat_ipt_clusterip_tgt_info), + #endif /* CONFIG_COMPAT */ +diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c +index 78cc64eddfc1..32a363465e0a 100644 +--- a/net/ipv4/netfilter/ipt_rpfilter.c ++++ b/net/ipv4/netfilter/ipt_rpfilter.c +@@ -92,7 +92,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) + flow.saddr = rpfilter_get_saddr(iph->daddr); + flow.flowi4_oif = 0; + flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; +- flow.flowi4_tos = RT_TOS(iph->tos); ++ flow.flowi4_tos = iph->tos & IPTOS_RT_MASK; + flow.flowi4_scope = RT_SCOPE_UNIVERSE; + + return rpfilter_lookup_reverse(par->net, &flow, par->in, info->flags) ^ invert; +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c +index 8fa153c65e76..1d8c9fa4addf 100644 +--- a/net/ipv4/ping.c ++++ b/net/ipv4/ping.c +@@ -801,6 +801,9 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) + inet_sk_flowi_flags(sk), faddr, saddr, 0, 0, + sk->sk_uid); + ++ fl4.fl4_icmp_type = user_icmph.type; ++ fl4.fl4_icmp_code = user_icmph.code; ++ + security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); + rt = ip_route_output_flow(net, &fl4, sk); + if (IS_ERR(rt)) { +@@ -974,6 +977,7 @@ bool ping_rcv(struct sk_buff *skb) + struct sock *sk; + struct net *net = dev_net(skb->dev); + struct icmphdr *icmph = icmp_hdr(skb); ++ bool rc = false; + + /* We assume the packet has already been checked by icmp_rcv */ + +@@ -988,14 +992,15 @@ bool ping_rcv(struct sk_buff *skb) + struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); + + pr_debug("rcv on socket %p\n", sk); +- if (skb2) +- ping_queue_rcv_skb(sk, skb2); ++ if (skb2 && !ping_queue_rcv_skb(sk, skb2)) ++ rc = true; + sock_put(sk); +- return true; + } +- pr_debug("no socket, dropping\n"); + +- return false; ++ if (!rc) ++ pr_debug("no socket, dropping\n"); ++ ++ return rc; + } + EXPORT_SYMBOL_GPL(ping_rcv); + +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 61263d9d3352..4a5cccfe20af 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -70,6 +70,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -271,6 +272,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) + *pos = cpu+1; + return &per_cpu(rt_cache_stat, cpu); + } ++ (*pos)++; + return NULL; + + } +@@ -462,8 +464,10 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, + return neigh_create(&arp_tbl, pkey, dev); + } + +-#define IP_IDENTS_SZ 2048u +- ++/* Hash tables of size 2048..262144 depending on RAM size. ++ * Each bucket uses 8 bytes. 
++ */ ++static u32 ip_idents_mask __read_mostly; + static atomic_t *ip_idents __read_mostly; + static u32 *ip_tstamps __read_mostly; + +@@ -473,12 +477,16 @@ static u32 *ip_tstamps __read_mostly; + */ + u32 ip_idents_reserve(u32 hash, int segs) + { +- u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ; +- atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ; +- u32 old = ACCESS_ONCE(*p_tstamp); +- u32 now = (u32)jiffies; ++ u32 bucket, old, now = (u32)jiffies; ++ atomic_t *p_id; ++ u32 *p_tstamp; + u32 delta = 0; + ++ bucket = hash & ip_idents_mask; ++ p_tstamp = ip_tstamps + bucket; ++ p_id = ip_idents + bucket; ++ old = ACCESS_ONCE(*p_tstamp); ++ + if (old != now && cmpxchg(p_tstamp, old, now) == old) + delta = prandom_u32_max(now - old); + +@@ -2953,18 +2961,27 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly; + + int __init ip_rt_init(void) + { ++ void *idents_hash; + int rc = 0; + int cpu; + +- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL); +- if (!ip_idents) +- panic("IP: failed to allocate ip_idents\n"); ++ /* For modern hosts, this will use 2 MB of memory */ ++ idents_hash = alloc_large_system_hash("IP idents", ++ sizeof(*ip_idents) + sizeof(*ip_tstamps), ++ 0, ++ 16, /* one bucket per 64 KB */ ++ 0, ++ NULL, ++ &ip_idents_mask, ++ 2048, ++ 256*1024); ++ ++ ip_idents = idents_hash; + +- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents)); ++ prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents)); + +- ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL); +- if (!ip_tstamps) +- panic("IP: failed to allocate ip_tstamps\n"); ++ ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents); ++ memset(ip_tstamps, 0, (ip_idents_mask + 1) * sizeof(*ip_tstamps)); + + for_each_possible_cpu(cpu) { + struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu); +diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c +index b59d9cd75b56..70dd013b2afd 100644 +--- a/net/ipv4/syncookies.c ++++ b/net/ipv4/syncookies.c +@@ -304,7 +304,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) + __u32 cookie = ntohl(th->ack_seq) - 1; + struct sock *ret = sk; + struct request_sock *req; +- int mss; ++ int full_space, mss; + struct rtable *rt; + __u8 rcv_wscale; + struct flowi4 fl4; +@@ -388,8 +388,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) + + /* Try to redo what tcp_v4_send_synack did. */ + req->rsk_window_clamp = tp->window_clamp ? 
:dst_metric(&rt->dst, RTAX_WINDOW); ++ /* limit the window selection if the user enforce a smaller rx buffer */ ++ full_space = tcp_full_space(sk); ++ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && ++ (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0)) ++ req->rsk_window_clamp = full_space; + +- tcp_select_initial_window(tcp_full_space(sk), req->mss, ++ tcp_select_initial_window(sock_net(sk), full_space, req->mss, + &req->rsk_rcv_wnd, &req->rsk_window_clamp, + ireq->wscale_ok, &rcv_wscale, + dst_metric(&rt->dst, RTAX_INITRWND)); +diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c +index 50f494547ab9..6cd596ae0515 100644 +--- a/net/ipv4/sysctl_net_ipv4.c ++++ b/net/ipv4/sysctl_net_ipv4.c +@@ -27,7 +27,9 @@ + + static int zero; + static int one = 1; ++static int three = 3; + static int four = 4; ++static int hundred = 100; + static int thousand = 1000; + static int gso_max_segs = GSO_MAX_SEGS; + static int tcp_retr1_max = 255; +@@ -159,21 +161,6 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write, + return ret; + } + +-/* Validate changes from /proc interface. */ +-static int proc_tcp_default_init_rwnd(struct ctl_table *ctl, int write, +- void __user *buffer, +- size_t *lenp, loff_t *ppos) +-{ +- int old_value = *(int *)ctl->data; +- int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); +- int new_value = *(int *)ctl->data; +- +- if (write && ret == 0 && (new_value < 3 || new_value > 100)) +- *(int *)ctl->data = old_value; +- +- return ret; +-} +- + static int proc_tcp_congestion_control(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { +@@ -656,13 +643,6 @@ static struct ctl_table ipv4_table[] = { + .mode = 0644, + .proc_handler = proc_dointvec_ms_jiffies, + }, +- { +- .procname = "tcp_default_init_rwnd", +- .data = &sysctl_tcp_default_init_rwnd, +- .maxlen = sizeof(int), +- .mode = 0644, +- .proc_handler = proc_tcp_default_init_rwnd +- }, + { + .procname = "icmp_msgs_per_sec", + .data = &sysctl_icmp_msgs_per_sec, +@@ -1046,6 +1026,15 @@ static struct ctl_table ipv4_net_table[] = { + .extra2 = &one, + }, + #endif ++ { ++ .procname = "tcp_default_init_rwnd", ++ .data = &init_net.ipv4.sysctl_tcp_default_init_rwnd, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = proc_dointvec_minmax, ++ .extra1 = &three, ++ .extra2 = &hundred, ++ }, + { } + }; + +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index 3fabbb5616f8..a41f4ab2ef0a 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -2331,6 +2331,9 @@ int tcp_disconnect(struct sock *sk, int flags) + tp->snd_cwnd_cnt = 0; + tp->window_clamp = 0; + tp->delivered = 0; ++ if (icsk->icsk_ca_ops->release) ++ icsk->icsk_ca_ops->release(sk); ++ memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); + tcp_set_ca_state(sk, TCP_CA_Open); + tp->is_sack_reneg = 0; + tcp_clear_retrans(tp); +@@ -2705,10 +2708,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, + + #ifdef CONFIG_TCP_MD5SIG + case TCP_MD5SIG: +- if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) +- err = tp->af_specific->md5_parse(sk, optval, optlen); +- else +- err = -EINVAL; ++ err = tp->af_specific->md5_parse(sk, optval, optlen); + break; + #endif + case TCP_USER_TIMEOUT: +@@ -3259,10 +3259,13 @@ EXPORT_SYMBOL(tcp_md5_hash_skb_data); + + int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key) + { ++ u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */ + struct scatterlist sg; + +- sg_init_one(&sg, key->key, 
key->keylen); +- ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen); ++ sg_init_one(&sg, key->key, keylen); ++ ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen); ++ ++ /* tcp_md5_do_add() might change key->key under us */ + return crypto_ahash_update(hp->md5_req); + } + EXPORT_SYMBOL(tcp_md5_hash_key); +diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c +index 3bfab2d41574..47f40e105044 100644 +--- a/net/ipv4/tcp_bbr.c ++++ b/net/ipv4/tcp_bbr.c +@@ -740,7 +740,7 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs) + filter_expired = after(tcp_time_stamp, + bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ); + if (rs->rtt_us >= 0 && +- (rs->rtt_us <= bbr->min_rtt_us || filter_expired)) { ++ (rs->rtt_us < bbr->min_rtt_us || filter_expired)) { + bbr->min_rtt_us = rs->rtt_us; + bbr->min_rtt_stamp = tcp_time_stamp; + } +@@ -811,7 +811,7 @@ static void bbr_init(struct sock *sk) + bbr->prior_cwnd = 0; + bbr->tso_segs_goal = 0; /* default segs per skb until first ACK */ + bbr->rtt_cnt = 0; +- bbr->next_rtt_delivered = 0; ++ bbr->next_rtt_delivered = tp->delivered; + bbr->prev_ca_state = TCP_CA_Open; + bbr->packet_conservation = 0; + +diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c +index 0cdbea9b9288..05643dd7b15d 100644 +--- a/net/ipv4/tcp_cong.c ++++ b/net/ipv4/tcp_cong.c +@@ -198,7 +198,7 @@ static void tcp_reinit_congestion_control(struct sock *sk, + icsk->icsk_ca_setsockopt = 1; + memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); + +- if (sk->sk_state != TCP_CLOSE) ++ if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) + tcp_init_congestion_control(sk); + } + +diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c +index c99230efcd52..00397c6add20 100644 +--- a/net/ipv4/tcp_cubic.c ++++ b/net/ipv4/tcp_cubic.c +@@ -414,6 +414,8 @@ static void hystart_update(struct sock *sk, u32 delay) + + if (hystart_detect & HYSTART_DELAY) { + /* obtain the minimum delay of more than sampling packets */ ++ if (ca->curr_rtt > delay) ++ ca->curr_rtt = delay; + if (ca->sample_cnt < HYSTART_MIN_SAMPLES) { + if (ca->curr_rtt == 0 || ca->curr_rtt > delay) + ca->curr_rtt = delay; +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 00771c262aa0..2b12b773c656 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -100,7 +100,6 @@ int sysctl_tcp_thin_dupack __read_mostly; + int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; + int sysctl_tcp_early_retrans __read_mostly = 3; + int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2; +-int sysctl_tcp_default_init_rwnd __read_mostly = TCP_INIT_CWND * 2; + + #define FLAG_DATA 0x01 /* Incoming frame contained data. */ + #define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */ +@@ -425,7 +424,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk) + int rcvmem; + + rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) * +- tcp_default_init_rwnd(mss); ++ tcp_default_init_rwnd(sock_net(sk), mss); + + /* Dynamic Right Sizing (DRS) has 2 to 3 RTT latency + * Allow enough cushion so that sender is not limited by our window +@@ -3567,10 +3566,8 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) + } + } + +-/* This routine deals with acks during a TLP episode. +- * We mark the end of a TLP episode on receiving TLP dupack or when +- * ack is after tlp_high_seq. +- * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe. ++/* This routine deals with acks during a TLP episode and ends an episode by ++ * resetting tlp_high_seq. 
Ref: TLP algorithm in draft-ietf-tcpm-rack + */ + static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) + { +@@ -3579,7 +3576,10 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) + if (before(ack, tp->tlp_high_seq)) + return; + +- if (flag & FLAG_DSACKING_ACK) { ++ if (!tp->tlp_retrans) { ++ /* TLP of new data has been acknowledged */ ++ tp->tlp_high_seq = 0; ++ } else if (flag & FLAG_DSACKING_ACK) { + /* This DSACK means original and TLP probe arrived; no loss */ + tp->tlp_high_seq = 0; + } else if (after(ack, tp->tlp_high_seq)) { +@@ -4538,7 +4538,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) + if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, + skb, &fragstolen)) { + coalesce_done: +- tcp_grow_window(sk, skb); ++ /* For non sack flows, do not grow window to force DUPACK ++ * and trigger fast retransmit. ++ */ ++ if (tcp_is_sack(tp)) ++ tcp_grow_window(sk, skb); + kfree_skb_partial(skb, fragstolen); + skb = NULL; + goto add_sack; +@@ -4622,7 +4626,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) + tcp_sack_new_ofo_skb(sk, seq, end_seq); + end: + if (skb) { +- tcp_grow_window(sk, skb); ++ /* For non sack flows, do not grow window to force DUPACK ++ * and trigger fast retransmit. ++ */ ++ if (tcp_is_sack(tp)) ++ tcp_grow_window(sk, skb); + skb_set_owner_r(skb, sk); + } + } +@@ -5591,6 +5599,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, + tcp_data_snd_check(sk); + if (!inet_csk_ack_scheduled(sk)) + goto no_ack; ++ } else { ++ tcp_update_wl(tp, TCP_SKB_CB(skb)->seq); + } + + __tcp_ack_snd_check(sk, 0); +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index 78a25d6adc9d..0ea9f7f86f3e 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -280,7 +280,7 @@ void tcp_v4_mtu_reduced(struct sock *sk) + + if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) + return; +- mtu = tcp_sk(sk)->mtu_info; ++ mtu = READ_ONCE(tcp_sk(sk)->mtu_info); + dst = inet_csk_update_pmtu(sk, mtu); + if (!dst) + return; +@@ -447,7 +447,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) + if (sk->sk_state == TCP_LISTEN) + goto out; + +- tp->mtu_info = info; ++ WRITE_ONCE(tp->mtu_info, info); + if (!sock_owned_by_user(sk)) { + tcp_v4_mtu_reduced(sk); + } else { +@@ -944,9 +944,18 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, + + key = tcp_md5_do_lookup(sk, addr, family); + if (key) { +- /* Pre-existing entry - just update that one. */ ++ /* Pre-existing entry - just update that one. ++ * Note that the key might be used concurrently. ++ */ + memcpy(key->key, newkey, newkeylen); +- key->keylen = newkeylen; ++ ++ /* Pairs with READ_ONCE() in tcp_md5_hash_key(). ++ * Also note that a reader could catch new key->keylen value ++ * but old key->key[], this is the reason we use __GFP_ZERO ++ * at sock_kmalloc() time below these lines. 
++ */ ++ WRITE_ONCE(key->keylen, newkeylen); ++ + return 0; + } + +@@ -962,7 +971,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, + rcu_assign_pointer(tp->md5sig_info, md5sig); + } + +- key = sock_kmalloc(sk, sizeof(*key), gfp); ++ key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO); + if (!key) + return -ENOMEM; + if (!tcp_alloc_md5sig_pool()) { +@@ -2488,6 +2497,7 @@ static int __net_init tcp_sk_init(struct net *net) + net->ipv4.sysctl_tcp_orphan_retries = 0; + net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT; + net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX; ++ net->ipv4.sysctl_tcp_default_init_rwnd = TCP_INIT_CWND * 2; + + return 0; + fail: +diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c +index a501b45d0334..25d028a5577c 100644 +--- a/net/ipv4/tcp_minisocks.c ++++ b/net/ipv4/tcp_minisocks.c +@@ -390,7 +390,7 @@ void tcp_openreq_init_rwin(struct request_sock *req, + req->rsk_window_clamp = full_space; + + /* tcp_full_space because it is guaranteed to be the first packet */ +- tcp_select_initial_window(full_space, ++ tcp_select_initial_window(sock_net(sk_listener), full_space, + mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), + &req->rsk_rcv_wnd, + &req->rsk_window_clamp, +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index a7c86c8eacf3..6366ee618d5c 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -186,14 +186,14 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts, + } + + +-u32 tcp_default_init_rwnd(u32 mss) ++u32 tcp_default_init_rwnd(struct net *net, u32 mss) + { + /* Initial receive window should be twice of TCP_INIT_CWND to + * enable proper sending of new unsent data during fast recovery + * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a + * limit when mss is larger than 1460. + */ +- u32 init_rwnd = sysctl_tcp_default_init_rwnd; ++ u32 init_rwnd = net->ipv4.sysctl_tcp_default_init_rwnd; + + if (mss > 1460) + init_rwnd = max((1460 * init_rwnd) / mss, 2U); +@@ -207,7 +207,7 @@ u32 tcp_default_init_rwnd(u32 mss) + * be a multiple of mss if possible. We assume here that mss >= 1. + * This MUST be enforced by all callers. + */ +-void tcp_select_initial_window(int __space, __u32 mss, ++void tcp_select_initial_window(struct net *net, int __space, __u32 mss, + __u32 *rcv_wnd, __u32 *window_clamp, + int wscale_ok, __u8 *rcv_wscale, + __u32 init_rcv_wnd) +@@ -252,7 +252,7 @@ void tcp_select_initial_window(int __space, __u32 mss, + + if (mss > (1 << *rcv_wscale)) { + if (!init_rcv_wnd) /* Use default unless specified otherwise */ +- init_rcv_wnd = tcp_default_init_rwnd(mss); ++ init_rcv_wnd = tcp_default_init_rwnd(net, mss); + *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss); + } + +@@ -615,7 +615,8 @@ static unsigned int tcp_synack_options(struct request_sock *req, + unsigned int mss, struct sk_buff *skb, + struct tcp_out_options *opts, + const struct tcp_md5sig_key *md5, +- struct tcp_fastopen_cookie *foc) ++ struct tcp_fastopen_cookie *foc, ++ enum tcp_synack_type synack_type) + { + struct inet_request_sock *ireq = inet_rsk(req); + unsigned int remaining = MAX_TCP_OPTION_SPACE; +@@ -630,7 +631,8 @@ static unsigned int tcp_synack_options(struct request_sock *req, + * rather than TS in order to fit in better with old, + * buggy kernels, but that was deemed to be unnecessary. 
+ */ +- ireq->tstamp_ok &= !ireq->sack_ok; ++ if (synack_type != TCP_SYNACK_COOKIE) ++ ireq->tstamp_ok &= !ireq->sack_ok; + } + #endif + +@@ -1381,6 +1383,7 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu) + return __tcp_mtu_to_mss(sk, pmtu) - + (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); + } ++EXPORT_SYMBOL(tcp_mtu_to_mss); + + /* Inverse of above */ + int tcp_mss_to_mtu(struct sock *sk, int mss) +@@ -1530,7 +1533,8 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) + * window, and remember whether we were cwnd-limited then. + */ + if (!before(tp->snd_una, tp->max_packets_seq) || +- tp->packets_out > tp->max_packets_out) { ++ tp->packets_out > tp->max_packets_out || ++ is_cwnd_limited) { + tp->max_packets_out = tp->packets_out; + tp->max_packets_seq = tp->snd_nxt; + tp->is_cwnd_limited = is_cwnd_limited; +@@ -2257,6 +2261,10 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, + break; + } + ++ is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); ++ if (likely(sent_pkts || is_cwnd_limited)) ++ tcp_cwnd_validate(sk, is_cwnd_limited); ++ + if (likely(sent_pkts)) { + if (tcp_in_cwnd_reduction(sk)) + tp->prr_out += sent_pkts; +@@ -2264,8 +2272,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, + /* Send one loss probe per tail loss episode. */ + if (push_one != 2) + tcp_schedule_loss_probe(sk); +- is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); +- tcp_cwnd_validate(sk, is_cwnd_limited); + return false; + } + return !tp->packets_out && tcp_send_head(sk); +@@ -2355,6 +2361,11 @@ void tcp_send_loss_probe(struct sock *sk) + int pcount; + int mss = tcp_current_mss(sk); + ++ /* At most one outstanding TLP */ ++ if (tp->tlp_high_seq) ++ goto rearm_timer; ++ ++ tp->tlp_retrans = 0; + skb = tcp_send_head(sk); + if (skb) { + if (tcp_snd_wnd_test(tp, skb, mss)) { +@@ -2377,10 +2388,6 @@ void tcp_send_loss_probe(struct sock *sk) + return; + } + +- /* At most one outstanding TLP retransmission. */ +- if (tp->tlp_high_seq) +- goto rearm_timer; +- + if (skb_still_in_host_queue(sk, skb)) + goto rearm_timer; + +@@ -2401,10 +2408,12 @@ void tcp_send_loss_probe(struct sock *sk) + if (__tcp_retransmit_skb(sk, skb, 1)) + goto rearm_timer; + ++ tp->tlp_retrans = 1; ++ ++probe_sent: + /* Record snd_nxt for loss detection. */ + tp->tlp_high_seq = tp->snd_nxt; + +-probe_sent: + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); + /* Reset s.t. tcp_rearm_rto will restart timer from now */ + inet_csk(sk)->icsk_pending = 0; +@@ -3165,8 +3174,8 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, + md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req)); + #endif + skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4); +- tcp_header_size = tcp_synack_options(req, mss, skb, &opts, md5, foc) + +- sizeof(*th); ++ tcp_header_size = tcp_synack_options(req, mss, skb, &opts, md5, ++ foc, synack_type) + sizeof(*th); + + skb_push(skb, tcp_header_size); + skb_reset_transport_header(skb); +@@ -3262,7 +3271,7 @@ static void tcp_connect_init(struct sock *sk) + (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) + tp->window_clamp = tcp_full_space(sk); + +- tcp_select_initial_window(tcp_full_space(sk), ++ tcp_select_initial_window(sock_net(sk), tcp_full_space(sk), + tp->advmss - (tp->rx_opt.ts_recent_stamp ? 
tp->tcp_header_len - sizeof(struct tcphdr) : 0), + &tp->rcv_wnd, + &tp->window_clamp, +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c +index 405ec0202ad8..19658c1b1d07 100644 +--- a/net/ipv4/udp.c ++++ b/net/ipv4/udp.c +@@ -1560,7 +1560,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) + /* + * UDP-Lite specific tests, ignored on UDP sockets + */ +- if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { ++ if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { + + /* + * MIB statistics other than incrementing the error count are +@@ -2006,6 +2006,9 @@ void udp_destroy_sock(struct sock *sk) + { + struct udp_sock *up = udp_sk(sk); + bool slow = lock_sock_fast(sk); ++ ++ /* protects from races with udp_abort() */ ++ sock_set_flag(sk, SOCK_DEAD); + udp_flush_pending_frames(sk); + unlock_sock_fast(sk, slow); + if (static_key_false(&udp_encap_needed) && up->encap_type) { +@@ -2236,10 +2239,17 @@ int udp_abort(struct sock *sk, int err) + { + lock_sock(sk); + ++ /* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing ++ * with close() ++ */ ++ if (sock_flag(sk, SOCK_DEAD)) ++ goto out; ++ + sk->sk_err = err; + sk->sk_error_report(sk); + __udp_disconnect(sk, 0); + ++out: + release_sock(sk); + + return 0; +diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c +index f4f616eaaeb8..b1b43d7da91b 100644 +--- a/net/ipv4/udp_offload.c ++++ b/net/ipv4/udp_offload.c +@@ -265,7 +265,7 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, + struct sock *sk; + + if (NAPI_GRO_CB(skb)->encap_mark || +- (skb->ip_summed != CHECKSUM_PARTIAL && ++ (uh->check && skb->ip_summed != CHECKSUM_PARTIAL && + NAPI_GRO_CB(skb)->csum_cnt == 0 && + !NAPI_GRO_CB(skb)->csum_valid)) + goto out; +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index 016efd37749f..fe025f40eff5 100755 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -385,8 +385,6 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) + + if (ndev->cnf.stable_secret.initialized) + ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY; +- else +- ndev->cnf.addr_gen_mode = ipv6_devconf_dflt.addr_gen_mode; + + ndev->cnf.mtu6 = dev->mtu; + ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl); +@@ -2328,6 +2326,7 @@ static void addrconf_add_mroute(struct net_device *dev) + .fc_dst_len = 8, + .fc_flags = RTF_UP, + .fc_nlinfo.nl_net = dev_net(dev), ++ .fc_protocol = RTPROT_KERNEL, + }; + + ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); +@@ -3190,6 +3189,10 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route) + if (netif_is_l3_master(idev->dev)) + return; + ++ /* no link local addresses on devices flagged as slaves */ ++ if (idev->dev->flags & IFF_SLAVE) ++ return; ++ + ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); + + switch (idev->cnf.addr_gen_mode) { +diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c +index e742c4deb13d..98d253d7bed3 100644 +--- a/net/ipv6/ah6.c ++++ b/net/ipv6/ah6.c +@@ -595,7 +595,8 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) + memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); + memset(ah->auth_data, 0, ahp->icv_trunc_len); + +- if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN)) ++ err = ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN); ++ if (err) + goto out_free; + + ip6h->priority = 0; +diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c +index 514ac259f543..b831e9b2e906 100644 +--- a/net/ipv6/anycast.c ++++ b/net/ipv6/anycast.c +@@ -170,7 +170,7 @@ int 
ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) + return 0; + } + +-void ipv6_sock_ac_close(struct sock *sk) ++void __ipv6_sock_ac_close(struct sock *sk) + { + struct ipv6_pinfo *np = inet6_sk(sk); + struct net_device *dev = NULL; +@@ -178,10 +178,7 @@ void ipv6_sock_ac_close(struct sock *sk) + struct net *net = sock_net(sk); + int prev_index; + +- if (!np->ipv6_ac_list) +- return; +- +- rtnl_lock(); ++ ASSERT_RTNL(); + pac = np->ipv6_ac_list; + np->ipv6_ac_list = NULL; + +@@ -198,6 +195,16 @@ void ipv6_sock_ac_close(struct sock *sk) + sock_kfree_s(sk, pac, sizeof(*pac)); + pac = next; + } ++} ++ ++void ipv6_sock_ac_close(struct sock *sk) ++{ ++ struct ipv6_pinfo *np = inet6_sk(sk); ++ ++ if (!np->ipv6_ac_list) ++ return; ++ rtnl_lock(); ++ __ipv6_sock_ac_close(sk); + rtnl_unlock(); + } + +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c +index 841d69221d75..19758a760da1 100644 +--- a/net/ipv6/datagram.c ++++ b/net/ipv6/datagram.c +@@ -146,10 +146,12 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, + struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; + struct inet_sock *inet = inet_sk(sk); + struct ipv6_pinfo *np = inet6_sk(sk); +- struct in6_addr *daddr; ++ struct in6_addr *daddr, old_daddr; ++ __be32 fl6_flowlabel = 0; ++ __be32 old_fl6_flowlabel; ++ __be32 old_dport; + int addr_type; + int err; +- __be32 fl6_flowlabel = 0; + + if (usin->sin6_family == AF_INET) { + if (__ipv6_only_sock(sk)) +@@ -239,9 +241,13 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, + } + } + ++ /* save the current peer information before updating it */ ++ old_daddr = sk->sk_v6_daddr; ++ old_fl6_flowlabel = np->flow_label; ++ old_dport = inet->inet_dport; ++ + sk->sk_v6_daddr = *daddr; + np->flow_label = fl6_flowlabel; +- + inet->inet_dport = usin->sin6_port; + + /* +@@ -251,11 +257,12 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, + + err = ip6_datagram_dst_update(sk, true); + if (err) { +- /* Reset daddr and dport so that udp_v6_early_demux() +- * fails to find this socket ++ /* Restore the socket peer info, to keep it consistent with ++ * the old socket state + */ +- memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr)); +- inet->inet_dport = 0; ++ sk->sk_v6_daddr = old_daddr; ++ np->flow_label = old_fl6_flowlabel; ++ inet->inet_dport = old_dport; + goto out; + } + +diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c +index 17fa28f7a0ff..f6144fe6212e 100644 +--- a/net/ipv6/icmp.c ++++ b/net/ipv6/icmp.c +@@ -299,10 +299,9 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st + } + + #if IS_ENABLED(CONFIG_IPV6_MIP6) +-static void mip6_addr_swap(struct sk_buff *skb) ++static void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt) + { + struct ipv6hdr *iph = ipv6_hdr(skb); +- struct inet6_skb_parm *opt = IP6CB(skb); + struct ipv6_destopt_hao *hao; + struct in6_addr tmp; + int off; +@@ -319,7 +318,7 @@ static void mip6_addr_swap(struct sk_buff *skb) + } + } + #else +-static inline void mip6_addr_swap(struct sk_buff *skb) {} ++static inline void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt) {} + #endif + + static struct dst_entry *icmpv6_route_lookup(struct net *net, +@@ -389,8 +388,9 @@ static struct dst_entry *icmpv6_route_lookup(struct net *net, + /* + * Send an ICMP message in response to a packet in error + */ +-static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, +- const struct in6_addr *force_saddr) ++void 
icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, ++ const struct in6_addr *force_saddr, ++ const struct inet6_skb_parm *parm) + { + struct net *net = dev_net(skb->dev); + struct inet6_dev *idev = NULL; +@@ -474,7 +474,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + return; + } + +- mip6_addr_swap(skb); ++ mip6_addr_swap(skb, parm); + + memset(&fl6, 0, sizeof(fl6)); + fl6.flowi6_proto = IPPROTO_ICMPV6; +@@ -558,9 +558,10 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + */ + void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) + { +- icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL); ++ icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL, IP6CB(skb)); + kfree_skb(skb); + } ++EXPORT_SYMBOL(icmp6_send); + + /* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH + * if sufficient data bytes are available +@@ -614,10 +615,10 @@ int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, + } + if (type == ICMP_TIME_EXCEEDED) + icmp6_send(skb2, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, +- info, &temp_saddr); ++ info, &temp_saddr, IP6CB(skb2)); + else + icmp6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, +- info, &temp_saddr); ++ info, &temp_saddr, IP6CB(skb2)); + if (rt) + ip6_rt_put(rt); + +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c +index a88aff02b579..e852c2692da9 100644 +--- a/net/ipv6/ip6_gre.c ++++ b/net/ipv6/ip6_gre.c +@@ -124,6 +124,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, + int dev_type = (gre_proto == htons(ETH_P_TEB)) ? + ARPHRD_ETHER : ARPHRD_IP6GRE; + int score, cand_score = 4; ++ struct net_device *ndev; + + for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) { + if (!ipv6_addr_equal(local, &t->parms.laddr) || +@@ -226,9 +227,9 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, + if (cand) + return cand; + +- dev = ign->fb_tunnel_dev; +- if (dev->flags & IFF_UP) +- return netdev_priv(dev); ++ ndev = READ_ONCE(ign->fb_tunnel_dev); ++ if (ndev && ndev->flags & IFF_UP) ++ return netdev_priv(ndev); + + return NULL; + } +@@ -349,7 +350,6 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net, + if (!(nt->parms.o_flags & TUNNEL_SEQ)) + dev->features |= NETIF_F_LLTX; + +- dev_hold(dev); + ip6gre_tunnel_link(ign, nt); + return nt; + +@@ -364,6 +364,8 @@ static void ip6gre_tunnel_uninit(struct net_device *dev) + struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); + + ip6gre_tunnel_unlink(ign, t); ++ if (ign->fb_tunnel_dev == dev) ++ WRITE_ONCE(ign->fb_tunnel_dev, NULL); + dst_cache_reset(&t->dst_cache); + dev_put(dev); + } +@@ -1086,8 +1088,6 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev) + strcpy(tunnel->parms.name, dev->name); + + tunnel->hlen = sizeof(struct ipv6hdr) + 4; +- +- dev_hold(dev); + } + + +@@ -1131,15 +1131,16 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head) + static int __net_init ip6gre_init_net(struct net *net) + { + struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); ++ struct net_device *ndev; + int err; + +- ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0", +- NET_NAME_UNKNOWN, +- ip6gre_tunnel_setup); +- if (!ign->fb_tunnel_dev) { ++ ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0", ++ NET_NAME_UNKNOWN, ip6gre_tunnel_setup); ++ if (!ndev) { + err = -ENOMEM; + goto err_alloc_dev; + } ++ ign->fb_tunnel_dev = ndev; + dev_net_set(ign->fb_tunnel_dev, net); + /* FB netdevice is special: we have one, and only 
one per netns. + * Allowing to move it to another netns is clearly unsafe. +@@ -1159,7 +1160,7 @@ static int __net_init ip6gre_init_net(struct net *net) + return 0; + + err_reg_dev: +- ip6gre_dev_free(ign->fb_tunnel_dev); ++ ip6gre_dev_free(ndev); + err_alloc_dev: + return err; + } +diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c +index 713676f14a0e..05287bc36dbd 100644 +--- a/net/ipv6/ip6_icmp.c ++++ b/net/ipv6/ip6_icmp.c +@@ -8,6 +8,8 @@ + + #if IS_ENABLED(CONFIG_IPV6) + ++#if !IS_BUILTIN(CONFIG_IPV6) ++ + static ip6_icmp_send_t __rcu *ip6_icmp_send; + + int inet6_register_icmp_sender(ip6_icmp_send_t *fn) +@@ -30,18 +32,52 @@ int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn) + } + EXPORT_SYMBOL(inet6_unregister_icmp_sender); + +-void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) ++void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, ++ const struct inet6_skb_parm *parm) + { + ip6_icmp_send_t *send; + + rcu_read_lock(); + send = rcu_dereference(ip6_icmp_send); ++ if (send) ++ send(skb, type, code, info, NULL, parm); ++ rcu_read_unlock(); ++} ++EXPORT_SYMBOL(__icmpv6_send); ++#endif ++ ++#if IS_ENABLED(CONFIG_NF_NAT) ++#include ++void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) ++{ ++ struct inet6_skb_parm parm = { 0 }; ++ struct sk_buff *cloned_skb = NULL; ++ enum ip_conntrack_info ctinfo; ++ struct in6_addr orig_ip; ++ struct nf_conn *ct; + +- if (!send) ++ ct = nf_ct_get(skb_in, &ctinfo); ++ if (!ct || !(ct->status & IPS_SRC_NAT)) { ++ __icmpv6_send(skb_in, type, code, info, &parm); ++ return; ++ } ++ ++ if (skb_shared(skb_in)) ++ skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC); ++ ++ if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head || ++ (skb_network_header(skb_in) + sizeof(struct ipv6hdr)) > ++ skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in, ++ skb_network_offset(skb_in) + sizeof(struct ipv6hdr)))) + goto out; +- send(skb, type, code, info, NULL); ++ ++ orig_ip = ipv6_hdr(skb_in)->saddr; ++ ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6; ++ __icmpv6_send(skb_in, type, code, info, &parm); ++ ipv6_hdr(skb_in)->saddr = orig_ip; + out: +- rcu_read_unlock(); ++ consume_skb(cloned_skb); + } +-EXPORT_SYMBOL(icmpv6_send); ++EXPORT_SYMBOL(icmpv6_ndo_send); ++#endif + #endif +diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c +index e726a61ae6dc..aacfb4bce153 100644 +--- a/net/ipv6/ip6_input.c ++++ b/net/ipv6/ip6_input.c +@@ -168,16 +168,6 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt + if (ipv6_addr_is_multicast(&hdr->saddr)) + goto err; + +- /* While RFC4291 is not explicit about v4mapped addresses +- * in IPv6 headers, it seems clear linux dual-stack +- * model can not deal properly with these. +- * Security models could be fooled by ::ffff:127.0.0.1 for example. 
+- * +- * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02 +- */ +- if (ipv6_addr_v4mapped(&hdr->saddr)) +- goto err; +- + skb->transport_header = skb->network_header + sizeof(*hdr); + IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); + +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c +index 92399181ba25..6171a4dc1aee 100644 +--- a/net/ipv6/ip6_tunnel.c ++++ b/net/ipv6/ip6_tunnel.c +@@ -273,7 +273,6 @@ static int ip6_tnl_create2(struct net_device *dev) + + strcpy(t->parms.name, dev->name); + +- dev_hold(dev); + ip6_tnl_link(ip6n, t); + return 0; + +@@ -871,7 +870,15 @@ int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb, + struct metadata_dst *tun_dst, + bool log_ecn_err) + { +- return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate, ++ int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t, ++ const struct ipv6hdr *ipv6h, ++ struct sk_buff *skb); ++ ++ dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate; ++ if (tpi->proto == htons(ETH_P_IP)) ++ dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate; ++ ++ return __ip6_tnl_rcv(t, skb, tpi, NULL, dscp_ecn_decapsulate, + log_ecn_err); + } + EXPORT_SYMBOL(ip6_tnl_rcv); +@@ -1839,6 +1846,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev) + if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) + dev->mtu -= 8; + ++ dev_hold(dev); + return 0; + + destroy_dst: +@@ -1882,7 +1890,6 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev) + struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); + + t->parms.proto = IPPROTO_IPV6; +- dev_hold(dev); + + rcu_assign_pointer(ip6n->tnls_wc[0], t); + return 0; +diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c +index 365f44581843..6b84ee8e1f43 100644 +--- a/net/ipv6/ip6_vti.c ++++ b/net/ipv6/ip6_vti.c +@@ -196,7 +196,6 @@ static int vti6_tnl_create2(struct net_device *dev) + + strcpy(t->parms.name, dev->name); + +- dev_hold(dev); + vti6_tnl_link(ip6n, t); + + return 0; +@@ -915,6 +914,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev) + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!dev->tstats) + return -ENOMEM; ++ dev_hold(dev); + return 0; + } + +@@ -946,7 +946,6 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev) + struct vti6_net *ip6n = net_generic(net, vti6_net_id); + + t->parms.proto = IPPROTO_IPV6; +- dev_hold(dev); + + rcu_assign_pointer(ip6n->tnls_wc[0], t); + return 0; +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c +index 455fa4a30353..622063438953 100644 +--- a/net/ipv6/ipv6_sockglue.c ++++ b/net/ipv6/ipv6_sockglue.c +@@ -184,14 +184,15 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, + retv = -EBUSY; + break; + } +- } +- if (sk->sk_protocol == IPPROTO_TCP && +- sk->sk_prot != &tcpv6_prot) { +- retv = -EBUSY; ++ } else if (sk->sk_protocol == IPPROTO_TCP) { ++ if (sk->sk_prot != &tcpv6_prot) { ++ retv = -EBUSY; ++ break; ++ } ++ } else { + break; + } +- if (sk->sk_protocol != IPPROTO_TCP) +- break; ++ + if (sk->sk_state != TCP_ESTABLISHED) { + retv = -ENOTCONN; + break; +@@ -205,6 +206,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, + + fl6_free_socklist(sk); + __ipv6_sock_mc_close(sk); ++ __ipv6_sock_ac_close(sk); + + /* + * Sock is moving from IPv6 to IPv4 (sk_prot), so +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c +index e065d48b31b9..9a78b89690bd 100644 +--- a/net/ipv6/mcast.c ++++ b/net/ipv6/mcast.c +@@ -1580,10 +1580,7 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu) + 
IPV6_TLV_PADN, 0 }; + + /* we assume size > sizeof(ra) here */ +- /* limit our allocations to order-0 page */ +- size = min_t(int, size, SKB_MAX_ORDER(0, 0)); + skb = sock_alloc_send_skb(sk, size, 1, &err); +- + if (!skb) + return NULL; + +@@ -2601,6 +2598,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev) + idev->mc_list = i->next; + + write_unlock_bh(&idev->lock); ++ ip6_mc_clear_src(i); + ma_put(i); + write_lock_bh(&idev->lock); + } +diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c +index 671eb3222f69..e89d811e239f 100644 +--- a/net/ipv6/netfilter/ip6_tables.c ++++ b/net/ipv6/netfilter/ip6_tables.c +@@ -873,10 +873,6 @@ copy_entries_to_user(unsigned int total_size, + return PTR_ERR(counters); + + loc_cpu_entry = private->entries; +- if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { +- ret = -EFAULT; +- goto free_counters; +- } + + /* FIXME: use iterator macros --RR */ + /* ... then go back and fix counters and names */ +@@ -886,6 +882,10 @@ copy_entries_to_user(unsigned int total_size, + const struct xt_entry_target *t; + + e = (struct ip6t_entry *)(loc_cpu_entry + off); ++ if (copy_to_user(userptr + off, e, sizeof(*e))) { ++ ret = -EFAULT; ++ goto free_counters; ++ } + if (copy_to_user(userptr + off + + offsetof(struct ip6t_entry, counters), + &counters[num], +@@ -899,23 +899,14 @@ copy_entries_to_user(unsigned int total_size, + i += m->u.match_size) { + m = (void *)e + i; + +- if (copy_to_user(userptr + off + i +- + offsetof(struct xt_entry_match, +- u.user.name), +- m->u.kernel.match->name, +- strlen(m->u.kernel.match->name)+1) +- != 0) { ++ if (xt_match_to_user(m, userptr + off + i)) { + ret = -EFAULT; + goto free_counters; + } + } + + t = ip6t_get_target_c(e); +- if (copy_to_user(userptr + off + e->target_offset +- + offsetof(struct xt_entry_target, +- u.user.name), +- t->u.kernel.target->name, +- strlen(t->u.kernel.target->name)+1) != 0) { ++ if (xt_target_to_user(t, userptr + off + e->target_offset)) { + ret = -EFAULT; + goto free_counters; + } +@@ -1485,6 +1476,8 @@ translate_compat_table(struct net *net, + if (!newinfo) + goto out_unlock; + ++ memset(newinfo->entries, 0, size); ++ + newinfo->number = compatr->num_entries; + for (i = 0; i < NF_INET_NUMHOOKS; i++) { + newinfo->hook_entry[i] = compatr->hook_entry[i]; +diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c +index 590f767db5d4..a379d2f79b19 100644 +--- a/net/ipv6/netfilter/ip6t_NPT.c ++++ b/net/ipv6/netfilter/ip6t_NPT.c +@@ -112,6 +112,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = { + .table = "mangle", + .target = ip6t_snpt_tg, + .targetsize = sizeof(struct ip6t_npt_tginfo), ++ .usersize = offsetof(struct ip6t_npt_tginfo, adjustment), + .checkentry = ip6t_npt_checkentry, + .family = NFPROTO_IPV6, + .hooks = (1 << NF_INET_LOCAL_IN) | +@@ -123,6 +124,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = { + .table = "mangle", + .target = ip6t_dnpt_tg, + .targetsize = sizeof(struct ip6t_npt_tginfo), ++ .usersize = offsetof(struct ip6t_npt_tginfo, adjustment), + .checkentry = ip6t_npt_checkentry, + .family = NFPROTO_IPV6, + .hooks = (1 << NF_INET_PRE_ROUTING) | +diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c +index 6a6d01cb1ace..9c25e8b09306 100644 +--- a/net/ipv6/output_core.c ++++ b/net/ipv6/output_core.c +@@ -14,29 +14,11 @@ static u32 __ipv6_select_ident(struct net *net, + const struct in6_addr *dst, + const struct in6_addr *src) + { +- const struct { +- struct in6_addr dst; +- struct in6_addr src; +- } 
__aligned(SIPHASH_ALIGNMENT) combined = { +- .dst = *dst, +- .src = *src, +- }; +- u32 hash, id; +- +- /* Note the following code is not safe, but this is okay. */ +- if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key))) +- get_random_bytes(&net->ipv4.ip_id_key, +- sizeof(net->ipv4.ip_id_key)); +- +- hash = siphash(&combined, sizeof(combined), &net->ipv4.ip_id_key); +- +- /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve, +- * set the hight order instead thus minimizing possible future +- * collisions. +- */ +- id = ip_idents_reserve(hash, 1); +- if (unlikely(!id)) +- id = 1 << 31; ++ u32 id; ++ ++ do { ++ id = prandom_u32(); ++ } while (!id); + + return id; + } +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index bd406f5268a8..09e49f794eb9 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -3035,9 +3035,11 @@ static int ip6_route_multipath_add(struct fib6_config *cfg) + * nexthops have been replaced by first new, the rest should + * be added to it. + */ +- cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | +- NLM_F_REPLACE); +- cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE; ++ if (cfg->fc_nlinfo.nlh) { ++ cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | ++ NLM_F_REPLACE); ++ cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE; ++ } + nhn++; + } + +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c +index 16eba7b5f1a9..07e545fd2a3d 100644 +--- a/net/ipv6/sit.c ++++ b/net/ipv6/sit.c +@@ -209,8 +209,6 @@ static int ipip6_tunnel_create(struct net_device *dev) + + ipip6_tunnel_clone_6rd(dev, sitn); + +- dev_hold(dev); +- + ipip6_tunnel_link(sitn, t); + return 0; + +@@ -1072,7 +1070,6 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev) + if (tdev && !netif_is_l3_master(tdev)) { + int t_hlen = tunnel->hlen + sizeof(struct iphdr); + +- dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr); + dev->mtu = tdev->mtu - t_hlen; + if (dev->mtu < IPV6_MIN_MTU) + dev->mtu = IPV6_MIN_MTU; +@@ -1372,7 +1369,6 @@ static void ipip6_tunnel_setup(struct net_device *dev) + dev->destructor = ipip6_dev_free; + + dev->type = ARPHRD_SIT; +- dev->hard_header_len = LL_MAX_HEADER + t_hlen; + dev->mtu = ETH_DATA_LEN - t_hlen; + dev->flags = IFF_NOARP; + netif_keep_dst(dev); +@@ -1402,7 +1398,7 @@ static int ipip6_tunnel_init(struct net_device *dev) + dev->tstats = NULL; + return err; + } +- ++ dev_hold(dev); + return 0; + } + +@@ -1418,7 +1414,6 @@ static void __net_init ipip6_fb_tunnel_init(struct net_device *dev) + iph->ihl = 5; + iph->ttl = 64; + +- dev_hold(dev); + rcu_assign_pointer(sitn->tunnels_wc[0], tunnel); + } + +@@ -1585,8 +1580,11 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev, + } + + #ifdef CONFIG_IPV6_SIT_6RD +- if (ipip6_netlink_6rd_parms(data, &ip6rd)) ++ if (ipip6_netlink_6rd_parms(data, &ip6rd)) { + err = ipip6_tunnel_update_6rd(nt, &ip6rd); ++ if (err < 0) ++ unregister_netdevice_queue(dev, NULL); ++ } + #endif + + return err; +@@ -1798,9 +1796,9 @@ static void __net_exit sit_destroy_tunnels(struct net *net, + if (dev->rtnl_link_ops == &sit_link_ops) + unregister_netdevice_queue(dev, head); + +- for (prio = 1; prio < 4; prio++) { ++ for (prio = 0; prio < 4; prio++) { + int h; +- for (h = 0; h < IP6_SIT_HASH_SIZE; h++) { ++ for (h = 0; h < (prio ? 
IP6_SIT_HASH_SIZE : 1); h++) { + struct ip_tunnel *t; + + t = rtnl_dereference(sitn->tunnels[prio][h]); +diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c +index 28cb584784df..2e8f7d057c9a 100644 +--- a/net/ipv6/syncookies.c ++++ b/net/ipv6/syncookies.c +@@ -143,7 +143,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) + __u32 cookie = ntohl(th->ack_seq) - 1; + struct sock *ret = sk; + struct request_sock *req; +- int mss; ++ int full_space, mss; + struct dst_entry *dst; + __u8 rcv_wscale; + +@@ -237,7 +237,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) + } + + req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); +- tcp_select_initial_window(tcp_full_space(sk), req->mss, ++ /* limit the window selection if the user enforce a smaller rx buffer */ ++ full_space = tcp_full_space(sk); ++ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && ++ (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0)) ++ req->rsk_window_clamp = full_space; ++ ++ tcp_select_initial_window(sock_net(sk), full_space, req->mss, + &req->rsk_rcv_wnd, &req->rsk_window_clamp, + ireq->wscale_ok, &rcv_wscale, + dst_metric(dst, RTAX_INITRWND)); +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index 606c470c529d..17c11a8489ea 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -312,11 +312,20 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, + static void tcp_v6_mtu_reduced(struct sock *sk) + { + struct dst_entry *dst; ++ u32 mtu; + + if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) + return; + +- dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info); ++ mtu = READ_ONCE(tcp_sk(sk)->mtu_info); ++ ++ /* Drop requests trying to increase our current mss. ++ * Check done in __ip6_rt_update_pmtu() is too late. ++ */ ++ if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache) ++ return; ++ ++ dst = inet6_csk_update_pmtu(sk, mtu); + if (!dst) + return; + +@@ -395,6 +404,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, + } + + if (type == ICMPV6_PKT_TOOBIG) { ++ u32 mtu = ntohl(info); ++ + /* We are not interested in TCP_LISTEN and open_requests + * (SYN-ACKs send out by Linux are always <576bytes so + * they should go through unfragmented). +@@ -405,7 +416,11 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, + if (!ip6_sk_accept_pmtu(sk)) + goto out; + +- tp->mtu_info = ntohl(info); ++ if (mtu < IPV6_MIN_MTU) ++ goto out; ++ ++ WRITE_ONCE(tp->mtu_info, mtu); ++ + if (!sock_owned_by_user(sk)) + tcp_v6_mtu_reduced(sk); + else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, +@@ -992,6 +1007,11 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) + if (!ipv6_unicast_destination(skb)) + goto drop; + ++ if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) { ++ __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS); ++ return 0; ++ } ++ + return tcp_conn_request(&tcp6_request_sock_ops, + &tcp_request_sock_ipv6_ops, sk, skb); + +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c +index 6148952fc6a6..3a7c0e5be1c0 100644 +--- a/net/ipv6/udp.c ++++ b/net/ipv6/udp.c +@@ -598,7 +598,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) + /* + * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c). 
+ */ +- if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { ++ if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { + + if (up->pcrlen == 0) { /* full coverage was set */ + net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n", +@@ -1410,6 +1410,9 @@ void udpv6_destroy_sock(struct sock *sk) + { + struct udp_sock *up = udp_sk(sk); + lock_sock(sk); ++ ++ /* protects from races with udp_abort() */ ++ sock_set_flag(sk, SOCK_DEAD); + udp_v6_flush_pending_frames(sk); + release_sock(sk); + +diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c +index b2dc9a820c6a..ef6cc9eb0e45 100644 +--- a/net/ipv6/xfrm6_output.c ++++ b/net/ipv6/xfrm6_output.c +@@ -141,7 +141,7 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb) + { + struct dst_entry *dst = skb_dst(skb); + struct xfrm_state *x = dst->xfrm; +- int mtu; ++ unsigned int mtu; + bool toobig; + + #ifdef CONFIG_NETFILTER +diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c +index 02e10deef5b4..b139c149c42a 100644 +--- a/net/iucv/af_iucv.c ++++ b/net/iucv/af_iucv.c +@@ -1542,7 +1542,8 @@ static int iucv_sock_shutdown(struct socket *sock, int how) + break; + } + +- if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) { ++ if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) && ++ sk->sk_state == IUCV_CONNECTED) { + if (iucv->transport == AF_IUCV_TRANS_IUCV) { + txmsg.class = 0; + txmsg.tag = 0; +@@ -1752,7 +1753,7 @@ static int iucv_callback_connreq(struct iucv_path *path, + } + + /* Create the new socket */ +- nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0); ++ nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0); + if (!nsk) { + err = pr_iucv->path_sever(path, user_data); + iucv_path_free(path); +@@ -1962,7 +1963,7 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) + goto out; + } + +- nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0); ++ nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0); + bh_lock_sock(sk); + if ((sk->sk_state != IUCV_LISTEN) || + sk_acceptq_is_full(sk) || +diff --git a/net/key/af_key.c b/net/key/af_key.c +index 6b73871ff318..22ba32f0341c 100644 +--- a/net/key/af_key.c ++++ b/net/key/af_key.c +@@ -1863,6 +1863,13 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms + if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { + struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; + ++ if ((xfilter->sadb_x_filter_splen >= ++ (sizeof(xfrm_address_t) << 3)) || ++ (xfilter->sadb_x_filter_dplen >= ++ (sizeof(xfrm_address_t) << 3))) { ++ mutex_unlock(&pfk->dump_lock); ++ return -EINVAL; ++ } + filter = kmalloc(sizeof(*filter), GFP_KERNEL); + if (filter == NULL) { + mutex_unlock(&pfk->dump_lock); +@@ -2916,7 +2923,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t) + break; + if (!aalg->pfkey_supported) + continue; +- if (aalg_tmpl_set(t, aalg) && aalg->available) ++ if (aalg_tmpl_set(t, aalg)) + sz += sizeof(struct sadb_comb); + } + return sz + sizeof(struct sadb_prop); +@@ -2934,7 +2941,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t) + if (!ealg->pfkey_supported) + continue; + +- if (!(ealg_tmpl_set(t, ealg) && ealg->available)) ++ if (!(ealg_tmpl_set(t, ealg))) + continue; + + for (k = 1; ; k++) { +@@ -2945,7 +2952,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t) + if (!aalg->pfkey_supported) + continue; + +- if (aalg_tmpl_set(t, aalg) && aalg->available) ++ if (aalg_tmpl_set(t, aalg)) + sz += sizeof(struct sadb_comb); + } + } +diff 
--git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c +index 3305183b7c8f..f94c81431ace 100644 +--- a/net/l2tp/l2tp_core.c ++++ b/net/l2tp/l2tp_core.c +@@ -112,6 +112,13 @@ struct l2tp_net { + spinlock_t l2tp_session_hlist_lock; + }; + ++#if IS_ENABLED(CONFIG_IPV6) ++static bool l2tp_sk_is_v6(struct sock *sk) ++{ ++ return sk->sk_family == PF_INET6 && ++ !ipv6_addr_v4mapped(&sk->sk_v6_daddr); ++} ++#endif + + static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) + { +@@ -351,8 +358,13 @@ int l2tp_session_register(struct l2tp_session *session, + + spin_lock_bh(&pn->l2tp_session_hlist_lock); + ++ /* IP encap expects session IDs to be globally unique, while ++ * UDP encap doesn't. ++ */ + hlist_for_each_entry(session_walk, g_head, global_hlist) +- if (session_walk->session_id == session->session_id) { ++ if (session_walk->session_id == session->session_id && ++ (session_walk->tunnel->encap == L2TP_ENCAPTYPE_IP || ++ tunnel->encap == L2TP_ENCAPTYPE_IP)) { + err = -EEXIST; + goto err_tlock_pnlock; + } +@@ -1121,8 +1133,9 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, + + /* Queue the packet to IP for output */ + skb->ignore_df = 1; ++ skb_dst_drop(skb); + #if IS_ENABLED(CONFIG_IPV6) +- if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped) ++ if (l2tp_sk_is_v6(tunnel->sock)) + error = inet6_csk_xmit(tunnel->sock, skb, NULL); + else + #endif +@@ -1185,9 +1198,14 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len + goto out_unlock; + } + +- /* Get routing info from the tunnel socket */ +- skb_dst_drop(skb); +- skb_dst_set(skb, sk_dst_check(sk, 0)); ++ /* The user-space may change the connection status for the user-space ++ * provided socket at run time: we must check it under the socket lock ++ */ ++ if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) { ++ kfree_skb(skb); ++ ret = NET_XMIT_DROP; ++ goto out_unlock; ++ } + + inet = inet_sk(sk); + fl = &inet->cork.fl; +@@ -1204,7 +1222,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len + + /* Calculate UDP checksum if configured to do so */ + #if IS_ENABLED(CONFIG_IPV6) +- if (sk->sk_family == PF_INET6 && !tunnel->v4mapped) ++ if (l2tp_sk_is_v6(sk)) + udp6_set_csum(udp_get_no_check6_tx(sk), + skb, &inet6_sk(sk)->saddr, + &sk->sk_v6_daddr, udp_len); +@@ -1610,24 +1628,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 + if (cfg != NULL) + tunnel->debug = cfg->debug; + +-#if IS_ENABLED(CONFIG_IPV6) +- if (sk->sk_family == PF_INET6) { +- struct ipv6_pinfo *np = inet6_sk(sk); +- +- if (ipv6_addr_v4mapped(&np->saddr) && +- ipv6_addr_v4mapped(&sk->sk_v6_daddr)) { +- struct inet_sock *inet = inet_sk(sk); +- +- tunnel->v4mapped = true; +- inet->inet_saddr = np->saddr.s6_addr32[3]; +- inet->inet_rcv_saddr = sk->sk_v6_rcv_saddr.s6_addr32[3]; +- inet->inet_daddr = sk->sk_v6_daddr.s6_addr32[3]; +- } else { +- tunnel->v4mapped = false; +- } +- } +-#endif +- + /* Mark socket as an encapsulation socket. 
See net/ipv4/udp.c */ + tunnel->encap = encap; + if (encap == L2TP_ENCAPTYPE_UDP) { +diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h +index ecf36fa820f7..891f79ad11f4 100644 +--- a/net/l2tp/l2tp_core.h ++++ b/net/l2tp/l2tp_core.h +@@ -190,9 +190,6 @@ struct l2tp_tunnel { + struct sock *sock; /* Parent socket */ + int fd; /* Parent fd, if tunnel socket + * was created by userspace */ +-#if IS_ENABLED(CONFIG_IPV6) +- bool v4mapped; +-#endif + + struct work_struct del_work; + +diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c +index 482c94d9d958..d1c7dcc23448 100644 +--- a/net/lapb/lapb_out.c ++++ b/net/lapb/lapb_out.c +@@ -87,7 +87,8 @@ void lapb_kick(struct lapb_cb *lapb) + skb = skb_dequeue(&lapb->write_queue); + + do { +- if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { ++ skbn = skb_copy(skb, GFP_ATOMIC); ++ if (!skbn) { + skb_queue_head(&lapb->write_queue, skb); + break; + } +diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c +index 2a859f967c8a..a8866455e8b2 100644 +--- a/net/llc/af_llc.c ++++ b/net/llc/af_llc.c +@@ -96,8 +96,16 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr) + { + u8 rc = LLC_PDU_LEN_U; + +- if (addr->sllc_test || addr->sllc_xid) ++ if (addr->sllc_test) + rc = LLC_PDU_LEN_U; ++ else if (addr->sllc_xid) ++ /* We need to expand header to sizeof(struct llc_xid_info) ++ * since llc_pdu_init_as_xid_cmd() sets 4,5,6 bytes of LLC header ++ * as XID PDU. In llc_ui_sendmsg() we reserved header size and then ++ * filled all other space with user data. If we won't reserve this ++ * bytes, llc_pdu_init_as_xid_cmd() will overwrite user data ++ */ ++ rc = LLC_PDU_LEN_U_XID; + else if (sk->sk_type == SOCK_STREAM) + rc = LLC_PDU_LEN_I; + return rc; +@@ -271,6 +279,10 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr) + + if (!sock_flag(sk, SOCK_ZAPPED)) + goto out; ++ if (!addr->sllc_arphrd) ++ addr->sllc_arphrd = ARPHRD_ETHER; ++ if (addr->sllc_arphrd != ARPHRD_ETHER) ++ goto out; + rc = -ENODEV; + if (sk->sk_bound_dev_if) { + llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); +@@ -328,15 +340,15 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) + if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) + goto out; + rc = -EAFNOSUPPORT; +- if (unlikely(addr->sllc_family != AF_LLC)) ++ if (!addr->sllc_arphrd) ++ addr->sllc_arphrd = ARPHRD_ETHER; ++ if (unlikely(addr->sllc_family != AF_LLC || addr->sllc_arphrd != ARPHRD_ETHER)) + goto out; + rc = -ENODEV; + rcu_read_lock(); + if (sk->sk_bound_dev_if) { + llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if); + if (llc->dev) { +- if (!addr->sllc_arphrd) +- addr->sllc_arphrd = llc->dev->type; + if (is_zero_ether_addr(addr->sllc_mac)) + memcpy(addr->sllc_mac, llc->dev->dev_addr, + IFHWADDRLEN); +diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c +index 7ae4cc684d3a..9fa3342c7a82 100644 +--- a/net/llc/llc_s_ac.c ++++ b/net/llc/llc_s_ac.c +@@ -79,7 +79,7 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb) + struct llc_sap_state_ev *ev = llc_sap_ev(skb); + int rc; + +- llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap, ++ llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap, + ev->daddr.lsap, LLC_PDU_CMD); + llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0); + rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c +index 88dd5d218fe3..f37fbc71fc1d 100644 +--- a/net/mac80211/cfg.c ++++ 
b/net/mac80211/cfg.c +@@ -1964,6 +1964,7 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev) + ieee80211_stop_mesh(sdata); + mutex_lock(&sdata->local->mtx); + ieee80211_vif_release_channel(sdata); ++ kfree(sdata->u.mesh.ie); + mutex_unlock(&sdata->local->mtx); + + return 0; +@@ -2680,14 +2681,14 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, + continue; + + for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) { +- if (~sdata->rc_rateidx_mcs_mask[i][j]) { ++ if (sdata->rc_rateidx_mcs_mask[i][j] != 0xff) { + sdata->rc_has_mcs_mask[i] = true; + break; + } + } + + for (j = 0; j < NL80211_VHT_NSS_MAX; j++) { +- if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) { ++ if (sdata->rc_rateidx_vht_mcs_mask[i][j] != 0xffff) { + sdata->rc_has_vht_mcs_mask[i] = true; + break; + } +diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c +index 14ec63a02669..91b94ac9a88a 100644 +--- a/net/mac80211/debugfs_sta.c ++++ b/net/mac80211/debugfs_sta.c +@@ -80,6 +80,7 @@ static const char * const sta_flag_names[] = { + FLAG(MPSP_OWNER), + FLAG(MPSP_RECIPIENT), + FLAG(PS_DELIVER), ++ FLAG(USES_ENCRYPTION), + #undef FLAG + }; + +diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c +index f783d1377d9a..9f0f437a09b9 100644 +--- a/net/mac80211/driver-ops.c ++++ b/net/mac80211/driver-ops.c +@@ -128,8 +128,11 @@ int drv_sta_state(struct ieee80211_local *local, + } else if (old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_ASSOC) { + ret = drv_sta_add(local, sdata, &sta->sta); +- if (ret == 0) ++ if (ret == 0) { + sta->uploaded = true; ++ if (rcu_access_pointer(sta->sta.rates)) ++ drv_sta_rate_tbl_update(local, sdata, &sta->sta); ++ } + } else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTH) { + drv_sta_remove(local, sdata, &sta->sta); +diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c +index 0c0695eb2609..3796c24defcb 100644 +--- a/net/mac80211/ibss.c ++++ b/net/mac80211/ibss.c +@@ -1862,6 +1862,8 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) + + /* remove beacon */ + kfree(sdata->u.ibss.ie); ++ sdata->u.ibss.ie = NULL; ++ sdata->u.ibss.ie_len = 0; + + /* on the next join, re-program HT parameters */ + memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa)); +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h +index 0b0de3030e0d..f5532a3ce72e 100644 +--- a/net/mac80211/ieee80211_i.h ++++ b/net/mac80211/ieee80211_i.h +@@ -52,12 +52,6 @@ struct ieee80211_local; + #define IEEE80211_ENCRYPT_HEADROOM 8 + #define IEEE80211_ENCRYPT_TAILROOM 18 + +-/* IEEE 802.11 (Ch. 9.5 Defragmentation) requires support for concurrent +- * reception of at least three fragmented frames. This limit can be increased +- * by changing this define, at the cost of slower frame reassembly and +- * increased memory use (about 2 kB of RAM per entry). 
*/ +-#define IEEE80211_FRAGMENT_MAX 4 +- + /* power level hasn't been configured (or set to automatic) */ + #define IEEE80211_UNSET_POWER_LEVEL INT_MIN + +@@ -88,18 +82,6 @@ struct ieee80211_local; + + #define IEEE80211_MAX_NAN_INSTANCE_ID 255 + +-struct ieee80211_fragment_entry { +- struct sk_buff_head skb_list; +- unsigned long first_frag_time; +- u16 seq; +- u16 extra_len; +- u16 last_frag; +- u8 rx_queue; +- bool check_sequential_pn; /* needed for CCMP/GCMP */ +- u8 last_pn[6]; /* PN of the last fragment if CCMP was used */ +-}; +- +- + struct ieee80211_bss { + u32 device_ts_beacon, device_ts_presp; + +@@ -239,8 +221,15 @@ struct ieee80211_rx_data { + */ + int security_idx; + +- u32 tkip_iv32; +- u16 tkip_iv16; ++ union { ++ struct { ++ u32 iv32; ++ u16 iv16; ++ } tkip; ++ struct { ++ u8 pn[IEEE80211_CCMP_PN_LEN]; ++ } ccm_gcm; ++ }; + }; + + struct ieee80211_csa_settings { +@@ -869,9 +858,7 @@ struct ieee80211_sub_if_data { + + char name[IFNAMSIZ]; + +- /* Fragment table for host-based reassembly */ +- struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX]; +- unsigned int fragment_next; ++ struct ieee80211_fragment_cache frags; + + /* TID bitmap for NoAck policy */ + u16 noack_map; +@@ -1046,6 +1033,7 @@ enum queue_stop_reason { + IEEE80211_QUEUE_STOP_REASON_FLUSH, + IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN, + IEEE80211_QUEUE_STOP_REASON_RESERVE_TID, ++ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE, + + IEEE80211_QUEUE_STOP_REASONS, + }; +@@ -1403,7 +1391,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata) + rcu_read_lock(); + chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); + +- if (WARN_ON_ONCE(!chanctx_conf)) { ++ if (!chanctx_conf) { + rcu_read_unlock(); + return NULL; + } +@@ -2135,4 +2123,7 @@ extern const struct ethtool_ops ieee80211_ethtool_ops; + #define debug_noinline + #endif + ++void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache); ++void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache); ++ + #endif /* IEEE80211_I_H */ +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c +index ad03331ee785..deebf42c740e 100644 +--- a/net/mac80211/iface.c ++++ b/net/mac80211/iface.c +@@ -1120,16 +1120,12 @@ static void ieee80211_set_multicast_list(struct net_device *dev) + */ + static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata) + { +- int i; +- + /* free extra data */ + ieee80211_free_keys(sdata, false); + + ieee80211_debugfs_remove_netdev(sdata); + +- for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) +- __skb_queue_purge(&sdata->fragments[i].skb_list); +- sdata->fragment_next = 0; ++ ieee80211_destroy_frag_cache(&sdata->frags); + + if (ieee80211_vif_is_mesh(&sdata->vif)) + ieee80211_mesh_teardown_sdata(sdata); +@@ -1577,6 +1573,10 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, + if (ret) + return ret; + ++ ieee80211_stop_vif_queues(local, sdata, ++ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE); ++ synchronize_net(); ++ + ieee80211_do_stop(sdata, false); + + ieee80211_teardown_sdata(sdata); +@@ -1597,6 +1597,8 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, + err = ieee80211_do_open(&sdata->wdev, false); + WARN(err, "type change: do_open returned %d", err); + ++ ieee80211_wake_vif_queues(local, sdata, ++ IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE); + return ret; + } + +@@ -1857,8 +1859,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, + sdata->wdev.wiphy = local->hw.wiphy; + sdata->local = local; + +- for (i = 0; 
i < IEEE80211_FRAGMENT_MAX; i++) +- skb_queue_head_init(&sdata->fragments[i].skb_list); ++ ieee80211_init_frag_cache(&sdata->frags); + + INIT_LIST_HEAD(&sdata->key_list); + +diff --git a/net/mac80211/key.c b/net/mac80211/key.c +index 6e02f8dfce2b..a0d9507cb6a7 100644 +--- a/net/mac80211/key.c ++++ b/net/mac80211/key.c +@@ -334,6 +334,7 @@ static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, + if (sta) { + if (pairwise) { + rcu_assign_pointer(sta->ptk[idx], new); ++ set_sta_flag(sta, WLAN_STA_USES_ENCRYPTION); + sta->ptk_idx = idx; + ieee80211_check_fast_xmit(sta); + } else { +@@ -647,6 +648,7 @@ int ieee80211_key_link(struct ieee80211_key *key, + struct sta_info *sta) + { + struct ieee80211_local *local = sdata->local; ++ static atomic_t key_color = ATOMIC_INIT(0); + struct ieee80211_key *old_key; + int idx = key->conf.keyidx; + bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; +@@ -658,6 +660,12 @@ int ieee80211_key_link(struct ieee80211_key *key, + bool delay_tailroom = sdata->vif.type == NL80211_IFTYPE_STATION; + int ret; + ++ /* ++ * Assign a unique ID to every key so we can easily prevent mixed ++ * key and fragment cache attacks. ++ */ ++ key->color = atomic_inc_return(&key_color); ++ + mutex_lock(&sdata->local->key_mtx); + + if (sta && pairwise) +diff --git a/net/mac80211/key.h b/net/mac80211/key.h +index 4aa20cef0859..2749a7d05e76 100644 +--- a/net/mac80211/key.h ++++ b/net/mac80211/key.h +@@ -127,6 +127,8 @@ struct ieee80211_key { + } debugfs; + #endif + ++ unsigned int color; ++ + /* + * key config, must be last because it contains key + * material as variable length member +diff --git a/net/mac80211/main.c b/net/mac80211/main.c +index e3bbfb20ae82..5f8c6f9563b0 100644 +--- a/net/mac80211/main.c ++++ b/net/mac80211/main.c +@@ -906,8 +906,19 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) + continue; + + if (!dflt_chandef.chan) { ++ /* ++ * Assign the first enabled channel to dflt_chandef ++ * from the list of channels ++ */ ++ for (i = 0; i < sband->n_channels; i++) ++ if (!(sband->channels[i].flags & ++ IEEE80211_CHAN_DISABLED)) ++ break; ++ /* if none found then use the first anyway */ ++ if (i == sband->n_channels) ++ i = 0; + cfg80211_chandef_create(&dflt_chandef, +- &sband->channels[0], ++ &sband->channels[i], + NL80211_CHAN_NO_HT); + /* init channel we're on */ + if (!local->use_chanctx && !local->_oper_chandef.chan) { +@@ -1025,8 +1036,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) + if (local->hw.wiphy->max_scan_ie_len) + local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len; + +- WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes, +- local->hw.n_cipher_schemes)); ++ if (WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes, ++ local->hw.n_cipher_schemes))) { ++ result = -EINVAL; ++ goto fail_workqueue; ++ } + + result = ieee80211_init_cipher_suites(local); + if (result < 0) +diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c +index 2fbd100b9e73..a8b837d0498a 100644 +--- a/net/mac80211/mesh_hwmp.c ++++ b/net/mac80211/mesh_hwmp.c +@@ -355,7 +355,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local, + */ + tx_time = (device_constant + 10 * test_frame_len / rate); + estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err)); +- result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT); ++ result = ((u64)tx_time * estimated_retx) >> (2 * ARITH_SHIFT); + return (u32)result; + } + +diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c +index 8c17d498df30..30a95a2ff196 
100644 +--- a/net/mac80211/mesh_pathtbl.c ++++ b/net/mac80211/mesh_pathtbl.c +@@ -61,6 +61,7 @@ static struct mesh_table *mesh_table_alloc(void) + INIT_HLIST_HEAD(&newtbl->known_gates); + atomic_set(&newtbl->entries, 0); + spin_lock_init(&newtbl->gates_lock); ++ rhashtable_init(&newtbl->rhead, &mesh_rht_params); + + return newtbl; + } +@@ -555,6 +556,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, + del_timer_sync(&mpath->timer); + atomic_dec(&sdata->u.mesh.mpaths); + atomic_dec(&tbl->entries); ++ mesh_path_flush_pending(mpath); + kfree_rcu(mpath, rcu); + } + +@@ -848,9 +850,6 @@ int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata) + goto free_path; + } + +- rhashtable_init(&tbl_path->rhead, &mesh_rht_params); +- rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params); +- + sdata->u.mesh.mesh_paths = tbl_path; + sdata->u.mesh.mpp_paths = tbl_mpp; + +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c +index 1389baaa4ac0..a760ccc8a2e1 100644 +--- a/net/mac80211/mlme.c ++++ b/net/mac80211/mlme.c +@@ -1101,6 +1101,11 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata) + + sdata->vif.csa_active = false; + ifmgd->csa_waiting_bcn = false; ++ /* ++ * If the CSA IE is still present on the beacon after the switch, ++ * we need to consider it as a new CSA (possibly to self). ++ */ ++ ifmgd->beacon_crc_valid = false; + + ret = drv_post_channel_switch(sdata); + if (ret) { +diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c +index e6096dfd0210..41421609a0f0 100644 +--- a/net/mac80211/rate.c ++++ b/net/mac80211/rate.c +@@ -892,7 +892,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw, + if (old) + kfree_rcu(old, rcu_head); + +- drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); ++ if (sta->uploaded) ++ drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); + + return 0; + } +diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c +index 14c5ba3a1b1c..e91386919399 100644 +--- a/net/mac80211/rc80211_minstrel.c ++++ b/net/mac80211/rc80211_minstrel.c +@@ -274,7 +274,7 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband, + success = !!(info->flags & IEEE80211_TX_STAT_ACK); + + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { +- if (ar[i].idx < 0) ++ if (ar[i].idx < 0 || !ar[i].count) + break; + + ndx = rix_to_ndx(mi, ar[i].idx); +@@ -287,12 +287,6 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband, + mi->r[ndx].stats.success += success; + } + +- if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && (i >= 0)) +- mi->sample_packets++; +- +- if (mi->sample_deferred > 0) +- mi->sample_deferred--; +- + if (time_after(jiffies, mi->last_stats_update + + (mp->update_interval * HZ) / 1000)) + minstrel_update_stats(mp, mi); +@@ -366,7 +360,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, + #endif + + delta = (mi->total_packets * sampling_ratio / 100) - +- (mi->sample_packets + mi->sample_deferred / 2); ++ mi->sample_packets; + + /* delta < 0: no sampling required */ + prev_sample = mi->prev_sample; +@@ -375,7 +369,6 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, + return; + + if (mi->total_packets >= 10000) { +- mi->sample_deferred = 0; + mi->sample_packets = 0; + mi->total_packets = 0; + } else if (delta > mi->n_rates * 2) { +@@ -400,19 +393,8 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, + * rate sampling method should be used. + * Respect such rates that are not sampled for 20 interations. 
+ */ +- if (mrr_capable && +- msr->perfect_tx_time > mr->perfect_tx_time && +- msr->stats.sample_skipped < 20) { +- /* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark +- * packets that have the sampling rate deferred to the +- * second MRR stage. Increase the sample counter only +- * if the deferred sample rate was actually used. +- * Use the sample_deferred counter to make sure that +- * the sampling is not done in large bursts */ +- info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; +- rate++; +- mi->sample_deferred++; +- } else { ++ if (msr->perfect_tx_time < mr->perfect_tx_time || ++ msr->stats.sample_skipped >= 20) { + if (!msr->sample_limit) + return; + +@@ -432,6 +414,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, + + rate->idx = mi->r[ndx].rix; + rate->count = minstrel_get_retry_count(&mi->r[ndx], info); ++ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; + } + + +diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h +index c230bbe93262..5a9e44f4fba4 100644 +--- a/net/mac80211/rc80211_minstrel.h ++++ b/net/mac80211/rc80211_minstrel.h +@@ -105,7 +105,6 @@ struct minstrel_sta_info { + u8 max_prob_rate; + unsigned int total_packets; + unsigned int sample_packets; +- int sample_deferred; + + unsigned int sample_row; + unsigned int sample_column; +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index d3334fd84ca2..b40e71a5d795 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -1873,19 +1873,34 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) + return result; + } + ++void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(cache->entries); i++) ++ skb_queue_head_init(&cache->entries[i].skb_list); ++} ++ ++void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(cache->entries); i++) ++ __skb_queue_purge(&cache->entries[i].skb_list); ++} ++ + static inline struct ieee80211_fragment_entry * +-ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, ++ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache, + unsigned int frag, unsigned int seq, int rx_queue, + struct sk_buff **skb) + { + struct ieee80211_fragment_entry *entry; + +- entry = &sdata->fragments[sdata->fragment_next++]; +- if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) +- sdata->fragment_next = 0; ++ entry = &cache->entries[cache->next++]; ++ if (cache->next >= IEEE80211_FRAGMENT_MAX) ++ cache->next = 0; + +- if (!skb_queue_empty(&entry->skb_list)) +- __skb_queue_purge(&entry->skb_list); ++ __skb_queue_purge(&entry->skb_list); + + __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ + *skb = NULL; +@@ -1900,14 +1915,14 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, + } + + static inline struct ieee80211_fragment_entry * +-ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, ++ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache, + unsigned int frag, unsigned int seq, + int rx_queue, struct ieee80211_hdr *hdr) + { + struct ieee80211_fragment_entry *entry; + int i, idx; + +- idx = sdata->fragment_next; ++ idx = cache->next; + for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { + struct ieee80211_hdr *f_hdr; + +@@ -1915,7 +1930,7 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, + if (idx < 0) + idx = IEEE80211_FRAGMENT_MAX - 1; + +- entry = &sdata->fragments[idx]; ++ entry = &cache->entries[idx]; + if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || + 
entry->rx_queue != rx_queue || + entry->last_frag + 1 != frag) +@@ -1942,16 +1957,27 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, + return NULL; + } + ++static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc) ++{ ++ return rx->key && ++ (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || ++ rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || ++ rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || ++ rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && ++ ieee80211_has_protected(fc); ++} ++ + static ieee80211_rx_result debug_noinline + ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) + { ++ struct ieee80211_fragment_cache *cache = &rx->sdata->frags; + struct ieee80211_hdr *hdr; + u16 sc; + __le16 fc; + unsigned int frag, seq; + struct ieee80211_fragment_entry *entry; + struct sk_buff *skb; +- struct ieee80211_rx_status *status; ++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); + + hdr = (struct ieee80211_hdr *)rx->skb->data; + fc = hdr->frame_control; +@@ -1962,14 +1988,15 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) + sc = le16_to_cpu(hdr->seq_ctrl); + frag = sc & IEEE80211_SCTL_FRAG; + +- if (is_multicast_ether_addr(hdr->addr1)) { +- I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount); +- goto out_no_led; +- } ++ if (rx->sta) ++ cache = &rx->sta->frags; + + if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) + goto out; + ++ if (is_multicast_ether_addr(hdr->addr1)) ++ return RX_DROP_MONITOR; ++ + I802_DEBUG_INC(rx->local->rx_handlers_fragments); + + if (skb_linearize(rx->skb)) +@@ -1985,20 +2012,17 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) + + if (frag == 0) { + /* This is the first fragment of a new frame. */ +- entry = ieee80211_reassemble_add(rx->sdata, frag, seq, ++ entry = ieee80211_reassemble_add(cache, frag, seq, + rx->seqno_idx, &(rx->skb)); +- if (rx->key && +- (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || +- rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || +- rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || +- rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && +- ieee80211_has_protected(fc)) { ++ if (requires_sequential_pn(rx, fc)) { + int queue = rx->security_idx; + + /* Store CCMP/GCMP PN so that we can verify that the + * next fragment has a sequential PN value. + */ + entry->check_sequential_pn = true; ++ entry->is_protected = true; ++ entry->key_color = rx->key->color; + memcpy(entry->last_pn, + rx->key->u.ccmp.rx_pn[queue], + IEEE80211_CCMP_PN_LEN); +@@ -2010,6 +2034,11 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) + sizeof(rx->key->u.gcmp.rx_pn[queue])); + BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != + IEEE80211_GCMP_PN_LEN); ++ } else if (rx->key && ++ (ieee80211_has_protected(fc) || ++ (status->flag & RX_FLAG_DECRYPTED))) { ++ entry->is_protected = true; ++ entry->key_color = rx->key->color; + } + return RX_QUEUED; + } +@@ -2017,7 +2046,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) + /* This is a fragment for a frame that should already be pending in + * fragment cache. Add this fragment to the end of the pending entry. 
+ */ +- entry = ieee80211_reassemble_find(rx->sdata, frag, seq, ++ entry = ieee80211_reassemble_find(cache, frag, seq, + rx->seqno_idx, hdr); + if (!entry) { + I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); +@@ -2032,25 +2061,39 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) + if (entry->check_sequential_pn) { + int i; + u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; +- int queue; + +- if (!rx->key || +- (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP && +- rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 && +- rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP && +- rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256)) ++ if (!requires_sequential_pn(rx, fc)) ++ return RX_DROP_UNUSABLE; ++ ++ /* Prevent mixed key and fragment cache attacks */ ++ if (entry->key_color != rx->key->color) + return RX_DROP_UNUSABLE; ++ + memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); + for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { + pn[i]++; + if (pn[i]) + break; + } +- queue = rx->security_idx; +- rpn = rx->key->u.ccmp.rx_pn[queue]; ++ ++ rpn = rx->ccm_gcm.pn; + if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) + return RX_DROP_UNUSABLE; + memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); ++ } else if (entry->is_protected && ++ (!rx->key || ++ (!ieee80211_has_protected(fc) && ++ !(status->flag & RX_FLAG_DECRYPTED)) || ++ rx->key->color != entry->key_color)) { ++ /* Drop this as a mixed key or fragment cache attack, even ++ * if for TKIP Michael MIC should protect us, and WEP is a ++ * lost cause anyway. ++ */ ++ return RX_DROP_UNUSABLE; ++ } else if (entry->is_protected && rx->key && ++ entry->key_color != rx->key->color && ++ (status->flag & RX_FLAG_DECRYPTED)) { ++ return RX_DROP_UNUSABLE; + } + + skb_pull(rx->skb, ieee80211_hdrlen(fc)); +@@ -2082,7 +2125,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) + + out: + ieee80211_led_rx(rx->local); +- out_no_led: + if (rx->sta) + rx->sta->rx_stats.packets++; + return RX_CONTINUE; +@@ -2098,6 +2140,7 @@ static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) + + static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) + { ++ struct ieee80211_hdr *hdr = (void *)rx->skb->data; + struct sk_buff *skb = rx->skb; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + +@@ -2108,6 +2151,31 @@ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) + if (status->flag & RX_FLAG_DECRYPTED) + return 0; + ++ /* check mesh EAPOL frames first */ ++ if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) && ++ ieee80211_is_data(fc))) { ++ struct ieee80211s_hdr *mesh_hdr; ++ u16 hdr_len = ieee80211_hdrlen(fc); ++ u16 ethertype_offset; ++ __be16 ethertype; ++ ++ if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr)) ++ goto drop_check; ++ ++ /* make sure fixed part of mesh header is there, also checks skb len */ ++ if (!pskb_may_pull(rx->skb, hdr_len + 6)) ++ goto drop_check; ++ ++ mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len); ++ ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) + ++ sizeof(rfc1042_header); ++ ++ if (skb_copy_bits(rx->skb, ethertype_offset, ðertype, 2) == 0 && ++ ethertype == rx->sdata->control_port_protocol) ++ return 0; ++ } ++ ++drop_check: + /* Drop unencrypted frames if key is set. 
*/ + if (unlikely(!ieee80211_has_protected(fc) && + !ieee80211_is_any_nullfunc(fc) && +@@ -2213,13 +2281,13 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) + struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; + + /* +- * Allow EAPOL frames to us/the PAE group address regardless +- * of whether the frame was encrypted or not. ++ * Allow EAPOL frames to us/the PAE group address regardless of ++ * whether the frame was encrypted or not, and always disallow ++ * all other destination addresses for them. + */ +- if (ehdr->h_proto == rx->sdata->control_port_protocol && +- (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || +- ether_addr_equal(ehdr->h_dest, pae_group_addr))) +- return true; ++ if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol)) ++ return ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || ++ ether_addr_equal(ehdr->h_dest, pae_group_addr); + + if (ieee80211_802_1x_port_control(rx) || + ieee80211_drop_unencrypted(rx, fc)) +@@ -2259,6 +2327,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) + if ((sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && + !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && ++ ehdr->h_proto != rx->sdata->control_port_protocol && + (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { + if (is_multicast_ether_addr(ehdr->h_dest)) { + /* +@@ -2311,9 +2380,30 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) + #endif + + if (skb) { ++ struct ethhdr *ehdr = (struct ethhdr *)skb->data; ++ + /* deliver to local stack */ + skb->protocol = eth_type_trans(skb, dev); + memset(skb->cb, 0, sizeof(skb->cb)); ++ ++ /* ++ * 802.1X over 802.11 requires that the authenticator address ++ * be used for EAPOL frames. However, 802.1X allows the use of ++ * the PAE group address instead. If the interface is part of ++ * a bridge and we pass the frame with the PAE group address, ++ * then the bridge will forward it to the network (even if the ++ * client was not associated yet), which isn't supposed to ++ * happen. ++ * To avoid that, rewrite the destination address to our own ++ * address, so that the authenticator (e.g. hostapd) will see ++ * the frame, but bridge won't forward it anywhere else. Note ++ * that due to earlier filtering, the only other address can ++ * be the PAE group address. ++ */ ++ if (unlikely(skb->protocol == sdata->control_port_protocol && ++ !ether_addr_equal(ehdr->h_dest, sdata->vif.addr))) ++ ether_addr_copy(ehdr->h_dest, sdata->vif.addr); ++ + if (rx->napi) + napi_gro_receive(rx->napi, skb); + else +@@ -2395,9 +2485,27 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) + + if (ieee80211_data_to_8023_exthdr(skb, ðhdr, + rx->sdata->vif.addr, +- rx->sdata->vif.type)) ++ rx->sdata->vif.type, ++ true)) + return RX_DROP_UNUSABLE; + ++ if (rx->key) { ++ /* ++ * We should not receive A-MSDUs on pre-HT connections, ++ * and HT connections cannot use old ciphers. Thus drop ++ * them, as in those cases we couldn't even have SPP ++ * A-MSDUs or such. 
++ */ ++ switch (rx->key->conf.cipher) { ++ case WLAN_CIPHER_SUITE_WEP40: ++ case WLAN_CIPHER_SUITE_WEP104: ++ case WLAN_CIPHER_SUITE_TKIP: ++ return RX_DROP_UNUSABLE; ++ default: ++ break; ++ } ++ } ++ + ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, + rx->sdata->vif.type, + rx->local->hw.extra_tx_headroom, +@@ -3776,6 +3884,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta) + + rcu_read_lock(); + key = rcu_dereference(sta->ptk[sta->ptk_idx]); ++ if (!key) ++ key = rcu_dereference(sdata->default_unicast_key); + if (key) { + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_TKIP: +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c +index 4f7061c3b770..ded1264cf8e4 100644 +--- a/net/mac80211/sta_info.c ++++ b/net/mac80211/sta_info.c +@@ -243,6 +243,24 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata, + */ + void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) + { ++ /* ++ * If we had used sta_info_pre_move_state() then we might not ++ * have gone through the state transitions down again, so do ++ * it here now (and warn if it's inserted). ++ * ++ * This will clear state such as fast TX/RX that may have been ++ * allocated during state transitions. ++ */ ++ while (sta->sta_state > IEEE80211_STA_NONE) { ++ int ret; ++ ++ WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED)); ++ ++ ret = sta_info_move_state(sta, sta->sta_state - 1); ++ if (WARN_ONCE(ret, "sta_info_move_state() returned %d\n", ret)) ++ break; ++ } ++ + if (sta->rate_ctrl) + rate_control_free_sta(sta); + +@@ -348,6 +366,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, + + u64_stats_init(&sta->rx_stats.syncp); + ++ ieee80211_init_frag_cache(&sta->frags); ++ + sta->sta_state = IEEE80211_STA_NONE; + + /* Mark TID as unreserved */ +@@ -583,7 +603,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) + out_drop_sta: + local->num_sta--; + synchronize_net(); +- __cleanup_single_sta(sta); ++ cleanup_single_sta(sta); + out_err: + mutex_unlock(&local->sta_mtx); + kfree(sinfo); +@@ -602,19 +622,13 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU) + + err = sta_info_insert_check(sta); + if (err) { ++ sta_info_free(local, sta); + mutex_unlock(&local->sta_mtx); + rcu_read_lock(); +- goto out_free; ++ return err; + } + +- err = sta_info_insert_finish(sta); +- if (err) +- goto out_free; +- +- return 0; +- out_free: +- sta_info_free(local, sta); +- return err; ++ return sta_info_insert_finish(sta); + } + + int sta_info_insert(struct sta_info *sta) +@@ -946,7 +960,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta) + might_sleep(); + lockdep_assert_held(&local->sta_mtx); + +- while (sta->sta_state == IEEE80211_STA_AUTHORIZED) { ++ if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { + ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC); + WARN_ON_ONCE(ret); + } +@@ -987,6 +1001,8 @@ static void __sta_info_destroy_part2(struct sta_info *sta) + rate_control_remove_sta_debugfs(sta); + ieee80211_sta_debugfs_remove(sta); + ++ ieee80211_destroy_frag_cache(&sta->frags); ++ + cleanup_single_sta(sta); + } + +diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h +index cc808ac783e5..0909332965bc 100644 +--- a/net/mac80211/sta_info.h ++++ b/net/mac80211/sta_info.h +@@ -100,6 +100,7 @@ enum ieee80211_sta_info_flags { + WLAN_STA_MPSP_OWNER, + WLAN_STA_MPSP_RECIPIENT, + WLAN_STA_PS_DELIVER, ++ WLAN_STA_USES_ENCRYPTION, + + NUM_WLAN_STA_FLAGS, + }; +@@ -391,6 +392,34 @@ struct ieee80211_sta_rx_stats { + u64 
msdu[IEEE80211_NUM_TIDS + 1]; + }; + ++/* ++ * IEEE 802.11-2016 (10.6 "Defragmentation") recommends support for "concurrent ++ * reception of at least one MSDU per access category per associated STA" ++ * on APs, or "at least one MSDU per access category" on other interface types. ++ * ++ * This limit can be increased by changing this define, at the cost of slower ++ * frame reassembly and increased memory use while fragments are pending. ++ */ ++#define IEEE80211_FRAGMENT_MAX 4 ++ ++struct ieee80211_fragment_entry { ++ struct sk_buff_head skb_list; ++ unsigned long first_frag_time; ++ u16 seq; ++ u16 extra_len; ++ u16 last_frag; ++ u8 rx_queue; ++ u8 check_sequential_pn:1, /* needed for CCMP/GCMP */ ++ is_protected:1; ++ u8 last_pn[6]; /* PN of the last fragment if CCMP was used */ ++ unsigned int key_color; ++}; ++ ++struct ieee80211_fragment_cache { ++ struct ieee80211_fragment_entry entries[IEEE80211_FRAGMENT_MAX]; ++ unsigned int next; ++}; ++ + /** + * struct sta_info - STA information + * +@@ -454,6 +483,7 @@ struct ieee80211_sta_rx_stats { + * @pcpu_rx_stats: per-CPU RX statistics, assigned only if the driver needs + * this (by advertising the USES_RSS hw flag) + * @status_stats: TX status statistics ++ * @frags: fragment cache + */ + struct sta_info { + /* General information, mostly static */ +@@ -551,6 +581,8 @@ struct sta_info { + + struct cfg80211_chan_def tdls_chandef; + ++ struct ieee80211_fragment_cache frags; ++ + /* keep last! */ + struct ieee80211_sta sta; + }; +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c +index 6216279efc46..48d0dd0beaa5 100644 +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -588,10 +588,13 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; + +- if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) ++ if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) { + tx->key = NULL; +- else if (tx->sta && +- (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx]))) ++ return TX_CONTINUE; ++ } ++ ++ if (tx->sta && ++ (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx]))) + tx->key = key; + else if (ieee80211_is_group_privacy_action(tx->skb) && + (key = rcu_dereference(tx->sdata->default_multicast_key))) +@@ -652,6 +655,9 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) + if (!skip_hw && tx->key && + tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) + info->control.hw_key = &tx->key->conf; ++ } else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta && ++ test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) { ++ return TX_DROP; + } + + return TX_CONTINUE; +@@ -1847,19 +1853,24 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, + + /* device xmit handlers */ + ++enum ieee80211_encrypt { ++ ENCRYPT_NO, ++ ENCRYPT_MGMT, ++ ENCRYPT_DATA, ++}; ++ + static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, +- int head_need, bool may_encrypt) ++ int head_need, ++ enum ieee80211_encrypt encrypt) + { + struct ieee80211_local *local = sdata->local; +- struct ieee80211_hdr *hdr; + bool enc_tailroom; + int tail_need = 0; + +- hdr = (struct ieee80211_hdr *) skb->data; +- enc_tailroom = may_encrypt && +- (sdata->crypto_tx_tailroom_needed_cnt || +- ieee80211_is_mgmt(hdr->frame_control)); ++ enc_tailroom = encrypt == ENCRYPT_MGMT || ++ (encrypt == ENCRYPT_DATA && ++ sdata->crypto_tx_tailroom_needed_cnt); + + if (enc_tailroom) { + tail_need = 
IEEE80211_ENCRYPT_TAILROOM; +@@ -1892,21 +1903,27 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + int headroom; +- bool may_encrypt; ++ enum ieee80211_encrypt encrypt; + +- may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT); ++ if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT) ++ encrypt = ENCRYPT_NO; ++ else if (ieee80211_is_mgmt(hdr->frame_control)) ++ encrypt = ENCRYPT_MGMT; ++ else ++ encrypt = ENCRYPT_DATA; + + headroom = local->tx_headroom; +- if (may_encrypt) ++ if (encrypt != ENCRYPT_NO) + headroom += sdata->encrypt_headroom; + headroom -= skb_headroom(skb); + headroom = max_t(int, 0, headroom); + +- if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) { ++ if (ieee80211_skb_resize(sdata, skb, headroom, encrypt)) { + ieee80211_free_txskb(&local->hw, skb); + return; + } + ++ /* reload after potential resize */ + hdr = (struct ieee80211_hdr *) skb->data; + info->control.vif = &sdata->vif; + +@@ -2688,7 +2705,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, + head_need += sdata->encrypt_headroom; + head_need += local->tx_headroom; + head_need = max_t(int, 0, head_need); +- if (ieee80211_skb_resize(sdata, skb, head_need, true)) { ++ if (ieee80211_skb_resize(sdata, skb, head_need, ENCRYPT_DATA)) { + ieee80211_free_txskb(&local->hw, skb); + skb = NULL; + return ERR_PTR(-ENOMEM); +@@ -3313,7 +3330,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, + if (unlikely(ieee80211_skb_resize(sdata, skb, + max_t(int, extra_head + hw_headroom - + skb_headroom(skb), 0), +- false))) { ++ ENCRYPT_NO))) { + kfree_skb(skb); + return true; + } +diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c +index 43e45bb660bc..b1d3fa708e16 100644 +--- a/net/mac80211/vht.c ++++ b/net/mac80211/vht.c +@@ -170,10 +170,7 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, + /* take some capabilities as-is */ + cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info); + vht_cap->cap = cap_info; +- vht_cap->cap &= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 | +- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | +- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | +- IEEE80211_VHT_CAP_RXLDPC | ++ vht_cap->cap &= IEEE80211_VHT_CAP_RXLDPC | + IEEE80211_VHT_CAP_VHT_TXOP_PS | + IEEE80211_VHT_CAP_HTC_VHT | + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | +@@ -182,6 +179,9 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, + IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN | + IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; + ++ vht_cap->cap |= min_t(u32, cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK, ++ own_cap.cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK); ++ + /* and some based on our own capabilities */ + switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { + case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: +diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c +index c0529c4b60f8..7819a2507d39 100644 +--- a/net/mac80211/wpa.c ++++ b/net/mac80211/wpa.c +@@ -162,8 +162,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) + + update_iv: + /* update IV in key information to be able to detect replays */ +- rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip_iv32; +- rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip_iv16; ++ rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip.iv32; ++ rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip.iv16; + + return RX_CONTINUE; + +@@ -289,8 +289,8 @@ 
ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) + key, skb->data + hdrlen, + skb->len - hdrlen, rx->sta->sta.addr, + hdr->addr1, hwaccel, rx->security_idx, +- &rx->tkip_iv32, +- &rx->tkip_iv16); ++ &rx->tkip.iv32, ++ &rx->tkip.iv16); + if (res != TKIP_DECRYPT_OK) + return RX_DROP_UNUSABLE; + +@@ -548,6 +548,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx, + } + + memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN); ++ if (unlikely(ieee80211_is_frag(hdr))) ++ memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN); + } + + /* Remove CCMP header and MIC */ +@@ -777,6 +779,8 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx) + } + + memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN); ++ if (unlikely(ieee80211_is_frag(hdr))) ++ memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN); + } + + /* Remove GCMP header and MIC */ +diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c +index 6a3e1c2181d3..9e885d94e515 100644 +--- a/net/mac802154/llsec.c ++++ b/net/mac802154/llsec.c +@@ -158,7 +158,7 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template) + crypto_free_skcipher(key->tfm0); + err_tfm: + for (i = 0; i < ARRAY_SIZE(key->tfm); i++) +- if (key->tfm[i]) ++ if (!IS_ERR_OR_NULL(key->tfm[i])) + crypto_free_aead(key->tfm[i]); + + kzfree(key); +diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c +index bcd1a5e6ebf4..2f873a0dc583 100644 +--- a/net/mac802154/tx.c ++++ b/net/mac802154/tx.c +@@ -42,11 +42,11 @@ void ieee802154_xmit_worker(struct work_struct *work) + if (res) + goto err_tx; + +- ieee802154_xmit_complete(&local->hw, skb, false); +- + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + ++ ieee802154_xmit_complete(&local->hw, skb, false); ++ + return; + + err_tx: +@@ -86,6 +86,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) + + /* async is priority, otherwise sync is fallback */ + if (local->ops->xmit_async) { ++ unsigned int len = skb->len; ++ + ret = drv_xmit_async(local, skb); + if (ret) { + ieee802154_wake_queue(&local->hw); +@@ -93,7 +95,7 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) + } + + dev->stats.tx_packets++; +- dev->stats.tx_bytes += skb->len; ++ dev->stats.tx_bytes += len; + } else { + local->tx_skb = skb; + queue_work(local->workqueue, &local->tx_work); +diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c +index b4da6d8e8632..2129856b5933 100644 +--- a/net/mpls/mpls_gso.c ++++ b/net/mpls/mpls_gso.c +@@ -18,6 +18,7 @@ + #include + #include + #include ++#include + + static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, + netdev_features_t features) +@@ -31,6 +32,8 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, + + skb_reset_network_header(skb); + mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb); ++ if (unlikely(!mpls_hlen || mpls_hlen % MPLS_HLEN)) ++ goto out; + if (unlikely(!pskb_may_pull(skb, mpls_hlen))) + goto out; + +diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c +index 087db775b3dc..c973d88e9ea7 100644 +--- a/net/ncsi/ncsi-rsp.c ++++ b/net/ncsi/ncsi-rsp.c +@@ -975,7 +975,7 @@ int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev, + int payload, i, ret; + + /* Find the NCSI device */ +- nd = ncsi_find_dev(dev); ++ nd = ncsi_find_dev(orig_dev); + ndp = nd ? 
TO_NCSI_DEV_PRIV(nd) : NULL; + if (!ndp) + return -ENODEV; +diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c +index f64660e9ff87..511496278262 100644 +--- a/net/netfilter/ipset/ip_set_core.c ++++ b/net/netfilter/ipset/ip_set_core.c +@@ -379,6 +379,8 @@ ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len, + for (id = 0; id < IPSET_EXT_ID_MAX; id++) { + if (!add_extension(id, cadt_flags, tb)) + continue; ++ if (align < ip_set_extensions[id].align) ++ align = ip_set_extensions[id].align; + len = ALIGN(len, ip_set_extensions[id].align); + set->offset[id] = len; + set->extensions |= ip_set_extensions[id].type; +diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h +index d32fd6b036bf..a4bd2d3a4821 100644 +--- a/net/netfilter/ipset/ip_set_hash_gen.h ++++ b/net/netfilter/ipset/ip_set_hash_gen.h +@@ -113,20 +113,6 @@ htable_size(u8 hbits) + return hsize * sizeof(struct hbucket *) + sizeof(struct htable); + } + +-/* Compute htable_bits from the user input parameter hashsize */ +-static u8 +-htable_bits(u32 hashsize) +-{ +- /* Assume that hashsize == 2^htable_bits */ +- u8 bits = fls(hashsize - 1); +- +- if (jhash_size(bits) != hashsize) +- /* Round up to the first 2^n value */ +- bits = fls(hashsize); +- +- return bits; +-} +- + #ifdef IP_SET_HASH_WITH_NETS + #if IPSET_NET_COUNT > 1 + #define __CIDR(cidr, i) (cidr[i]) +@@ -1309,7 +1295,11 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, + get_random_bytes(&h->initval, sizeof(h->initval)); + set->timeout = IPSET_NO_TIMEOUT; + +- hbits = htable_bits(hashsize); ++ /* Compute htable_bits from the user input parameter hashsize. ++ * Assume that hashsize == 2^htable_bits, ++ * otherwise round up to the first 2^n value. 
++ */ ++ hbits = fls(hashsize - 1); + hsize = htable_size(hbits); + if (hsize == 0) { + kfree(h); +diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c +index 33125fc009cf..4e08305a55c4 100644 +--- a/net/netfilter/ipvs/ip_vs_ctl.c ++++ b/net/netfilter/ipvs/ip_vs_ctl.c +@@ -1256,7 +1256,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, + ip_vs_addr_copy(svc->af, &svc->addr, &u->addr); + svc->port = u->port; + svc->fwmark = u->fwmark; +- svc->flags = u->flags; ++ svc->flags = u->flags & ~IP_VS_SVC_F_HASHED; + svc->timeout = u->timeout * HZ; + svc->netmask = u->netmask; + svc->ipvs = ipvs; +@@ -2424,6 +2424,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) + /* Set timeout values for (tcp tcpfin udp) */ + ret = ip_vs_set_timeout(ipvs, (struct ip_vs_timeout_user *)arg); + goto out_unlock; ++ } else if (!len) { ++ /* No more commands with len == 0 below */ ++ ret = -EINVAL; ++ goto out_unlock; + } + + usvc_compat = (struct ip_vs_service_user *)arg; +@@ -2500,9 +2504,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) + break; + case IP_VS_SO_SET_DELDEST: + ret = ip_vs_del_dest(svc, &udest); +- break; +- default: +- ret = -EINVAL; + } + + out_unlock: +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c +index f0e9b63b92ab..8b9a9c552bcc 100644 +--- a/net/netfilter/nf_conntrack_core.c ++++ b/net/netfilter/nf_conntrack_core.c +@@ -530,8 +530,13 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report) + return false; + + tstamp = nf_conn_tstamp_find(ct); +- if (tstamp && tstamp->stop == 0) ++ if (tstamp) { ++ s32 timeout = ct->timeout - nfct_time_stamp; ++ + tstamp->stop = ktime_get_real_ns(); ++ if (timeout < 0) ++ tstamp->stop -= jiffies_to_nsecs(-timeout); ++ } + + if (nf_conntrack_event_report(IPCT_DESTROY, ct, + portid, report) < 0) { +@@ -942,7 +947,8 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, + * Let nf_ct_resolve_clash() deal with this later. 
+ */ + if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple, +- &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)) ++ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) && ++ nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) + continue; + + NF_CT_STAT_INC_ATOMIC(net, found); +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c +index 65695824e10c..4cf882e573ac 100644 +--- a/net/netfilter/nf_conntrack_netlink.c ++++ b/net/netfilter/nf_conntrack_netlink.c +@@ -1035,6 +1035,8 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[], + if (!tb[CTA_TUPLE_IP]) + return -EINVAL; + ++ if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6) ++ return -EOPNOTSUPP; + tuple->src.l3num = l3num; + + err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple); +diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c +index 2f1014eb3cc5..309f2458847b 100644 +--- a/net/netfilter/nf_conntrack_standalone.c ++++ b/net/netfilter/nf_conntrack_standalone.c +@@ -458,6 +458,9 @@ nf_conntrack_hash_sysctl(struct ctl_table *table, int write, + { + int ret; + ++ /* module_param hashsize could have changed value */ ++ nf_conntrack_htable_size_user = nf_conntrack_htable_size; ++ + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (ret < 0 || !write) + return ret; +@@ -556,8 +559,11 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net) + if (net->user_ns != &init_user_ns) + table[0].procname = NULL; + +- if (!net_eq(&init_net, net)) ++ if (!net_eq(&init_net, net)) { ++ table[0].mode = 0444; + table[2].mode = 0444; ++ table[5].mode = 0444; ++ } + + net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table); + if (!net->ct.sysctl_header) +diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c +index c8a4a48bced9..8be604eb6961 100644 +--- a/net/netfilter/nf_synproxy_core.c ++++ b/net/netfilter/nf_synproxy_core.c +@@ -34,6 +34,9 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, + int length = (th->doff * 4) - sizeof(*th); + u8 buf[40], *ptr; + ++ if (unlikely(length < 0)) ++ return false; ++ + ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf); + if (ptr == NULL) + return false; +@@ -50,6 +53,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, + length--; + continue; + default: ++ if (length < 2) ++ return true; + opsize = *ptr++; + if (opsize < 2) + return true; +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index 2fa1c4f2e94e..ec460aedfc61 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -2592,7 +2592,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, + goto nla_put_failure; + } + +- if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata)) ++ if (set->udata && ++ nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata)) + goto nla_put_failure; + + desc = nla_nest_start(skb, NFTA_SET_DESC); +diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c +index 8396dc8ee247..babe42ff3eec 100644 +--- a/net/netfilter/nfnetlink_cthelper.c ++++ b/net/netfilter/nfnetlink_cthelper.c +@@ -355,10 +355,14 @@ static int + nfnl_cthelper_update(const struct nlattr * const tb[], + struct nf_conntrack_helper *helper) + { ++ u32 size; + int ret; + +- if (tb[NFCTH_PRIV_DATA_LEN]) +- return -EBUSY; ++ if (tb[NFCTH_PRIV_DATA_LEN]) { ++ size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); ++ if (size != helper->data_len) ++ return 
-EBUSY; ++ } + + if (tb[NFCTH_POLICY]) { + ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]); +diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c +index b9dd4e960426..81adbfaffe38 100644 +--- a/net/netfilter/nft_dynset.c ++++ b/net/netfilter/nft_dynset.c +@@ -210,8 +210,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx, + nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_EXPR, + priv->expr->ops->size); + if (set->flags & NFT_SET_TIMEOUT) { +- if (timeout || set->timeout) ++ if (timeout || set->timeout) { ++ nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT); + nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION); ++ } + } + + priv->timeout = timeout; +diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c +index 47beb3abcc9d..e2c815ee06d0 100644 +--- a/net/netfilter/nft_exthdr.c ++++ b/net/netfilter/nft_exthdr.c +@@ -34,6 +34,9 @@ static void nft_exthdr_eval(const struct nft_expr *expr, + unsigned int offset = 0; + int err; + ++ if (pkt->skb->protocol != htons(ETH_P_IPV6)) ++ goto err; ++ + err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL); + if (err < 0) + goto err; +diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c +index 4c48e9bb21e2..d338d69a0e0b 100644 +--- a/net/netfilter/nft_nat.c ++++ b/net/netfilter/nft_nat.c +@@ -135,7 +135,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, + priv->type = NF_NAT_MANIP_DST; + break; + default: +- return -EINVAL; ++ return -EOPNOTSUPP; + } + + err = nft_nat_validate(ctx, expr, NULL); +@@ -157,7 +157,9 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, + alen = FIELD_SIZEOF(struct nf_nat_range, min_addr.ip6); + break; + default: +- return -EAFNOSUPPORT; ++ if (tb[NFTA_NAT_REG_ADDR_MIN]) ++ return -EAFNOSUPPORT; ++ break; + } + priv->family = family; + +@@ -206,7 +208,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, + if (tb[NFTA_NAT_FLAGS]) { + priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS])); + if (priv->flags & ~NF_NAT_RANGE_MASK) +- return -EINVAL; ++ return -EOPNOTSUPP; + } + + return 0; +diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c +index b2f88617611a..f73d47b3ffb7 100644 +--- a/net/netfilter/nft_payload.c ++++ b/net/netfilter/nft_payload.c +@@ -74,7 +74,9 @@ static void nft_payload_eval(const struct nft_expr *expr, + u32 *dest = ®s->data[priv->dreg]; + int offset; + +- dest[priv->len / NFT_REG32_SIZE] = 0; ++ if (priv->len % NFT_REG32_SIZE) ++ dest[priv->len / NFT_REG32_SIZE] = 0; ++ + switch (priv->base) { + case NFT_PAYLOAD_LL_HEADER: + if (!skb_mac_header_was_set(skb)) +diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c +index e065140d0c93..8d84f480b01e 100644 +--- a/net/netfilter/x_tables.c ++++ b/net/netfilter/x_tables.c +@@ -40,6 +40,7 @@ MODULE_AUTHOR("Harald Welte "); + MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module"); + + #define XT_PCPU_BLOCK_SIZE 4096 ++#define XT_MAX_TABLE_SIZE (512 * 1024 * 1024) + + struct compat_delta { + unsigned int offset; /* offset in kernel */ +@@ -267,11 +268,66 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision) + } + EXPORT_SYMBOL_GPL(xt_request_find_target); + ++ ++static int xt_obj_to_user(u16 __user *psize, u16 size, ++ void __user *pname, const char *name, ++ u8 __user *prev, u8 rev) ++{ ++ if (put_user(size, psize)) ++ return -EFAULT; ++ if (copy_to_user(pname, name, strlen(name) + 1)) ++ return -EFAULT; ++ if (put_user(rev, prev)) ++ return 
-EFAULT; ++ ++ return 0; ++} ++ ++#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE) \ ++ xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size, \ ++ U->u.user.name, K->u.kernel.TYPE->name, \ ++ &U->u.user.revision, K->u.kernel.TYPE->revision) ++ ++int xt_data_to_user(void __user *dst, const void *src, ++ int usersize, int size) ++{ ++ usersize = usersize ? : size; ++ if (copy_to_user(dst, src, usersize)) ++ return -EFAULT; ++ if (usersize != size && clear_user(dst + usersize, size - usersize)) ++ return -EFAULT; ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(xt_data_to_user); ++ ++#define XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \ ++ xt_data_to_user(U->data, K->data, \ ++ K->u.kernel.TYPE->usersize, \ ++ C_SIZE ? : K->u.kernel.TYPE->TYPE##size) ++ ++int xt_match_to_user(const struct xt_entry_match *m, ++ struct xt_entry_match __user *u) ++{ ++ return XT_OBJ_TO_USER(u, m, match, 0) || ++ XT_DATA_TO_USER(u, m, match, 0); ++} ++EXPORT_SYMBOL_GPL(xt_match_to_user); ++ ++int xt_target_to_user(const struct xt_entry_target *t, ++ struct xt_entry_target __user *u) ++{ ++ return XT_OBJ_TO_USER(u, t, target, 0) || ++ XT_DATA_TO_USER(u, t, target, 0); ++} ++EXPORT_SYMBOL_GPL(xt_target_to_user); ++ + static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) + { + const struct xt_match *m; + int have_rev = 0; + ++ mutex_lock(&xt[af].mutex); + list_for_each_entry(m, &xt[af].match, list) { + if (strcmp(m->name, name) == 0) { + if (m->revision > *bestp) +@@ -280,6 +336,7 @@ static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) + have_rev = 1; + } + } ++ mutex_unlock(&xt[af].mutex); + + if (af != NFPROTO_UNSPEC && !have_rev) + return match_revfn(NFPROTO_UNSPEC, name, revision, bestp); +@@ -292,6 +349,7 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) + const struct xt_target *t; + int have_rev = 0; + ++ mutex_lock(&xt[af].mutex); + list_for_each_entry(t, &xt[af].target, list) { + if (strcmp(t->name, name) == 0) { + if (t->revision > *bestp) +@@ -300,6 +358,7 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) + have_rev = 1; + } + } ++ mutex_unlock(&xt[af].mutex); + + if (af != NFPROTO_UNSPEC && !have_rev) + return target_revfn(NFPROTO_UNSPEC, name, revision, bestp); +@@ -313,12 +372,10 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target, + { + int have_rev, best = -1; + +- mutex_lock(&xt[af].mutex); + if (target == 1) + have_rev = target_revfn(af, name, revision, &best); + else + have_rev = match_revfn(af, name, revision, &best); +- mutex_unlock(&xt[af].mutex); + + /* Nothing at all? Return 0 to try loading module. 
*/ + if (best == -1) { +@@ -567,7 +624,7 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, + { + const struct xt_match *match = m->u.kernel.match; + struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m; +- int pad, off = xt_compat_match_offset(match); ++ int off = xt_compat_match_offset(match); + u_int16_t msize = cm->u.user.match_size; + char name[sizeof(m->u.user.name)]; + +@@ -577,9 +634,6 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, + match->compat_from_user(m->data, cm->data); + else + memcpy(m->data, cm->data, msize - sizeof(*cm)); +- pad = XT_ALIGN(match->matchsize) - match->matchsize; +- if (pad > 0) +- memset(m->data + match->matchsize, 0, pad); + + msize += off; + m->u.user.match_size = msize; +@@ -925,7 +979,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, + { + const struct xt_target *target = t->u.kernel.target; + struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t; +- int pad, off = xt_compat_target_offset(target); ++ int off = xt_compat_target_offset(target); + u_int16_t tsize = ct->u.user.target_size; + char name[sizeof(t->u.user.name)]; + +@@ -935,9 +989,6 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, + target->compat_from_user(t->data, ct->data); + else + memcpy(t->data, ct->data, tsize - sizeof(*ct)); +- pad = XT_ALIGN(target->targetsize) - target->targetsize; +- if (pad > 0) +- memset(t->data + target->targetsize, 0, pad); + + tsize += off; + t->u.user.target_size = tsize; +@@ -984,17 +1035,15 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size) + struct xt_table_info *info = NULL; + size_t sz = sizeof(*info) + size; + +- if (sz < sizeof(*info)) +- return NULL; +- +- /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */ +- if ((size >> PAGE_SHIFT) + 2 > totalram_pages) ++ if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE) + return NULL; + + if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) + info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); + if (!info) { +- info = vmalloc(sz); ++ info = __vmalloc(sz, GFP_KERNEL | __GFP_NOWARN | ++ __GFP_NORETRY | __GFP_HIGHMEM, ++ PAGE_KERNEL); + if (!info) + return NULL; + } +@@ -1177,6 +1226,9 @@ xt_replace_table(struct xt_table *table, + smp_wmb(); + table->private = newinfo; + ++ /* make sure all cpus see new ->private value */ ++ smp_mb(); ++ + /* + * Even though table entries have now been swapped, other CPU's + * may still be using the old entries. 
This is okay, because +diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c +index 00ef8243d48d..1390f34098e4 100644 +--- a/net/netfilter/xt_CT.c ++++ b/net/netfilter/xt_CT.c +@@ -380,6 +380,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = { + .name = "CT", + .family = NFPROTO_UNSPEC, + .targetsize = sizeof(struct xt_ct_target_info), ++ .usersize = offsetof(struct xt_ct_target_info, ct), + .checkentry = xt_ct_tg_check_v0, + .destroy = xt_ct_tg_destroy_v0, + .target = xt_ct_target_v0, +@@ -391,6 +392,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = { + .family = NFPROTO_UNSPEC, + .revision = 1, + .targetsize = sizeof(struct xt_ct_target_info_v1), ++ .usersize = offsetof(struct xt_ct_target_info, ct), + .checkentry = xt_ct_tg_check_v1, + .destroy = xt_ct_tg_destroy_v1, + .target = xt_ct_target_v1, +@@ -402,6 +404,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = { + .family = NFPROTO_UNSPEC, + .revision = 2, + .targetsize = sizeof(struct xt_ct_target_info_v1), ++ .usersize = offsetof(struct xt_ct_target_info, ct), + .checkentry = xt_ct_tg_check_v2, + .destroy = xt_ct_tg_destroy_v1, + .target = xt_ct_target_v1, +diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c +index 736cd0f150e6..7183241558c6 100644 +--- a/net/netfilter/xt_IDLETIMER.c ++++ b/net/netfilter/xt_IDLETIMER.c +@@ -505,6 +505,7 @@ static struct xt_target idletimer_tg __read_mostly = { + .family = NFPROTO_UNSPEC, + .target = idletimer_tg_target, + .targetsize = sizeof(struct idletimer_tg_info), ++ .usersize = offsetof(struct idletimer_tg_info, timer), + .checkentry = idletimer_tg_checkentry, + .destroy = idletimer_tg_destroy, + .me = THIS_MODULE, +diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c +index 0858fe17e14a..2d1c5c169a26 100644 +--- a/net/netfilter/xt_LED.c ++++ b/net/netfilter/xt_LED.c +@@ -198,6 +198,7 @@ static struct xt_target led_tg_reg __read_mostly = { + .family = NFPROTO_UNSPEC, + .target = led_tg, + .targetsize = sizeof(struct xt_led_info), ++ .usersize = offsetof(struct xt_led_info, internal_data), + .checkentry = led_tg_check, + .destroy = led_tg_destroy, + .me = THIS_MODULE, +diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c +index 92cfae66743b..2306e6e3373f 100644 +--- a/net/netfilter/xt_RATEEST.c ++++ b/net/netfilter/xt_RATEEST.c +@@ -106,6 +106,9 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) + } cfg; + int ret; + ++ if (strnlen(info->name, sizeof(est->name)) >= sizeof(est->name)) ++ return -ENAMETOOLONG; ++ + net_get_random_once(&jhash_rnd, sizeof(jhash_rnd)); + + mutex_lock(&xt_rateest_mutex); +@@ -174,6 +177,7 @@ static struct xt_target xt_rateest_tg_reg __read_mostly = { + .checkentry = xt_rateest_tg_checkentry, + .destroy = xt_rateest_tg_destroy, + .targetsize = sizeof(struct xt_rateest_target_info), ++ .usersize = offsetof(struct xt_rateest_target_info, est), + .me = THIS_MODULE, + }; + +diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c +index 0471db4032c5..23b24380b806 100644 +--- a/net/netfilter/xt_TEE.c ++++ b/net/netfilter/xt_TEE.c +@@ -133,6 +133,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = { + .family = NFPROTO_IPV4, + .target = tee_tg4, + .targetsize = sizeof(struct xt_tee_tginfo), ++ .usersize = offsetof(struct xt_tee_tginfo, priv), + .checkentry = tee_tg_check, + .destroy = tee_tg_destroy, + .me = THIS_MODULE, +@@ -144,6 +145,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = { + .family = NFPROTO_IPV6, + .target = tee_tg6, + .targetsize = 
sizeof(struct xt_tee_tginfo), ++ .usersize = offsetof(struct xt_tee_tginfo, priv), + .checkentry = tee_tg_check, + .destroy = tee_tg_destroy, + .me = THIS_MODULE, +diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c +index a8df0a9d68e6..7e7746cc45d6 100644 +--- a/net/netfilter/xt_bpf.c ++++ b/net/netfilter/xt_bpf.c +@@ -125,6 +125,7 @@ static struct xt_match bpf_mt_reg[] __read_mostly = { + .match = bpf_mt, + .destroy = bpf_mt_destroy, + .matchsize = sizeof(struct xt_bpf_info), ++ .usersize = offsetof(struct xt_bpf_info, filter), + .me = THIS_MODULE, + }, + { +@@ -135,6 +136,7 @@ static struct xt_match bpf_mt_reg[] __read_mostly = { + .match = bpf_mt_v1, + .destroy = bpf_mt_destroy_v1, + .matchsize = sizeof(struct xt_bpf_info_v1), ++ .usersize = offsetof(struct xt_bpf_info_v1, filter), + .me = THIS_MODULE, + }, + }; +diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c +index 0eddbd5328af..891f4e7e8ea7 100644 +--- a/net/netfilter/xt_cgroup.c ++++ b/net/netfilter/xt_cgroup.c +@@ -123,6 +123,7 @@ static struct xt_match cgroup_mt_reg[] __read_mostly = { + .checkentry = cgroup_mt_check_v1, + .match = cgroup_mt_v1, + .matchsize = sizeof(struct xt_cgroup_info_v1), ++ .usersize = offsetof(struct xt_cgroup_info_v1, priv), + .destroy = cgroup_mt_destroy_v1, + .me = THIS_MODULE, + .hooks = (1 << NF_INET_LOCAL_OUT) | +diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c +index b6dc322593a3..6b36598a211a 100644 +--- a/net/netfilter/xt_connlimit.c ++++ b/net/netfilter/xt_connlimit.c +@@ -431,6 +431,7 @@ static struct xt_match connlimit_mt_reg __read_mostly = { + .checkentry = connlimit_mt_check, + .match = connlimit_mt, + .matchsize = sizeof(struct xt_connlimit_info), ++ .usersize = offsetof(struct xt_connlimit_info, data), + .destroy = connlimit_mt_destroy, + .me = THIS_MODULE, + }; +diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c +index 140a9ae262ef..97df6f9dbdde 100644 +--- a/net/netfilter/xt_hashlimit.c ++++ b/net/netfilter/xt_hashlimit.c +@@ -851,6 +851,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = { + .family = NFPROTO_IPV4, + .match = hashlimit_mt_v1, + .matchsize = sizeof(struct xt_hashlimit_mtinfo1), ++ .usersize = offsetof(struct xt_hashlimit_mtinfo1, hinfo), + .checkentry = hashlimit_mt_check_v1, + .destroy = hashlimit_mt_destroy_v1, + .me = THIS_MODULE, +@@ -861,6 +862,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = { + .family = NFPROTO_IPV4, + .match = hashlimit_mt, + .matchsize = sizeof(struct xt_hashlimit_mtinfo2), ++ .usersize = offsetof(struct xt_hashlimit_mtinfo2, hinfo), + .checkentry = hashlimit_mt_check, + .destroy = hashlimit_mt_destroy, + .me = THIS_MODULE, +@@ -872,6 +874,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = { + .family = NFPROTO_IPV6, + .match = hashlimit_mt_v1, + .matchsize = sizeof(struct xt_hashlimit_mtinfo1), ++ .usersize = offsetof(struct xt_hashlimit_mtinfo1, hinfo), + .checkentry = hashlimit_mt_check_v1, + .destroy = hashlimit_mt_destroy_v1, + .me = THIS_MODULE, +@@ -882,6 +885,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = { + .family = NFPROTO_IPV6, + .match = hashlimit_mt, + .matchsize = sizeof(struct xt_hashlimit_mtinfo2), ++ .usersize = offsetof(struct xt_hashlimit_mtinfo2, hinfo), + .checkentry = hashlimit_mt_check, + .destroy = hashlimit_mt_destroy, + .me = THIS_MODULE, +diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c +index bef850596558..e84de7656289 100644 +--- a/net/netfilter/xt_limit.c ++++ 
b/net/netfilter/xt_limit.c +@@ -193,6 +193,7 @@ static struct xt_match limit_mt_reg __read_mostly = { + .compat_from_user = limit_mt_compat_from_user, + .compat_to_user = limit_mt_compat_to_user, + #endif ++ .usersize = offsetof(struct xt_rateinfo, prev), + .me = THIS_MODULE, + }; + +diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c +index cf327593852a..51541c5be5a4 100644 +--- a/net/netfilter/xt_nfacct.c ++++ b/net/netfilter/xt_nfacct.c +@@ -62,6 +62,7 @@ static struct xt_match nfacct_mt_reg __read_mostly = { + .match = nfacct_mt, + .destroy = nfacct_mt_destroy, + .matchsize = sizeof(struct xt_nfacct_match_info), ++ .usersize = offsetof(struct xt_nfacct_match_info, nfacct), + .me = THIS_MODULE, + }; + +diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c +index 44c8eb4c9d66..10d61a6eed71 100644 +--- a/net/netfilter/xt_quota.c ++++ b/net/netfilter/xt_quota.c +@@ -73,6 +73,7 @@ static struct xt_match quota_mt_reg __read_mostly = { + .checkentry = quota_mt_check, + .destroy = quota_mt_destroy, + .matchsize = sizeof(struct xt_quota_info), ++ .usersize = offsetof(struct xt_quota_info, master), + .me = THIS_MODULE, + }; + +diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c +index 8c56ff2aa2fa..2a48285f6845 100644 +--- a/net/netfilter/xt_quota2.c ++++ b/net/netfilter/xt_quota2.c +@@ -135,6 +135,8 @@ static ssize_t quota_proc_write(struct file *file, const char __user *input, + if (copy_from_user(buf, input, size) != 0) + return -EFAULT; + buf[sizeof(buf)-1] = '\0'; ++ if (size < sizeof(buf)) ++ buf[size] = '\0'; + + spin_lock_bh(&e->lock); + e->quota = simple_strtoull(buf, NULL, 0); +@@ -282,6 +284,8 @@ quota_mt2(const struct sk_buff *skb, struct xt_action_param *par) + { + struct xt_quota_mtinfo2 *q = (void *)par->matchinfo; + struct xt_quota_counter *e = q->master; ++ int charge = (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len; ++ bool no_change = q->flags & XT_QUOTA_NO_CHANGE; + bool ret = q->flags & XT_QUOTA_INVERT; + + spin_lock_bh(&e->lock); +@@ -290,20 +294,17 @@ quota_mt2(const struct sk_buff *skb, struct xt_action_param *par) + * While no_change is pointless in "grow" mode, we will + * implement it here simply to have a consistent behavior. + */ +- if (!(q->flags & XT_QUOTA_NO_CHANGE)) { +- e->quota += (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len; +- } +- ret = true; ++ if (!no_change) ++ e->quota += charge; ++ ret = true; /* note: does not respect inversion (bug??) */ + } else { +- if (e->quota >= skb->len) { +- if (!(q->flags & XT_QUOTA_NO_CHANGE)) +- e->quota -= (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len; ++ if (e->quota > charge) { ++ if (!no_change) ++ e->quota -= charge; + ret = !ret; +- } else { ++ } else if (e->quota) { + /* We are transitioning, log that fact. 
*/ +- if (e->quota) { +- quota2_log(par->in, par->out, e, q->name); +- } ++ quota2_log(par->in, par->out, e, q->name); + /* we do not allow even small packets from now on */ + e->quota = 0; + } +@@ -321,6 +322,7 @@ static struct xt_match quota_mt2_reg[] __read_mostly = { + .match = quota_mt2, + .destroy = quota_mt2_destroy, + .matchsize = sizeof(struct xt_quota_mtinfo2), ++ .usersize = offsetof(struct xt_quota_mtinfo2, master), + .me = THIS_MODULE, + }, + { +@@ -331,6 +333,7 @@ static struct xt_match quota_mt2_reg[] __read_mostly = { + .match = quota_mt2, + .destroy = quota_mt2_destroy, + .matchsize = sizeof(struct xt_quota_mtinfo2), ++ .usersize = offsetof(struct xt_quota_mtinfo2, master), + .me = THIS_MODULE, + }, + }; +diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c +index 7720b036d76a..d46dc9ff591f 100644 +--- a/net/netfilter/xt_rateest.c ++++ b/net/netfilter/xt_rateest.c +@@ -135,6 +135,7 @@ static struct xt_match xt_rateest_mt_reg __read_mostly = { + .checkentry = xt_rateest_mt_checkentry, + .destroy = xt_rateest_mt_destroy, + .matchsize = sizeof(struct xt_rateest_match_info), ++ .usersize = offsetof(struct xt_rateest_match_info, est1), + .me = THIS_MODULE, + }; + +diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c +index 79d7ad621a80..03c8bd854e56 100644 +--- a/net/netfilter/xt_recent.c ++++ b/net/netfilter/xt_recent.c +@@ -155,7 +155,8 @@ static void recent_entry_remove(struct recent_table *t, struct recent_entry *e) + /* + * Drop entries with timestamps older then 'time'. + */ +-static void recent_entry_reap(struct recent_table *t, unsigned long time) ++static void recent_entry_reap(struct recent_table *t, unsigned long time, ++ struct recent_entry *working, bool update) + { + struct recent_entry *e; + +@@ -164,6 +165,12 @@ static void recent_entry_reap(struct recent_table *t, unsigned long time) + */ + e = list_entry(t->lru_list.next, struct recent_entry, lru_list); + ++ /* ++ * Do not reap the entry which are going to be updated. ++ */ ++ if (e == working && update) ++ return; ++ + /* + * The last time stamp is the most recent. 
+ */ +@@ -306,7 +313,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par) + + /* info->seconds must be non-zero */ + if (info->check_set & XT_RECENT_REAP) +- recent_entry_reap(t, time); ++ recent_entry_reap(t, time, e, ++ info->check_set & XT_RECENT_UPDATE && ret); + } + + if (info->check_set & XT_RECENT_SET || +diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c +index 11de55e7a868..8710fdba2ae2 100644 +--- a/net/netfilter/xt_statistic.c ++++ b/net/netfilter/xt_statistic.c +@@ -84,6 +84,7 @@ static struct xt_match xt_statistic_mt_reg __read_mostly = { + .checkentry = statistic_mt_check, + .destroy = statistic_mt_destroy, + .matchsize = sizeof(struct xt_statistic_info), ++ .usersize = offsetof(struct xt_statistic_info, master), + .me = THIS_MODULE, + }; + +diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c +index 0bc3460319c8..423293ee57c2 100644 +--- a/net/netfilter/xt_string.c ++++ b/net/netfilter/xt_string.c +@@ -77,6 +77,7 @@ static struct xt_match xt_string_mt_reg __read_mostly = { + .match = string_mt, + .destroy = string_mt_destroy, + .matchsize = sizeof(struct xt_string_info), ++ .usersize = offsetof(struct xt_string_info, config), + .me = THIS_MODULE, + }; + +diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c +index 41d0e95d171e..b1a1718495f3 100644 +--- a/net/netlabel/netlabel_domainhash.c ++++ b/net/netlabel/netlabel_domainhash.c +@@ -99,6 +99,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry) + kfree(netlbl_domhsh_addr6_entry(iter6)); + } + #endif /* IPv6 */ ++ kfree(ptr->def.addrsel); + } + kfree(ptr->domain); + kfree(ptr); +@@ -550,6 +551,8 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, + goto add_return; + } + #endif /* IPv6 */ ++ /* cleanup the new entry since we've moved everything over */ ++ netlbl_domhsh_free_entry(&entry->rcu); + } else + ret_val = -EINVAL; + +@@ -593,6 +596,12 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, + { + int ret_val = 0; + struct audit_buffer *audit_buf; ++ struct netlbl_af4list *iter4; ++ struct netlbl_domaddr4_map *map4; ++#if IS_ENABLED(CONFIG_IPV6) ++ struct netlbl_af6list *iter6; ++ struct netlbl_domaddr6_map *map6; ++#endif /* IPv6 */ + + if (entry == NULL) + return -ENOENT; +@@ -610,6 +619,9 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, + ret_val = -ENOENT; + spin_unlock(&netlbl_domhsh_lock); + ++ if (ret_val) ++ return ret_val; ++ + audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info); + if (audit_buf != NULL) { + audit_log_format(audit_buf, +@@ -619,40 +631,29 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, + audit_log_end(audit_buf); + } + +- if (ret_val == 0) { +- struct netlbl_af4list *iter4; +- struct netlbl_domaddr4_map *map4; +-#if IS_ENABLED(CONFIG_IPV6) +- struct netlbl_af6list *iter6; +- struct netlbl_domaddr6_map *map6; +-#endif /* IPv6 */ +- +- switch (entry->def.type) { +- case NETLBL_NLTYPE_ADDRSELECT: +- netlbl_af4list_foreach_rcu(iter4, +- &entry->def.addrsel->list4) { +- map4 = netlbl_domhsh_addr4_entry(iter4); +- cipso_v4_doi_putdef(map4->def.cipso); +- } ++ switch (entry->def.type) { ++ case NETLBL_NLTYPE_ADDRSELECT: ++ netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) { ++ map4 = netlbl_domhsh_addr4_entry(iter4); ++ cipso_v4_doi_putdef(map4->def.cipso); ++ } + #if IS_ENABLED(CONFIG_IPV6) +- netlbl_af6list_foreach_rcu(iter6, +- &entry->def.addrsel->list6) { +- map6 = netlbl_domhsh_addr6_entry(iter6); +- 
calipso_doi_putdef(map6->def.calipso); +- } ++ netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) { ++ map6 = netlbl_domhsh_addr6_entry(iter6); ++ calipso_doi_putdef(map6->def.calipso); ++ } + #endif /* IPv6 */ +- break; +- case NETLBL_NLTYPE_CIPSOV4: +- cipso_v4_doi_putdef(entry->def.cipso); +- break; ++ break; ++ case NETLBL_NLTYPE_CIPSOV4: ++ cipso_v4_doi_putdef(entry->def.cipso); ++ break; + #if IS_ENABLED(CONFIG_IPV6) +- case NETLBL_NLTYPE_CALIPSO: +- calipso_doi_putdef(entry->def.calipso); +- break; ++ case NETLBL_NLTYPE_CALIPSO: ++ calipso_doi_putdef(entry->def.calipso); ++ break; + #endif /* IPv6 */ +- } +- call_rcu(&entry->rcu, netlbl_domhsh_free_entry); + } ++ call_rcu(&entry->rcu, netlbl_domhsh_free_entry); + + return ret_val; + } +diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c +index f85d0e07af2d..9be851ce97c2 100644 +--- a/net/netlabel/netlabel_mgmt.c ++++ b/net/netlabel/netlabel_mgmt.c +@@ -96,6 +96,7 @@ static const struct nla_policy netlbl_mgmt_genl_policy[NLBL_MGMT_A_MAX + 1] = { + static int netlbl_mgmt_add_common(struct genl_info *info, + struct netlbl_audit *audit_info) + { ++ void *pmap = NULL; + int ret_val = -EINVAL; + struct netlbl_domaddr_map *addrmap = NULL; + struct cipso_v4_doi *cipsov4 = NULL; +@@ -195,6 +196,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info, + ret_val = -ENOMEM; + goto add_free_addrmap; + } ++ pmap = map; + map->list.addr = addr->s_addr & mask->s_addr; + map->list.mask = mask->s_addr; + map->list.valid = 1; +@@ -203,10 +205,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info, + map->def.cipso = cipsov4; + + ret_val = netlbl_af4list_add(&map->list, &addrmap->list4); +- if (ret_val != 0) { +- kfree(map); +- goto add_free_addrmap; +- } ++ if (ret_val != 0) ++ goto add_free_map; + + entry->family = AF_INET; + entry->def.type = NETLBL_NLTYPE_ADDRSELECT; +@@ -243,6 +243,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info, + ret_val = -ENOMEM; + goto add_free_addrmap; + } ++ pmap = map; + map->list.addr = *addr; + map->list.addr.s6_addr32[0] &= mask->s6_addr32[0]; + map->list.addr.s6_addr32[1] &= mask->s6_addr32[1]; +@@ -255,10 +256,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info, + map->def.calipso = calipso; + + ret_val = netlbl_af6list_add(&map->list, &addrmap->list6); +- if (ret_val != 0) { +- kfree(map); +- goto add_free_addrmap; +- } ++ if (ret_val != 0) ++ goto add_free_map; + + entry->family = AF_INET6; + entry->def.type = NETLBL_NLTYPE_ADDRSELECT; +@@ -268,10 +267,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info, + + ret_val = netlbl_domhsh_add(entry, audit_info); + if (ret_val != 0) +- goto add_free_addrmap; ++ goto add_free_map; + + return 0; + ++add_free_map: ++ kfree(pmap); + add_free_addrmap: + kfree(addrmap); + add_doi_put_def: +diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c +index 053ba8646155..467e0d19cdfe 100644 +--- a/net/netlabel/netlabel_unlabeled.c ++++ b/net/netlabel/netlabel_unlabeled.c +@@ -1185,12 +1185,13 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, + struct netlbl_unlhsh_walk_arg cb_arg; + u32 skip_bkt = cb->args[0]; + u32 skip_chain = cb->args[1]; +- u32 iter_bkt; +- u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0; ++ u32 skip_addr4 = cb->args[2]; ++ u32 iter_bkt, iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0; + struct netlbl_unlhsh_iface *iface; + struct list_head *iter_list; + struct netlbl_af4list *addr4; + #if IS_ENABLED(CONFIG_IPV6) ++ u32 skip_addr6 = cb->args[3]; + 
struct netlbl_af6list *addr6; + #endif + +@@ -1201,7 +1202,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, + rcu_read_lock(); + for (iter_bkt = skip_bkt; + iter_bkt < rcu_dereference(netlbl_unlhsh)->size; +- iter_bkt++, iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0) { ++ iter_bkt++) { + iter_list = &rcu_dereference(netlbl_unlhsh)->tbl[iter_bkt]; + list_for_each_entry_rcu(iface, iter_list, list) { + if (!iface->valid || +@@ -1209,7 +1210,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, + continue; + netlbl_af4list_foreach_rcu(addr4, + &iface->addr4_list) { +- if (iter_addr4++ < cb->args[2]) ++ if (iter_addr4++ < skip_addr4) + continue; + if (netlbl_unlabel_staticlist_gen( + NLBL_UNLABEL_C_STATICLIST, +@@ -1222,10 +1223,12 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, + goto unlabel_staticlist_return; + } + } ++ iter_addr4 = 0; ++ skip_addr4 = 0; + #if IS_ENABLED(CONFIG_IPV6) + netlbl_af6list_foreach_rcu(addr6, + &iface->addr6_list) { +- if (iter_addr6++ < cb->args[3]) ++ if (iter_addr6++ < skip_addr6) + continue; + if (netlbl_unlabel_staticlist_gen( + NLBL_UNLABEL_C_STATICLIST, +@@ -1238,8 +1241,12 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb, + goto unlabel_staticlist_return; + } + } ++ iter_addr6 = 0; ++ skip_addr6 = 0; + #endif /* IPv6 */ + } ++ iter_chain = 0; ++ skip_chain = 0; + } + + unlabel_staticlist_return: +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index 205865292ba3..541410f1c3b7 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -436,11 +436,13 @@ void netlink_table_ungrab(void) + static inline void + netlink_lock_table(void) + { ++ unsigned long flags; ++ + /* read_lock() synchronizes us to netlink_table_grab */ + +- read_lock(&nl_table_lock); ++ read_lock_irqsave(&nl_table_lock, flags); + atomic_inc(&nl_table_users); +- read_unlock(&nl_table_lock); ++ read_unlock_irqrestore(&nl_table_lock, flags); + } + + static inline void +diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c +old mode 100644 +new mode 100755 +index a1fae01de6b2..9eb20dbcb411 +--- a/net/netlink/genetlink.c ++++ b/net/netlink/genetlink.c +@@ -992,63 +992,11 @@ static const struct genl_multicast_group genl_ctrl_groups[] = { + { .name = "notify", }, + }; + +-static int genl_bind(struct net *net, int group) +-{ +- int i, err = 0; +- +- down_read(&cb_lock); +- for (i = 0; i < GENL_FAM_TAB_SIZE; i++) { +- struct genl_family *f; +- +- list_for_each_entry(f, genl_family_chain(i), family_list) { +- if (group >= f->mcgrp_offset && +- group < f->mcgrp_offset + f->n_mcgrps) { +- int fam_grp = group - f->mcgrp_offset; +- +- if (!f->netnsok && net != &init_net) +- err = -ENOENT; +- else if (f->mcast_bind) +- err = f->mcast_bind(net, fam_grp); +- else +- err = 0; +- break; +- } +- } +- } +- up_read(&cb_lock); +- +- return err; +-} +- +-static void genl_unbind(struct net *net, int group) +-{ +- int i; +- +- down_read(&cb_lock); +- for (i = 0; i < GENL_FAM_TAB_SIZE; i++) { +- struct genl_family *f; +- +- list_for_each_entry(f, genl_family_chain(i), family_list) { +- if (group >= f->mcgrp_offset && +- group < f->mcgrp_offset + f->n_mcgrps) { +- int fam_grp = group - f->mcgrp_offset; +- +- if (f->mcast_unbind) +- f->mcast_unbind(net, fam_grp); +- break; +- } +- } +- } +- up_read(&cb_lock); +-} +- + static int __net_init genl_pernet_init(struct net *net) + { + struct netlink_kernel_cfg cfg = { + .input = genl_rcv, + .flags = NL_CFG_F_NONROOT_RECV, +- .bind = genl_bind, +- .unbind = genl_unbind, + }; + + /* we'll 
bump the group number right afterwards */ +diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c +index f0ecaec1ff3d..d1a0b7056743 100644 +--- a/net/netrom/nr_timer.c ++++ b/net/netrom/nr_timer.c +@@ -125,11 +125,9 @@ static void nr_heartbeat_expiry(unsigned long param) + is accepted() it isn't 'dead' so doesn't get removed. */ + if (sock_flag(sk, SOCK_DESTROY) || + (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { +- sock_hold(sk); + bh_unlock_sock(sk); + nr_destroy_socket(sk); +- sock_put(sk); +- return; ++ goto out; + } + break; + +@@ -150,6 +148,8 @@ static void nr_heartbeat_expiry(unsigned long param) + + nr_start_heartbeat(sk); + bh_unlock_sock(sk); ++out: ++ sock_put(sk); + } + + static void nr_t2timer_expiry(unsigned long param) +@@ -163,6 +163,7 @@ static void nr_t2timer_expiry(unsigned long param) + nr_enquiry_response(sk); + } + bh_unlock_sock(sk); ++ sock_put(sk); + } + + static void nr_t4timer_expiry(unsigned long param) +@@ -172,6 +173,7 @@ static void nr_t4timer_expiry(unsigned long param) + bh_lock_sock(sk); + nr_sk(sk)->condition &= ~NR_COND_PEER_RX_BUSY; + bh_unlock_sock(sk); ++ sock_put(sk); + } + + static void nr_idletimer_expiry(unsigned long param) +@@ -200,6 +202,7 @@ static void nr_idletimer_expiry(unsigned long param) + sock_set_flag(sk, SOCK_DEAD); + } + bh_unlock_sock(sk); ++ sock_put(sk); + } + + static void nr_t1timer_expiry(unsigned long param) +@@ -212,8 +215,7 @@ static void nr_t1timer_expiry(unsigned long param) + case NR_STATE_1: + if (nr->n2count == nr->n2) { + nr_disconnect(sk, ETIMEDOUT); +- bh_unlock_sock(sk); +- return; ++ goto out; + } else { + nr->n2count++; + nr_write_internal(sk, NR_CONNREQ); +@@ -223,8 +225,7 @@ static void nr_t1timer_expiry(unsigned long param) + case NR_STATE_2: + if (nr->n2count == nr->n2) { + nr_disconnect(sk, ETIMEDOUT); +- bh_unlock_sock(sk); +- return; ++ goto out; + } else { + nr->n2count++; + nr_write_internal(sk, NR_DISCREQ); +@@ -234,8 +235,7 @@ static void nr_t1timer_expiry(unsigned long param) + case NR_STATE_3: + if (nr->n2count == nr->n2) { + nr_disconnect(sk, ETIMEDOUT); +- bh_unlock_sock(sk); +- return; ++ goto out; + } else { + nr->n2count++; + nr_requeue_frames(sk); +@@ -244,5 +244,7 @@ static void nr_t1timer_expiry(unsigned long param) + } + + nr_start_t1timer(sk); ++out: + bh_unlock_sock(sk); ++ sock_put(sk); + } +diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c +index f864ce19e13d..582f97d035ef 100644 +--- a/net/nfc/digital_dep.c ++++ b/net/nfc/digital_dep.c +@@ -1289,6 +1289,8 @@ static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg, + } + + rc = nfc_tm_data_received(ddev->nfc_dev, resp); ++ if (rc) ++ resp = NULL; + + exit: + kfree_skb(ddev->chaining_skb); +diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c +index dd59fde1dac8..92c6fbfd51f7 100644 +--- a/net/nfc/llcp_sock.c ++++ b/net/nfc/llcp_sock.c +@@ -119,13 +119,19 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) + llcp_sock->service_name_len, + GFP_KERNEL); + if (!llcp_sock->service_name) { ++ nfc_llcp_local_put(llcp_sock->local); ++ llcp_sock->local = NULL; ++ llcp_sock->dev = NULL; + ret = -ENOMEM; + goto put_dev; + } + llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock); + if (llcp_sock->ssap == LLCP_SAP_MAX) { ++ nfc_llcp_local_put(llcp_sock->local); ++ llcp_sock->local = NULL; + kfree(llcp_sock->service_name); + llcp_sock->service_name = NULL; ++ llcp_sock->dev = NULL; + ret = -EADDRINUSE; + goto put_dev; + } +@@ -683,6 +689,10 @@ static int 
llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, + ret = -EISCONN; + goto error; + } ++ if (sk->sk_state == LLCP_CONNECTING) { ++ ret = -EINPROGRESS; ++ goto error; ++ } + + dev = nfc_get_device(addr->dev_idx); + if (dev == NULL) { +@@ -714,6 +724,8 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, + llcp_sock->local = nfc_llcp_local_get(local); + llcp_sock->ssap = nfc_llcp_get_local_ssap(local); + if (llcp_sock->ssap == LLCP_SAP_MAX) { ++ nfc_llcp_local_put(llcp_sock->local); ++ llcp_sock->local = NULL; + ret = -ENOMEM; + goto put_dev; + } +@@ -751,8 +763,12 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, + + sock_unlink: + nfc_llcp_put_ssap(local, llcp_sock->ssap); ++ nfc_llcp_local_put(llcp_sock->local); ++ llcp_sock->local = NULL; + + nfc_llcp_sock_unlink(&local->connecting_sockets, sk); ++ kfree(llcp_sock->service_name); ++ llcp_sock->service_name = NULL; + + put_dev: + nfc_put_device(dev); +diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c +index 85a3d9ed4c29..bff6ba84d397 100644 +--- a/net/nfc/nci/core.c ++++ b/net/nfc/nci/core.c +@@ -1188,6 +1188,7 @@ EXPORT_SYMBOL(nci_allocate_device); + void nci_free_device(struct nci_dev *ndev) + { + nfc_free_device(ndev->nfc_dev); ++ nci_hci_deallocate(ndev); + kfree(ndev); + } + EXPORT_SYMBOL(nci_free_device); +diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c +index a0ab26d535dc..5fae3f064ad0 100644 +--- a/net/nfc/nci/hci.c ++++ b/net/nfc/nci/hci.c +@@ -798,3 +798,8 @@ struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev) + + return hdev; + } ++ ++void nci_hci_deallocate(struct nci_dev *ndev) ++{ ++ kfree(ndev->hci_dev); ++} +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c +index e79a49fe61e8..f326a6ea35fc 100644 +--- a/net/nfc/netlink.c ++++ b/net/nfc/netlink.c +@@ -887,6 +887,7 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info) + + if (!dev->polling) { + device_unlock(&dev->dev); ++ nfc_put_device(dev); + return -EINVAL; + } + +@@ -1227,7 +1228,7 @@ static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info) + u32 idx; + char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1]; + +- if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) ++ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || !info->attrs[NFC_ATTR_FIRMWARE_NAME]) + return -EINVAL; + + idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); +diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c +index e386e6c90b17..2fba626a0125 100644 +--- a/net/nfc/rawsock.c ++++ b/net/nfc/rawsock.c +@@ -117,7 +117,7 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, + if (addr->target_idx > dev->target_next_idx - 1 || + addr->target_idx < dev->target_next_idx - dev->n_targets) { + rc = -EINVAL; +- goto error; ++ goto put_dev; + } + + rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol); +@@ -344,10 +344,13 @@ static int rawsock_create(struct net *net, struct socket *sock, + if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW)) + return -ESOCKTNOSUPPORT; + +- if (sock->type == SOCK_RAW) ++ if (sock->type == SOCK_RAW) { ++ if (!ns_capable(net->user_ns, CAP_NET_RAW)) ++ return -EPERM; + sock->ops = &rawsock_raw_ops; +- else ++ } else { + sock->ops = &rawsock_ops; ++ } + + sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern); + if (!sk) +diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c +index 7135aff3946d..5fa7b2569a3a 100644 +--- a/net/openvswitch/actions.c ++++ b/net/openvswitch/actions.c +@@ -710,16 +710,16 @@ static void 
ovs_fragment(struct net *net, struct vport *vport, + } + + if (ethertype == htons(ETH_P_IP)) { +- struct dst_entry ovs_dst; ++ struct rtable ovs_rt = { 0 }; + unsigned long orig_dst; + + prepare_frag(vport, skb); +- dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1, ++ dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1, + DST_OBSOLETE_NONE, DST_NOCOUNT); +- ovs_dst.dev = vport->dev; ++ ovs_rt.dst.dev = vport->dev; + + orig_dst = skb->_skb_refdst; +- skb_dst_set_noref(skb, &ovs_dst); ++ skb_dst_set_noref(skb, &ovs_rt.dst); + IPCB(skb)->frag_max_size = mru; + + ip_do_fragment(net, skb->sk, skb, ovs_vport_output); +diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c +index beb2897d8ddf..cff76ffcaac2 100644 +--- a/net/openvswitch/conntrack.c ++++ b/net/openvswitch/conntrack.c +@@ -709,15 +709,19 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key, + } + err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype); + +- if (err == NF_ACCEPT && +- ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) { +- if (maniptype == NF_NAT_MANIP_SRC) +- maniptype = NF_NAT_MANIP_DST; +- else +- maniptype = NF_NAT_MANIP_SRC; +- +- err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, +- maniptype); ++ if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) { ++ if (ct->status & IPS_SRC_NAT) { ++ if (maniptype == NF_NAT_MANIP_SRC) ++ maniptype = NF_NAT_MANIP_DST; ++ else ++ maniptype = NF_NAT_MANIP_SRC; ++ ++ err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, ++ maniptype); ++ } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { ++ err = ovs_ct_nat_execute(skb, ct, ctinfo, NULL, ++ NF_NAT_MANIP_SRC); ++ } + } + + /* Mark NAT done if successful and update the flow key. */ +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c +index 802fe548d621..3f23f021ff89 100644 +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c +@@ -2161,7 +2161,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, + int skb_len = skb->len; + unsigned int snaplen, res; + unsigned long status = TP_STATUS_USER; +- unsigned short macoff, netoff, hdrlen; ++ unsigned short macoff, hdrlen; ++ unsigned int netoff; + struct sk_buff *copy_skb = NULL; + struct timespec ts; + __u32 ts_status; +@@ -2223,6 +2224,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, + } + macoff = netoff - maclen; + } ++ if (netoff > USHRT_MAX) { ++ spin_lock(&sk->sk_receive_queue.lock); ++ po->stats.stats1.tp_drops++; ++ spin_unlock(&sk->sk_receive_queue.lock); ++ goto drop_n_restore; ++ } + if (po->tp_version <= TPACKET_V2) { + if (macoff + snaplen > po->rx_ring.frame_size) { + if (po->copy_thresh && +@@ -2667,7 +2674,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) + } + if (likely(saddr == NULL)) { + dev = packet_cached_dev_get(po); +- proto = po->num; ++ proto = READ_ONCE(po->num); + } else { + err = -EINVAL; + if (msg->msg_namelen < sizeof(struct sockaddr_ll)) +@@ -2879,7 +2886,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) + + if (likely(saddr == NULL)) { + dev = packet_cached_dev_get(po); +- proto = po->num; ++ proto = READ_ONCE(po->num); + } else { + err = -EINVAL; + if (msg->msg_namelen < sizeof(struct sockaddr_ll)) +@@ -3150,7 +3157,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex, + /* prevents packet_notifier() from calling + * register_prot_hook() + */ +- po->num = 0; ++ WRITE_ONCE(po->num, 0); + __unregister_prot_hook(sk, true); + rcu_read_lock(); + dev_curr = po->prot_hook.dev; +@@ -3160,17 
+3167,17 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex, + } + + BUG_ON(po->running); +- po->num = proto; ++ WRITE_ONCE(po->num, proto); + po->prot_hook.type = proto; + + if (unlikely(unlisted)) { + dev_put(dev); + po->prot_hook.dev = NULL; +- po->ifindex = -1; ++ WRITE_ONCE(po->ifindex, -1); + packet_cached_dev_reset(po); + } else { + po->prot_hook.dev = dev; +- po->ifindex = dev ? dev->ifindex : 0; ++ WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0); + packet_cached_dev_assign(po, dev); + } + } +@@ -3485,7 +3492,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, + uaddr->sa_family = AF_PACKET; + memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); + rcu_read_lock(); +- dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); ++ dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex)); + if (dev) + strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); + rcu_read_unlock(); +@@ -3501,16 +3508,18 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr, + struct sock *sk = sock->sk; + struct packet_sock *po = pkt_sk(sk); + DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr); ++ int ifindex; + + if (peer) + return -EOPNOTSUPP; + ++ ifindex = READ_ONCE(po->ifindex); + sll->sll_family = AF_PACKET; +- sll->sll_ifindex = po->ifindex; +- sll->sll_protocol = po->num; ++ sll->sll_ifindex = ifindex; ++ sll->sll_protocol = READ_ONCE(po->num); + sll->sll_pkttype = 0; + rcu_read_lock(); +- dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); ++ dev = dev_get_by_index_rcu(sock_net(sk), ifindex); + if (dev) { + sll->sll_hatype = dev->type; + sll->sll_halen = dev->addr_len; +@@ -4090,7 +4099,7 @@ static int packet_notifier(struct notifier_block *this, + } + if (msg == NETDEV_UNREGISTER) { + packet_cached_dev_reset(po); +- po->ifindex = -1; ++ WRITE_ONCE(po->ifindex, -1); + if (po->prot_hook.dev) + dev_put(po->prot_hook.dev); + po->prot_hook.dev = NULL; +@@ -4393,7 +4402,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, + was_running = po->running; + num = po->num; + if (was_running) { +- po->num = 0; ++ WRITE_ONCE(po->num, 0); + __unregister_prot_hook(sk, false); + } + spin_unlock(&po->bind_lock); +@@ -4426,7 +4435,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, + + spin_lock(&po->bind_lock); + if (was_running) { +- po->num = num; ++ WRITE_ONCE(po->num, num); + register_prot_hook(sk); + } + spin_unlock(&po->bind_lock); +@@ -4595,8 +4604,8 @@ static int packet_seq_show(struct seq_file *seq, void *v) + s, + atomic_read(&s->sk_refcnt), + s->sk_type, +- ntohs(po->num), +- po->ifindex, ++ ntohs(READ_ONCE(po->num)), ++ READ_ONCE(po->ifindex), + po->running, + atomic_read(&s->sk_rmem_alloc), + from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)), +diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c +index c72d59ebb906..a253f7674d1a 100644 +--- a/net/qrtr/qrtr.c ++++ b/net/qrtr/qrtr.c +@@ -218,7 +218,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) + if (dst != QRTR_PORT_CTRL && type != QRTR_TYPE_DATA) + return -EINVAL; + +- skb = netdev_alloc_skb(NULL, len); ++ skb = __netdev_alloc_skb(NULL, len, GFP_ATOMIC | __GFP_NOWARN); + if (!skb) + return -ENOMEM; + +@@ -714,6 +714,11 @@ static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg, + rc = copied; + + if (addr) { ++ /* There is an anonymous 2-byte hole after sq_family, ++ * make sure to clear it. 
++ */ ++ memset(addr, 0, sizeof(*addr)); ++ + addr->sq_family = AF_QIPCRTR; + addr->sq_node = le32_to_cpu(phdr->src_node_id); + addr->sq_port = le32_to_cpu(phdr->src_port_id); +diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c +index 3d9c4c6397c3..20d045faf07c 100644 +--- a/net/rds/ib_frmr.c ++++ b/net/rds/ib_frmr.c +@@ -112,9 +112,9 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr) + cpu_relax(); + } + +- ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len, ++ ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len, + &off, PAGE_SIZE); +- if (unlikely(ret != ibmr->sg_len)) ++ if (unlikely(ret != ibmr->sg_dma_len)) + return ret < 0 ? ret : -EINVAL; + + /* Perform a WR for the fast_reg_mr. Each individual page +diff --git a/net/rds/recv.c b/net/rds/recv.c +index f16ee1b13b8d..4bd307e31b40 100644 +--- a/net/rds/recv.c ++++ b/net/rds/recv.c +@@ -405,12 +405,13 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc, + int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr) + { + struct rds_notifier *notifier; +- struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */ ++ struct rds_rdma_notify cmsg; + unsigned int count = 0, max_messages = ~0U; + unsigned long flags; + LIST_HEAD(copy); + int err = 0; + ++ memset(&cmsg, 0, sizeof(cmsg)); /* fill holes with zero */ + + /* put_cmsg copies to user space and thus may sleep. We can't do this + * with rs_lock held, so first grab as many notifications as we can stuff +@@ -595,7 +596,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, + + if (rds_cmsg_recv(inc, msg, rs)) { + ret = -EFAULT; +- goto out; ++ break; + } + + rds_stats_inc(s_recv_delivered); +diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c +index 344456206b70..0f371e50d9c4 100644 +--- a/net/rose/rose_loopback.c ++++ b/net/rose/rose_loopback.c +@@ -99,10 +99,19 @@ static void rose_loopback_timer(unsigned long param) + } + + if (frametype == ROSE_CALL_REQUEST) { +- if ((dev = rose_dev_get(dest)) != NULL) { +- if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) +- kfree_skb(skb); +- } else { ++ if (!rose_loopback_neigh->dev) { ++ kfree_skb(skb); ++ continue; ++ } ++ ++ dev = rose_dev_get(dest); ++ if (!dev) { ++ kfree_skb(skb); ++ continue; ++ } ++ ++ if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) { ++ dev_put(dev); + kfree_skb(skb); + } + } else { +diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c +index 832d854c2d5c..01ad588144e3 100644 +--- a/net/rxrpc/call_accept.c ++++ b/net/rxrpc/call_accept.c +@@ -26,6 +26,11 @@ + #include + #include "ar-internal.h" + ++static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call, ++ unsigned long user_call_ID) ++{ ++} ++ + /* + * Preallocate a single service call, connection and peer and, if possible, + * give them a user ID and attach the user's side of the ID to them. 
+@@ -224,6 +229,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx) + if (rx->discard_new_call) { + _debug("discard %lx", call->user_call_ID); + rx->discard_new_call(call, call->user_call_ID); ++ if (call->notify_rx) ++ call->notify_rx = rxrpc_dummy_notify; + rxrpc_put_call(call, rxrpc_call_put_kernel); + } + rxrpc_call_completed(call); +diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c +index b099b64366f3..ec02dd7c12ef 100644 +--- a/net/rxrpc/conn_event.c ++++ b/net/rxrpc/conn_event.c +@@ -309,18 +309,18 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, + return ret; + + spin_lock(&conn->channel_lock); +- spin_lock(&conn->state_lock); ++ spin_lock_bh(&conn->state_lock); + + if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) { + conn->state = RXRPC_CONN_SERVICE; +- spin_unlock(&conn->state_lock); ++ spin_unlock_bh(&conn->state_lock); + for (loop = 0; loop < RXRPC_MAXCALLS; loop++) + rxrpc_call_is_secure( + rcu_dereference_protected( + conn->channels[loop].call, + lockdep_is_held(&conn->channel_lock))); + } else { +- spin_unlock(&conn->state_lock); ++ spin_unlock_bh(&conn->state_lock); + } + + spin_unlock(&conn->channel_lock); +diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c +index 7fc340726d03..16efd04d4a80 100644 +--- a/net/rxrpc/key.c ++++ b/net/rxrpc/key.c +@@ -899,7 +899,7 @@ int rxrpc_request_key(struct rxrpc_sock *rx, char __user *optval, int optlen) + + _enter(""); + +- if (optlen <= 0 || optlen > PAGE_SIZE - 1) ++ if (optlen <= 0 || optlen > PAGE_SIZE - 1 || rx->securities) + return -EINVAL; + + description = memdup_user_nul(optval, optlen); +@@ -1104,8 +1104,9 @@ static long rxrpc_read(const struct key *key, + break; + + default: /* we have a ticket we can't encode */ +- BUG(); +- continue; ++ pr_err("Unsupported key token type (%u)\n", ++ token->security_index); ++ return -ENOPKG; + } + + _debug("token[%u]: toksize=%u", ntoks, toksize); +@@ -1139,6 +1140,14 @@ static long rxrpc_read(const struct key *key, + goto fault; \ + xdr += (_l + 3) >> 2; \ + } while(0) ++#define ENCODE_BYTES(l, s) \ ++ do { \ ++ u32 _l = (l); \ ++ memcpy(xdr, (s), _l); \ ++ if (_l & 3) \ ++ memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \ ++ xdr += (_l + 3) >> 2; \ ++ } while(0) + #define ENCODE64(x) \ + do { \ + __be64 y = cpu_to_be64(x); \ +@@ -1167,7 +1176,7 @@ static long rxrpc_read(const struct key *key, + case RXRPC_SECURITY_RXKAD: + ENCODE(token->kad->vice_id); + ENCODE(token->kad->kvno); +- ENCODE_DATA(8, token->kad->session_key); ++ ENCODE_BYTES(8, token->kad->session_key); + ENCODE(token->kad->start); + ENCODE(token->kad->expiry); + ENCODE(token->kad->primary_flag); +@@ -1217,8 +1226,9 @@ static long rxrpc_read(const struct key *key, + break; + + default: +- BUG(); +- break; ++ pr_err("Unsupported key token type (%u)\n", ++ token->security_index); ++ return -ENOPKG; + } + + ASSERTCMP((unsigned long)xdr - (unsigned long)oldxdr, ==, +diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c +index 72de69175476..702ebcfa18dc 100644 +--- a/net/rxrpc/recvmsg.c ++++ b/net/rxrpc/recvmsg.c +@@ -439,7 +439,7 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + list_empty(&rx->recvmsg_q) && + rx->sk.sk_state != RXRPC_SERVER_LISTENING) { + release_sock(&rx->sk); +- return -ENODATA; ++ return -EAGAIN; + } + + if (list_empty(&rx->recvmsg_q)) { +diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c +index 1de27c39564b..2ec1c29eeba4 100644 +--- a/net/rxrpc/sendmsg.c ++++ b/net/rxrpc/sendmsg.c +@@ -191,7 +191,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, + 
/* this should be in poll */ + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); + +- if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) ++ if (sk->sk_shutdown & SEND_SHUTDOWN) + return -EPIPE; + + more = msg->msg_flags & MSG_MORE; +diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c +index ab66e2b38e66..7aafb402e5c7 100644 +--- a/net/sched/cls_tcindex.c ++++ b/net/sched/cls_tcindex.c +@@ -247,7 +247,7 @@ static int tcindex_alloc_perfect_hash(struct tcindex_data *cp) + int i, err = 0; + + cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result), +- GFP_KERNEL); ++ GFP_KERNEL | __GFP_NOWARN); + if (!cp->perfect) + return -ENOMEM; + +@@ -307,9 +307,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, + if (tb[TCA_TCINDEX_MASK]) + cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]); + +- if (tb[TCA_TCINDEX_SHIFT]) ++ if (tb[TCA_TCINDEX_SHIFT]) { + cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); +- ++ if (cp->shift > 16) { ++ err = -EINVAL; ++ goto errout; ++ } ++ } + if (!cp->hash) { + /* Hash not specified, use perfect hash if the upper limit + * of the hashing index is below the threshold. +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c +index adb623da3cb2..734ec5350e6c 100644 +--- a/net/sched/sch_api.c ++++ b/net/sched/sch_api.c +@@ -393,7 +393,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, + { + struct qdisc_rate_table *rtab; + +- if (tab == NULL || r->rate == 0 || r->cell_log == 0 || ++ if (tab == NULL || r->rate == 0 || ++ r->cell_log == 0 || r->cell_log >= 32 || + nla_len(tab) != TC_RTAB_SIZE) + return NULL; + +@@ -1822,7 +1823,7 @@ static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb, + + static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb, + struct tcmsg *tcm, struct netlink_callback *cb, +- int *t_p, int s_t) ++ int *t_p, int s_t, bool recur) + { + struct Qdisc *q; + int b; +@@ -1833,7 +1834,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb, + if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0) + return -1; + +- if (!qdisc_dev(root)) ++ if (!qdisc_dev(root) || !recur) + return 0; + + hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) { +@@ -1861,13 +1862,13 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) + s_t = cb->args[0]; + t = 0; + +- if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0) ++ if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0) + goto done; + + dev_queue = dev_ingress_queue(dev); + if (dev_queue && + tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, +- &t, s_t) < 0) ++ &t, s_t, false) < 0) + goto done; + + done: +diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c +index d13e1de0b3a2..1283c3bf401a 100644 +--- a/net/sched/sch_choke.c ++++ b/net/sched/sch_choke.c +@@ -409,6 +409,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt) + struct sk_buff **old = NULL; + unsigned int mask; + u32 max_P; ++ u8 *stab; + + if (opt == NULL) + return -EINVAL; +@@ -424,8 +425,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt) + max_P = tb[TCA_CHOKE_MAX_P] ? 
nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0; + + ctl = nla_data(tb[TCA_CHOKE_PARMS]); +- +- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) ++ stab = nla_data(tb[TCA_CHOKE_STAB]); ++ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) + return -EINVAL; + + if (ctl->limit > CHOKE_MAX_QUEUE) +@@ -478,7 +479,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt) + + red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, + ctl->Plog, ctl->Scell_log, +- nla_data(tb[TCA_CHOKE_STAB]), ++ stab, + max_P); + red_set_vars(&q->vars); + +diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c +index 551cf193649e..02ef78d2b3df 100644 +--- a/net/sched/sch_dsmark.c ++++ b/net/sched/sch_dsmark.c +@@ -388,7 +388,8 @@ static void dsmark_reset(struct Qdisc *sch) + struct dsmark_qdisc_data *p = qdisc_priv(sch); + + pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p); +- qdisc_reset(p->q); ++ if (p->q) ++ qdisc_reset(p->q); + sch->qstats.backlog = 0; + sch->q.qlen = 0; + } +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c +index da3a7c923d08..fc39517d4ccd 100644 +--- a/net/sched/sch_generic.c ++++ b/net/sched/sch_generic.c +@@ -342,6 +342,7 @@ void __netdev_watchdog_up(struct net_device *dev) + dev_hold(dev); + } + } ++EXPORT_SYMBOL_GPL(__netdev_watchdog_up); + + static void dev_watchdog_up(struct net_device *dev) + { +diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c +index 729c0e4eca21..745e8fae62b3 100644 +--- a/net/sched/sch_gred.c ++++ b/net/sched/sch_gred.c +@@ -356,7 +356,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp, + struct gred_sched *table = qdisc_priv(sch); + struct gred_sched_data *q = table->tab[dp]; + +- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) ++ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) + return -EINVAL; + + if (!q) { +diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c +index 4610d44f58d3..d6abf5c5a5b8 100644 +--- a/net/sched/sch_red.c ++++ b/net/sched/sch_red.c +@@ -169,6 +169,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) + struct Qdisc *child = NULL; + int err; + u32 max_P; ++ u8 *stab; + + if (opt == NULL) + return -EINVAL; +@@ -184,7 +185,9 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) + max_P = tb[TCA_RED_MAX_P] ? 
nla_get_u32(tb[TCA_RED_MAX_P]) : 0; + + ctl = nla_data(tb[TCA_RED_PARMS]); +- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) ++ stab = nla_data(tb[TCA_RED_STAB]); ++ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ++ ctl->Scell_log, stab)) + return -EINVAL; + + if (ctl->limit > 0) { +@@ -206,7 +209,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) + red_set_parms(&q->parms, + ctl->qth_min, ctl->qth_max, ctl->Wlog, + ctl->Plog, ctl->Scell_log, +- nla_data(tb[TCA_RED_STAB]), ++ stab, + max_P); + red_set_vars(&q->vars); + +diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c +index 633e237a406c..b2598a32b556 100644 +--- a/net/sched/sch_sfq.c ++++ b/net/sched/sch_sfq.c +@@ -645,7 +645,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) + } + + if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max, +- ctl_v1->Wlog)) ++ ctl_v1->Wlog, ctl_v1->Scell_log, NULL)) + return -EINVAL; + if (ctl_v1 && ctl_v1->qth_min) { + p = kmalloc(sizeof(*p), GFP_KERNEL); +diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c +index 2cd9b4478b92..90e440c46803 100644 +--- a/net/sched/sch_teql.c ++++ b/net/sched/sch_teql.c +@@ -138,6 +138,9 @@ teql_destroy(struct Qdisc *sch) + struct teql_sched_data *dat = qdisc_priv(sch); + struct teql_master *master = dat->m; + ++ if (!master) ++ return; ++ + prev = master->slaves; + if (prev) { + do { +diff --git a/net/sctp/associola.c b/net/sctp/associola.c +index 16e120b84118..8c5597d07240 100644 +--- a/net/sctp/associola.c ++++ b/net/sctp/associola.c +@@ -1583,12 +1583,15 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) + int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, + sctp_scope_t scope, gfp_t gfp) + { ++ struct sock *sk = asoc->base.sk; + int flags; + + /* Use scoping rules to determine the subset of addresses from + * the endpoint. + */ +- flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; ++ flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; ++ if (!inet_v6_ipv6only(sk)) ++ flags |= SCTP_ADDR4_ALLOWED; + if (asoc->peer.ipv4_address) + flags |= SCTP_ADDR4_PEERSUPP; + if (asoc->peer.ipv6_address) +diff --git a/net/sctp/auth.c b/net/sctp/auth.c +index f99d4855d3de..6ab11ee3d3a5 100644 +--- a/net/sctp/auth.c ++++ b/net/sctp/auth.c +@@ -494,6 +494,7 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp) + out_err: + /* Clean up any successful allocations */ + sctp_auth_destroy_hmacs(ep->auth_hmacs); ++ ep->auth_hmacs = NULL; + return -ENOMEM; + } + +diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c +index 401c60750b20..dc4335d817d8 100644 +--- a/net/sctp/bind_addr.c ++++ b/net/sctp/bind_addr.c +@@ -451,6 +451,7 @@ static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest, + * well as the remote peer. 
+ */ + if ((((AF_INET == addr->sa.sa_family) && ++ (flags & SCTP_ADDR4_ALLOWED) && + (flags & SCTP_ADDR4_PEERSUPP))) || + (((AF_INET6 == addr->sa.sa_family) && + (flags & SCTP_ADDR6_ALLOWED) && +diff --git a/net/sctp/input.c b/net/sctp/input.c +index 969fb1623e4e..8f4574c4aa6c 100644 +--- a/net/sctp/input.c ++++ b/net/sctp/input.c +@@ -449,7 +449,7 @@ void sctp_icmp_proto_unreachable(struct sock *sk, + else { + if (!mod_timer(&t->proto_unreach_timer, + jiffies + (HZ/20))) +- sctp_association_hold(asoc); ++ sctp_transport_hold(t); + } + } else { + struct net *net = sock_net(sk); +@@ -458,7 +458,7 @@ void sctp_icmp_proto_unreachable(struct sock *sk, + "encountered!\n", __func__); + + if (del_timer(&t->proto_unreach_timer)) +- sctp_association_put(asoc); ++ sctp_transport_put(t); + + sctp_do_sm(net, SCTP_EVENT_T_OTHER, + SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), +@@ -1165,7 +1165,7 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net, + + ch = (sctp_chunkhdr_t *) ch_end; + chunk_num++; +- } while (ch_end < skb_tail_pointer(skb)); ++ } while (ch_end + sizeof(*ch) < skb_tail_pointer(skb)); + + return asoc; + } +diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c +index c5a2a538279b..b1932fd125da 100644 +--- a/net/sctp/protocol.c ++++ b/net/sctp/protocol.c +@@ -210,6 +210,7 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp, + * sock as well as the remote peer. + */ + if ((((AF_INET == addr->a.sa.sa_family) && ++ (copy_flags & SCTP_ADDR4_ALLOWED) && + (copy_flags & SCTP_ADDR4_PEERSUPP))) || + (((AF_INET6 == addr->a.sa.sa_family) && + (copy_flags & SCTP_ADDR6_ALLOWED) && +@@ -412,7 +413,8 @@ static sctp_scope_t sctp_v4_scope(union sctp_addr *addr) + retval = SCTP_SCOPE_LINK; + } else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) || + ipv4_is_private_172(addr->v4.sin_addr.s_addr) || +- ipv4_is_private_192(addr->v4.sin_addr.s_addr)) { ++ ipv4_is_private_192(addr->v4.sin_addr.s_addr) || ++ ipv4_is_test_198(addr->v4.sin_addr.s_addr)) { + retval = SCTP_SCOPE_PRIVATE; + } else { + retval = SCTP_SCOPE_GLOBAL; +diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c +index acb0c2631c79..0c5aff3bb539 100644 +--- a/net/sctp/sm_make_chunk.c ++++ b/net/sctp/sm_make_chunk.c +@@ -3129,7 +3129,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, + * primary. + */ + if (af->is_any(&addr)) +- memcpy(&addr.v4, sctp_source(asconf), sizeof(addr)); ++ memcpy(&addr, sctp_source(asconf), sizeof(addr)); + + peer = sctp_assoc_lookup_paddr(asoc, &addr); + if (!peer) +diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c +index 1133fa0830f4..26e2927e0c6e 100644 +--- a/net/sctp/sm_sideeffect.c ++++ b/net/sctp/sm_sideeffect.c +@@ -417,7 +417,7 @@ void sctp_generate_proto_unreach_event(unsigned long data) + /* Try again later. 
*/ + if (!mod_timer(&transport->proto_unreach_timer, + jiffies + (HZ/20))) +- sctp_association_hold(asoc); ++ sctp_transport_hold(transport); + goto out_unlock; + } + +@@ -433,7 +433,7 @@ void sctp_generate_proto_unreach_event(unsigned long data) + + out_unlock: + bh_unlock_sock(sk); +- sctp_association_put(asoc); ++ sctp_transport_put(transport); + } + + +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c +index 146b568962e0..9045f6bcb34c 100644 +--- a/net/sctp/sm_statefuns.c ++++ b/net/sctp/sm_statefuns.c +@@ -1851,7 +1851,8 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(struct net *net, + sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, + SCTP_STATE(SCTP_STATE_ESTABLISHED)); +- SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); ++ if (asoc->state < SCTP_STATE_ESTABLISHED) ++ SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); + sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); + + repl = sctp_make_cookie_ack(new_asoc, chunk); +diff --git a/net/sctp/socket.c b/net/sctp/socket.c +index 95f39dde1e08..09a99c92c27d 100644 +--- a/net/sctp/socket.c ++++ b/net/sctp/socket.c +@@ -367,6 +367,18 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, + return af; + } + ++static void sctp_auto_asconf_init(struct sctp_sock *sp) ++{ ++ struct net *net = sock_net(&sp->inet.sk); ++ ++ if (net->sctp.default_auto_asconf) { ++ spin_lock(&net->sctp.addr_wq_lock); ++ list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist); ++ spin_unlock(&net->sctp.addr_wq_lock); ++ sp->do_auto_asconf = 1; ++ } ++} ++ + /* Bind a local address either to an endpoint or to an association. */ + static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) + { +@@ -429,8 +441,10 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) + } + + /* Refresh ephemeral port. */ +- if (!bp->port) ++ if (!bp->port) { + bp->port = inet_sk(sk)->inet_num; ++ sctp_auto_asconf_init(sp); ++ } + + /* Add the address to the bind address list. + * Use GFP_ATOMIC since BHs will be disabled. +@@ -4262,19 +4276,6 @@ static int sctp_init_sock(struct sock *sk) + sk_sockets_allocated_inc(sk); + sock_prot_inuse_add(net, sk->sk_prot, 1); + +- /* Nothing can fail after this block, otherwise +- * sctp_destroy_sock() will be called without addr_wq_lock held +- */ +- if (net->sctp.default_auto_asconf) { +- spin_lock(&sock_net(sk)->sctp.addr_wq_lock); +- list_add_tail(&sp->auto_asconf_list, +- &net->sctp.auto_asconf_splist); +- sp->do_auto_asconf = 1; +- spin_unlock(&sock_net(sk)->sctp.addr_wq_lock); +- } else { +- sp->do_auto_asconf = 0; +- } +- + local_bh_enable(); + + return 0; +@@ -6687,8 +6688,6 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) + + pr_debug("%s: begins, snum:%d\n", __func__, snum); + +- local_bh_disable(); +- + if (snum == 0) { + /* Search for an available port. */ + int low, high, remaining, index; +@@ -6707,20 +6706,21 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) + continue; + index = sctp_phashfn(sock_net(sk), rover); + head = &sctp_port_hashtable[index]; +- spin_lock(&head->lock); ++ spin_lock_bh(&head->lock); + sctp_for_each_hentry(pp, &head->chain) + if ((pp->port == rover) && + net_eq(sock_net(sk), pp->net)) + goto next; + break; + next: +- spin_unlock(&head->lock); ++ spin_unlock_bh(&head->lock); ++ cond_resched(); + } while (--remaining > 0); + + /* Exhausted local port range during search? 
*/ + ret = 1; + if (remaining <= 0) +- goto fail; ++ return ret; + + /* OK, here is the one we will use. HEAD (the port + * hash table list entry) is non-NULL and we hold it's +@@ -6735,7 +6735,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) + * port iterator, pp being NULL. + */ + head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; +- spin_lock(&head->lock); ++ spin_lock_bh(&head->lock); + sctp_for_each_hentry(pp, &head->chain) { + if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) + goto pp_found; +@@ -6819,10 +6819,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) + ret = 0; + + fail_unlock: +- spin_unlock(&head->lock); +- +-fail: +- local_bh_enable(); ++ spin_unlock_bh(&head->lock); + return ret; + } + +@@ -7821,6 +7818,8 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, + sctp_bind_addr_dup(&newsp->ep->base.bind_addr, + &oldsp->ep->base.bind_addr, GFP_KERNEL); + ++ sctp_auto_asconf_init(newsp); ++ + /* Move any messages in the old socket's receive queue that are for the + * peeled off association to the new socket's receive queue. + */ +diff --git a/net/sctp/transport.c b/net/sctp/transport.c +index 5c78942550e9..bace37b086e9 100644 +--- a/net/sctp/transport.c ++++ b/net/sctp/transport.c +@@ -146,7 +146,7 @@ void sctp_transport_free(struct sctp_transport *transport) + + /* Delete the ICMP proto unreachable timer if it's active. */ + if (del_timer(&transport->proto_unreach_timer)) +- sctp_association_put(transport->asoc); ++ sctp_transport_put(transport); + + sctp_transport_put(transport); + } +diff --git a/net/socket.c b/net/socket.c +index 4fac892daba9..c7be7c16e0db 100644 +--- a/net/socket.c ++++ b/net/socket.c +@@ -510,7 +510,7 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) + if (f.file) { + sock = sock_from_file(f.file, err); + if (likely(sock)) { +- *fput_needed = f.flags; ++ *fput_needed = f.flags & FDPUT_FPUT; + return sock; + } + fdput(f); +diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c +index 2e0a6f92e563..7404f02702a1 100644 +--- a/net/sunrpc/addr.c ++++ b/net/sunrpc/addr.c +@@ -81,11 +81,11 @@ static size_t rpc_ntop6(const struct sockaddr *sap, + + rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u", + IPV6_SCOPE_DELIMITER, sin6->sin6_scope_id); +- if (unlikely((size_t)rc > sizeof(scopebuf))) ++ if (unlikely((size_t)rc >= sizeof(scopebuf))) + return 0; + + len += rc; +- if (unlikely(len > buflen)) ++ if (unlikely(len >= buflen)) + return 0; + + strcat(buf, scopebuf); +@@ -184,7 +184,7 @@ static int rpc_parse_scope_id(struct net *net, const char *buf, + scope_id = dev->ifindex; + dev_put(dev); + } else { +- if (kstrtou32(p, 10, &scope_id) == 0) { ++ if (kstrtou32(p, 10, &scope_id) != 0) { + kfree(p); + return 0; + } +diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c +index 591d378d1a18..1d00f49dfe48 100644 +--- a/net/sunrpc/auth_gss/auth_gss.c ++++ b/net/sunrpc/auth_gss/auth_gss.c +@@ -53,6 +53,7 @@ + #include + #include + ++#include "auth_gss_internal.h" + #include "../netns.h" + + static const struct rpc_authops authgss_ops; +@@ -147,35 +148,6 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) + clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); + } + +-static const void * +-simple_get_bytes(const void *p, const void *end, void *res, size_t len) +-{ +- const void *q = (const void *)((const char *)p + len); +- if (unlikely(q > end || q < p)) +- return ERR_PTR(-EFAULT); +- memcpy(res, p, len); +- return q; 
+-} +- +-static inline const void * +-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) +-{ +- const void *q; +- unsigned int len; +- +- p = simple_get_bytes(p, end, &len, sizeof(len)); +- if (IS_ERR(p)) +- return p; +- q = (const void *)((const char *)p + len); +- if (unlikely(q > end || q < p)) +- return ERR_PTR(-EFAULT); +- dest->data = kmemdup(p, len, GFP_NOFS); +- if (unlikely(dest->data == NULL)) +- return ERR_PTR(-ENOMEM); +- dest->len = len; +- return q; +-} +- + static struct gss_cl_ctx * + gss_cred_get_ctx(struct rpc_cred *cred) + { +diff --git a/net/sunrpc/auth_gss/auth_gss_internal.h b/net/sunrpc/auth_gss/auth_gss_internal.h +new file mode 100644 +index 000000000000..f6d9631bd9d0 +--- /dev/null ++++ b/net/sunrpc/auth_gss/auth_gss_internal.h +@@ -0,0 +1,45 @@ ++// SPDX-License-Identifier: BSD-3-Clause ++/* ++ * linux/net/sunrpc/auth_gss/auth_gss_internal.h ++ * ++ * Internal definitions for RPCSEC_GSS client authentication ++ * ++ * Copyright (c) 2000 The Regents of the University of Michigan. ++ * All rights reserved. ++ * ++ */ ++#include ++#include ++#include ++ ++static inline const void * ++simple_get_bytes(const void *p, const void *end, void *res, size_t len) ++{ ++ const void *q = (const void *)((const char *)p + len); ++ if (unlikely(q > end || q < p)) ++ return ERR_PTR(-EFAULT); ++ memcpy(res, p, len); ++ return q; ++} ++ ++static inline const void * ++simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) ++{ ++ const void *q; ++ unsigned int len; ++ ++ p = simple_get_bytes(p, end, &len, sizeof(len)); ++ if (IS_ERR(p)) ++ return p; ++ q = (const void *)((const char *)p + len); ++ if (unlikely(q > end || q < p)) ++ return ERR_PTR(-EFAULT); ++ if (len) { ++ dest->data = kmemdup(p, len, GFP_NOFS); ++ if (unlikely(dest->data == NULL)) ++ return ERR_PTR(-ENOMEM); ++ } else ++ dest->data = NULL; ++ dest->len = len; ++ return q; ++} +diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c +index 60595835317a..ea2f6022b3d5 100644 +--- a/net/sunrpc/auth_gss/gss_krb5_mech.c ++++ b/net/sunrpc/auth_gss/gss_krb5_mech.c +@@ -46,6 +46,8 @@ + #include + #include + ++#include "auth_gss_internal.h" ++ + #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) + # define RPCDBG_FACILITY RPCDBG_AUTH + #endif +@@ -187,35 +189,6 @@ get_gss_krb5_enctype(int etype) + return NULL; + } + +-static const void * +-simple_get_bytes(const void *p, const void *end, void *res, int len) +-{ +- const void *q = (const void *)((const char *)p + len); +- if (unlikely(q > end || q < p)) +- return ERR_PTR(-EFAULT); +- memcpy(res, p, len); +- return q; +-} +- +-static const void * +-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) +-{ +- const void *q; +- unsigned int len; +- +- p = simple_get_bytes(p, end, &len, sizeof(len)); +- if (IS_ERR(p)) +- return p; +- q = (const void *)((const char *)p + len); +- if (unlikely(q > end || q < p)) +- return ERR_PTR(-EFAULT); +- res->data = kmemdup(p, len, GFP_NOFS); +- if (unlikely(res->data == NULL)) +- return ERR_PTR(-ENOMEM); +- res->len = len; +- return q; +-} +- + static inline const void * + get_key(const void *p, const void *end, + struct krb5_ctx *ctx, struct crypto_skcipher **res) +diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c +index 5fec3abbe19b..c7d88f979c56 100644 +--- a/net/sunrpc/auth_gss/gss_mech_switch.c ++++ b/net/sunrpc/auth_gss/gss_mech_switch.c +@@ -61,6 +61,8 @@ gss_mech_free(struct gss_api_mech *gm) + + for (i = 0; i < 
gm->gm_pf_num; i++) { + pf = &gm->gm_pfs[i]; ++ if (pf->domain) ++ auth_domain_put(pf->domain); + kfree(pf->auth_domain_name); + pf->auth_domain_name = NULL; + } +@@ -83,6 +85,7 @@ make_auth_domain_name(char *name) + static int + gss_mech_svc_setup(struct gss_api_mech *gm) + { ++ struct auth_domain *dom; + struct pf_desc *pf; + int i, status; + +@@ -92,10 +95,13 @@ gss_mech_svc_setup(struct gss_api_mech *gm) + status = -ENOMEM; + if (pf->auth_domain_name == NULL) + goto out; +- status = svcauth_gss_register_pseudoflavor(pf->pseudoflavor, +- pf->auth_domain_name); +- if (status) ++ dom = svcauth_gss_register_pseudoflavor( ++ pf->pseudoflavor, pf->auth_domain_name); ++ if (IS_ERR(dom)) { ++ status = PTR_ERR(dom); + goto out; ++ } ++ pf->domain = dom; + } + return 0; + out: +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c +index d7775ca2fbb9..85ad23d9a8a9 100644 +--- a/net/sunrpc/auth_gss/svcauth_gss.c ++++ b/net/sunrpc/auth_gss/svcauth_gss.c +@@ -779,7 +779,7 @@ u32 svcauth_gss_flavor(struct auth_domain *dom) + + EXPORT_SYMBOL_GPL(svcauth_gss_flavor); + +-int ++struct auth_domain * + svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) + { + struct gss_domain *new; +@@ -796,21 +796,23 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) + new->h.flavour = &svcauthops_gss; + new->pseudoflavor = pseudoflavor; + +- stat = 0; + test = auth_domain_lookup(name, &new->h); +- if (test != &new->h) { /* Duplicate registration */ ++ if (test != &new->h) { ++ pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n", ++ name); ++ stat = -EADDRINUSE; + auth_domain_put(test); +- kfree(new->h.name); +- goto out_free_dom; ++ goto out_free_name; + } +- return 0; ++ return test; + ++out_free_name: ++ kfree(new->h.name); + out_free_dom: + kfree(new); + out: +- return stat; ++ return ERR_PTR(stat); + } +- + EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor); + + static inline int +@@ -1703,11 +1705,14 @@ static int + svcauth_gss_release(struct svc_rqst *rqstp) + { + struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data; +- struct rpc_gss_wire_cred *gc = &gsd->clcred; ++ struct rpc_gss_wire_cred *gc; + struct xdr_buf *resbuf = &rqstp->rq_res; + int stat = -EINVAL; + struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id); + ++ if (!gsd) ++ goto out; ++ gc = &gsd->clcred; + if (gc->gc_proc != RPC_GSS_PROC_DATA) + goto out; + /* Release can be called twice, but we only wrap once. */ +@@ -1748,10 +1753,10 @@ svcauth_gss_release(struct svc_rqst *rqstp) + if (rqstp->rq_cred.cr_group_info) + put_group_info(rqstp->rq_cred.cr_group_info); + rqstp->rq_cred.cr_group_info = NULL; +- if (gsd->rsci) ++ if (gsd && gsd->rsci) { + cache_put(&gsd->rsci->h, sn->rsc_cache); +- gsd->rsci = NULL; +- ++ gsd->rsci = NULL; ++ } + return stat; + } + +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c +index de18a463ac96..eef2f732fbe3 100644 +--- a/net/sunrpc/clnt.c ++++ b/net/sunrpc/clnt.c +@@ -1936,6 +1936,14 @@ call_connect_status(struct rpc_task *task) + task->tk_status = 0; + switch (status) { + case -ECONNREFUSED: ++ /* A positive refusal suggests a rebind is needed. 
*/ ++ if (RPC_IS_SOFTCONN(task)) ++ break; ++ if (clnt->cl_autobind) { ++ rpc_force_rebind(clnt); ++ task->tk_action = call_bind; ++ return; ++ } + case -ECONNRESET: + case -ECONNABORTED: + case -ENETUNREACH: +diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c +index 34f94052c519..137f92bfafac 100644 +--- a/net/sunrpc/rpc_pipe.c ++++ b/net/sunrpc/rpc_pipe.c +@@ -1347,6 +1347,7 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data) + q.len = strlen(gssd_dummy_clnt_dir[0].name); + clnt_dentry = d_hash_and_lookup(gssd_dentry, &q); + if (!clnt_dentry) { ++ __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1); + pipe_dentry = ERR_PTR(-ENOENT); + goto out; + } +diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c +index eafc78e063f1..185441d7a281 100644 +--- a/net/sunrpc/rpcb_clnt.c ++++ b/net/sunrpc/rpcb_clnt.c +@@ -975,8 +975,8 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr, + p = xdr_inline_decode(xdr, len); + if (unlikely(p == NULL)) + goto out_fail; +- dprintk("RPC: %5u RPCB_%s reply: %s\n", req->rq_task->tk_pid, +- req->rq_task->tk_msg.rpc_proc->p_name, (char *)p); ++ dprintk("RPC: %5u RPCB_%s reply: %*pE\n", req->rq_task->tk_pid, ++ req->rq_task->tk_msg.rpc_proc->p_name, len, (char *)p); + + if (rpc_uaddr2sockaddr(req->rq_xprt->xprt_net, (char *)p, len, + sap, sizeof(address)) == 0) +diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c +index 0ef65822fdd3..00d95fefdc6f 100644 +--- a/net/sunrpc/sched.c ++++ b/net/sunrpc/sched.c +@@ -486,11 +486,21 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q + struct list_head *q; + struct rpc_task *task; + ++ /* ++ * Service the privileged queue. ++ */ ++ q = &queue->tasks[RPC_NR_PRIORITY - 1]; ++ if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) { ++ task = list_first_entry(q, struct rpc_task, u.tk_wait.list); ++ goto out; ++ } ++ + /* + * Service a batch of tasks from a single owner. 
+ */ + q = &queue->tasks[queue->priority]; +- if (!list_empty(q) && --queue->nr) { ++ if (!list_empty(q) && queue->nr) { ++ queue->nr--; + task = list_first_entry(q, struct rpc_task, u.tk_wait.list); + goto out; + } +diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c +index f984ff25cdb2..51ffe70eb34b 100644 +--- a/net/sunrpc/svc.c ++++ b/net/sunrpc/svc.c +@@ -1306,7 +1306,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) + + sendit: + if (svc_authorise(rqstp)) +- goto close; ++ goto close_xprt; + return 1; /* Caller can now send it */ + + dropit: +@@ -1315,6 +1315,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) + return 0; + + close: ++ svc_authorise(rqstp); ++close_xprt: + if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags)) + svc_close_xprt(rqstp->rq_xprt); + dprintk("svc: svc_process close\n"); +@@ -1323,7 +1325,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) + err_short_len: + svc_printk(rqstp, "short len %Zd, dropping request\n", + argv->iov_len); +- goto close; ++ goto close_xprt; + + err_bad_rpc: + serv->sv_stats->rpcbadfmt++; +diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c +index 42ce3ed21637..7d366f6bef86 100644 +--- a/net/sunrpc/svc_xprt.c ++++ b/net/sunrpc/svc_xprt.c +@@ -103,8 +103,17 @@ void svc_unreg_xprt_class(struct svc_xprt_class *xcl) + } + EXPORT_SYMBOL_GPL(svc_unreg_xprt_class); + +-/* +- * Format the transport list for printing ++/** ++ * svc_print_xprts - Format the transport list for printing ++ * @buf: target buffer for formatted address ++ * @maxlen: length of target buffer ++ * ++ * Fills in @buf with a string containing a list of transport names, each name ++ * terminated with '\n'. If the buffer is too small, some entries may be ++ * missing, but it is guaranteed that all lines in the output buffer are ++ * complete. ++ * ++ * Returns positive length of the filled-in string. 
+ */ + int svc_print_xprts(char *buf, int maxlen) + { +@@ -117,9 +126,9 @@ int svc_print_xprts(char *buf, int maxlen) + list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) { + int slen; + +- sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload); +- slen = strlen(tmpstr); +- if (len + slen > maxlen) ++ slen = snprintf(tmpstr, sizeof(tmpstr), "%s %d\n", ++ xcl->xcl_name, xcl->xcl_max_payload); ++ if (slen >= sizeof(tmpstr) || len + slen >= maxlen) + break; + len += slen; + strcat(buf, tmpstr); +@@ -1095,7 +1104,7 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st + struct svc_xprt *xprt; + int ret = 0; + +- spin_lock(&serv->sv_lock); ++ spin_lock_bh(&serv->sv_lock); + list_for_each_entry(xprt, xprt_list, xpt_list) { + if (xprt->xpt_net != net) + continue; +@@ -1103,7 +1112,7 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st + set_bit(XPT_CLOSE, &xprt->xpt_flags); + svc_xprt_enqueue(xprt); + } +- spin_unlock(&serv->sv_lock); ++ spin_unlock_bh(&serv->sv_lock); + return ret; + } + +diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c +index 69846c6574ef..dd547edee59f 100644 +--- a/net/sunrpc/xdr.c ++++ b/net/sunrpc/xdr.c +@@ -1036,6 +1036,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, + base = 0; + } else { + base -= buf->head[0].iov_len; ++ subbuf->head[0].iov_base = buf->head[0].iov_base; + subbuf->head[0].iov_len = 0; + } + +@@ -1048,6 +1049,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, + base = 0; + } else { + base -= buf->page_len; ++ subbuf->pages = buf->pages; ++ subbuf->page_base = 0; + subbuf->page_len = 0; + } + +@@ -1059,6 +1062,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, + base = 0; + } else { + base -= buf->tail[0].iov_len; ++ subbuf->tail[0].iov_base = buf->tail[0].iov_base; + subbuf->tail[0].iov_len = 0; + } + +diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c +index 1a8df242d26a..9491fc81d50a 100644 +--- a/net/sunrpc/xprt.c ++++ b/net/sunrpc/xprt.c +@@ -143,31 +143,64 @@ int xprt_unregister_transport(struct xprt_class *transport) + } + EXPORT_SYMBOL_GPL(xprt_unregister_transport); + ++static void ++xprt_class_release(const struct xprt_class *t) ++{ ++ module_put(t->owner); ++} ++ ++static const struct xprt_class * ++xprt_class_find_by_netid_locked(const char *netid) ++{ ++ const struct xprt_class *t; ++ unsigned int i; ++ ++ list_for_each_entry(t, &xprt_list, list) { ++ for (i = 0; t->netid[i][0] != '\0'; i++) { ++ if (strcmp(t->netid[i], netid) != 0) ++ continue; ++ if (!try_module_get(t->owner)) ++ continue; ++ return t; ++ } ++ } ++ return NULL; ++} ++ ++static const struct xprt_class * ++xprt_class_find_by_netid(const char *netid) ++{ ++ const struct xprt_class *t; ++ ++ spin_lock(&xprt_list_lock); ++ t = xprt_class_find_by_netid_locked(netid); ++ if (!t) { ++ spin_unlock(&xprt_list_lock); ++ request_module("rpc%s", netid); ++ spin_lock(&xprt_list_lock); ++ t = xprt_class_find_by_netid_locked(netid); ++ } ++ spin_unlock(&xprt_list_lock); ++ return t; ++} ++ + /** + * xprt_load_transport - load a transport implementation +- * @transport_name: transport to load ++ * @netid: transport to load + * + * Returns: + * 0: transport successfully loaded + * -ENOENT: transport module not available + */ +-int xprt_load_transport(const char *transport_name) ++int xprt_load_transport(const char *netid) + { +- struct xprt_class *t; +- int result; ++ const struct xprt_class *t; + +- result = 0; +- spin_lock(&xprt_list_lock); +- 
list_for_each_entry(t, &xprt_list, list) { +- if (strcmp(t->name, transport_name) == 0) { +- spin_unlock(&xprt_list_lock); +- goto out; +- } +- } +- spin_unlock(&xprt_list_lock); +- result = request_module("xprt%s", transport_name); +-out: +- return result; ++ t = xprt_class_find_by_netid(netid); ++ if (!t) ++ return -ENOENT; ++ xprt_class_release(t); ++ return 0; + } + EXPORT_SYMBOL_GPL(xprt_load_transport); + +diff --git a/net/sunrpc/xprtrdma/module.c b/net/sunrpc/xprtrdma/module.c +index 560712bd9fa2..dd227de31a58 100644 +--- a/net/sunrpc/xprtrdma/module.c ++++ b/net/sunrpc/xprtrdma/module.c +@@ -19,6 +19,7 @@ MODULE_DESCRIPTION("RPC/RDMA Transport"); + MODULE_LICENSE("Dual BSD/GPL"); + MODULE_ALIAS("svcrdma"); + MODULE_ALIAS("xprtrdma"); ++MODULE_ALIAS("rpcrdma6"); + + static void __exit rpc_rdma_cleanup(void) + { +diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +index 6035c5a380a6..791072cd9492 100644 +--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c ++++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +@@ -277,6 +277,7 @@ xprt_rdma_bc_put(struct rpc_xprt *xprt) + { + dprintk("svcrdma: %s: xprt %p\n", __func__, xprt); + ++ xprt_rdma_free_addresses(xprt); + xprt_free(xprt); + module_put(THIS_MODULE); + } +@@ -327,9 +328,9 @@ xprt_setup_rdma_bc(struct xprt_create *args) + xprt->timeout = &xprt_rdma_bc_timeout; + xprt_set_bound(xprt); + xprt_set_connected(xprt); +- xprt->bind_timeout = RPCRDMA_BIND_TO; +- xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO; +- xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO; ++ xprt->bind_timeout = 0; ++ xprt->reestablish_timeout = 0; ++ xprt->idle_timeout = 0; + + xprt->prot = XPRT_TRANSPORT_BC_RDMA; + xprt->tsh_size = RPCRDMA_HDRLEN_MIN / sizeof(__be32); +diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c +index fa324fe73946..3ea3bb64b6d5 100644 +--- a/net/sunrpc/xprtrdma/transport.c ++++ b/net/sunrpc/xprtrdma/transport.c +@@ -777,6 +777,7 @@ static struct xprt_class xprt_rdma = { + .owner = THIS_MODULE, + .ident = XPRT_TRANSPORT_RDMA, + .setup = xprt_setup_rdma, ++ .netid = { "rdma", "rdma6", "" }, + }; + + void xprt_rdma_cleanup(void) +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c +index f3f05148922a..bf20ea260638 100644 +--- a/net/sunrpc/xprtsock.c ++++ b/net/sunrpc/xprtsock.c +@@ -3147,6 +3147,7 @@ static struct xprt_class xs_local_transport = { + .owner = THIS_MODULE, + .ident = XPRT_TRANSPORT_LOCAL, + .setup = xs_setup_local, ++ .netid = { "" }, + }; + + static struct xprt_class xs_udp_transport = { +@@ -3155,6 +3156,7 @@ static struct xprt_class xs_udp_transport = { + .owner = THIS_MODULE, + .ident = XPRT_TRANSPORT_UDP, + .setup = xs_setup_udp, ++ .netid = { "udp", "udp6", "" }, + }; + + static struct xprt_class xs_tcp_transport = { +@@ -3163,6 +3165,7 @@ static struct xprt_class xs_tcp_transport = { + .owner = THIS_MODULE, + .ident = XPRT_TRANSPORT_TCP, + .setup = xs_setup_tcp, ++ .netid = { "tcp", "tcp6", "" }, + }; + + static struct xprt_class xs_bc_tcp_transport = { +@@ -3171,6 +3174,7 @@ static struct xprt_class xs_bc_tcp_transport = { + .owner = THIS_MODULE, + .ident = XPRT_TRANSPORT_BC_TCP, + .setup = xs_setup_bc_tcp, ++ .netid = { "" }, + }; + + /** +diff --git a/net/tipc/core.c b/net/tipc/core.c +index 799900c0f2c9..c5d52c9a1acb 100644 +--- a/net/tipc/core.c ++++ b/net/tipc/core.c +@@ -89,6 +89,11 @@ static int __net_init tipc_init_net(struct net *net) + static void __net_exit tipc_exit_net(struct net *net) + { + tipc_net_stop(net); ++ ++ /* Make sure the 
tipc_net_finalize_work stopped ++ * before releasing the resources. ++ */ ++ flush_scheduled_work(); + tipc_bcast_stop(net); + tipc_nametbl_stop(net); + tipc_sk_rht_destroy(net); +diff --git a/net/tipc/link.c b/net/tipc/link.c +index c7406c1fdc14..6fc2fa75503d 100644 +--- a/net/tipc/link.c ++++ b/net/tipc/link.c +@@ -877,9 +877,8 @@ void tipc_link_reset(struct tipc_link *l) + int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, + struct sk_buff_head *xmitq) + { +- struct tipc_msg *hdr = buf_msg(skb_peek(list)); + unsigned int maxwin = l->window; +- unsigned int i, imp = msg_importance(hdr); ++ unsigned int i; + unsigned int mtu = l->mtu; + u16 ack = l->rcv_nxt - 1; + u16 seqno = l->snd_nxt; +@@ -888,7 +887,14 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, + struct sk_buff_head *backlogq = &l->backlogq; + struct sk_buff *skb, *_skb, *bskb; + int pkt_cnt = skb_queue_len(list); ++ struct tipc_msg *hdr; ++ int imp; ++ ++ if (pkt_cnt <= 0) ++ return 0; + ++ hdr = buf_msg(skb_peek(list)); ++ imp = msg_importance(hdr); + /* Match msg importance against this and all higher backlog limits: */ + if (!skb_queue_empty(backlogq)) { + for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) { +@@ -896,6 +902,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, + return link_schedule_user(l, list); + } + } ++ + if (unlikely(msg_size(hdr) > mtu)) { + skb_queue_purge(list); + return -EMSGSIZE; +diff --git a/net/tipc/msg.c b/net/tipc/msg.c +index 912f1fb97c06..c1ab2ff2f6ac 100644 +--- a/net/tipc/msg.c ++++ b/net/tipc/msg.c +@@ -140,18 +140,14 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) + if (fragid == FIRST_FRAGMENT) { + if (unlikely(head)) + goto err; +- if (unlikely(skb_unclone(frag, GFP_ATOMIC))) ++ *buf = NULL; ++ if (skb_has_frag_list(frag) && __skb_linearize(frag)) ++ goto err; ++ frag = skb_unshare(frag, GFP_ATOMIC); ++ if (unlikely(!frag)) + goto err; + head = *headbuf = frag; +- *buf = NULL; + TIPC_SKB_CB(head)->tail = NULL; +- if (skb_is_nonlinear(head)) { +- skb_walk_frags(head, tail) { +- TIPC_SKB_CB(head)->tail = tail; +- } +- } else { +- skb_frag_list_init(head); +- } + return 0; + } + +diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c +index cdc49e680be4..69151de9657c 100644 +--- a/net/tipc/netlink_compat.c ++++ b/net/tipc/netlink_compat.c +@@ -250,8 +250,9 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, + static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, + struct tipc_nl_compat_msg *msg) + { +- int err; ++ struct nlmsghdr *nlh; + struct sk_buff *arg; ++ int err; + + if (msg->req_type && (!msg->req_size || + !TLV_CHECK_TYPE(msg->req, msg->req_type))) +@@ -280,6 +281,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, + return -ENOMEM; + } + ++ nlh = nlmsg_put(arg, 0, 0, tipc_genl_family.id, 0, NLM_F_MULTI); ++ if (!nlh) { ++ kfree_skb(arg); ++ kfree_skb(msg->rep); ++ msg->rep = NULL; ++ return -EMSGSIZE; ++ } ++ nlmsg_end(arg, nlh); ++ + err = __tipc_nl_compat_dumpit(cmd, msg, arg); + if (err) { + kfree_skb(msg->rep); +@@ -652,7 +662,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg, + if (err) + return err; + +- link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]); ++ link_info.dest = htonl(nla_get_flag(link[TIPC_NLA_LINK_DEST])); + link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP])); + nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME], + TIPC_MAX_LINK_NAME); +diff --git a/net/tipc/socket.c 
b/net/tipc/socket.c +index 57df99ca6347..607785077445 100644 +--- a/net/tipc/socket.c ++++ b/net/tipc/socket.c +@@ -741,6 +741,9 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, + spin_lock_bh(&inputq->lock); + if (skb_peek(arrvq) == skb) { + skb_queue_splice_tail_init(&tmpq, inputq); ++ /* Decrease the skb's refcnt as increasing in the ++ * function tipc_skb_peek ++ */ + kfree_skb(__skb_dequeue(arrvq)); + } + spin_unlock_bh(&inputq->lock); +@@ -1982,7 +1985,7 @@ static int tipc_listen(struct socket *sock, int len) + static int tipc_wait_for_accept(struct socket *sock, long timeo) + { + struct sock *sk = sock->sk; +- DEFINE_WAIT(wait); ++ DEFINE_WAIT_FUNC(wait, woken_wake_function); + int err; + + /* True wake-one mechanism for incoming connections: only +@@ -1991,12 +1994,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo) + * anymore, the common case will execute the loop only once. + */ + for (;;) { +- prepare_to_wait_exclusive(sk_sleep(sk), &wait, +- TASK_INTERRUPTIBLE); + if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { ++ add_wait_queue(sk_sleep(sk), &wait); + release_sock(sk); +- timeo = schedule_timeout(timeo); ++ timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); + lock_sock(sk); ++ remove_wait_queue(sk_sleep(sk), &wait); + } + err = 0; + if (!skb_queue_empty(&sk->sk_receive_queue)) +@@ -2011,7 +2014,6 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo) + if (signal_pending(current)) + break; + } +- finish_wait(sk_sleep(sk), &wait); + return err; + } + +diff --git a/net/unix/Kconfig b/net/unix/Kconfig +index 8b31ab85d050..3b9e450656a4 100644 +--- a/net/unix/Kconfig ++++ b/net/unix/Kconfig +@@ -19,6 +19,11 @@ config UNIX + + Say Y unless you know what you are doing. + ++config UNIX_SCM ++ bool ++ depends on UNIX ++ default y ++ + config UNIX_DIAG + tristate "UNIX: socket monitoring interface" + depends on UNIX +diff --git a/net/unix/Makefile b/net/unix/Makefile +index b663c607b1c6..dc686c6757fb 100644 +--- a/net/unix/Makefile ++++ b/net/unix/Makefile +@@ -9,3 +9,5 @@ unix-$(CONFIG_SYSCTL) += sysctl_net_unix.o + + obj-$(CONFIG_UNIX_DIAG) += unix_diag.o + unix_diag-y := diag.o ++ ++obj-$(CONFIG_UNIX_SCM) += scm.o +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index e7012a509035..fa4f39e8ee0c 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -118,6 +118,8 @@ + #include + #include + ++#include "scm.h" ++ + struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE]; + EXPORT_SYMBOL_GPL(unix_socket_table); + DEFINE_SPINLOCK(unix_table_lock); +@@ -191,11 +193,17 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk) + return unix_peer(osk) == NULL || unix_our_peer(sk, osk); + } + +-static inline int unix_recvq_full(struct sock const *sk) ++static inline int unix_recvq_full(const struct sock *sk) + { + return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; + } + ++static inline int unix_recvq_full_lockless(const struct sock *sk) ++{ ++ return skb_queue_len_lockless(&sk->sk_receive_queue) > ++ READ_ONCE(sk->sk_max_ack_backlog); ++} ++ + struct sock *unix_peer_get(struct sock *s) + { + struct sock *peer; +@@ -528,12 +536,14 @@ static void unix_release_sock(struct sock *sk, int embrion) + u->path.mnt = NULL; + state = sk->sk_state; + sk->sk_state = TCP_CLOSE; ++ ++ skpair = unix_peer(sk); ++ unix_peer(sk) = NULL; ++ + unix_state_unlock(sk); + + wake_up_interruptible_all(&u->peer_wait); + +- skpair = unix_peer(sk); +- + if (skpair != NULL) { + if (sk->sk_type == SOCK_STREAM || 
sk->sk_type == SOCK_SEQPACKET) { + unix_state_lock(skpair); +@@ -548,7 +558,6 @@ static void unix_release_sock(struct sock *sk, int embrion) + + unix_dgram_peer_wake_disconnect(sk, skpair); + sock_put(skpair); /* It may now die */ +- unix_peer(sk) = NULL; + } + + /* Try to flush out this socket. Throw out buffers at least */ +@@ -1498,78 +1507,51 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_ + return err; + } + +-static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) +-{ +- int i; +- +- scm->fp = UNIXCB(skb).fp; +- UNIXCB(skb).fp = NULL; +- +- for (i = scm->fp->count-1; i >= 0; i--) +- unix_notinflight(scm->fp->user, scm->fp->fp[i]); +-} +- +-static void unix_destruct_scm(struct sk_buff *skb) +-{ +- struct scm_cookie scm; +- memset(&scm, 0, sizeof(scm)); +- scm.pid = UNIXCB(skb).pid; +- if (UNIXCB(skb).fp) +- unix_detach_fds(&scm, skb); +- +- /* Alas, it calls VFS */ +- /* So fscking what? fput() had been SMP-safe since the last Summer */ +- scm_destroy(&scm); +- sock_wfree(skb); +-} +- +-/* +- * The "user->unix_inflight" variable is protected by the garbage +- * collection lock, and we just read it locklessly here. If you go +- * over the limit, there might be a tiny race in actually noticing +- * it across threads. Tough. +- */ +-static inline bool too_many_unix_fds(struct task_struct *p) ++static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb) + { +- struct user_struct *user = current_user(); +- +- if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE))) +- return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); +- return false; +-} +- +-#define MAX_RECURSION_LEVEL 4 +- +-static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) +-{ +- int i; +- unsigned char max_level = 0; +- +- if (too_many_unix_fds(current)) +- return -ETOOMANYREFS; +- +- for (i = scm->fp->count - 1; i >= 0; i--) { +- struct sock *sk = unix_get_socket(scm->fp->fp[i]); +- +- if (sk) +- max_level = max(max_level, +- unix_sk(sk)->recursion_level); +- } +- if (unlikely(max_level > MAX_RECURSION_LEVEL)) +- return -ETOOMANYREFS; ++ scm->fp = scm_fp_dup(UNIXCB(skb).fp); + + /* +- * Need to duplicate file references for the sake of garbage +- * collection. Otherwise a socket in the fps might become a +- * candidate for GC while the skb is not yet queued. ++ * Garbage collection of unix sockets starts by selecting a set of ++ * candidate sockets which have reference only from being in flight ++ * (total_refs == inflight_refs). This condition is checked once during ++ * the candidate collection phase, and candidates are marked as such, so ++ * that non-candidates can later be ignored. While inflight_refs is ++ * protected by unix_gc_lock, total_refs (file count) is not, hence this ++ * is an instantaneous decision. ++ * ++ * Once a candidate, however, the socket must not be reinstalled into a ++ * file descriptor while the garbage collection is in progress. ++ * ++ * If the above conditions are met, then the directed graph of ++ * candidates (*) does not change while unix_gc_lock is held. ++ * ++ * Any operations that changes the file count through file descriptors ++ * (dup, close, sendmsg) does not change the graph since candidates are ++ * not installed in fds. ++ * ++ * Dequeing a candidate via recvmsg would install it into an fd, but ++ * that takes unix_gc_lock to decrement the inflight count, so it's ++ * serialized with garbage collection. 
++ * ++ * MSG_PEEK is special in that it does not change the inflight count, ++ * yet does install the socket into an fd. The following lock/unlock ++ * pair is to ensure serialization with garbage collection. It must be ++ * done between incrementing the file count and installing the file into ++ * an fd. ++ * ++ * If garbage collection starts after the barrier provided by the ++ * lock/unlock, then it will see the elevated refcount and not mark this ++ * as a candidate. If a garbage collection is already in progress ++ * before the file count was incremented, then the lock/unlock pair will ++ * ensure that garbage collection is finished before progressing to ++ * installing the fd. ++ * ++ * (*) A -> B where B is on the queue of A or B is on the queue of C ++ * which is on the queue of listening socket A. + */ +- UNIXCB(skb).fp = scm_fp_dup(scm->fp); +- if (!UNIXCB(skb).fp) +- return -ENOMEM; +- +- for (i = scm->fp->count - 1; i >= 0; i--) +- unix_inflight(scm->fp->user, scm->fp->fp[i]); +- return max_level; ++ spin_lock(&unix_gc_lock); ++ spin_unlock(&unix_gc_lock); + } + + static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) +@@ -1793,7 +1775,8 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, + * - unix_peer(sk) == sk by time of get but disconnected before lock + */ + if (other != sk && +- unlikely(unix_peer(other) != sk && unix_recvq_full(other))) { ++ unlikely(unix_peer(other) != sk && ++ unix_recvq_full_lockless(other))) { + if (timeo) { + timeo = unix_wait_for_peer(other, timeo); + +@@ -2204,7 +2187,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, + sk_peek_offset_fwd(sk, size); + + if (UNIXCB(skb).fp) +- scm.fp = scm_fp_dup(UNIXCB(skb).fp); ++ unix_peek_fds(&scm, skb); + } + err = (flags & MSG_TRUNC) ? skb->len - skip : size; + +@@ -2449,7 +2432,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state, + /* It is questionable, see note in unix_dgram_recvmsg. + */ + if (UNIXCB(skb).fp) +- scm.fp = scm_fp_dup(UNIXCB(skb).fp); ++ unix_peek_fds(&scm, skb); + + sk_peek_offset_fwd(sk, chunk); + +diff --git a/net/unix/garbage.c b/net/unix/garbage.c +index c36757e72844..8bbe1b8e4ff7 100644 +--- a/net/unix/garbage.c ++++ b/net/unix/garbage.c +@@ -86,77 +86,13 @@ + #include + #include + ++#include "scm.h" ++ + /* Internal data structures and random procedures: */ + +-static LIST_HEAD(gc_inflight_list); + static LIST_HEAD(gc_candidates); +-static DEFINE_SPINLOCK(unix_gc_lock); + static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait); + +-unsigned int unix_tot_inflight; +- +-struct sock *unix_get_socket(struct file *filp) +-{ +- struct sock *u_sock = NULL; +- struct inode *inode = file_inode(filp); +- +- /* Socket ? */ +- if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { +- struct socket *sock = SOCKET_I(inode); +- struct sock *s = sock->sk; +- +- /* PF_UNIX ? */ +- if (s && sock->ops && sock->ops->family == PF_UNIX) +- u_sock = s; +- } +- return u_sock; +-} +- +-/* Keep the number of times in flight count for the file +- * descriptor if it is for an AF_UNIX socket. 
+- */ +- +-void unix_inflight(struct user_struct *user, struct file *fp) +-{ +- struct sock *s = unix_get_socket(fp); +- +- spin_lock(&unix_gc_lock); +- +- if (s) { +- struct unix_sock *u = unix_sk(s); +- +- if (atomic_long_inc_return(&u->inflight) == 1) { +- BUG_ON(!list_empty(&u->link)); +- list_add_tail(&u->link, &gc_inflight_list); +- } else { +- BUG_ON(list_empty(&u->link)); +- } +- unix_tot_inflight++; +- } +- user->unix_inflight++; +- spin_unlock(&unix_gc_lock); +-} +- +-void unix_notinflight(struct user_struct *user, struct file *fp) +-{ +- struct sock *s = unix_get_socket(fp); +- +- spin_lock(&unix_gc_lock); +- +- if (s) { +- struct unix_sock *u = unix_sk(s); +- +- BUG_ON(!atomic_long_read(&u->inflight)); +- BUG_ON(list_empty(&u->link)); +- +- if (atomic_long_dec_and_test(&u->inflight)) +- list_del_init(&u->link); +- unix_tot_inflight--; +- } +- user->unix_inflight--; +- spin_unlock(&unix_gc_lock); +-} +- + static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), + struct sk_buff_head *hitlist) + { +diff --git a/net/unix/scm.c b/net/unix/scm.c +new file mode 100644 +index 000000000000..df8f636ab1d8 +--- /dev/null ++++ b/net/unix/scm.c +@@ -0,0 +1,161 @@ ++// SPDX-License-Identifier: GPL-2.0 ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "scm.h" ++ ++unsigned int unix_tot_inflight; ++EXPORT_SYMBOL(unix_tot_inflight); ++ ++LIST_HEAD(gc_inflight_list); ++EXPORT_SYMBOL(gc_inflight_list); ++ ++DEFINE_SPINLOCK(unix_gc_lock); ++EXPORT_SYMBOL(unix_gc_lock); ++ ++struct sock *unix_get_socket(struct file *filp) ++{ ++ struct sock *u_sock = NULL; ++ struct inode *inode = file_inode(filp); ++ ++ /* Socket ? */ ++ if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { ++ struct socket *sock = SOCKET_I(inode); ++ struct sock *s = sock->sk; ++ ++ /* PF_UNIX ? */ ++ if (s && sock->ops && sock->ops->family == PF_UNIX) ++ u_sock = s; ++ } ++ return u_sock; ++} ++EXPORT_SYMBOL(unix_get_socket); ++ ++/* Keep the number of times in flight count for the file ++ * descriptor if it is for an AF_UNIX socket. ++ */ ++void unix_inflight(struct user_struct *user, struct file *fp) ++{ ++ struct sock *s = unix_get_socket(fp); ++ ++ spin_lock(&unix_gc_lock); ++ ++ if (s) { ++ struct unix_sock *u = unix_sk(s); ++ ++ if (atomic_long_inc_return(&u->inflight) == 1) { ++ BUG_ON(!list_empty(&u->link)); ++ list_add_tail(&u->link, &gc_inflight_list); ++ } else { ++ BUG_ON(list_empty(&u->link)); ++ } ++ unix_tot_inflight++; ++ } ++ user->unix_inflight++; ++ spin_unlock(&unix_gc_lock); ++} ++ ++void unix_notinflight(struct user_struct *user, struct file *fp) ++{ ++ struct sock *s = unix_get_socket(fp); ++ ++ spin_lock(&unix_gc_lock); ++ ++ if (s) { ++ struct unix_sock *u = unix_sk(s); ++ ++ BUG_ON(!atomic_long_read(&u->inflight)); ++ BUG_ON(list_empty(&u->link)); ++ ++ if (atomic_long_dec_and_test(&u->inflight)) ++ list_del_init(&u->link); ++ unix_tot_inflight--; ++ } ++ user->unix_inflight--; ++ spin_unlock(&unix_gc_lock); ++} ++ ++/* ++ * The "user->unix_inflight" variable is protected by the garbage ++ * collection lock, and we just read it locklessly here. If you go ++ * over the limit, there might be a tiny race in actually noticing ++ * it across threads. Tough. 
++ */ ++static inline bool too_many_unix_fds(struct task_struct *p) ++{ ++ struct user_struct *user = current_user(); ++ ++ if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE))) ++ return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); ++ return false; ++} ++ ++#define MAX_RECURSION_LEVEL 4 ++ ++int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) ++{ ++ int i; ++ unsigned char max_level = 0; ++ ++ if (too_many_unix_fds(current)) ++ return -ETOOMANYREFS; ++ ++ for (i = scm->fp->count - 1; i >= 0; i--) { ++ struct sock *sk = unix_get_socket(scm->fp->fp[i]); ++ ++ if (sk) ++ max_level = max(max_level, ++ unix_sk(sk)->recursion_level); ++ } ++ if (unlikely(max_level > MAX_RECURSION_LEVEL)) ++ return -ETOOMANYREFS; ++ ++ /* ++ * Need to duplicate file references for the sake of garbage ++ * collection. Otherwise a socket in the fps might become a ++ * candidate for GC while the skb is not yet queued. ++ */ ++ UNIXCB(skb).fp = scm_fp_dup(scm->fp); ++ if (!UNIXCB(skb).fp) ++ return -ENOMEM; ++ ++ for (i = scm->fp->count - 1; i >= 0; i--) ++ unix_inflight(scm->fp->user, scm->fp->fp[i]); ++ return max_level; ++} ++EXPORT_SYMBOL(unix_attach_fds); ++ ++void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) ++{ ++ int i; ++ ++ scm->fp = UNIXCB(skb).fp; ++ UNIXCB(skb).fp = NULL; ++ ++ for (i = scm->fp->count-1; i >= 0; i--) ++ unix_notinflight(scm->fp->user, scm->fp->fp[i]); ++} ++EXPORT_SYMBOL(unix_detach_fds); ++ ++void unix_destruct_scm(struct sk_buff *skb) ++{ ++ struct scm_cookie scm; ++ ++ memset(&scm, 0, sizeof(scm)); ++ scm.pid = UNIXCB(skb).pid; ++ if (UNIXCB(skb).fp) ++ unix_detach_fds(&scm, skb); ++ ++ /* Alas, it calls VFS */ ++ /* So fscking what? fput() had been SMP-safe since the last Summer */ ++ scm_destroy(&scm); ++ sock_wfree(skb); ++} ++EXPORT_SYMBOL(unix_destruct_scm); +diff --git a/net/unix/scm.h b/net/unix/scm.h +new file mode 100644 +index 000000000000..5a255a477f16 +--- /dev/null ++++ b/net/unix/scm.h +@@ -0,0 +1,10 @@ ++#ifndef NET_UNIX_SCM_H ++#define NET_UNIX_SCM_H ++ ++extern struct list_head gc_inflight_list; ++extern spinlock_t unix_gc_lock; ++ ++int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb); ++void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb); ++ ++#endif +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c +index d6473b8d9a81..2fecdfe49bae 100644 +--- a/net/vmw_vsock/af_vsock.c ++++ b/net/vmw_vsock/af_vsock.c +@@ -650,8 +650,9 @@ struct sock *__vsock_create(struct net *net, + vsk->trusted = psk->trusted; + vsk->owner = get_cred(psk->owner); + vsk->connect_timeout = psk->connect_timeout; ++ security_sk_clone(parent, sk); + } else { +- vsk->trusted = capable(CAP_NET_ADMIN); ++ vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN); + vsk->owner = get_current_cred(); + vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT; + } +@@ -830,10 +831,12 @@ static int vsock_shutdown(struct socket *sock, int mode) + */ + + sk = sock->sk; ++ ++ lock_sock(sk); + if (sock->state == SS_UNCONNECTED) { + err = -ENOTCONN; + if (sk->sk_type == SOCK_STREAM) +- return err; ++ goto out; + } else { + sock->state = SS_DISCONNECTING; + err = 0; +@@ -842,10 +845,8 @@ static int vsock_shutdown(struct socket *sock, int mode) + /* Receive and send shutdowns are treated alike. 
*/ + mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN); + if (mode) { +- lock_sock(sk); + sk->sk_shutdown |= mode; + sk->sk_state_change(sk); +- release_sock(sk); + + if (sk->sk_type == SOCK_STREAM) { + sock_reset_flag(sk, SOCK_DONE); +@@ -853,6 +854,8 @@ static int vsock_shutdown(struct socket *sock, int mode) + } + } + ++out: ++ release_sock(sk); + return err; + } + +@@ -1121,7 +1124,6 @@ static void vsock_connect_timeout(struct work_struct *work) + { + struct sock *sk; + struct vsock_sock *vsk; +- int cancel = 0; + + vsk = container_of(work, struct vsock_sock, connect_work.work); + sk = sk_vsock(vsk); +@@ -1132,11 +1134,9 @@ static void vsock_connect_timeout(struct work_struct *work) + sk->sk_state = SS_UNCONNECTED; + sk->sk_err = ETIMEDOUT; + sk->sk_error_report(sk); +- cancel = 1; ++ vsock_transport_cancel_pkt(vsk); + } + release_sock(sk); +- if (cancel) +- vsock_transport_cancel_pkt(vsk); + + sock_put(sk); + } +diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c +index 0bd5a60f3bde..67aba63b5c96 100644 +--- a/net/vmw_vsock/virtio_transport.c ++++ b/net/vmw_vsock/virtio_transport.c +@@ -39,6 +39,7 @@ struct virtio_vsock { + * must be accessed with tx_lock held. + */ + struct mutex tx_lock; ++ bool tx_run; + + struct work_struct send_pkt_work; + spinlock_t send_pkt_list_lock; +@@ -50,6 +51,7 @@ struct virtio_vsock { + * must be accessed with rx_lock held. + */ + struct mutex rx_lock; ++ bool rx_run; + int rx_buf_nr; + int rx_buf_max_nr; + +@@ -57,24 +59,28 @@ struct virtio_vsock { + * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held. + */ + struct mutex event_lock; ++ bool event_run; + struct virtio_vsock_event event_list[8]; + + u32 guest_cid; + }; + +-static struct virtio_vsock *virtio_vsock_get(void) +-{ +- return the_virtio_vsock; +-} +- + static u32 virtio_transport_get_local_cid(void) + { +- struct virtio_vsock *vsock = virtio_vsock_get(); ++ struct virtio_vsock *vsock; ++ u32 ret; + +- if (!vsock) +- return VMADDR_CID_ANY; ++ rcu_read_lock(); ++ vsock = rcu_dereference(the_virtio_vsock); ++ if (!vsock) { ++ ret = VMADDR_CID_ANY; ++ goto out_rcu; ++ } + +- return vsock->guest_cid; ++ ret = vsock->guest_cid; ++out_rcu: ++ rcu_read_unlock(); ++ return ret; + } + + static void +@@ -88,6 +94,9 @@ virtio_transport_send_pkt_work(struct work_struct *work) + + mutex_lock(&vsock->tx_lock); + ++ if (!vsock->tx_run) ++ goto out; ++ + vq = vsock->vqs[VSOCK_VQ_TX]; + + for (;;) { +@@ -144,6 +153,7 @@ virtio_transport_send_pkt_work(struct work_struct *work) + if (added) + virtqueue_kick(vq); + ++out: + mutex_unlock(&vsock->tx_lock); + + if (restart_rx) +@@ -156,10 +166,12 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt) + struct virtio_vsock *vsock; + int len = pkt->len; + +- vsock = virtio_vsock_get(); ++ rcu_read_lock(); ++ vsock = rcu_dereference(the_virtio_vsock); + if (!vsock) { + virtio_transport_free_pkt(pkt); +- return -ENODEV; ++ len = -ENODEV; ++ goto out_rcu; + } + + if (pkt->reply) +@@ -170,6 +182,9 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt) + spin_unlock_bh(&vsock->send_pkt_list_lock); + + queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work); ++ ++out_rcu: ++ rcu_read_unlock(); + return len; + } + +@@ -222,6 +237,10 @@ static void virtio_transport_tx_work(struct work_struct *work) + + vq = vsock->vqs[VSOCK_VQ_TX]; + mutex_lock(&vsock->tx_lock); ++ ++ if (!vsock->tx_run) ++ goto out; ++ + do { + struct virtio_vsock_pkt *pkt; + unsigned int len; +@@ -232,6 +251,8 @@ static void virtio_transport_tx_work(struct 
work_struct *work) + added = true; + } + } while (!virtqueue_enable_cb(vq)); ++ ++out: + mutex_unlock(&vsock->tx_lock); + + if (added) +@@ -260,6 +281,9 @@ static void virtio_transport_rx_work(struct work_struct *work) + + mutex_lock(&vsock->rx_lock); + ++ if (!vsock->rx_run) ++ goto out; ++ + do { + virtqueue_disable_cb(vq); + for (;;) { +@@ -368,6 +392,9 @@ static void virtio_transport_event_work(struct work_struct *work) + + mutex_lock(&vsock->event_lock); + ++ if (!vsock->event_run) ++ goto out; ++ + do { + struct virtio_vsock_event *event; + unsigned int len; +@@ -382,7 +409,7 @@ static void virtio_transport_event_work(struct work_struct *work) + } while (!virtqueue_enable_cb(vq)); + + virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]); +- ++out: + mutex_unlock(&vsock->event_lock); + } + +@@ -478,7 +505,8 @@ static int virtio_vsock_probe(struct virtio_device *vdev) + return ret; + + /* Only one virtio-vsock device per guest is supported */ +- if (the_virtio_vsock) { ++ if (rcu_dereference_protected(the_virtio_vsock, ++ lockdep_is_held(&the_virtio_vsock_mutex))) { + ret = -EBUSY; + goto out; + } +@@ -502,8 +530,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev) + vsock->rx_buf_max_nr = 0; + atomic_set(&vsock->queued_replies, 0); + +- vdev->priv = vsock; +- the_virtio_vsock = vsock; + mutex_init(&vsock->tx_lock); + mutex_init(&vsock->rx_lock); + mutex_init(&vsock->event_lock); +@@ -514,14 +540,23 @@ static int virtio_vsock_probe(struct virtio_device *vdev) + INIT_WORK(&vsock->event_work, virtio_transport_event_work); + INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work); + ++ mutex_lock(&vsock->tx_lock); ++ vsock->tx_run = true; ++ mutex_unlock(&vsock->tx_lock); ++ + mutex_lock(&vsock->rx_lock); + virtio_vsock_rx_fill(vsock); ++ vsock->rx_run = true; + mutex_unlock(&vsock->rx_lock); + + mutex_lock(&vsock->event_lock); + virtio_vsock_event_fill(vsock); ++ vsock->event_run = true; + mutex_unlock(&vsock->event_lock); + ++ vdev->priv = vsock; ++ rcu_assign_pointer(the_virtio_vsock, vsock); ++ + mutex_unlock(&the_virtio_vsock_mutex); + return 0; + +@@ -536,6 +571,12 @@ static void virtio_vsock_remove(struct virtio_device *vdev) + struct virtio_vsock *vsock = vdev->priv; + struct virtio_vsock_pkt *pkt; + ++ mutex_lock(&the_virtio_vsock_mutex); ++ ++ vdev->priv = NULL; ++ rcu_assign_pointer(the_virtio_vsock, NULL); ++ synchronize_rcu(); ++ + flush_work(&vsock->rx_work); + flush_work(&vsock->tx_work); + flush_work(&vsock->event_work); +@@ -544,6 +585,24 @@ static void virtio_vsock_remove(struct virtio_device *vdev) + /* Reset all connected sockets when the device disappear */ + vsock_for_each_connected_socket(virtio_vsock_reset_sock); + ++ /* Stop all work handlers to make sure no one is accessing the device, ++ * so we can safely call vdev->config->reset(). ++ */ ++ mutex_lock(&vsock->rx_lock); ++ vsock->rx_run = false; ++ mutex_unlock(&vsock->rx_lock); ++ ++ mutex_lock(&vsock->tx_lock); ++ vsock->tx_run = false; ++ mutex_unlock(&vsock->tx_lock); ++ ++ mutex_lock(&vsock->event_lock); ++ vsock->event_run = false; ++ mutex_unlock(&vsock->event_lock); ++ ++ /* Flush all device writes and interrupts, device will not use any ++ * more buffers. 
++ */ + vdev->config->reset(vdev); + + mutex_lock(&vsock->rx_lock); +@@ -565,12 +624,11 @@ static void virtio_vsock_remove(struct virtio_device *vdev) + } + spin_unlock_bh(&vsock->send_pkt_list_lock); + +- mutex_lock(&the_virtio_vsock_mutex); +- the_virtio_vsock = NULL; +- mutex_unlock(&the_virtio_vsock_mutex); +- ++ /* Delete virtqueues and flush outstanding callbacks if any */ + vdev->config->del_vqs(vdev); + ++ mutex_unlock(&the_virtio_vsock_mutex); ++ + kfree(vsock); + } + +diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c +index aa9d1c7780c3..5a2a08663769 100644 +--- a/net/vmw_vsock/virtio_transport_common.c ++++ b/net/vmw_vsock/virtio_transport_common.c +@@ -25,6 +25,10 @@ + /* How long to wait for graceful shutdown of a connection */ + #define VSOCK_CLOSE_TIMEOUT (8 * HZ) + ++uint virtio_transport_max_vsock_pkt_buf_size = 64 * 1024; ++module_param(virtio_transport_max_vsock_pkt_buf_size, uint, 0444); ++EXPORT_SYMBOL_GPL(virtio_transport_max_vsock_pkt_buf_size); ++ + static const struct virtio_transport *virtio_transport_get_ops(void) + { + const struct vsock_transport *t = vsock_core_get_transport(); +@@ -959,10 +963,10 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt) + + vsk = vsock_sk(sk); + +- space_available = virtio_transport_space_update(sk, pkt); +- + lock_sock(sk); + ++ space_available = virtio_transport_space_update(sk, pkt); ++ + /* Update CID in case it has changed after a transport reset event */ + vsk->local_addr.svm_cid = dst.svm_cid; + +diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c +index 102bf9194662..c09efcdf72d2 100644 +--- a/net/vmw_vsock/vmci_transport.c ++++ b/net/vmw_vsock/vmci_transport.c +@@ -593,8 +593,7 @@ vmci_transport_queue_pair_alloc(struct vmci_qp **qpair, + peer, flags, VMCI_NO_PRIVILEGE_FLAGS); + out: + if (err < 0) { +- pr_err("Could not attach to queue pair with %d\n", +- err); ++ pr_err_once("Could not attach to queue pair with %d\n", err); + err = vmci_transport_error_to_vsock_error(err); + } + +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index 475d1f5bbe8f..9204a602c21a 100755 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -1821,7 +1821,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, + * case we'll continue with more data in the next round, + * but break unconditionally so unsplit data stops here. 
+ */ +- state->split_start++; ++ if (state->split) ++ state->split_start++; ++ else ++ state->split_start = 0; + break; + case 9: + if (rdev->wiphy.extended_capabilities && +@@ -3385,6 +3388,9 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) + if (err) + return err; + ++ if (key.idx < 0) ++ return -EINVAL; ++ + if (info->attrs[NL80211_ATTR_MAC]) + mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + +@@ -10774,7 +10780,7 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) + struct net_device *dev = info->user_ptr[1]; + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct nlattr *tb[NUM_NL80211_REKEY_DATA]; +- struct cfg80211_gtk_rekey_data rekey_data; ++ struct cfg80211_gtk_rekey_data rekey_data = {}; + int err; + + if (!info->attrs[NL80211_ATTR_REKEY_DATA]) +@@ -11678,13 +11684,13 @@ static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info) + if (!wdev->netdev && !wdev->p2p_started) + return -ENETDOWN; + } +- +- if (!vcmd->doit) +- return -EOPNOTSUPP; + } else { + wdev = NULL; + } + ++ if (!vcmd->doit) ++ return -EOPNOTSUPP; ++ + if (info->attrs[NL80211_ATTR_VENDOR_DATA]) { + data = nla_data(info->attrs[NL80211_ATTR_VENDOR_DATA]); + len = nla_len(info->attrs[NL80211_ATTR_VENDOR_DATA]); +diff --git a/net/wireless/reg.c b/net/wireless/reg.c +index 9588b7229409..e4b844f07b8e 100644 +--- a/net/wireless/reg.c ++++ b/net/wireless/reg.c +@@ -2325,6 +2325,9 @@ int regulatory_hint_user(const char *alpha2, + if (WARN_ON(!alpha2)) + return -EINVAL; + ++ if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2)) ++ return -EINVAL; ++ + request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); + if (!request) + return -ENOMEM; +@@ -2761,7 +2764,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd) + power_rule = ®_rule->power_rule; + + if (reg_rule->flags & NL80211_RRF_AUTO_BW) +- snprintf(bw, sizeof(bw), "%d KHz, %d KHz AUTO", ++ snprintf(bw, sizeof(bw), "%d KHz, %u KHz AUTO", + freq_range->max_bandwidth_khz, + reg_get_max_bandwidth(rd, reg_rule)); + else +diff --git a/net/wireless/scan.c b/net/wireless/scan.c +index 4de2c7c15531..84887901afa5 100644 +--- a/net/wireless/scan.c ++++ b/net/wireless/scan.c +@@ -952,14 +952,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, + * be grouped with this beacon for updates ... 
+ */ + if (!cfg80211_combine_bsses(rdev, new)) { +- kfree(new); ++ bss_ref_put(rdev, new); + goto drop; + } + } + + if (rdev->bss_entries >= bss_entries_limit && + !cfg80211_bss_expire_oldest(rdev)) { +- kfree(new); ++ bss_ref_put(rdev, new); + goto drop; + } + +diff --git a/net/wireless/sme.c b/net/wireless/sme.c +index 5d1c8ec911cd..1938ed08ed32 100644 +--- a/net/wireless/sme.c ++++ b/net/wireless/sme.c +@@ -552,7 +552,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev, + cfg80211_sme_free(wdev); + } + +- if (WARN_ON(wdev->conn)) ++ if (wdev->conn) + return -EINPROGRESS; + + wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL); +diff --git a/net/wireless/util.c b/net/wireless/util.c +index e7ea1524ab05..1d7a73c58d16 100755 +--- a/net/wireless/util.c ++++ b/net/wireless/util.c +@@ -429,7 +429,8 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) + EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen); + + int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, +- const u8 *addr, enum nl80211_iftype iftype) ++ const u8 *addr, enum nl80211_iftype iftype, ++ bool is_amsdu) + { + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct { +@@ -517,7 +518,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, + skb_copy_bits(skb, hdrlen, &payload, sizeof(payload)); + tmp.h_proto = payload.proto; + +- if (likely((ether_addr_equal(payload.hdr, rfc1042_header) && ++ if (likely((!is_amsdu && ether_addr_equal(payload.hdr, rfc1042_header) && + tmp.h_proto != htons(ETH_P_AARP) && + tmp.h_proto != htons(ETH_P_IPX)) || + ether_addr_equal(payload.hdr, bridge_tunnel_header))) +@@ -659,6 +660,9 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, + remaining = skb->len - offset; + if (subframe_len > remaining) + goto purge; ++ /* mitigate A-MSDU aggregation injection attacks */ ++ if (ether_addr_equal(eth.h_dest, rfc1042_header)) ++ goto purge; + + offset += sizeof(struct ethhdr); + last = remaining <= subframe_len + padding; +@@ -929,6 +933,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, + case NL80211_IFTYPE_MESH_POINT: + /* mesh should be handled? 
*/ + break; ++ case NL80211_IFTYPE_OCB: ++ cfg80211_leave_ocb(rdev, dev); ++ break; + default: + break; + } +diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c +index 4bf0296a7c43..0e809ae17f38 100644 +--- a/net/wireless/wext-core.c ++++ b/net/wireless/wext-core.c +@@ -898,8 +898,9 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, + int call_commit_handler(struct net_device *dev) + { + #ifdef CONFIG_WIRELESS_EXT +- if ((netif_running(dev)) && +- (dev->wireless_handlers->standard[0] != NULL)) ++ if (netif_running(dev) && ++ dev->wireless_handlers && ++ dev->wireless_handlers->standard[0]) + /* Call the commit handler on the driver */ + return dev->wireless_handlers->standard[0](dev, NULL, + NULL, NULL); +diff --git a/net/wireless/wext-spy.c b/net/wireless/wext-spy.c +index 33bef22e44e9..b379a0371653 100644 +--- a/net/wireless/wext-spy.c ++++ b/net/wireless/wext-spy.c +@@ -120,8 +120,8 @@ int iw_handler_set_thrspy(struct net_device * dev, + return -EOPNOTSUPP; + + /* Just do it */ +- memcpy(&(spydata->spy_thr_low), &(threshold->low), +- 2 * sizeof(struct iw_quality)); ++ spydata->spy_thr_low = threshold->low; ++ spydata->spy_thr_high = threshold->high; + + /* Clear flag */ + memset(spydata->spy_thr_under, '\0', sizeof(spydata->spy_thr_under)); +@@ -147,8 +147,8 @@ int iw_handler_get_thrspy(struct net_device * dev, + return -EOPNOTSUPP; + + /* Just do it */ +- memcpy(&(threshold->low), &(spydata->spy_thr_low), +- 2 * sizeof(struct iw_quality)); ++ threshold->low = spydata->spy_thr_low; ++ threshold->high = spydata->spy_thr_high; + + return 0; + } +@@ -173,10 +173,10 @@ static void iw_send_thrspy_event(struct net_device * dev, + memcpy(threshold.addr.sa_data, address, ETH_ALEN); + threshold.addr.sa_family = ARPHRD_ETHER; + /* Copy stats */ +- memcpy(&(threshold.qual), wstats, sizeof(struct iw_quality)); ++ threshold.qual = *wstats; + /* Copy also thresholds */ +- memcpy(&(threshold.low), &(spydata->spy_thr_low), +- 2 * sizeof(struct iw_quality)); ++ threshold.low = spydata->spy_thr_low; ++ threshold.high = spydata->spy_thr_high; + + /* Send event to user space */ + wireless_send_event(dev, SIOCGIWTHRSPY, &wrqu, (char *) &threshold); +diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c +index a8ca79810dcc..c23c04d38a82 100644 +--- a/net/x25/af_x25.c ++++ b/net/x25/af_x25.c +@@ -550,7 +550,7 @@ static int x25_create(struct net *net, struct socket *sock, int protocol, + if (protocol) + goto out; + +- rc = -ENOBUFS; ++ rc = -ENOMEM; + if ((sk = x25_alloc_socket(net, kern)) == NULL) + goto out; + +@@ -679,7 +679,8 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + int len, i, rc = 0; + + if (addr_len != sizeof(struct sockaddr_x25) || +- addr->sx25_family != AF_X25) { ++ addr->sx25_family != AF_X25 || ++ strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN) { + rc = -EINVAL; + goto out; + } +@@ -773,7 +774,8 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr, + + rc = -EINVAL; + if (addr_len != sizeof(struct sockaddr_x25) || +- addr->sx25_family != AF_X25) ++ addr->sx25_family != AF_X25 || ++ strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN) + goto out; + + rc = -ENETUNREACH; +@@ -823,7 +825,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr, + sock->state = SS_CONNECTED; + rc = 0; + out_put_neigh: +- if (rc) { ++ if (rc && x25->neighbour) { + read_lock_bh(&x25_list_lock); + x25_neigh_put(x25->neighbour); + x25->neighbour = NULL; +@@ -1047,6 +1049,7 @@ int 
x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, + makex25->lci = lci; + makex25->dest_addr = dest_addr; + makex25->source_addr = source_addr; ++ x25_neigh_hold(nb); + makex25->neighbour = nb; + makex25->facilities = facilities; + makex25->dte_facilities= dte_facilities; +diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c +index 6b5af65f491f..a3163645b5bd 100644 +--- a/net/x25/x25_subr.c ++++ b/net/x25/x25_subr.c +@@ -368,6 +368,12 @@ void x25_disconnect(struct sock *sk, int reason, unsigned char cause, + sk->sk_state_change(sk); + sock_set_flag(sk, SOCK_DEAD); + } ++ if (x25->neighbour) { ++ read_lock_bh(&x25_list_lock); ++ x25_neigh_put(x25->neighbour); ++ x25->neighbour = NULL; ++ read_unlock_bh(&x25_list_lock); ++ } + } + + /* +diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig +index cf778a93c4fb..668fdfa4d7f2 100644 +--- a/net/xfrm/Kconfig ++++ b/net/xfrm/Kconfig +@@ -22,6 +22,17 @@ config XFRM_USER + + If unsure, say Y. + ++config XFRM_USER_COMPAT ++ tristate "Compatible ABI support" ++ depends on XFRM_USER && COMPAT_FOR_U64_ALIGNMENT && \ ++ HAVE_EFFICIENT_UNALIGNED_ACCESS ++ select WANT_COMPAT_NETLINK_MESSAGES ++ help ++ Transformation(XFRM) user configuration interface like IPsec ++ used by compatible Linux applications. ++ ++ If unsure, say N. ++ + config XFRM_INTERFACE + tristate "Transformation virtual interface" + depends on XFRM && IPV6 +diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile +index 5f038f336d39..cd4393c75203 100644 +--- a/net/xfrm/Makefile ++++ b/net/xfrm/Makefile +@@ -8,5 +8,6 @@ obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \ + obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o + obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o + obj-$(CONFIG_XFRM_USER) += xfrm_user.o ++obj-$(CONFIG_XFRM_USER_COMPAT) += xfrm_compat.o + obj-$(CONFIG_XFRM_IPCOMP) += xfrm_ipcomp.o + obj-$(CONFIG_XFRM_INTERFACE) += xfrm_interface.o +diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c +new file mode 100644 +index 000000000000..27e15516787d +--- /dev/null ++++ b/net/xfrm/xfrm_compat.c +@@ -0,0 +1,629 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * XFRM compat layer ++ * Author: Dmitry Safonov ++ * Based on code and translator idea by: Florian Westphal ++ */ ++#include ++#include ++#include ++#include ++ ++struct compat_xfrm_lifetime_cfg { ++ compat_u64 soft_byte_limit, hard_byte_limit; ++ compat_u64 soft_packet_limit, hard_packet_limit; ++ compat_u64 soft_add_expires_seconds, hard_add_expires_seconds; ++ compat_u64 soft_use_expires_seconds, hard_use_expires_seconds; ++}; /* same size on 32bit, but only 4 byte alignment required */ ++ ++struct compat_xfrm_lifetime_cur { ++ compat_u64 bytes, packets, add_time, use_time; ++}; /* same size on 32bit, but only 4 byte alignment required */ ++ ++struct compat_xfrm_userpolicy_info { ++ struct xfrm_selector sel; ++ struct compat_xfrm_lifetime_cfg lft; ++ struct compat_xfrm_lifetime_cur curlft; ++ __u32 priority, index; ++ u8 dir, action, flags, share; ++ /* 4 bytes additional padding on 64bit */ ++}; ++ ++struct compat_xfrm_usersa_info { ++ struct xfrm_selector sel; ++ struct xfrm_id id; ++ xfrm_address_t saddr; ++ struct compat_xfrm_lifetime_cfg lft; ++ struct compat_xfrm_lifetime_cur curlft; ++ struct xfrm_stats stats; ++ __u32 seq, reqid; ++ u16 family; ++ u8 mode, replay_window, flags; ++ /* 4 bytes additional padding on 64bit */ ++}; ++ ++struct compat_xfrm_user_acquire { ++ struct xfrm_id id; ++ xfrm_address_t saddr; ++ struct xfrm_selector sel; ++ struct compat_xfrm_userpolicy_info policy; ++ /* 4 bytes 
additional padding on 64bit */ ++ __u32 aalgos, ealgos, calgos, seq; ++}; ++ ++struct compat_xfrm_userspi_info { ++ struct compat_xfrm_usersa_info info; ++ /* 4 bytes additional padding on 64bit */ ++ __u32 min, max; ++}; ++ ++struct compat_xfrm_user_expire { ++ struct compat_xfrm_usersa_info state; ++ /* 8 bytes additional padding on 64bit */ ++ u8 hard; ++}; ++ ++struct compat_xfrm_user_polexpire { ++ struct compat_xfrm_userpolicy_info pol; ++ /* 8 bytes additional padding on 64bit */ ++ u8 hard; ++}; ++ ++#define XMSGSIZE(type) sizeof(struct type) ++ ++static const int compat_msg_min[XFRM_NR_MSGTYPES] = { ++ [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_usersa_info), ++ [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), ++ [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), ++ [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_userpolicy_info), ++ [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), ++ [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), ++ [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_userspi_info), ++ [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_user_acquire), ++ [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_user_expire), ++ [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_userpolicy_info), ++ [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_usersa_info), ++ [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_user_polexpire), ++ [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush), ++ [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0, ++ [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id), ++ [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id), ++ [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report), ++ [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), ++ [XFRM_MSG_NEWSADINFO - XFRM_MSG_BASE] = sizeof(u32), ++ [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32), ++ [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = sizeof(u32), ++ [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32), ++ [XFRM_MSG_MAPPING - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_mapping) ++}; ++ ++static const struct nla_policy compat_policy[XFRMA_MAX+1] = { ++ [XFRMA_SA] = { .len = XMSGSIZE(compat_xfrm_usersa_info)}, ++ [XFRMA_POLICY] = { .len = XMSGSIZE(compat_xfrm_userpolicy_info)}, ++ [XFRMA_LASTUSED] = { .type = NLA_U64}, ++ [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)}, ++ [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) }, ++ [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) }, ++ [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) }, ++ [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) }, ++ [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) }, ++ [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) }, ++ [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) }, ++ [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) }, ++ [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) }, ++ [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 }, ++ [XFRMA_ETIMER_THRESH] = { .type = NLA_U32 }, ++ [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) }, ++ [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) }, ++ [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)}, ++ [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) }, ++ [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) }, ++ [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) }, ++ [XFRMA_TFCPAD] = { .type = 
NLA_U32 }, ++ [XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) }, ++ [XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 }, ++ [XFRMA_PROTO] = { .type = NLA_U8 }, ++ [XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) }, ++ [XFRMA_SET_MARK] = { .type = NLA_U32 }, ++ [XFRMA_SET_MARK_MASK] = { .type = NLA_U32 }, ++ [XFRMA_IF_ID] = { .type = NLA_U32 }, ++}; ++ ++static inline void *kvmalloc(size_t size, gfp_t flags) ++{ ++ void *ret; ++ ++ ret = kmalloc(size, flags | __GFP_NOWARN); ++ if (!ret) ++ ret = __vmalloc(size, flags, PAGE_KERNEL); ++ return ret; ++} ++ ++static struct nlmsghdr *xfrm_nlmsg_put_compat(struct sk_buff *skb, ++ const struct nlmsghdr *nlh_src, u16 type) ++{ ++ int payload = compat_msg_min[type]; ++ int src_len = xfrm_msg_min[type]; ++ struct nlmsghdr *nlh_dst; ++ ++ /* Compat messages are shorter or equal to native (+padding) */ ++ if (WARN_ON_ONCE(src_len < payload)) ++ return ERR_PTR(-EMSGSIZE); ++ ++ nlh_dst = nlmsg_put(skb, nlh_src->nlmsg_pid, nlh_src->nlmsg_seq, ++ nlh_src->nlmsg_type, payload, nlh_src->nlmsg_flags); ++ if (!nlh_dst) ++ return ERR_PTR(-EMSGSIZE); ++ ++ memset(nlmsg_data(nlh_dst), 0, payload); ++ ++ switch (nlh_src->nlmsg_type) { ++ /* Compat message has the same layout as native */ ++ case XFRM_MSG_DELSA: ++ case XFRM_MSG_DELPOLICY: ++ case XFRM_MSG_FLUSHSA: ++ case XFRM_MSG_FLUSHPOLICY: ++ case XFRM_MSG_NEWAE: ++ case XFRM_MSG_REPORT: ++ case XFRM_MSG_MIGRATE: ++ case XFRM_MSG_NEWSADINFO: ++ case XFRM_MSG_NEWSPDINFO: ++ case XFRM_MSG_MAPPING: ++ WARN_ON_ONCE(src_len != payload); ++ memcpy(nlmsg_data(nlh_dst), nlmsg_data(nlh_src), src_len); ++ break; ++ /* 4 byte alignment for trailing u64 on native, but not on compat */ ++ case XFRM_MSG_NEWSA: ++ case XFRM_MSG_NEWPOLICY: ++ case XFRM_MSG_UPDSA: ++ case XFRM_MSG_UPDPOLICY: ++ WARN_ON_ONCE(src_len != payload + 4); ++ memcpy(nlmsg_data(nlh_dst), nlmsg_data(nlh_src), payload); ++ break; ++ case XFRM_MSG_EXPIRE: { ++ const struct xfrm_user_expire *src_ue = nlmsg_data(nlh_src); ++ struct compat_xfrm_user_expire *dst_ue = nlmsg_data(nlh_dst); ++ ++ /* compat_xfrm_user_expire has 4-byte smaller state */ ++ memcpy(dst_ue, src_ue, sizeof(dst_ue->state)); ++ dst_ue->hard = src_ue->hard; ++ break; ++ } ++ case XFRM_MSG_ACQUIRE: { ++ const struct xfrm_user_acquire *src_ua = nlmsg_data(nlh_src); ++ struct compat_xfrm_user_acquire *dst_ua = nlmsg_data(nlh_dst); ++ ++ memcpy(dst_ua, src_ua, offsetof(struct compat_xfrm_user_acquire, aalgos)); ++ dst_ua->aalgos = src_ua->aalgos; ++ dst_ua->ealgos = src_ua->ealgos; ++ dst_ua->calgos = src_ua->calgos; ++ dst_ua->seq = src_ua->seq; ++ break; ++ } ++ case XFRM_MSG_POLEXPIRE: { ++ const struct xfrm_user_polexpire *src_upe = nlmsg_data(nlh_src); ++ struct compat_xfrm_user_polexpire *dst_upe = nlmsg_data(nlh_dst); ++ ++ /* compat_xfrm_user_polexpire has 4-byte smaller state */ ++ memcpy(dst_upe, src_upe, sizeof(dst_upe->pol)); ++ dst_upe->hard = src_upe->hard; ++ break; ++ } ++ case XFRM_MSG_ALLOCSPI: { ++ const struct xfrm_userspi_info *src_usi = nlmsg_data(nlh_src); ++ struct compat_xfrm_userspi_info *dst_usi = nlmsg_data(nlh_dst); ++ ++ /* compat_xfrm_user_polexpire has 4-byte smaller state */ ++ memcpy(dst_usi, src_usi, sizeof(src_usi->info)); ++ dst_usi->min = src_usi->min; ++ dst_usi->max = src_usi->max; ++ break; ++ } ++ /* Not being sent by kernel */ ++ case XFRM_MSG_GETSA: ++ case XFRM_MSG_GETPOLICY: ++ case XFRM_MSG_GETAE: ++ case XFRM_MSG_GETSADINFO: ++ case XFRM_MSG_GETSPDINFO: ++ default: ++ WARN_ONCE(1, "unsupported nlmsg_type %d", 
nlh_src->nlmsg_type); ++ return ERR_PTR(-EOPNOTSUPP); ++ } ++ ++ return nlh_dst; ++} ++ ++static int xfrm_nla_cpy(struct sk_buff *dst, const struct nlattr *src, int len) ++{ ++ return nla_put(dst, src->nla_type, len, nla_data(src)); ++} ++ ++static int xfrm_xlate64_attr(struct sk_buff *dst, const struct nlattr *src) ++{ ++ switch (src->nla_type) { ++ case XFRMA_PAD: ++ case XFRMA_OFFLOAD_DEV: ++ /* Ignore */ ++ return 0; ++ case XFRMA_ALG_AUTH: ++ case XFRMA_ALG_CRYPT: ++ case XFRMA_ALG_COMP: ++ case XFRMA_ENCAP: ++ case XFRMA_TMPL: ++ return xfrm_nla_cpy(dst, src, nla_len(src)); ++ case XFRMA_SA: ++ return xfrm_nla_cpy(dst, src, XMSGSIZE(compat_xfrm_usersa_info)); ++ case XFRMA_POLICY: ++ return xfrm_nla_cpy(dst, src, XMSGSIZE(compat_xfrm_userpolicy_info)); ++ case XFRMA_SEC_CTX: ++ return xfrm_nla_cpy(dst, src, nla_len(src)); ++ case XFRMA_LTIME_VAL: ++ return nla_put_64bit(dst, src->nla_type, nla_len(src), ++ nla_data(src), XFRMA_PAD); ++ case XFRMA_REPLAY_VAL: ++ case XFRMA_REPLAY_THRESH: ++ case XFRMA_ETIMER_THRESH: ++ case XFRMA_SRCADDR: ++ case XFRMA_COADDR: ++ return xfrm_nla_cpy(dst, src, nla_len(src)); ++ case XFRMA_LASTUSED: ++ return nla_put_64bit(dst, src->nla_type, nla_len(src), ++ nla_data(src), XFRMA_PAD); ++ case XFRMA_POLICY_TYPE: ++ case XFRMA_MIGRATE: ++ case XFRMA_ALG_AEAD: ++ case XFRMA_KMADDRESS: ++ case XFRMA_ALG_AUTH_TRUNC: ++ case XFRMA_MARK: ++ case XFRMA_TFCPAD: ++ case XFRMA_REPLAY_ESN_VAL: ++ case XFRMA_SA_EXTRA_FLAGS: ++ case XFRMA_PROTO: ++ case XFRMA_ADDRESS_FILTER: ++ case XFRMA_SET_MARK: ++ case XFRMA_SET_MARK_MASK: ++ case XFRMA_IF_ID: ++ return xfrm_nla_cpy(dst, src, nla_len(src)); ++ default: ++ BUILD_BUG_ON(XFRMA_MAX != XFRMA_IF_ID); ++ WARN_ONCE(1, "unsupported nla_type %d", src->nla_type); ++ return -EOPNOTSUPP; ++ } ++} ++ ++/* Take kernel-built (64bit layout) and create 32bit layout for userspace */ ++static int xfrm_xlate64(struct sk_buff *dst, const struct nlmsghdr *nlh_src) ++{ ++ u16 type = nlh_src->nlmsg_type - XFRM_MSG_BASE; ++ const struct nlattr *nla, *attrs; ++ struct nlmsghdr *nlh_dst; ++ int len, remaining; ++ ++ nlh_dst = xfrm_nlmsg_put_compat(dst, nlh_src, type); ++ if (IS_ERR(nlh_dst)) ++ return PTR_ERR(nlh_dst); ++ ++ attrs = nlmsg_attrdata(nlh_src, xfrm_msg_min[type]); ++ len = nlmsg_attrlen(nlh_src, xfrm_msg_min[type]); ++ ++ nla_for_each_attr(nla, attrs, len, remaining) { ++ int err = xfrm_xlate64_attr(dst, nla); ++ ++ if (err) ++ return err; ++ } ++ ++ nlmsg_end(dst, nlh_dst); ++ ++ return 0; ++} ++ ++static int xfrm_alloc_compat(struct sk_buff *skb, const struct nlmsghdr *nlh_src) ++{ ++ u16 type = nlh_src->nlmsg_type - XFRM_MSG_BASE; ++ struct sk_buff *new = NULL; ++ int err; ++ ++ if (WARN_ON_ONCE(type >= ARRAY_SIZE(xfrm_msg_min))) ++ return -EOPNOTSUPP; ++ ++ if (skb_shinfo(skb)->frag_list == NULL) { ++ new = alloc_skb(skb->len + skb_tailroom(skb), GFP_ATOMIC); ++ if (!new) ++ return -ENOMEM; ++ skb_shinfo(skb)->frag_list = new; ++ } ++ ++ err = xfrm_xlate64(skb_shinfo(skb)->frag_list, nlh_src); ++ if (err) { ++ if (new) { ++ kfree_skb(new); ++ skb_shinfo(skb)->frag_list = NULL; ++ } ++ return err; ++ } ++ ++ return 0; ++} ++ ++/* Calculates len of translated 64-bit message. 
*/ ++static size_t xfrm_user_rcv_calculate_len64(const struct nlmsghdr *src, ++ struct nlattr *attrs[XFRMA_MAX+1]) ++{ ++ size_t len = nlmsg_len(src); ++ ++ switch (src->nlmsg_type) { ++ case XFRM_MSG_NEWSA: ++ case XFRM_MSG_NEWPOLICY: ++ case XFRM_MSG_ALLOCSPI: ++ case XFRM_MSG_ACQUIRE: ++ case XFRM_MSG_UPDPOLICY: ++ case XFRM_MSG_UPDSA: ++ len += 4; ++ break; ++ case XFRM_MSG_EXPIRE: ++ case XFRM_MSG_POLEXPIRE: ++ len += 8; ++ break; ++ default: ++ break; ++ } ++ ++ if (attrs[XFRMA_SA]) ++ len += 4; ++ if (attrs[XFRMA_POLICY]) ++ len += 4; ++ ++ /* XXX: some attrs may need to be realigned ++ * if !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ++ */ ++ ++ return len; ++} ++ ++static int xfrm_attr_cpy32(void *dst, size_t *pos, const struct nlattr *src, ++ size_t size, int copy_len, int payload) ++{ ++ struct nlmsghdr *nlmsg = dst; ++ struct nlattr *nla; ++ ++ if (WARN_ON_ONCE(copy_len > payload)) ++ copy_len = payload; ++ ++ if (size - *pos < nla_attr_size(payload)) ++ return -ENOBUFS; ++ ++ nla = dst + *pos; ++ ++ memcpy(nla, src, nla_attr_size(copy_len)); ++ nla->nla_len = nla_attr_size(payload); ++ *pos += nla_attr_size(payload); ++ nlmsg->nlmsg_len += nla->nla_len; ++ ++ memset(dst + *pos, 0, payload - copy_len); ++ *pos += payload - copy_len; ++ ++ return 0; ++} ++ ++static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla, ++ size_t *pos, size_t size) ++{ ++ int type = nla_type(nla); ++ u16 pol_len32, pol_len64; ++ int err; ++ ++ if (type > XFRMA_MAX) { ++ BUILD_BUG_ON(XFRMA_MAX != XFRMA_IF_ID); ++ return -EOPNOTSUPP; ++ } ++ if (nla_len(nla) < compat_policy[type].len) { ++ return -EOPNOTSUPP; ++ } ++ ++ pol_len32 = compat_policy[type].len; ++ pol_len64 = xfrma_policy[type].len; ++ ++ /* XFRMA_SA and XFRMA_POLICY - need to know how-to translate */ ++ if (pol_len32 != pol_len64) { ++ if (nla_len(nla) != compat_policy[type].len) { ++ return -EOPNOTSUPP; ++ } ++ err = xfrm_attr_cpy32(dst, pos, nla, size, pol_len32, pol_len64); ++ if (err) ++ return err; ++ } ++ ++ return xfrm_attr_cpy32(dst, pos, nla, size, nla_len(nla), nla_len(nla)); ++} ++ ++static int xfrm_xlate32(struct nlmsghdr *dst, const struct nlmsghdr *src, ++ struct nlattr *attrs[XFRMA_MAX+1], ++ size_t size, u8 type) ++{ ++ size_t pos; ++ int i; ++ ++ memcpy(dst, src, NLMSG_HDRLEN); ++ dst->nlmsg_len = NLMSG_HDRLEN + xfrm_msg_min[type]; ++ memset(nlmsg_data(dst), 0, xfrm_msg_min[type]); ++ ++ switch (src->nlmsg_type) { ++ /* Compat message has the same layout as native */ ++ case XFRM_MSG_DELSA: ++ case XFRM_MSG_GETSA: ++ case XFRM_MSG_DELPOLICY: ++ case XFRM_MSG_GETPOLICY: ++ case XFRM_MSG_FLUSHSA: ++ case XFRM_MSG_FLUSHPOLICY: ++ case XFRM_MSG_NEWAE: ++ case XFRM_MSG_GETAE: ++ case XFRM_MSG_REPORT: ++ case XFRM_MSG_MIGRATE: ++ case XFRM_MSG_NEWSADINFO: ++ case XFRM_MSG_GETSADINFO: ++ case XFRM_MSG_NEWSPDINFO: ++ case XFRM_MSG_GETSPDINFO: ++ case XFRM_MSG_MAPPING: ++ memcpy(nlmsg_data(dst), nlmsg_data(src), compat_msg_min[type]); ++ break; ++ /* 4 byte alignment for trailing u64 on native, but not on compat */ ++ case XFRM_MSG_NEWSA: ++ case XFRM_MSG_NEWPOLICY: ++ case XFRM_MSG_UPDSA: ++ case XFRM_MSG_UPDPOLICY: ++ memcpy(nlmsg_data(dst), nlmsg_data(src), compat_msg_min[type]); ++ break; ++ case XFRM_MSG_EXPIRE: { ++ const struct compat_xfrm_user_expire *src_ue = nlmsg_data(src); ++ struct xfrm_user_expire *dst_ue = nlmsg_data(dst); ++ ++ /* compat_xfrm_user_expire has 4-byte smaller state */ ++ memcpy(dst_ue, src_ue, sizeof(src_ue->state)); ++ dst_ue->hard = src_ue->hard; ++ break; ++ } ++ case XFRM_MSG_ACQUIRE: { ++ 
const struct compat_xfrm_user_acquire *src_ua = nlmsg_data(src); ++ struct xfrm_user_acquire *dst_ua = nlmsg_data(dst); ++ ++ memcpy(dst_ua, src_ua, offsetof(struct compat_xfrm_user_acquire, aalgos)); ++ dst_ua->aalgos = src_ua->aalgos; ++ dst_ua->ealgos = src_ua->ealgos; ++ dst_ua->calgos = src_ua->calgos; ++ dst_ua->seq = src_ua->seq; ++ break; ++ } ++ case XFRM_MSG_POLEXPIRE: { ++ const struct compat_xfrm_user_polexpire *src_upe = nlmsg_data(src); ++ struct xfrm_user_polexpire *dst_upe = nlmsg_data(dst); ++ ++ /* compat_xfrm_user_polexpire has 4-byte smaller state */ ++ memcpy(dst_upe, src_upe, sizeof(src_upe->pol)); ++ dst_upe->hard = src_upe->hard; ++ break; ++ } ++ case XFRM_MSG_ALLOCSPI: { ++ const struct compat_xfrm_userspi_info *src_usi = nlmsg_data(src); ++ struct xfrm_userspi_info *dst_usi = nlmsg_data(dst); ++ ++ /* compat_xfrm_user_polexpire has 4-byte smaller state */ ++ memcpy(dst_usi, src_usi, sizeof(src_usi->info)); ++ dst_usi->min = src_usi->min; ++ dst_usi->max = src_usi->max; ++ break; ++ } ++ default: ++ return -EOPNOTSUPP; ++ } ++ pos = dst->nlmsg_len; ++ ++ for (i = 1; i < XFRMA_MAX + 1; i++) { ++ int err; ++ ++ if (i == XFRMA_PAD || i == XFRMA_OFFLOAD_DEV) ++ continue; ++ ++ if (!attrs[i]) ++ continue; ++ ++ err = xfrm_xlate32_attr(dst, attrs[i], &pos, size); ++ if (err) ++ return err; ++ } ++ ++ return 0; ++} ++ ++static struct nlmsghdr *xfrm_user_rcv_msg_compat(const struct nlmsghdr *h32, ++ int maxtype, const struct nla_policy *policy) ++{ ++ /* netlink_rcv_skb() checks if a message has full (struct nlmsghdr) */ ++ u16 type = h32->nlmsg_type - XFRM_MSG_BASE; ++ struct nlattr *attrs[XFRMA_MAX+1]; ++ struct nlmsghdr *h64; ++ size_t len; ++ int err; ++ ++ BUILD_BUG_ON(ARRAY_SIZE(xfrm_msg_min) != ARRAY_SIZE(compat_msg_min)); ++ ++ if (type >= ARRAY_SIZE(xfrm_msg_min)) ++ return ERR_PTR(-EINVAL); ++ ++ /* Don't call parse: the message might have only nlmsg header */ ++ if ((h32->nlmsg_type == XFRM_MSG_GETSA || ++ h32->nlmsg_type == XFRM_MSG_GETPOLICY) && ++ (h32->nlmsg_flags & NLM_F_DUMP)) ++ return NULL; ++ ++ err = nlmsg_parse(h32, compat_msg_min[type], attrs, ++ maxtype ? : XFRMA_MAX, policy ? 
: compat_policy); ++ if (err < 0) ++ return ERR_PTR(err); ++ ++ len = xfrm_user_rcv_calculate_len64(h32, attrs); ++ /* The message doesn't need translation */ ++ if (len == nlmsg_len(h32)) ++ return NULL; ++ ++ len += NLMSG_HDRLEN; ++ h64 = kvmalloc(len, GFP_KERNEL | __GFP_ZERO); ++ if (!h64) ++ return ERR_PTR(-ENOMEM); ++ ++ err = xfrm_xlate32(h64, h32, attrs, len, type); ++ if (err < 0) { ++ kvfree(h64); ++ return ERR_PTR(err); ++ } ++ ++ return h64; ++} ++ ++static int xfrm_user_policy_compat(u8 **pdata32, int optlen) ++{ ++ struct compat_xfrm_userpolicy_info *p = (void *)*pdata32; ++ u8 *src_templates, *dst_templates; ++ u8 *data64; ++ ++ if (optlen < sizeof(*p)) ++ return -EINVAL; ++ ++ data64 = kmalloc_track_caller(optlen + 4, GFP_USER | __GFP_NOWARN); ++ if (!data64) ++ return -ENOMEM; ++ ++ memcpy(data64, *pdata32, sizeof(*p)); ++ memset(data64 + sizeof(*p), 0, 4); ++ ++ src_templates = *pdata32 + sizeof(*p); ++ dst_templates = data64 + sizeof(*p) + 4; ++ memcpy(dst_templates, src_templates, optlen - sizeof(*p)); ++ ++ kfree(*pdata32); ++ *pdata32 = data64; ++ return 0; ++} ++ ++static struct xfrm_translator xfrm_translator = { ++ .owner = THIS_MODULE, ++ .alloc_compat = xfrm_alloc_compat, ++ .rcv_msg_compat = xfrm_user_rcv_msg_compat, ++ .xlate_user_policy_sockptr = xfrm_user_policy_compat, ++}; ++ ++static int __init xfrm_compat_init(void) ++{ ++ return xfrm_register_translator(&xfrm_translator); ++} ++ ++static void __exit xfrm_compat_exit(void) ++{ ++ xfrm_unregister_translator(&xfrm_translator); ++} ++ ++module_init(xfrm_compat_init); ++module_exit(xfrm_compat_exit); ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Dmitry Safonov"); ++MODULE_DESCRIPTION("XFRM 32-bit compatibility layer"); +diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c +index ecb9dcb71a39..3264a7c12ee8 100644 +--- a/net/xfrm/xfrm_input.c ++++ b/net/xfrm/xfrm_input.c +@@ -320,7 +320,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) + /* only the first xfrm gets the encap type */ + encap_type = 0; + +- if (async && x->repl->recheck(x, skb, seq)) { ++ if (x->repl->recheck(x, skb, seq)) { + XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); + goto drop_unlock; + } +diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c +index 860f9c1c34fd..1af6ba7115dc 100644 +--- a/net/xfrm/xfrm_state.c ++++ b/net/xfrm/xfrm_state.c +@@ -761,7 +761,8 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x, + */ + if (x->km.state == XFRM_STATE_VALID) { + if ((x->sel.family && +- !xfrm_selector_match(&x->sel, fl, x->sel.family)) || ++ (x->sel.family != family || ++ !xfrm_selector_match(&x->sel, fl, family))) || + !security_xfrm_state_pol_flow_match(x, pol, fl)) + return; + +@@ -774,7 +775,9 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x, + *acq_in_progress = 1; + } else if (x->km.state == XFRM_STATE_ERROR || + x->km.state == XFRM_STATE_EXPIRED) { +- if (xfrm_selector_match(&x->sel, fl, x->sel.family) && ++ if ((!x->sel.family || ++ (x->sel.family == family && ++ xfrm_selector_match(&x->sel, fl, family))) && + security_xfrm_state_pol_flow_match(x, pol, fl)) + *error = -ESRCH; + } +@@ -815,7 +818,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, + tmpl->mode == x->props.mode && + tmpl->id.proto == x->id.proto && + (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) +- xfrm_state_look_at(pol, x, fl, encap_family, ++ xfrm_state_look_at(pol, x, fl, family, + &best, &acquire_in_progress, &error); + } + if (best || 
acquire_in_progress) +@@ -832,7 +835,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, + tmpl->mode == x->props.mode && + tmpl->id.proto == x->id.proto && + (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) +- xfrm_state_look_at(pol, x, fl, encap_family, ++ xfrm_state_look_at(pol, x, fl, family, + &best, &acquire_in_progress, &error); + } + +@@ -1253,7 +1256,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig) + x->tfcpad = orig->tfcpad; + x->replay_maxdiff = orig->replay_maxdiff; + x->replay_maxage = orig->replay_maxage; +- x->curlft.add_time = orig->curlft.add_time; ++ memcpy(&x->curlft, &orig->curlft, sizeof(x->curlft)); + x->km.state = orig->km.state; + x->km.seq = orig->km.seq; + x->replay = orig->replay; +@@ -1610,6 +1613,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high) + int err = -ENOENT; + __be32 minspi = htonl(low); + __be32 maxspi = htonl(high); ++ __be32 newspi = 0; + u32 mark = x->mark.v & x->mark.m; + + spin_lock_bh(&x->lock); +@@ -1628,21 +1632,22 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high) + xfrm_state_put(x0); + goto unlock; + } +- x->id.spi = minspi; ++ newspi = minspi; + } else { + u32 spi = 0; + for (h = 0; h < high-low+1; h++) { + spi = low + prandom_u32()%(high-low+1); + x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family); + if (x0 == NULL) { +- x->id.spi = htonl(spi); ++ newspi = htonl(spi); + break; + } + xfrm_state_put(x0); + } + } +- if (x->id.spi) { ++ if (newspi) { + spin_lock_bh(&net->xfrm.xfrm_state_lock); ++ x->id.spi = newspi; + h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family); + hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h); + spin_unlock_bh(&net->xfrm.xfrm_state_lock); +@@ -1901,6 +1906,66 @@ bool km_is_alive(const struct km_event *c) + } + EXPORT_SYMBOL(km_is_alive); + ++#if IS_ENABLED(CONFIG_XFRM_USER_COMPAT) ++static DEFINE_SPINLOCK(xfrm_translator_lock); ++static struct xfrm_translator __rcu *xfrm_translator; ++ ++struct xfrm_translator *xfrm_get_translator(void) ++{ ++ struct xfrm_translator *xtr; ++ ++ rcu_read_lock(); ++ xtr = rcu_dereference(xfrm_translator); ++ if (unlikely(!xtr)) ++ goto out; ++ if (!try_module_get(xtr->owner)) ++ xtr = NULL; ++out: ++ rcu_read_unlock(); ++ return xtr; ++} ++EXPORT_SYMBOL_GPL(xfrm_get_translator); ++ ++void xfrm_put_translator(struct xfrm_translator *xtr) ++{ ++ module_put(xtr->owner); ++} ++EXPORT_SYMBOL_GPL(xfrm_put_translator); ++ ++int xfrm_register_translator(struct xfrm_translator *xtr) ++{ ++ int err = 0; ++ ++ spin_lock_bh(&xfrm_translator_lock); ++ if (unlikely(xfrm_translator != NULL)) ++ err = -EEXIST; ++ else ++ rcu_assign_pointer(xfrm_translator, xtr); ++ spin_unlock_bh(&xfrm_translator_lock); ++ ++ return err; ++} ++EXPORT_SYMBOL_GPL(xfrm_register_translator); ++ ++int xfrm_unregister_translator(struct xfrm_translator *xtr) ++{ ++ int err = 0; ++ ++ spin_lock_bh(&xfrm_translator_lock); ++ if (likely(xfrm_translator != NULL)) { ++ if (rcu_access_pointer(xfrm_translator) != xtr) ++ err = -EINVAL; ++ else ++ RCU_INIT_POINTER(xfrm_translator, NULL); ++ } ++ spin_unlock_bh(&xfrm_translator_lock); ++ synchronize_rcu(); ++ ++ return err; ++} ++EXPORT_SYMBOL_GPL(xfrm_unregister_translator); ++#endif ++ + int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen) + { + int err; +@@ -1926,6 +1991,23 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen + if (copy_from_user(data, optval, optlen)) + goto 
out; + ++ /* Use the 64-bit / untranslated format on Android, even for compat */ ++ if (!IS_ENABLED(CONFIG_ANDROID) || IS_ENABLED(CONFIG_XFRM_USER_COMPAT)) { ++ if (in_compat_syscall()) { ++ struct xfrm_translator *xtr = xfrm_get_translator(); ++ ++ if (!xtr) ++ return -EOPNOTSUPP; ++ ++ err = xtr->xlate_user_policy_sockptr(&data, optlen); ++ xfrm_put_translator(xtr); ++ if (err) { ++ kfree(data); ++ return err; ++ } ++ } ++ } ++ + err = -EINVAL; + rcu_read_lock(); + list_for_each_entry_rcu(km, &xfrm_km_list, list) { +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c +index 5ea9c1a83d3e..2ec3047cabd9 100755 +--- a/net/xfrm/xfrm_user.c ++++ b/net/xfrm/xfrm_user.c +@@ -579,6 +579,20 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, + + copy_from_user_state(x, p); + ++ if (attrs[XFRMA_ENCAP]) { ++ x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]), ++ sizeof(*x->encap), GFP_KERNEL); ++ if (x->encap == NULL) ++ goto error; ++ } ++ ++ if (attrs[XFRMA_COADDR]) { ++ x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]), ++ sizeof(*x->coaddr), GFP_KERNEL); ++ if (x->coaddr == NULL) ++ goto error; ++ } ++ + if (attrs[XFRMA_SA_EXTRA_FLAGS]) + x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]); + +@@ -599,23 +613,9 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, + attrs[XFRMA_ALG_COMP]))) + goto error; + +- if (attrs[XFRMA_ENCAP]) { +- x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]), +- sizeof(*x->encap), GFP_KERNEL); +- if (x->encap == NULL) +- goto error; +- } +- + if (attrs[XFRMA_TFCPAD]) + x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]); + +- if (attrs[XFRMA_COADDR]) { +- x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]), +- sizeof(*x->coaddr), GFP_KERNEL); +- if (x->coaddr == NULL) +- goto error; +- } +- + xfrm_mark_get(attrs, &x->mark); + + xfrm_smark_init(attrs, &x->props.smark); +@@ -949,6 +949,7 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr) + struct xfrm_dump_info *sp = ptr; + struct sk_buff *in_skb = sp->in_skb; + struct sk_buff *skb = sp->out_skb; ++ struct xfrm_translator *xtr; + struct xfrm_usersa_info *p; + struct nlmsghdr *nlh; + int err; +@@ -966,6 +967,18 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr) + return err; + } + nlmsg_end(skb, nlh); ++ ++ xtr = xfrm_get_translator(); ++ if (xtr) { ++ err = xtr->alloc_compat(skb, nlh); ++ ++ xfrm_put_translator(xtr); ++ if (err) { ++ nlmsg_cancel(skb, nlh); ++ return err; ++ } ++ } ++ + return 0; + } + +@@ -980,7 +993,6 @@ static int xfrm_dump_sa_done(struct netlink_callback *cb) + return 0; + } + +-static const struct nla_policy xfrma_policy[XFRMA_MAX+1]; + static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) + { + struct net *net = sock_net(skb->sk); +@@ -1057,12 +1069,24 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb, + u32 pid, unsigned int group) + { + struct sock *nlsk = rcu_dereference(net->xfrm.nlsk); ++ struct xfrm_translator *xtr; + + if (!nlsk) { + kfree_skb(skb); + return -EPIPE; + } + ++ xtr = xfrm_get_translator(); ++ if (xtr) { ++ int err = xtr->alloc_compat(skb, nlmsg_hdr(skb)); ++ ++ xfrm_put_translator(xtr); ++ if (err) { ++ kfree_skb(skb); ++ return err; ++ } ++ } ++ + return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); + } + +@@ -1280,6 +1304,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, + struct net *net = sock_net(skb->sk); + struct xfrm_state *x; + struct xfrm_userspi_info *p; ++ struct xfrm_translator *xtr; + struct sk_buff 
*resp_skb; + xfrm_address_t *daddr; + int family; +@@ -1330,6 +1355,17 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, + goto out; + } + ++ xtr = xfrm_get_translator(); ++ if (xtr) { ++ err = xtr->alloc_compat(skb, nlmsg_hdr(skb)); ++ ++ xfrm_put_translator(xtr); ++ if (err) { ++ kfree_skb(resp_skb); ++ goto out; ++ } ++ } ++ + err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid); + + out: +@@ -1736,6 +1772,7 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr + struct xfrm_userpolicy_info *p; + struct sk_buff *in_skb = sp->in_skb; + struct sk_buff *skb = sp->out_skb; ++ struct xfrm_translator *xtr; + struct nlmsghdr *nlh; + int err; + +@@ -1760,6 +1797,18 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr + return err; + } + nlmsg_end(skb, nlh); ++ ++ xtr = xfrm_get_translator(); ++ if (xtr) { ++ err = xtr->alloc_compat(skb, nlh); ++ ++ xfrm_put_translator(xtr); ++ if (err) { ++ nlmsg_cancel(skb, nlh); ++ return err; ++ } ++ } ++ + return 0; + } + +@@ -2479,7 +2528,7 @@ static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, + + #define XMSGSIZE(type) sizeof(struct type) + +-static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { ++const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { + [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), + [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), + [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), +@@ -2502,10 +2551,11 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { + [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = sizeof(u32), + [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32), + }; ++EXPORT_SYMBOL_GPL(xfrm_msg_min); + + #undef XMSGSIZE + +-static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { ++const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { + [XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)}, + [XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)}, + [XFRMA_LASTUSED] = { .type = NLA_U64}, +@@ -2536,6 +2586,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { + [XFRMA_SET_MARK_MASK] = { .type = NLA_U32 }, + [XFRMA_IF_ID] = { .type = NLA_U32 }, + }; ++EXPORT_SYMBOL_GPL(xfrma_policy); + + static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = { + [XFRMA_SPD_IPV4_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) }, +@@ -2584,6 +2635,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + struct net *net = sock_net(skb->sk); + struct nlattr *attrs[XFRMA_MAX+1]; + const struct xfrm_link *link; ++ struct nlmsghdr *nlh64 = NULL; + int type, err; + + type = nlh->nlmsg_type; +@@ -2597,32 +2649,58 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + if (!netlink_net_capable(skb, CAP_NET_ADMIN)) + return -EPERM; + ++ /* Use the 64-bit / untranslated format on Android, even for compat */ ++ if (!IS_ENABLED(CONFIG_ANDROID) || IS_ENABLED(CONFIG_XFRM_USER_COMPAT)) { ++ if (in_compat_syscall()) { ++ struct xfrm_translator *xtr = xfrm_get_translator(); ++ ++ if (!xtr) ++ return -EOPNOTSUPP; ++ ++ nlh64 = xtr->rcv_msg_compat(nlh, link->nla_max, ++ link->nla_pol); ++ xfrm_put_translator(xtr); ++ if (IS_ERR(nlh64)) ++ return PTR_ERR(nlh64); ++ if (nlh64) ++ nlh = nlh64; ++ } ++ } ++ + if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || + type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) && + (nlh->nlmsg_flags & NLM_F_DUMP)) { +- if (link->dump == NULL) +- return -EINVAL; ++ struct netlink_dump_control c = { ++ .start = 
link->start, ++ .dump = link->dump, ++ .done = link->done, ++ }; + +- { +- struct netlink_dump_control c = { +- .start = link->start, +- .dump = link->dump, +- .done = link->done, +- }; +- return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c); ++ if (link->dump == NULL) { ++ err = -EINVAL; ++ goto err; + } ++ ++ err = netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c); ++ goto err; + } + + err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, + link->nla_max ? : XFRMA_MAX, + link->nla_pol ? : xfrma_policy); + if (err < 0) +- return err; ++ goto err; + +- if (link->doit == NULL) +- return -EINVAL; ++ if (link->doit == NULL) { ++ err = -EINVAL; ++ goto err; ++ } + +- return link->doit(skb, nlh, attrs); ++ err = link->doit(skb, nlh, attrs); ++ ++err: ++ kvfree(nlh64); ++ return err; + } + + static void xfrm_netlink_rcv(struct sk_buff *skb) +diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1_kern.c +index 107da148820f..9c74b45c5720 100644 +--- a/samples/bpf/tracex1_kern.c ++++ b/samples/bpf/tracex1_kern.c +@@ -20,7 +20,7 @@ + SEC("kprobe/__netif_receive_skb_core") + int bpf_prog1(struct pt_regs *ctx) + { +- /* attaches to kprobe netif_receive_skb, ++ /* attaches to kprobe __netif_receive_skb_core, + * looks for packets on loobpack device and prints them + */ + char devname[IFNAMSIZ]; +@@ -29,7 +29,7 @@ int bpf_prog1(struct pt_regs *ctx) + int len; + + /* non-portable! works for the given kernel only */ +- skb = (struct sk_buff *) PT_REGS_PARM1(ctx); ++ bpf_probe_read_kernel(&skb, sizeof(skb), (void *)PT_REGS_PARM1(ctx)); + dev = _(skb->dev); + len = _(skb->len); + +diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c +index 2fca916d9edf..a7f5ee8b6edc 100644 +--- a/samples/kfifo/bytestream-example.c ++++ b/samples/kfifo/bytestream-example.c +@@ -124,8 +124,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf, + ret = kfifo_from_user(&test, buf, count, &copied); + + mutex_unlock(&write_lock); ++ if (ret) ++ return ret; + +- return ret ? ret : copied; ++ return copied; + } + + static ssize_t fifo_read(struct file *file, char __user *buf, +@@ -140,8 +142,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf, + ret = kfifo_to_user(&test, buf, count, &copied); + + mutex_unlock(&read_lock); ++ if (ret) ++ return ret; + +- return ret ? ret : copied; ++ return copied; + } + + static const struct file_operations fifo_fops = { +diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c +index 8dc3c2e7105a..a326a37e9163 100644 +--- a/samples/kfifo/inttype-example.c ++++ b/samples/kfifo/inttype-example.c +@@ -117,8 +117,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf, + ret = kfifo_from_user(&test, buf, count, &copied); + + mutex_unlock(&write_lock); ++ if (ret) ++ return ret; + +- return ret ? ret : copied; ++ return copied; + } + + static ssize_t fifo_read(struct file *file, char __user *buf, +@@ -133,8 +135,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf, + ret = kfifo_to_user(&test, buf, count, &copied); + + mutex_unlock(&read_lock); ++ if (ret) ++ return ret; + +- return ret ? 
ret : copied; ++ return copied; + } + + static const struct file_operations fifo_fops = { +diff --git a/samples/kfifo/record-example.c b/samples/kfifo/record-example.c +index 2d7529eeb294..deb87a2e4e6b 100644 +--- a/samples/kfifo/record-example.c ++++ b/samples/kfifo/record-example.c +@@ -131,8 +131,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf, + ret = kfifo_from_user(&test, buf, count, &copied); + + mutex_unlock(&write_lock); ++ if (ret) ++ return ret; + +- return ret ? ret : copied; ++ return copied; + } + + static ssize_t fifo_read(struct file *file, char __user *buf, +@@ -147,8 +149,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf, + ret = kfifo_to_user(&test, buf, count, &copied); + + mutex_unlock(&read_lock); ++ if (ret) ++ return ret; + +- return ret ? ret : copied; ++ return copied; + } + + static const struct file_operations fifo_fops = { +diff --git a/samples/mic/mpssd/mpssd.c b/samples/mic/mpssd/mpssd.c +index 49db1def1721..84e583ab8fd0 100644 +--- a/samples/mic/mpssd/mpssd.c ++++ b/samples/mic/mpssd/mpssd.c +@@ -414,9 +414,9 @@ mic_virtio_copy(struct mic_info *mic, int fd, + + static inline unsigned _vring_size(unsigned int num, unsigned long align) + { +- return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num) ++ return _ALIGN_UP(((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num) + + align - 1) & ~(align - 1)) +- + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num; ++ + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num, 4); + } + + /* +diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include +index f40431fd7df1..a45ea93a01a7 100644 +--- a/scripts/Kbuild.include ++++ b/scripts/Kbuild.include +@@ -82,20 +82,21 @@ cc-cross-prefix = \ + fi))) + + # output directory for tests below +-TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/) ++TMPOUT = $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_$$$$ + + # try-run + # Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise) + # Exit code chooses option. "$$TMP" is can be used as temporary file and + # is automatically cleaned up. 
+ try-run = $(shell set -e; \ +- TMP="$(TMPOUT).$$$$.tmp"; \ +- TMPO="$(TMPOUT).$$$$.o"; \ ++ TMP=$(TMPOUT)/tmp; \ ++ TMPO=$(TMPOUT)/tmp.o; \ ++ mkdir -p $(TMPOUT); \ ++ trap "rm -rf $(TMPOUT)" EXIT; \ + if ($(1)) >/dev/null 2>&1; \ + then echo "$(2)"; \ + else echo "$(3)"; \ +- fi; \ +- rm -f "$$TMP" "$$TMPO") ++ fi) + + # as-option + # Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,) +diff --git a/scripts/Makefile b/scripts/Makefile +index 1d80897a9644..9116feaacee2 100644 +--- a/scripts/Makefile ++++ b/scripts/Makefile +@@ -11,6 +11,9 @@ + + HOST_EXTRACFLAGS += -I$(srctree)/tools/include + ++CRYPTO_LIBS = $(shell pkg-config --libs libcrypto 2> /dev/null || echo -lcrypto) ++CRYPTO_CFLAGS = $(shell pkg-config --cflags libcrypto 2> /dev/null) ++ + hostprogs-$(CONFIG_KALLSYMS) += kallsyms + hostprogs-$(CONFIG_LOGO) += pnmtologo + hostprogs-$(CONFIG_VT) += conmakehash +@@ -23,8 +26,10 @@ hostprogs-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE) += insert-sys-cert + + HOSTCFLAGS_sortextable.o = -I$(srctree)/tools/include + HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include +-HOSTLOADLIBES_sign-file = -lcrypto +-HOSTLOADLIBES_extract-cert = -lcrypto ++HOSTCFLAGS_sign-file.o = $(CRYPTO_CFLAGS) ++HOSTLOADLIBES_sign-file = $(CRYPTO_LIBS) ++HOSTCFLAGS_extract-cert.o = $(CRYPTO_CFLAGS) ++HOSTLOADLIBES_extract-cert = $(CRYPTO_LIBS) + + always := $(hostprogs-y) $(hostprogs-m) + +diff --git a/scripts/Makefile.build b/scripts/Makefile.build +index fcffce439834..b560f9de83b8 100644 +--- a/scripts/Makefile.build ++++ b/scripts/Makefile.build +@@ -245,6 +245,8 @@ endif + endif + + ifdef CONFIG_FTRACE_MCOUNT_RECORD ++ifndef CC_USING_RECORD_MCOUNT ++# compiler will not generate __mcount_loc use recordmcount or recordmcount.pl + ifdef BUILD_C_RECORDMCOUNT + ifeq ("$(origin RECORDMCOUNT_WARN)", "command line") + RECORDMCOUNT_FLAGS = -w +@@ -284,6 +286,7 @@ cmd_record_mcount = \ + "$(CC_FLAGS_FTRACE)" ]; then \ + $(sub_cmd_record_mcount) \ + fi; ++endif # CC_USING_RECORD_MCOUNT + endif + endif # CONFIG_FTRACE_MCOUNT_RECORD + +diff --git a/scripts/analyze_suspend.py b/scripts/analyze_suspend.py +index a0ba48fa2c5e..edc4f1255f9b 100755 +--- a/scripts/analyze_suspend.py ++++ b/scripts/analyze_suspend.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/python ++#!/usr/bin/env python + # + # Tool for analyzing suspend/resume timing + # Copyright (c) 2013, Intel Corporation. 
+diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter +index d9ff038c1b28..a650bea5ccdd 100755 +--- a/scripts/bloat-o-meter ++++ b/scripts/bloat-o-meter +@@ -1,4 +1,4 @@ +-#!/usr/bin/python ++#!/usr/bin/env python3 + # + # Copyright 2004 Matt Mackall + # +diff --git a/scripts/bootgraph.pl b/scripts/bootgraph.pl +index 9ca667bcaee9..594c55541b16 100755 +--- a/scripts/bootgraph.pl ++++ b/scripts/bootgraph.pl +@@ -1,4 +1,4 @@ +-#!/usr/bin/perl ++#!/usr/bin/env perl + + # Copyright 2008, Intel Corporation + # +diff --git a/scripts/checkincludes.pl b/scripts/checkincludes.pl +index 97b2c6143fe4..cfdfa02d4d92 100755 +--- a/scripts/checkincludes.pl ++++ b/scripts/checkincludes.pl +@@ -1,4 +1,4 @@ +-#!/usr/bin/perl ++#!/usr/bin/env perl + # + # checkincludes: find/remove files included more than once + # +diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl +index eca87a51ba78..0d83cd736900 100755 +--- a/scripts/checkpatch.pl ++++ b/scripts/checkpatch.pl +@@ -2514,8 +2514,8 @@ sub process { + + # Check if the commit log has what seems like a diff which can confuse patch + if ($in_commit_log && !$commit_log_has_diff && +- (($line =~ m@^\s+diff\b.*a/[\w/]+@ && +- $line =~ m@^\s+diff\b.*a/([\w/]+)\s+b/$1\b@) || ++ (($line =~ m@^\s+diff\b.*a/([\w/]+)@ && ++ $line =~ m@^\s+diff\b.*a/[\w/]+\s+b/$1\b@) || + $line =~ m@^\s*(?:\-\-\-\s+a/|\+\+\+\s+b/)@ || + $line =~ m/^\s*\@\@ \-\d+,\d+ \+\d+,\d+ \@\@/)) { + ERROR("DIFF_IN_COMMIT_MSG", +@@ -3991,7 +3991,7 @@ sub process { + $fix) { + fix_delete_line($fixlinenr, $rawline); + my $fixed_line = $rawline; +- $fixed_line =~ /(^..*$Type\s*$Ident\(.*\)\s*){(.*)$/; ++ $fixed_line =~ /(^..*$Type\s*$Ident\(.*\)\s*)\{(.*)$/; + my $line1 = $1; + my $line2 = $2; + fix_insert_line($fixlinenr, ltrim($line1)); +diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl +index b8f616545277..32828fafcf5b 100755 +--- a/scripts/checkstack.pl ++++ b/scripts/checkstack.pl +@@ -1,4 +1,4 @@ +-#!/usr/bin/perl ++#!/usr/bin/env perl + + # Check the stack usage of functions + # +diff --git a/scripts/config b/scripts/config +index 73de17d39698..06ac9882e1de 100755 +--- a/scripts/config ++++ b/scripts/config +@@ -1,4 +1,4 @@ +-#!/bin/bash ++#!/usr/bin/env bash + # Manipulate options in a .config file from the command line + + myname=${0##*/} +diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh +index 98cf6343afcd..61564e56e3aa 100755 +--- a/scripts/decode_stacktrace.sh ++++ b/scripts/decode_stacktrace.sh +@@ -76,8 +76,8 @@ parse_symbol() { + return + fi + +- # Strip out the base of the path +- code=${code#$basepath/} ++ # Strip out the base of the path on each line ++ code=$(while read -r line; do echo "${line#$basepath/}"; done <<< "$code") + + # In the case of inlines, move everything to same line + code=${code//$'\n'/' '} +diff --git a/scripts/depmod.sh b/scripts/depmod.sh +index baedaef53ca0..b0cb89e73bc5 100755 +--- a/scripts/depmod.sh ++++ b/scripts/depmod.sh +@@ -14,6 +14,8 @@ if ! test -r System.map ; then + exit 0 + fi + ++# legacy behavior: "depmod" in /sbin, no /sbin in PATH ++PATH="$PATH:/sbin" + if [ -z $(command -v $DEPMOD) ]; then + echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2 + echo "This is probably in the kmod package." >&2 +diff --git a/scripts/diffconfig b/scripts/diffconfig +index 0db267d0adc9..19189f3c4a03 100755 +--- a/scripts/diffconfig ++++ b/scripts/diffconfig +@@ -1,4 +1,4 @@ +-#!/usr/bin/python ++#!/usr/bin/env python3 + # + # diffconfig - a tool to compare .config files. 
+ # +diff --git a/scripts/dtc/dt_to_config b/scripts/dtc/dt_to_config +index 9a248b505c58..5dfd1bff351f 100755 +--- a/scripts/dtc/dt_to_config ++++ b/scripts/dtc/dt_to_config +@@ -1,4 +1,4 @@ +-#!/usr/bin/perl ++#!/usr/bin/env perl + + # Copyright 2016 by Frank Rowand + # Copyright 2016 by Gaurav Minocha +diff --git a/scripts/extract_xc3028.pl b/scripts/extract_xc3028.pl +index 47877deae6d7..61d9b256c658 100755 +--- a/scripts/extract_xc3028.pl ++++ b/scripts/extract_xc3028.pl +@@ -1,4 +1,4 @@ +-#!/usr/bin/perl ++#!/usr/bin/env perl + + # Copyright (c) Mauro Carvalho Chehab + # Released under GPLv2 +diff --git a/scripts/gdb/linux/dmesg.py b/scripts/gdb/linux/dmesg.py +index f9b92ece7834..6d2e09a2ad2f 100644 +--- a/scripts/gdb/linux/dmesg.py ++++ b/scripts/gdb/linux/dmesg.py +@@ -12,6 +12,7 @@ + # + + import gdb ++import sys + + from linux import utils + +@@ -23,10 +24,11 @@ class LxDmesg(gdb.Command): + super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA) + + def invoke(self, arg, from_tty): +- log_buf_addr = int(str(gdb.parse_and_eval("log_buf")).split()[0], 16) +- log_first_idx = int(gdb.parse_and_eval("log_first_idx")) +- log_next_idx = int(gdb.parse_and_eval("log_next_idx")) +- log_buf_len = int(gdb.parse_and_eval("log_buf_len")) ++ log_buf_addr = int(str(gdb.parse_and_eval( ++ "(void *)'printk.c'::log_buf")).split()[0], 16) ++ log_first_idx = int(gdb.parse_and_eval("'printk.c'::log_first_idx")) ++ log_next_idx = int(gdb.parse_and_eval("'printk.c'::log_next_idx")) ++ log_buf_len = int(gdb.parse_and_eval("'printk.c'::log_buf_len")) + + inf = gdb.inferiors()[0] + start = log_buf_addr + log_first_idx +@@ -51,13 +53,19 @@ class LxDmesg(gdb.Command): + continue + + text_len = utils.read_u16(log_buf[pos + 10:pos + 12]) +- text = log_buf[pos + 16:pos + 16 + text_len].decode() ++ text = log_buf[pos + 16:pos + 16 + text_len].decode( ++ encoding='utf8', errors='replace') + time_stamp = utils.read_u64(log_buf[pos:pos + 8]) + + for line in text.splitlines(): +- gdb.write("[{time:12.6f}] {line}\n".format( ++ msg = u"[{time:12.6f}] {line}\n".format( + time=time_stamp / 1000000000.0, +- line=line)) ++ line=line) ++ # With python2 gdb.write will attempt to convert unicode to ++ # ascii and might fail so pass an utf8-encoded str instead. 
++ if sys.hexversion < 0x03000000: ++ msg = msg.encode(encoding='utf8', errors='replace') ++ gdb.write(msg) + + pos += length + +diff --git a/scripts/gdb/linux/proc.py b/scripts/gdb/linux/proc.py +index 38b1f09d1cd9..822e3767bc05 100644 +--- a/scripts/gdb/linux/proc.py ++++ b/scripts/gdb/linux/proc.py +@@ -40,7 +40,7 @@ class LxVersion(gdb.Command): + + def invoke(self, arg, from_tty): + # linux_banner should contain a newline +- gdb.write(gdb.parse_and_eval("linux_banner").string()) ++ gdb.write(gdb.parse_and_eval("(char *)linux_banner").string()) + + LxVersion() + +diff --git a/scripts/get_dvb_firmware b/scripts/get_dvb_firmware +index 1a0a04125f71..f3f230225aba 100755 +--- a/scripts/get_dvb_firmware ++++ b/scripts/get_dvb_firmware +@@ -1,4 +1,4 @@ +-#!/usr/bin/perl ++#!/usr/bin/env perl + # DVB firmware extractor + # + # (c) 2004 Andrew de Quincey +diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c +index f7049e288e93..c58a46904861 100644 +--- a/scripts/kconfig/nconf.c ++++ b/scripts/kconfig/nconf.c +@@ -502,8 +502,8 @@ static int get_mext_match(const char *match_str, match_f flag) + else if (flag == FIND_NEXT_MATCH_UP) + --match_start; + ++ match_start = (match_start + items_num) % items_num; + index = match_start; +- index = (index + items_num) % items_num; + while (true) { + char *str = k_menu_items[index].str; + if (strcasestr(str, match_str) != 0) +diff --git a/scripts/markup_oops.pl b/scripts/markup_oops.pl +index c21d16328d3f..70dcfb6b3de1 100755 +--- a/scripts/markup_oops.pl ++++ b/scripts/markup_oops.pl +@@ -1,4 +1,4 @@ +-#!/usr/bin/perl ++#!/usr/bin/env perl + + use File::Basename; + use Math::BigInt; +diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h +index 6fdc97ef6023..cb73747002ed 100755 +--- a/scripts/mkcompile_h ++++ b/scripts/mkcompile_h +@@ -82,15 +82,23 @@ UTS_TRUNCATE="cut -b -$UTS_LEN" + # Only replace the real compile.h if the new one is different, + # in order to preserve the timestamp and avoid unnecessary + # recompilations. +-# We don't consider the file changed if only the date/time changed. ++# We don't consider the file changed if only the date/time changed, ++# unless KBUILD_BUILD_TIMESTAMP was explicitly set (e.g. for ++# reproducible builds with that value referring to a commit timestamp). + # A kernel config change will increase the generation number, thus + # causing compile.h to be updated (including date/time) due to the + # changed comment in the + # first line. + ++if [ -z "$KBUILD_BUILD_TIMESTAMP" ]; then ++ IGNORE_PATTERN="UTS_VERSION" ++else ++ IGNORE_PATTERN="NOT_A_PATTERN_TO_BE_MATCHED" ++fi ++ + if [ -r $TARGET ] && \ +- grep -v 'UTS_VERSION' $TARGET > .tmpver.1 && \ +- grep -v 'UTS_VERSION' .tmpcompile > .tmpver.2 && \ ++ grep -v $IGNORE_PATTERN $TARGET > .tmpver.1 && \ ++ grep -v $IGNORE_PATTERN .tmpcompile > .tmpver.2 && \ + cmp -s .tmpver.1 .tmpver.2; then + rm -f .tmpcompile + else +diff --git a/scripts/mksysmap b/scripts/mksysmap +index a35acc0d0b82..9aa23d15862a 100755 +--- a/scripts/mksysmap ++++ b/scripts/mksysmap +@@ -41,4 +41,4 @@ + # so we just ignore them to let readprofile continue to work. + # (At least sparc64 has __crc_ in the middle). 
+ +-$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( .L\)' > $2 ++$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( \.L\)' > $2 +diff --git a/scripts/profile2linkerlist.pl b/scripts/profile2linkerlist.pl +index 6943fa7cc95b..f23d7be94394 100755 +--- a/scripts/profile2linkerlist.pl ++++ b/scripts/profile2linkerlist.pl +@@ -1,4 +1,4 @@ +-#!/usr/bin/perl ++#!/usr/bin/env perl + + # + # Takes a (sorted) output of readprofile and turns it into a list suitable for +diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c +index 347f70607a33..b7f4235bd925 100644 +--- a/scripts/recordmcount.c ++++ b/scripts/recordmcount.c +@@ -362,7 +362,7 @@ static uint32_t (*w2)(uint16_t); + static int + is_mcounted_section_name(char const *const txtname) + { +- return strcmp(".text", txtname) == 0 || ++ return strncmp(".text", txtname, 5) == 0 || + strcmp(".ref.text", txtname) == 0 || + strcmp(".sched.text", txtname) == 0 || + strcmp(".spinlock.text", txtname) == 0 || +diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl +index faac4b10d8ea..113c7a071810 100755 +--- a/scripts/recordmcount.pl ++++ b/scripts/recordmcount.pl +@@ -140,6 +140,11 @@ my %text_sections = ( + ".text.unlikely" => 1, + ); + ++# Acceptable section-prefixes to record. ++my %text_section_prefixes = ( ++ ".text." => 1, ++); ++ + # Note: we are nice to C-programmers here, thus we skip the '||='-idiom. + $objdump = 'objdump' if (!$objdump); + $objcopy = 'objcopy' if (!$objcopy); +@@ -261,7 +266,11 @@ if ($arch eq "x86_64") { + + # force flags for this arch + $ld .= " -m shlelf_linux"; +- $objcopy .= " -O elf32-sh-linux"; ++ if ($endian eq "big") { ++ $objcopy .= " -O elf32-shbig-linux"; ++ } else { ++ $objcopy .= " -O elf32-sh-linux"; ++ } + + } elsif ($arch eq "powerpc") { + $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)"; +@@ -501,6 +510,14 @@ while () { + + # Only record text sections that we know are safe + $read_function = defined($text_sections{$1}); ++ if (!$read_function) { ++ foreach my $prefix (keys %text_section_prefixes) { ++ if (substr($1, 0, length $prefix) eq $prefix) { ++ $read_function = 1; ++ last; ++ } ++ } ++ } + # print out any recorded offsets + update_funcs(); + +diff --git a/scripts/setlocalversion b/scripts/setlocalversion +index aa28c3f29809..0c8741b795d0 100755 +--- a/scripts/setlocalversion ++++ b/scripts/setlocalversion +@@ -44,7 +44,7 @@ scm_version() + + # Check for git and a git repo. + if test -z "$(git rev-parse --show-cdup 2>/dev/null)" && +- head=`git rev-parse --verify --short HEAD 2>/dev/null`; then ++ head=$(git rev-parse --verify HEAD 2>/dev/null); then + + # If we are at a tagged commit (like "v2.6.30-rc6"), we ignore + # it, because this version is defined in the top level Makefile. +@@ -58,11 +58,22 @@ scm_version() + fi + # If we are past a tagged commit (like + # "v2.6.30-rc5-302-g72357d5"), we pretty print it. +- if atag="`git describe 2>/dev/null`"; then +- echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}' +- +- # If we don't have a tag at all we print -g{commitish}. ++ # ++ # Ensure the abbreviated sha1 has exactly 12 ++ # hex characters, to make the output ++ # independent of git version, local ++ # core.abbrev settings and/or total number of ++ # objects in the current repository - passing ++ # --abbrev=12 ensures a minimum of 12, and the ++ # awk substr() then picks the 'g' and first 12 ++ # hex chars. 
++ if atag="$(git describe --abbrev=12 2>/dev/null)"; then ++ echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),substr($(NF),0,13))}' ++ ++ # If we don't have a tag at all we print -g{commitish}, ++ # again using exactly 12 hex chars. + else ++ head="$(echo $head | cut -c1-12)" + printf '%s%s' -g $head + fi + fi +diff --git a/scripts/show_delta b/scripts/show_delta +index 5b365009e6a3..55c66dce6fc1 100755 +--- a/scripts/show_delta ++++ b/scripts/show_delta +@@ -1,4 +1,4 @@ +-#!/usr/bin/python ++#!/usr/bin/env python + # + # show_deltas: Read list of printk messages instrumented with + # time data, and format with time deltas. +diff --git a/scripts/stackdelta b/scripts/stackdelta +index 48eabf2f48f8..20a79f19a111 100755 +--- a/scripts/stackdelta ++++ b/scripts/stackdelta +@@ -1,4 +1,4 @@ +-#!/usr/bin/perl ++#!/usr/bin/env perl + + # Read two files produced by the stackusage script, and show the + # delta between them. +diff --git a/scripts/tracing/draw_functrace.py b/scripts/tracing/draw_functrace.py +index db40fa04cd51..68ecb455b492 100755 +--- a/scripts/tracing/draw_functrace.py ++++ b/scripts/tracing/draw_functrace.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/python ++#!/usr/bin/env python + + """ + Copyright 2008 (c) Frederic Weisbecker +@@ -17,7 +17,7 @@ Usage: + $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func + Wait some times but not too much, the script is a bit slow. + Break the pipe (Ctrl + Z) +- $ scripts/draw_functrace.py < raw_trace_func > draw_functrace ++ $ scripts/tracing/draw_functrace.py < ~/raw_trace_func > draw_functrace + Then you have your drawn trace in draw_functrace + """ + +@@ -103,10 +103,10 @@ def parseLine(line): + line = line.strip() + if line.startswith("#"): + raise CommentLineException +- m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line) ++ m = re.match("[^]]+?\\] +([a-z.]+) +([0-9.]+): (\\w+) <-(\\w+)", line) + if m is None: + raise BrokenLineException +- return (m.group(1), m.group(2), m.group(3)) ++ return (m.group(2), m.group(3), m.group(4)) + + + def main(): +diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c +index e034dc21421e..b0440cf34970 100644 +--- a/security/integrity/evm/evm_crypto.c ++++ b/security/integrity/evm/evm_crypto.c +@@ -240,7 +240,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry, + + /* Portable EVM signatures must include an IMA hash */ + if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present) +- return -EPERM; ++ error = -EPERM; + out: + kfree(xattr_value); + kfree(desc); +diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h +index df7834aa1b8f..5f2a0a07ceac 100644 +--- a/security/integrity/ima/ima.h ++++ b/security/integrity/ima/ima.h +@@ -36,7 +36,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; + #define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE + #define IMA_EVENT_NAME_LEN_MAX 255 + +-#define IMA_HASH_BITS 9 ++#define IMA_HASH_BITS 10 + #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) + + #define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16 +@@ -136,9 +136,10 @@ struct ima_h_table { + }; + extern struct ima_h_table ima_htable; + +-static inline unsigned long ima_hash_key(u8 *digest) ++static inline unsigned int ima_hash_key(u8 *digest) + { +- return hash_long(*digest, IMA_HASH_BITS); ++ /* there is no point in taking a hash of part of a digest */ ++ return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE; + } + + enum ima_hooks { +diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c +index 5155c343406e..170f12031ae5 100644 
+--- a/security/integrity/ima/ima_crypto.c ++++ b/security/integrity/ima/ima_crypto.c +@@ -683,6 +683,8 @@ static int __init ima_calc_boot_aggregate_tfm(char *digest, + ima_pcrread(i, pcr_i); + /* now accumulate with current aggregate */ + rc = crypto_shash_update(shash, pcr_i, TPM_DIGEST_SIZE); ++ if (rc != 0) ++ return rc; + } + if (!rc) + crypto_shash_final(shash, digest); +diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c +index aed47b777a57..4926a5a1bc94 100644 +--- a/security/integrity/ima/ima_policy.c ++++ b/security/integrity/ima/ima_policy.c +@@ -150,7 +150,7 @@ static struct ima_rule_entry default_appraise_rules[] = { + static LIST_HEAD(ima_default_rules); + static LIST_HEAD(ima_policy_rules); + static LIST_HEAD(ima_temp_rules); +-static struct list_head *ima_rules; ++static struct list_head *ima_rules = &ima_default_rules; + + static int ima_policy __initdata; + +@@ -429,7 +429,6 @@ void __init ima_init_policy(void) + temp_ima_appraise |= IMA_APPRAISE_POLICY; + } + +- ima_rules = &ima_default_rules; + ima_update_policy_flag(); + } + +diff --git a/security/keys/trusted.c b/security/keys/trusted.c +index e0fcb17068f5..c473fcb3f938 100644 +--- a/security/keys/trusted.c ++++ b/security/keys/trusted.c +@@ -797,7 +797,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay, + case Opt_migratable: + if (*args[0].from == '0') + pay->migratable = 0; +- else ++ else if (*args[0].from != '1') + return -EINVAL; + break; + case Opt_pcrlock: +diff --git a/security/lsm_audit.c b/security/lsm_audit.c +index 44a20c218409..cc4000dba600 100644 +--- a/security/lsm_audit.c ++++ b/security/lsm_audit.c +@@ -277,7 +277,9 @@ static void dump_common_audit_data(struct audit_buffer *ab, + struct inode *inode; + + audit_log_format(ab, " name="); ++ spin_lock(&a->u.dentry->d_lock); + audit_log_untrustedstring(ab, a->u.dentry->d_name.name); ++ spin_unlock(&a->u.dentry->d_lock); + + inode = d_backing_inode(a->u.dentry); + if (inode) { +@@ -295,8 +297,9 @@ static void dump_common_audit_data(struct audit_buffer *ab, + dentry = d_find_alias(inode); + if (dentry) { + audit_log_format(ab, " name="); +- audit_log_untrustedstring(ab, +- dentry->d_name.name); ++ spin_lock(&dentry->d_lock); ++ audit_log_untrustedstring(ab, dentry->d_name.name); ++ spin_unlock(&dentry->d_lock); + dput(dentry); + } + audit_log_format(ab, " dev="); +diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h +index 1764b221d545..6bdc0949d81b 100644 +--- a/security/selinux/include/classmap.h ++++ b/security/selinux/include/classmap.h +@@ -105,7 +105,8 @@ struct security_class_mapping secclass_map[] = { + { COMMON_IPC_PERMS, NULL } }, + { "netlink_route_socket", + { COMMON_SOCK_PERMS, +- "nlmsg_read", "nlmsg_write", "nlmsg_readpriv", NULL } }, ++ "nlmsg_read", "nlmsg_write", "nlmsg_readpriv", "nlmsg_getneigh", ++ NULL } }, + { "netlink_tcpdiag_socket", + { COMMON_SOCK_PERMS, + "nlmsg_read", "nlmsg_write", NULL } }, +diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h +index 576d5a8c9238..e5bd50cf517f 100644 +--- a/security/selinux/include/security.h ++++ b/security/selinux/include/security.h +@@ -75,6 +75,7 @@ enum { + #define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1) + + extern int selinux_android_netlink_route; ++extern int selinux_android_netlink_getneigh; + extern int selinux_policycap_netpeer; + extern int selinux_policycap_openperm; + extern int selinux_policycap_alwaysnetwork; +diff --git 
a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c +index 697785cec7f6..cce66276250c 100644 +--- a/security/selinux/nlmsgtab.c ++++ b/security/selinux/nlmsgtab.c +@@ -194,12 +194,12 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm) + return err; + } + +-static void nlmsg_set_getlink_perm(u32 perm) ++static void nlmsg_set_perm_for_type(u32 perm, u16 type) + { + int i; + + for (i = 0; i < ARRAY_SIZE(nlmsg_route_perms); i++) { +- if (nlmsg_route_perms[i].nlmsg_type == RTM_GETLINK) { ++ if (nlmsg_route_perms[i].nlmsg_type == type) { + nlmsg_route_perms[i].perm = perm; + break; + } +@@ -209,11 +209,27 @@ static void nlmsg_set_getlink_perm(u32 perm) + /** + * Use nlmsg_readpriv as the permission for RTM_GETLINK messages if the + * netlink_route_getlink policy capability is set. Otherwise use nlmsg_read. ++ * Similarly, use nlmsg_getneigh for RTM_GETNEIGH and RTM_GETNEIGHTBL if the ++ * netlink_route_getneigh policy capability is set. Otherwise use nlmsg_read. + */ + void selinux_nlmsg_init(void) + { + if (selinux_android_netlink_route) +- nlmsg_set_getlink_perm(NETLINK_ROUTE_SOCKET__NLMSG_READPRIV); ++ nlmsg_set_perm_for_type(NETLINK_ROUTE_SOCKET__NLMSG_READPRIV, ++ RTM_GETLINK); + else +- nlmsg_set_getlink_perm(NETLINK_ROUTE_SOCKET__NLMSG_READ); ++ nlmsg_set_perm_for_type(NETLINK_ROUTE_SOCKET__NLMSG_READ, ++ RTM_GETLINK); ++ ++ if (selinux_android_netlink_getneigh) { ++ nlmsg_set_perm_for_type(NETLINK_ROUTE_SOCKET__NLMSG_GETNEIGH, ++ RTM_GETNEIGH); ++ nlmsg_set_perm_for_type(NETLINK_ROUTE_SOCKET__NLMSG_GETNEIGH, ++ RTM_GETNEIGHTBL); ++ } else { ++ nlmsg_set_perm_for_type(NETLINK_ROUTE_SOCKET__NLMSG_READ, ++ RTM_GETNEIGH); ++ nlmsg_set_perm_for_type(NETLINK_ROUTE_SOCKET__NLMSG_READ, ++ RTM_GETNEIGHTBL); ++ } + } +diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c +index 72c145dd799f..ef1226c1c3ad 100644 +--- a/security/selinux/selinuxfs.c ++++ b/security/selinux/selinuxfs.c +@@ -1416,6 +1416,7 @@ static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx) + *idx = cpu + 1; + return &per_cpu(avc_cache_stats, cpu); + } ++ (*idx)++; + return NULL; + } + +diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c +index 62518b031e5e..bddc8d363cb8 100644 +--- a/security/selinux/ss/policydb.c ++++ b/security/selinux/ss/policydb.c +@@ -2336,6 +2336,10 @@ int policydb_read(struct policydb *p, void *fp) + p->android_netlink_route = 1; + } + ++ if ((le32_to_cpu(buf[1]) & POLICYDB_CONFIG_ANDROID_NETLINK_GETNEIGH)) { ++ p->android_netlink_getneigh = 1; ++ } ++ + if (p->policyvers >= POLICYDB_VERSION_POLCAP) { + rc = ebitmap_read(&p->policycaps, fp); + if (rc) +diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h +index 0d511cf3c1e9..45698f754766 100644 +--- a/security/selinux/ss/policydb.h ++++ b/security/selinux/ss/policydb.h +@@ -228,6 +228,7 @@ struct genfs { + struct policydb { + int mls_enabled; + int android_netlink_route; ++ int android_netlink_getneigh; + + /* symbol tables */ + struct symtab symtab[SYM_NUM]; +@@ -315,6 +316,7 @@ extern int policydb_write(struct policydb *p, void *fp); + + #define POLICYDB_CONFIG_MLS 1 + #define POLICYDB_CONFIG_ANDROID_NETLINK_ROUTE (1 << 31) ++#define POLICYDB_CONFIG_ANDROID_NETLINK_GETNEIGH (1 << 30) + + /* the config flags related to unknown classes/perms are bits 2 and 3 */ + #define REJECT_UNKNOWN 0x00000002 +diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c +index b96d4466de4c..b5cd832328b8 100644 +--- a/security/selinux/ss/services.c ++++ 
b/security/selinux/ss/services.c +@@ -71,6 +71,7 @@ + #include "audit.h" + + int selinux_android_netlink_route; ++int selinux_android_netlink_getneigh; + int selinux_policycap_netpeer; + int selinux_policycap_openperm; + int selinux_policycap_alwaysnetwork; +@@ -1993,6 +1994,7 @@ static void security_load_policycaps(void) + POLICYDB_CAPABILITY_ALWAYSNETWORK); + + selinux_android_netlink_route = policydb.android_netlink_route; ++ selinux_android_netlink_getneigh = policydb.android_netlink_getneigh; + selinux_nlmsg_init(); + } + +@@ -2619,8 +2621,12 @@ int security_get_bools(int *len, char ***names, int **values) + if (*names) { + for (i = 0; i < *len; i++) + kfree((*names)[i]); ++ kfree(*names); + } + kfree(*values); ++ *len = 0; ++ *names = NULL; ++ *values = NULL; + goto out; + } + +diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c +index 6492fe96cae4..966d30bf2e38 100644 +--- a/security/smack/smackfs.c ++++ b/security/smack/smackfs.c +@@ -878,6 +878,8 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, + if (format == SMK_FIXED24_FMT && + (count < SMK_CIPSOMIN || count > SMK_CIPSOMAX)) + return -EINVAL; ++ if (count > PAGE_SIZE) ++ return -EINVAL; + + data = memdup_user_nul(buf, count); + if (IS_ERR(data)) +@@ -901,11 +903,21 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, + else + rule += strlen(skp->smk_known) + 1; + ++ if (rule > data + count) { ++ rc = -EOVERFLOW; ++ goto out; ++ } ++ + ret = sscanf(rule, "%d", &maplevel); +- if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL) ++ if (ret != 1 || maplevel < 0 || maplevel > SMACK_CIPSO_MAXLEVEL) + goto out; + + rule += SMK_DIGITLEN; ++ if (rule > data + count) { ++ rc = -EOVERFLOW; ++ goto out; ++ } ++ + ret = sscanf(rule, "%d", &catlen); + if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM) + goto out; +@@ -918,6 +930,10 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, + + for (i = 0; i < catlen; i++) { + rule += SMK_DIGITLEN; ++ if (rule > data + count) { ++ rc = -EOVERFLOW; ++ goto out; ++ } + ret = sscanf(rule, "%u", &cat); + if (ret != 1 || cat > SMACK_CIPSO_MAXCATNUM) + goto out; +@@ -1172,7 +1188,7 @@ static ssize_t smk_write_net4addr(struct file *file, const char __user *buf, + return -EPERM; + if (*ppos != 0) + return -EINVAL; +- if (count < SMK_NETLBLADDRMIN) ++ if (count < SMK_NETLBLADDRMIN || count > PAGE_SIZE - 1) + return -EINVAL; + + data = memdup_user_nul(buf, count); +@@ -1432,7 +1448,7 @@ static ssize_t smk_write_net6addr(struct file *file, const char __user *buf, + return -EPERM; + if (*ppos != 0) + return -EINVAL; +- if (count < SMK_NETLBLADDRMIN) ++ if (count < SMK_NETLBLADDRMIN || count > PAGE_SIZE - 1) + return -EINVAL; + + data = memdup_user_nul(buf, count); +@@ -1839,6 +1855,10 @@ static ssize_t smk_write_ambient(struct file *file, const char __user *buf, + if (!smack_privileged(CAP_MAC_ADMIN)) + return -EPERM; + ++ /* Enough data must be present */ ++ if (count == 0 || count > PAGE_SIZE) ++ return -EINVAL; ++ + data = memdup_user_nul(buf, count); + if (IS_ERR(data)) + return PTR_ERR(data); +@@ -2010,6 +2030,9 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf, + if (!smack_privileged(CAP_MAC_ADMIN)) + return -EPERM; + ++ if (count > PAGE_SIZE) ++ return -EINVAL; ++ + data = memdup_user_nul(buf, count); + if (IS_ERR(data)) + return PTR_ERR(data); +@@ -2097,6 +2120,9 @@ static ssize_t smk_write_unconfined(struct file *file, const char __user *buf, + if (!smack_privileged(CAP_MAC_ADMIN)) + return -EPERM; 
+ ++ if (count > PAGE_SIZE) ++ return -EINVAL; ++ + data = memdup_user_nul(buf, count); + if (IS_ERR(data)) + return PTR_ERR(data); +@@ -2650,6 +2676,10 @@ static ssize_t smk_write_syslog(struct file *file, const char __user *buf, + if (!smack_privileged(CAP_MAC_ADMIN)) + return -EPERM; + ++ /* Enough data must be present */ ++ if (count == 0 || count > PAGE_SIZE) ++ return -EINVAL; ++ + data = memdup_user_nul(buf, count); + if (IS_ERR(data)) + return PTR_ERR(data); +@@ -2731,7 +2761,6 @@ static int smk_open_relabel_self(struct inode *inode, struct file *file) + static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) + { +- struct task_smack *tsp = current_security(); + char *data; + int rc; + LIST_HEAD(list_tmp); +@@ -2743,10 +2772,13 @@ static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf, + return -EPERM; + + /* ++ * No partial write. + * Enough data must be present. + */ + if (*ppos != 0) + return -EINVAL; ++ if (count == 0 || count > PAGE_SIZE) ++ return -EINVAL; + + data = memdup_user_nul(buf, count); + if (IS_ERR(data)) +@@ -2756,11 +2788,21 @@ static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf, + kfree(data); + + if (!rc || (rc == -EINVAL && list_empty(&list_tmp))) { ++ struct cred *new; ++ struct task_smack *tsp; ++ ++ new = prepare_creds(); ++ if (!new) { ++ rc = -ENOMEM; ++ goto out; ++ } ++ tsp = new->security; + smk_destroy_label_list(&tsp->smk_relabel); + list_splice(&list_tmp, &tsp->smk_relabel); ++ commit_creds(new); + return count; + } +- ++out: + smk_destroy_label_list(&list_tmp); + return rc; + } +diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c +index ac0e18366265..2149079bf74c 100755 +--- a/sound/core/compress_offload.c ++++ b/sound/core/compress_offload.c +@@ -727,6 +727,9 @@ static int snd_compr_stop(struct snd_compr_stream *stream) + + retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP); + if (!retval) { ++ /* clear flags and stop any drain wait */ ++ stream->partial_drain = false; ++ stream->metadata_set = false; + stream->runtime->state = SNDRV_PCM_STATE_SETUP; + wake_up(&stream->runtime->sleep); + stream->runtime->total_bytes_available = 0; +@@ -865,6 +868,7 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream) + if (stream->next_track == false) + return -EPERM; + ++ stream->partial_drain = true; + retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN); + + stream->next_track = false; +diff --git a/sound/core/control.c b/sound/core/control.c +index 511368fe974e..abda2c3fcd8a 100644 +--- a/sound/core/control.c ++++ b/sound/core/control.c +@@ -1381,7 +1381,7 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file, + + unlock: + up_write(&card->controls_rwsem); +- return 0; ++ return err; + } + + static int snd_ctl_elem_add_user(struct snd_ctl_file *file, +diff --git a/sound/core/info.c b/sound/core/info.c +index d092d84307cf..74850bbf6afd 100644 +--- a/sound/core/info.c ++++ b/sound/core/info.c +@@ -645,7 +645,9 @@ int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len) + { + int c = -1; + +- if (snd_BUG_ON(!buffer || !buffer->buffer)) ++ if (snd_BUG_ON(!buffer)) ++ return 1; ++ if (!buffer->buffer) + return 1; + if (len <= 0 || buffer->stop || buffer->error) + return 1; +diff --git a/sound/core/init.c b/sound/core/init.c +index f49f5075a6da..31fa4f50415b 100644 +--- a/sound/core/init.c ++++ b/sound/core/init.c +@@ -450,10 +450,8 @@ int snd_card_disconnect(struct 
snd_card *card) + return 0; + } + card->shutdown = 1; +- spin_unlock(&card->files_lock); + + /* replace file->f_op with special dummy operations */ +- spin_lock(&card->files_lock); + list_for_each_entry(mfile, &card->files_list, list) { + /* it's critical part, use endless loop */ + /* we have no room to fail */ +diff --git a/sound/core/oss/mulaw.c b/sound/core/oss/mulaw.c +index 3788906421a7..fe27034f2846 100644 +--- a/sound/core/oss/mulaw.c ++++ b/sound/core/oss/mulaw.c +@@ -329,8 +329,8 @@ int snd_pcm_plugin_build_mulaw(struct snd_pcm_substream *plug, + snd_BUG(); + return -EINVAL; + } +- if (snd_BUG_ON(!snd_pcm_format_linear(format->format))) +- return -ENXIO; ++ if (!snd_pcm_format_linear(format->format)) ++ return -EINVAL; + + err = snd_pcm_plugin_build(plug, "Mu-Law<->linear conversion", + src_format, dst_format, +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c +index 824097571467..c2fb5198d5d5 100644 +--- a/sound/core/oss/pcm_oss.c ++++ b/sound/core/oss/pcm_oss.c +@@ -719,6 +719,8 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, + + oss_buffer_size = snd_pcm_plug_client_size(substream, + snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size; ++ if (!oss_buffer_size) ++ return -EINVAL; + oss_buffer_size = rounddown_pow_of_two(oss_buffer_size); + if (atomic_read(&substream->mmap_count)) { + if (oss_buffer_size > runtime->oss.mmap_bytes) +@@ -754,17 +756,21 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, + + min_period_size = snd_pcm_plug_client_size(substream, + snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL)); +- min_period_size *= oss_frame_size; +- min_period_size = roundup_pow_of_two(min_period_size); +- if (oss_period_size < min_period_size) +- oss_period_size = min_period_size; ++ if (min_period_size) { ++ min_period_size *= oss_frame_size; ++ min_period_size = roundup_pow_of_two(min_period_size); ++ if (oss_period_size < min_period_size) ++ oss_period_size = min_period_size; ++ } + + max_period_size = snd_pcm_plug_client_size(substream, + snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL)); +- max_period_size *= oss_frame_size; +- max_period_size = rounddown_pow_of_two(max_period_size); +- if (oss_period_size > max_period_size) +- oss_period_size = max_period_size; ++ if (max_period_size) { ++ max_period_size *= oss_frame_size; ++ max_period_size = rounddown_pow_of_two(max_period_size); ++ if (oss_period_size > max_period_size) ++ oss_period_size = max_period_size; ++ } + + oss_periods = oss_buffer_size / oss_period_size; + +@@ -2001,11 +2007,15 @@ static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int + static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsigned int val) + { + struct snd_pcm_runtime *runtime; ++ int fragshift; + + runtime = substream->runtime; + if (runtime->oss.subdivision || runtime->oss.fragshift) + return -EINVAL; +- runtime->oss.fragshift = val & 0xffff; ++ fragshift = val & 0xffff; ++ if (fragshift >= 31) ++ return -EINVAL; ++ runtime->oss.fragshift = fragshift; + runtime->oss.maxfrags = (val >> 16) & 0xffff; + if (runtime->oss.fragshift < 4) /* < 16 */ + runtime->oss.fragshift = 4; +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c +index 0179ef50b6b6..4bcb0d593b14 100755 +--- a/sound/core/pcm_native.c ++++ b/sound/core/pcm_native.c +@@ -1864,6 +1864,11 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) 
+ } + pcm_file = f.file->private_data; + substream1 = pcm_file->substream; ++ if (substream == substream1) { ++ res = -EINVAL; ++ goto _badf; ++ } ++ + group = kmalloc(sizeof(*group), GFP_KERNEL); + if (!group) { + res = -ENOMEM; +diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c +index 8cdf489df80e..ade880fe24a4 100644 +--- a/sound/core/seq/oss/seq_oss.c ++++ b/sound/core/seq/oss/seq_oss.c +@@ -181,10 +181,19 @@ static long + odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + { + struct seq_oss_devinfo *dp; ++ long rc; ++ + dp = file->private_data; + if (snd_BUG_ON(!dp)) + return -ENXIO; +- return snd_seq_oss_ioctl(dp, cmd, arg); ++ ++ if (cmd != SNDCTL_SEQ_SYNC && ++ mutex_lock_interruptible(®ister_mutex)) ++ return -ERESTARTSYS; ++ rc = snd_seq_oss_ioctl(dp, cmd, arg); ++ if (cmd != SNDCTL_SEQ_SYNC) ++ mutex_unlock(®ister_mutex); ++ return rc; + } + + #ifdef CONFIG_COMPAT +diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c +index c93945917235..247b68790a52 100644 +--- a/sound/core/seq/oss/seq_oss_synth.c ++++ b/sound/core/seq/oss/seq_oss_synth.c +@@ -624,7 +624,8 @@ snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_in + + if (info->is_midi) { + struct midi_info minf; +- snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf); ++ if (snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf)) ++ return -ENXIO; + inf->synth_type = SYNTH_TYPE_MIDI; + inf->synth_subtype = 0; + inf->nr_voices = 16; +diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c +index 9cfe4fcee9a5..27433f141cee 100644 +--- a/sound/core/seq/seq_ports.c ++++ b/sound/core/seq/seq_ports.c +@@ -532,10 +532,11 @@ static int check_and_subscribe_port(struct snd_seq_client *client, + return err; + } + +-static void delete_and_unsubscribe_port(struct snd_seq_client *client, +- struct snd_seq_client_port *port, +- struct snd_seq_subscribers *subs, +- bool is_src, bool ack) ++/* called with grp->list_mutex held */ ++static void __delete_and_unsubscribe_port(struct snd_seq_client *client, ++ struct snd_seq_client_port *port, ++ struct snd_seq_subscribers *subs, ++ bool is_src, bool ack) + { + struct snd_seq_port_subs_info *grp; + struct list_head *list; +@@ -543,7 +544,6 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client, + + grp = is_src ? &port->c_src : &port->c_dest; + list = is_src ? &subs->src_list : &subs->dest_list; +- down_write(&grp->list_mutex); + write_lock_irq(&grp->list_lock); + empty = list_empty(list); + if (!empty) +@@ -553,6 +553,18 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client, + + if (!empty) + unsubscribe_port(client, port, grp, &subs->info, ack); ++} ++ ++static void delete_and_unsubscribe_port(struct snd_seq_client *client, ++ struct snd_seq_client_port *port, ++ struct snd_seq_subscribers *subs, ++ bool is_src, bool ack) ++{ ++ struct snd_seq_port_subs_info *grp; ++ ++ grp = is_src ? 
&port->c_src : &port->c_dest; ++ down_write(&grp->list_mutex); ++ __delete_and_unsubscribe_port(client, port, subs, is_src, ack); + up_write(&grp->list_mutex); + } + +@@ -608,27 +620,30 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector, + struct snd_seq_client_port *dest_port, + struct snd_seq_port_subscribe *info) + { +- struct snd_seq_port_subs_info *src = &src_port->c_src; ++ struct snd_seq_port_subs_info *dest = &dest_port->c_dest; + struct snd_seq_subscribers *subs; + int err = -ENOENT; + +- down_write(&src->list_mutex); ++ /* always start from deleting the dest port for avoiding concurrent ++ * deletions ++ */ ++ down_write(&dest->list_mutex); + /* look for the connection */ +- list_for_each_entry(subs, &src->list_head, src_list) { ++ list_for_each_entry(subs, &dest->list_head, dest_list) { + if (match_subs_info(info, &subs->info)) { +- atomic_dec(&subs->ref_count); /* mark as not ready */ ++ __delete_and_unsubscribe_port(dest_client, dest_port, ++ subs, false, ++ connector->number != dest_client->number); + err = 0; + break; + } + } +- up_write(&src->list_mutex); ++ up_write(&dest->list_mutex); + if (err < 0) + return err; + + delete_and_unsubscribe_port(src_client, src_port, subs, true, + connector->number != src_client->number); +- delete_and_unsubscribe_port(dest_client, dest_port, subs, false, +- connector->number != dest_client->number); + kfree(subs); + return 0; + } +diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h +index 719093489a2c..7909cf6040e3 100644 +--- a/sound/core/seq/seq_queue.h ++++ b/sound/core/seq/seq_queue.h +@@ -40,10 +40,10 @@ struct snd_seq_queue { + + struct snd_seq_timer *timer; /* time keeper for this queue */ + int owner; /* client that 'owns' the timer */ +- unsigned int locked:1, /* timer is only accesibble by owner if set */ +- klocked:1, /* kernel lock (after START) */ +- check_again:1, +- check_blocked:1; ++ bool locked; /* timer is only accesibble by owner if set */ ++ bool klocked; /* kernel lock (after START) */ ++ bool check_again; /* concurrent access happened during check */ ++ bool check_blocked; /* queue being checked */ + + unsigned int flags; /* status flags */ + unsigned int info_flags; /* info for sync */ +diff --git a/sound/core/timer.c b/sound/core/timer.c +index be9e517e264c..4d57a7a7dd44 100644 +--- a/sound/core/timer.c ++++ b/sound/core/timer.c +@@ -488,9 +488,10 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event) + return; + if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) + return; ++ event += 10; /* convert to SNDRV_TIMER_EVENT_MXXX */ + list_for_each_entry(ts, &ti->slave_active_head, active_list) + if (ts->ccallback) +- ts->ccallback(ts, event + 100, &tstamp, resolution); ++ ts->ccallback(ts, event, &tstamp, resolution); + } + + /* start/continue a master timer */ +diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c +index 847f70348d4d..cc600aa0f6c7 100644 +--- a/sound/drivers/aloop.c ++++ b/sound/drivers/aloop.c +@@ -1062,6 +1062,14 @@ static int loopback_mixer_new(struct loopback *loopback, int notify) + return -ENOMEM; + kctl->id.device = dev; + kctl->id.subdevice = substr; ++ ++ /* Add the control before copying the id so that ++ * the numid field of the id is set in the copy. 
++ */ ++ err = snd_ctl_add(card, kctl); ++ if (err < 0) ++ return err; ++ + switch (idx) { + case ACTIVE_IDX: + setup->active_id = kctl->id; +@@ -1078,9 +1086,6 @@ static int loopback_mixer_new(struct loopback *loopback, int notify) + default: + break; + } +- err = snd_ctl_add(card, kctl); +- if (err < 0) +- return err; + } + } + } +diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c +index 42920a243328..3f94746d587a 100644 +--- a/sound/drivers/opl3/opl3_synth.c ++++ b/sound/drivers/opl3/opl3_synth.c +@@ -104,6 +104,8 @@ int snd_opl3_ioctl(struct snd_hwdep * hw, struct file *file, + { + struct snd_dm_fm_info info; + ++ memset(&info, 0, sizeof(info)); ++ + info.fm_mode = opl3->fm_mode; + info.rhythm = opl3->rhythm; + if (copy_to_user(argp, &info, sizeof(struct snd_dm_fm_info))) +diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig +index 8557e54d2659..82894330cae1 100644 +--- a/sound/firewire/Kconfig ++++ b/sound/firewire/Kconfig +@@ -36,7 +36,7 @@ config SND_OXFW + * Mackie(Loud) Onyx-i series (former models) + * Mackie(Loud) Onyx Satellite + * Mackie(Loud) Tapco Link.Firewire +- * Mackie(Loud) d.2 pro/d.4 pro ++ * Mackie(Loud) d.2 pro/d.4 pro (built-in FireWire card with OXFW971 ASIC) + * Mackie(Loud) U.420/U.420d + * TASCAM FireOne + * Stanton Controllers & Systems 1 Deck/Mixer +@@ -82,7 +82,7 @@ config SND_BEBOB + * PreSonus FIREBOX/FIREPOD/FP10/Inspire1394 + * BridgeCo RDAudio1/Audio5 + * Mackie Onyx 1220/1620/1640 (FireWire I/O Card) +- * Mackie d.2 (FireWire Option) ++ * Mackie d.2 (optional FireWire card with DM1000 ASIC) + * Stanton FinalScratch 2 (ScratchAmp) + * Tascam IF-FW/DM + * Behringer XENIX UFX 1204/1604 +@@ -108,6 +108,7 @@ config SND_BEBOB + * M-Audio Ozonic/NRV10/ProfireLightBridge + * M-Audio FireWire 1814/ProjectMix IO + * Digidesign Mbox 2 Pro ++ * ToneWeal FW66 + + To compile this driver as a module, choose M here: the module + will be called snd-bebob. +diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c +index a205b93fd9ac..4bb2cbe73f20 100644 +--- a/sound/firewire/bebob/bebob.c ++++ b/sound/firewire/bebob/bebob.c +@@ -60,6 +60,7 @@ static DECLARE_BITMAP(devices_used, SNDRV_CARDS); + #define VEN_MAUDIO1 0x00000d6c + #define VEN_MAUDIO2 0x000007f5 + #define VEN_DIGIDESIGN 0x00a07e ++#define OUI_SHOUYO 0x002327 + + #define MODEL_FOCUSRITE_SAFFIRE_BOTH 0x00000000 + #define MODEL_MAUDIO_AUDIOPHILE_BOTH 0x00010060 +@@ -414,7 +415,7 @@ static const struct ieee1394_device_id bebob_id_table[] = { + SND_BEBOB_DEV_ENTRY(VEN_BRIDGECO, 0x00010049, &spec_normal), + /* Mackie, Onyx 1220/1620/1640 (Firewire I/O Card) */ + SND_BEBOB_DEV_ENTRY(VEN_MACKIE2, 0x00010065, &spec_normal), +- /* Mackie, d.2 (Firewire Option) */ ++ // Mackie, d.2 (optional Firewire card with DM1000). + SND_BEBOB_DEV_ENTRY(VEN_MACKIE1, 0x00010067, &spec_normal), + /* Stanton, ScratchAmp */ + SND_BEBOB_DEV_ENTRY(VEN_STANTON, 0x00000001, &spec_normal), +@@ -513,6 +514,8 @@ static const struct ieee1394_device_id bebob_id_table[] = { + &maudio_special_spec), + /* Digidesign Mbox 2 Pro */ + SND_BEBOB_DEV_ENTRY(VEN_DIGIDESIGN, 0x0000a9, &spec_normal), ++ // Toneweal FW66. 
++ SND_BEBOB_DEV_ENTRY(OUI_SHOUYO, 0x020002, &spec_normal), + /* IDs are unknown but able to be supported */ + /* Apogee, Mini-ME Firewire */ + /* Apogee, Mini-DAC Firewire */ +diff --git a/sound/firewire/bebob/bebob_hwdep.c b/sound/firewire/bebob/bebob_hwdep.c +index ce731f4d8b4f..733ba42e2462 100644 +--- a/sound/firewire/bebob/bebob_hwdep.c ++++ b/sound/firewire/bebob/bebob_hwdep.c +@@ -37,12 +37,11 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count, + } + + memset(&event, 0, sizeof(event)); ++ count = min_t(long, count, sizeof(event.lock_status)); + if (bebob->dev_lock_changed) { + event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS; + event.lock_status.status = (bebob->dev_lock_count > 0); + bebob->dev_lock_changed = false; +- +- count = min_t(long, count, sizeof(event.lock_status)); + } + + spin_unlock_irq(&bebob->lock); +diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c +index ef689997d6a5..bf53e342788e 100644 +--- a/sound/firewire/digi00x/digi00x.c ++++ b/sound/firewire/digi00x/digi00x.c +@@ -15,6 +15,7 @@ MODULE_LICENSE("GPL v2"); + #define VENDOR_DIGIDESIGN 0x00a07e + #define MODEL_CONSOLE 0x000001 + #define MODEL_RACK 0x000002 ++#define SPEC_VERSION 0x000001 + + static int name_card(struct snd_dg00x *dg00x) + { +@@ -185,14 +186,18 @@ static const struct ieee1394_device_id snd_dg00x_id_table[] = { + /* Both of 002/003 use the same ID. */ + { + .match_flags = IEEE1394_MATCH_VENDOR_ID | ++ IEEE1394_MATCH_VERSION | + IEEE1394_MATCH_MODEL_ID, + .vendor_id = VENDOR_DIGIDESIGN, ++ .version = SPEC_VERSION, + .model_id = MODEL_CONSOLE, + }, + { + .match_flags = IEEE1394_MATCH_VENDOR_ID | ++ IEEE1394_MATCH_VERSION | + IEEE1394_MATCH_MODEL_ID, + .vendor_id = VENDOR_DIGIDESIGN, ++ .version = SPEC_VERSION, + .model_id = MODEL_RACK, + }, + {} +diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c +index a7ab34d5e7b0..e2932ac9d487 100644 +--- a/sound/firewire/oxfw/oxfw.c ++++ b/sound/firewire/oxfw/oxfw.c +@@ -405,8 +405,7 @@ static const struct ieee1394_device_id oxfw_id_table[] = { + * Onyx-i series (former models): 0x081216 + * Mackie Onyx Satellite: 0x00200f + * Tapco LINK.firewire 4x6: 0x000460 +- * d.2 pro: Unknown +- * d.4 pro: Unknown ++ * d.2 pro/d.4 pro (built-in card): Unknown + * U.420: Unknown + * U.420d: Unknown + */ +diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c +index 4c967ac1c0e8..40ed4c92e48b 100644 +--- a/sound/firewire/tascam/tascam.c ++++ b/sound/firewire/tascam/tascam.c +@@ -225,11 +225,39 @@ static void snd_tscm_remove(struct fw_unit *unit) + } + + static const struct ieee1394_device_id snd_tscm_id_table[] = { ++ // Tascam, FW-1884. + { + .match_flags = IEEE1394_MATCH_VENDOR_ID | +- IEEE1394_MATCH_SPECIFIER_ID, ++ IEEE1394_MATCH_SPECIFIER_ID | ++ IEEE1394_MATCH_VERSION, + .vendor_id = 0x00022e, + .specifier_id = 0x00022e, ++ .version = 0x800000, ++ }, ++ // Tascam, FE-8 (.version = 0x800001) ++ // This kernel module doesn't support FE-8 because the most of features ++ // can be implemented in userspace without any specific support of this ++ // module. ++ // ++ // .version = 0x800002 is unknown. ++ // ++ // Tascam, FW-1082. ++ { ++ .match_flags = IEEE1394_MATCH_VENDOR_ID | ++ IEEE1394_MATCH_SPECIFIER_ID | ++ IEEE1394_MATCH_VERSION, ++ .vendor_id = 0x00022e, ++ .specifier_id = 0x00022e, ++ .version = 0x800003, ++ }, ++ // Tascam, FW-1804. 
++ { ++ .match_flags = IEEE1394_MATCH_VENDOR_ID | ++ IEEE1394_MATCH_SPECIFIER_ID | ++ IEEE1394_MATCH_VERSION, ++ .vendor_id = 0x00022e, ++ .specifier_id = 0x00022e, ++ .version = 0x800004, + }, + /* FE-08 requires reverse-engineering because it just has faders. */ + {} +diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c +index 261469188566..49d42971d90d 100644 +--- a/sound/hda/ext/hdac_ext_controller.c ++++ b/sound/hda/ext/hdac_ext_controller.c +@@ -155,6 +155,8 @@ struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_ext_bus *ebus, + return NULL; + if (ebus->idx != bus_idx) + return NULL; ++ if (addr < 0 || addr > 31) ++ return NULL; + + list_for_each_entry(hlink, &ebus->hlink_list, list) { + for (i = 0; i < HDA_MAX_CODECS; i++) { +diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c +index 0e81ea89a596..e3f68a76d90e 100644 +--- a/sound/hda/hdac_bus.c ++++ b/sound/hda/hdac_bus.c +@@ -155,6 +155,7 @@ static void process_unsol_events(struct work_struct *work) + struct hdac_driver *drv; + unsigned int rp, caddr, res; + ++ spin_lock_irq(&bus->reg_lock); + while (bus->unsol_rp != bus->unsol_wp) { + rp = (bus->unsol_rp + 1) % HDA_UNSOL_QUEUE_SIZE; + bus->unsol_rp = rp; +@@ -166,10 +167,13 @@ static void process_unsol_events(struct work_struct *work) + codec = bus->caddr_tbl[caddr & 0x0f]; + if (!codec || !codec->dev.driver) + continue; ++ spin_unlock_irq(&bus->reg_lock); + drv = drv_to_hdac_driver(codec->dev.driver); + if (drv->unsol_event) + drv->unsol_event(codec, res); ++ spin_lock_irq(&bus->reg_lock); + } ++ spin_unlock_irq(&bus->reg_lock); + } + + /** +diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c +index 03c9872c31cf..73264d5f58f8 100644 +--- a/sound/hda/hdac_device.c ++++ b/sound/hda/hdac_device.c +@@ -123,6 +123,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_device_init); + void snd_hdac_device_exit(struct hdac_device *codec) + { + pm_runtime_put_noidle(&codec->dev); ++ /* keep balance of runtime PM child_count in parent device */ ++ pm_runtime_set_suspended(&codec->dev); + snd_hdac_bus_remove_device(codec->bus, codec); + kfree(codec->vendor_name); + kfree(codec->chip_name); +diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c +index dfedfd85f205..463906882b95 100644 +--- a/sound/isa/cmi8330.c ++++ b/sound/isa/cmi8330.c +@@ -564,7 +564,7 @@ static int snd_cmi8330_probe(struct snd_card *card, int dev) + } + if (acard->sb->hardware != SB_HW_16) { + snd_printk(KERN_ERR PFX "SB16 not found during probe\n"); +- return err; ++ return -ENODEV; + } + + snd_wss_out(acard->wss, CS4231_MISC_INFO, 0x40); /* switch on MODE2 */ +diff --git a/sound/isa/es1688/es1688.c b/sound/isa/es1688/es1688.c +index 1901c2bb6c3b..a36e2121ef09 100644 +--- a/sound/isa/es1688/es1688.c ++++ b/sound/isa/es1688/es1688.c +@@ -284,8 +284,10 @@ static int snd_es968_pnp_detect(struct pnp_card_link *pcard, + return error; + } + error = snd_es1688_probe(card, dev); +- if (error < 0) ++ if (error < 0) { ++ snd_card_free(card); + return error; ++ } + pnp_set_card_drvdata(pcard, card); + snd_es968_pnp_is_probed = 1; + return 0; +diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c +index 94c411299e5a..470058e89fef 100644 +--- a/sound/isa/sb/emu8000.c ++++ b/sound/isa/sb/emu8000.c +@@ -1042,8 +1042,10 @@ snd_emu8000_create_mixer(struct snd_card *card, struct snd_emu8000 *emu) + + memset(emu->controls, 0, sizeof(emu->controls)); + for (i = 0; i < EMU8000_NUM_CONTROLS; i++) { +- if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0) ++ if 
((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0) { ++ emu->controls[i] = NULL; + goto __error; ++ } + } + return 0; + +diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c +index 48da2276683d..23834691f4d3 100644 +--- a/sound/isa/sb/sb16_csp.c ++++ b/sound/isa/sb/sb16_csp.c +@@ -828,6 +828,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel + mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1); + snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7); + snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7); ++ spin_unlock_irqrestore(&p->chip->mixer_lock, flags); + + spin_lock(&p->chip->reg_lock); + set_mode_register(p->chip, 0xc0); /* c0 = STOP */ +@@ -867,6 +868,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel + spin_unlock(&p->chip->reg_lock); + + /* restore PCM volume */ ++ spin_lock_irqsave(&p->chip->mixer_lock, flags); + snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL); + snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR); + spin_unlock_irqrestore(&p->chip->mixer_lock, flags); +@@ -892,6 +894,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p) + mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1); + snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7); + snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7); ++ spin_unlock_irqrestore(&p->chip->mixer_lock, flags); + + spin_lock(&p->chip->reg_lock); + if (p->running & SNDRV_SB_CSP_ST_QSOUND) { +@@ -906,6 +909,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p) + spin_unlock(&p->chip->reg_lock); + + /* restore PCM volume */ ++ spin_lock_irqsave(&p->chip->mixer_lock, flags); + snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL); + snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR); + spin_unlock_irqrestore(&p->chip->mixer_lock, flags); +@@ -1059,10 +1063,14 @@ static int snd_sb_qsound_build(struct snd_sb_csp * p) + + spin_lock_init(&p->q_lock); + +- if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0) ++ if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0) { ++ p->qsound_switch = NULL; + goto __error; +- if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0) ++ } ++ if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0) { ++ p->qsound_space = NULL; + goto __error; ++ } + + return 0; + +@@ -1082,10 +1090,14 @@ static void snd_sb_qsound_destroy(struct snd_sb_csp * p) + card = p->chip->card; + + down_write(&card->controls_rwsem); +- if (p->qsound_switch) ++ if (p->qsound_switch) { + snd_ctl_remove(card, p->qsound_switch); +- if (p->qsound_space) ++ p->qsound_switch = NULL; ++ } ++ if (p->qsound_space) { + snd_ctl_remove(card, p->qsound_space); ++ p->qsound_space = NULL; ++ } + up_write(&card->controls_rwsem); + + /* cancel pending transfer of QSound parameters */ +diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c +index e75bfc511e3e..ad42d2364199 100644 +--- a/sound/isa/sb/sb8.c ++++ b/sound/isa/sb/sb8.c +@@ -111,10 +111,6 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev) + + /* block the 0x388 port to avoid PnP conflicts */ + acard->fm_res = request_region(0x388, 4, "SoundBlaster FM"); +- if (!acard->fm_res) { +- err = -EBUSY; +- goto _err; +- } + + if (port[dev] != SNDRV_AUTO_PORT) { + if ((err = snd_sbdsp_create(card, port[dev], irq[dev], +diff --git a/sound/isa/wavefront/wavefront_synth.c 
b/sound/isa/wavefront/wavefront_synth.c +index 718d5e3b7806..6c06d0645779 100644 +--- a/sound/isa/wavefront/wavefront_synth.c ++++ b/sound/isa/wavefront/wavefront_synth.c +@@ -1174,7 +1174,10 @@ wavefront_send_alias (snd_wavefront_t *dev, wavefront_patch_info *header) + "alias for %d\n", + header->number, + header->hdr.a.OriginalSample); +- ++ ++ if (header->number >= WF_MAX_SAMPLE) ++ return -EINVAL; ++ + munge_int32 (header->number, &alias_hdr[0], 2); + munge_int32 (header->hdr.a.OriginalSample, &alias_hdr[2], 2); + munge_int32 (*((unsigned int *)&header->hdr.a.sampleStartOffset), +@@ -1205,6 +1208,9 @@ wavefront_send_multisample (snd_wavefront_t *dev, wavefront_patch_info *header) + int num_samples; + unsigned char *msample_hdr; + ++ if (header->number >= WF_MAX_SAMPLE) ++ return -EINVAL; ++ + msample_hdr = kmalloc(WF_MSAMPLE_BYTES, GFP_KERNEL); + if (! msample_hdr) + return -ENOMEM; +diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c +index 3ef9af53ef49..0d5ff00cdabc 100644 +--- a/sound/pci/asihpi/hpioctl.c ++++ b/sound/pci/asihpi/hpioctl.c +@@ -346,7 +346,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev, + struct hpi_message hm; + struct hpi_response hr; + struct hpi_adapter adapter; +- struct hpi_pci pci; ++ struct hpi_pci pci = { 0 }; + + memset(&adapter, 0, sizeof(adapter)); + +@@ -502,7 +502,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev, + return 0; + + err: +- for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) { ++ while (--idx >= 0) { + if (pci.ap_mem_base[idx]) { + iounmap(pci.ap_mem_base[idx]); + pci.ap_mem_base[idx] = NULL; +diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c +index 6165a57a94ae..2c30a0672c17 100644 +--- a/sound/pci/ca0106/ca0106_main.c ++++ b/sound/pci/ca0106/ca0106_main.c +@@ -551,7 +551,8 @@ static int snd_ca0106_pcm_power_dac(struct snd_ca0106 *chip, int channel_id, + else + /* Power down */ + chip->spi_dac_reg[reg] |= bit; +- return snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]); ++ if (snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]) != 0) ++ return -ENXIO; + } + return 0; + } +diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c +index 528102cc2d5d..d824ff4ae3e3 100644 +--- a/sound/pci/cs46xx/cs46xx_lib.c ++++ b/sound/pci/cs46xx/cs46xx_lib.c +@@ -780,7 +780,7 @@ static void snd_cs46xx_set_capture_sample_rate(struct snd_cs46xx *chip, unsigned + rate = 48000 / 9; + + /* +- * We can not capture at at rate greater than the Input Rate (48000). ++ * We can not capture at a rate greater than the Input Rate (48000). + * Return an error if an attempt is made to stray outside that limit. 
+ */ + if (rate > 48000) +diff --git a/sound/pci/cs46xx/dsp_spos_scb_lib.c b/sound/pci/cs46xx/dsp_spos_scb_lib.c +index 7488e1b7a770..4e726d39b05d 100644 +--- a/sound/pci/cs46xx/dsp_spos_scb_lib.c ++++ b/sound/pci/cs46xx/dsp_spos_scb_lib.c +@@ -1742,7 +1742,7 @@ int cs46xx_iec958_pre_open (struct snd_cs46xx *chip) + struct dsp_spos_instance * ins = chip->dsp_spos_instance; + + if ( ins->spdif_status_out & DSP_SPDIF_STATUS_OUTPUT_ENABLED ) { +- /* remove AsynchFGTxSCB and and PCMSerialInput_II */ ++ /* remove AsynchFGTxSCB and PCMSerialInput_II */ + cs46xx_dsp_disable_spdif_out (chip); + + /* save state */ +diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c +index 18ee7768b7c4..ae8aa10a4a5d 100644 +--- a/sound/pci/ctxfi/cthw20k2.c ++++ b/sound/pci/ctxfi/cthw20k2.c +@@ -995,7 +995,7 @@ static int daio_mgr_dao_init(void *blk, unsigned int idx, unsigned int conf) + + if (idx < 4) { + /* S/PDIF output */ +- switch ((conf & 0x7)) { ++ switch ((conf & 0xf)) { + case 1: + set_field(&ctl->txctl[idx], ATXCTL_NUC, 0); + break; +diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c +index d73ee11a32bd..db14ee43e461 100644 +--- a/sound/pci/echoaudio/echoaudio.c ++++ b/sound/pci/echoaudio/echoaudio.c +@@ -2215,7 +2215,6 @@ static int snd_echo_resume(struct device *dev) + if (err < 0) { + kfree(commpage_bak); + dev_err(dev, "resume init_hw err=%d\n", err); +- snd_echo_free(chip); + return err; + } + +@@ -2242,7 +2241,6 @@ static int snd_echo_resume(struct device *dev) + if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED, + KBUILD_MODNAME, chip)) { + dev_err(chip->card->dev, "cannot grab irq\n"); +- snd_echo_free(chip); + return -EBUSY; + } + chip->irq = pci->irq; +diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c +index 12d87204e373..7ac92d188f4f 100644 +--- a/sound/pci/hda/hda_auto_parser.c ++++ b/sound/pci/hda/hda_auto_parser.c +@@ -76,6 +76,12 @@ static int compare_input_type(const void *ap, const void *bp) + if (a->type != b->type) + return (int)(a->type - b->type); + ++ /* If has both hs_mic and hp_mic, pick the hs_mic ahead of hp_mic. */ ++ if (a->is_headset_mic && b->is_headphone_mic) ++ return -1; /* don't swap */ ++ else if (a->is_headphone_mic && b->is_headset_mic) ++ return 1; /* swap */ ++ + /* In case one has boost and the other one has not, + pick the one with boost first. */ + return (int)(b->has_boost_on_pin - a->has_boost_on_pin); +diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c +index d0d6dfbfcfdf..f25c2c43c562 100644 +--- a/sound/pci/hda/hda_bind.c ++++ b/sound/pci/hda/hda_bind.c +@@ -46,6 +46,10 @@ static void hda_codec_unsol_event(struct hdac_device *dev, unsigned int ev) + if (codec->bus->shutdown) + return; + ++ /* ignore unsol events during system suspend/resume */ ++ if (codec->core.dev.power.power_state.event != PM_EVENT_ON) ++ return; ++ + if (codec->patch_ops.unsol_event) + codec->patch_ops.unsol_event(codec, ev); + } +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c +index cbe0248225c1..4e67614f15f8 100644 +--- a/sound/pci/hda/hda_codec.c ++++ b/sound/pci/hda/hda_codec.c +@@ -3496,7 +3496,7 @@ EXPORT_SYMBOL_GPL(snd_hda_set_power_save); + * @nid: NID to check / update + * + * Check whether the given NID is in the amp list. If it's in the list, +- * check the current AMP status, and update the the power-status according ++ * check the current AMP status, and update the power-status according + * to the mute status. 
+ * + * This function is supposed to be set or called from the check_power_status +diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c +index bd0e4710d15d..79043b481d7b 100644 +--- a/sound/pci/hda/hda_controller.c ++++ b/sound/pci/hda/hda_controller.c +@@ -1158,16 +1158,23 @@ irqreturn_t azx_interrupt(int irq, void *dev_id) + if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update)) + active = true; + +- /* clear rirb int */ + status = azx_readb(chip, RIRBSTS); + if (status & RIRB_INT_MASK) { ++ /* ++ * Clearing the interrupt status here ensures that no ++ * interrupt gets masked after the RIRB wp is read in ++ * snd_hdac_bus_update_rirb. This avoids a possible ++ * race condition where codec response in RIRB may ++ * remain unserviced by IRQ, eventually falling back ++ * to polling mode in azx_rirb_get_response. ++ */ ++ azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); + active = true; + if (status & RIRB_INT_RESPONSE) { + if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) + udelay(80); + snd_hdac_bus_update_rirb(bus); + } +- azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); + } + } while (active && ++repeat < 10); + +diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c +index 949c90a859fa..c29f7ff5ccd2 100644 +--- a/sound/pci/hda/hda_generic.c ++++ b/sound/pci/hda/hda_generic.c +@@ -820,7 +820,7 @@ static void activate_amp_in(struct hda_codec *codec, struct nid_path *path, + } + } + +-/* sync power of each widget in the the given path */ ++/* sync power of each widget in the given path */ + static hda_nid_t path_power_update(struct hda_codec *codec, + struct nid_path *path, + bool allow_powerdown) +@@ -1165,11 +1165,17 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch, + *index = ch; + return "Headphone"; + case AUTO_PIN_LINE_OUT: +- /* This deals with the case where we have two DACs and +- * one LO, one HP and one Speaker */ +- if (!ch && cfg->speaker_outs && cfg->hp_outs) { +- bool hp_lo_shared = !path_has_mixer(codec, spec->hp_paths[0], ctl_type); +- bool spk_lo_shared = !path_has_mixer(codec, spec->speaker_paths[0], ctl_type); ++ /* This deals with the case where one HP or one Speaker or ++ * one HP + one Speaker need to share the DAC with LO ++ */ ++ if (!ch) { ++ bool hp_lo_shared = false, spk_lo_shared = false; ++ ++ if (cfg->speaker_outs) ++ spk_lo_shared = !path_has_mixer(codec, ++ spec->speaker_paths[0], ctl_type); ++ if (cfg->hp_outs) ++ hp_lo_shared = !path_has_mixer(codec, spec->hp_paths[0], ctl_type); + if (hp_lo_shared && spk_lo_shared) + return spec->vmaster_mute.hook ? 
"PCM" : "Master"; + if (hp_lo_shared) +@@ -1327,16 +1333,20 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs, + struct nid_path *path; + hda_nid_t pin = pins[i]; + +- path = snd_hda_get_path_from_idx(codec, path_idx[i]); +- if (path) { +- badness += assign_out_path_ctls(codec, path); +- continue; ++ if (!spec->obey_preferred_dacs) { ++ path = snd_hda_get_path_from_idx(codec, path_idx[i]); ++ if (path) { ++ badness += assign_out_path_ctls(codec, path); ++ continue; ++ } + } + + dacs[i] = get_preferred_dac(codec, pin); + if (dacs[i]) { + if (is_dac_already_used(codec, dacs[i])) + badness += bad->shared_primary; ++ } else if (spec->obey_preferred_dacs) { ++ badness += BAD_NO_PRIMARY_DAC; + } + + if (!dacs[i]) +@@ -3411,7 +3421,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol, + struct hda_gen_spec *spec = codec->spec; + const struct hda_input_mux *imux; + struct nid_path *path; +- int i, adc_idx, err = 0; ++ int i, adc_idx, ret, err = 0; + + imux = &spec->input_mux; + adc_idx = kcontrol->id.index; +@@ -3421,9 +3431,13 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol, + if (!path || !path->ctls[type]) + continue; + kcontrol->private_value = path->ctls[type]; +- err = func(kcontrol, ucontrol); +- if (err < 0) ++ ret = func(kcontrol, ucontrol); ++ if (ret < 0) { ++ err = ret; + break; ++ } ++ if (ret > 0) ++ err = 1; + } + mutex_unlock(&codec->control_mutex); + if (err >= 0 && spec->cap_sync_hook) +diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h +index 6d1cb2fb447d..33e7eafd93b3 100644 +--- a/sound/pci/hda/hda_generic.h ++++ b/sound/pci/hda/hda_generic.h +@@ -229,6 +229,7 @@ struct hda_gen_spec { + unsigned int add_jack_modes:1; /* add i/o jack mode enum ctls */ + unsigned int power_down_unused:1; /* power down unused widgets */ + unsigned int dac_min_mute:1; /* minimal = mute for DACs */ ++ unsigned int obey_preferred_dacs:1; /* obey preferred_dacs assignment */ + + /* other internal flags */ + unsigned int no_analog:1; /* digital I/O only */ +diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c +index e85fb04ec7be..b567c4bdae00 100644 +--- a/sound/pci/hda/hda_tegra.c ++++ b/sound/pci/hda/hda_tegra.c +@@ -363,6 +363,9 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev) + unsigned short gcap; + int irq_id = platform_get_irq(pdev, 0); + ++ if (irq_id < 0) ++ return irq_id; ++ + err = hda_tegra_init_chip(chip, pdev); + if (err) + return err; +diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c +index bf7593f234f6..c599730c7a3f 100644 +--- a/sound/pci/hda/patch_ca0132.c ++++ b/sound/pci/hda/patch_ca0132.c +@@ -4443,11 +4443,10 @@ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb) + /* Delay enabling the HP amp, to let the mic-detection + * state machine run. 
+ */ +- cancel_delayed_work(&spec->unsol_hp_work); +- schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500)); + tbl = snd_hda_jack_tbl_get(codec, cb->nid); + if (tbl) + tbl->block_report = 1; ++ schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500)); + } + + static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb) +@@ -4625,12 +4624,25 @@ static void ca0132_free(struct hda_codec *codec) + kfree(codec->spec); + } + ++#ifdef CONFIG_PM ++static int ca0132_suspend(struct hda_codec *codec) ++{ ++ struct ca0132_spec *spec = codec->spec; ++ ++ cancel_delayed_work_sync(&spec->unsol_hp_work); ++ return 0; ++} ++#endif ++ + static const struct hda_codec_ops ca0132_patch_ops = { + .build_controls = ca0132_build_controls, + .build_pcms = ca0132_build_pcms, + .init = ca0132_init, + .free = ca0132_free, + .unsol_event = snd_hda_jack_unsol_event, ++#ifdef CONFIG_PM ++ .suspend = ca0132_suspend, ++#endif + }; + + static void ca0132_config(struct hda_codec *codec) +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index 1e99500dbb6c..f7797e546e3d 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ -1001,6 +1001,7 @@ static int patch_conexant_auto(struct hda_codec *codec) + static const struct hda_device_id snd_hda_id_conexant[] = { + HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto), + HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto), ++ HDA_CODEC_ENTRY(0x14f120d0, "CX11970", patch_conexant_auto), + HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto), + HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto), + HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto), +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c +index a866a20349c3..8b240a55cb89 100644 +--- a/sound/pci/hda/patch_hdmi.c ++++ b/sound/pci/hda/patch_hdmi.c +@@ -307,13 +307,13 @@ static int hdmi_eld_ctl_info(struct snd_kcontrol *kcontrol, + if (!per_pin) { + /* no pin is bound to the pcm */ + uinfo->count = 0; +- mutex_unlock(&spec->pcm_lock); +- return 0; ++ goto unlock; + } + eld = &per_pin->sink_eld; + uinfo->count = eld->eld_valid ? 
eld->eld_size : 0; +- mutex_unlock(&spec->pcm_lock); + ++ unlock: ++ mutex_unlock(&spec->pcm_lock); + return 0; + } + +@@ -325,6 +325,7 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol, + struct hdmi_spec_per_pin *per_pin; + struct hdmi_eld *eld; + int pcm_idx; ++ int err = 0; + + pcm_idx = kcontrol->private_value; + mutex_lock(&spec->pcm_lock); +@@ -333,16 +334,15 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol, + /* no pin is bound to the pcm */ + memset(ucontrol->value.bytes.data, 0, + ARRAY_SIZE(ucontrol->value.bytes.data)); +- mutex_unlock(&spec->pcm_lock); +- return 0; ++ goto unlock; + } +- eld = &per_pin->sink_eld; + ++ eld = &per_pin->sink_eld; + if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data) || + eld->eld_size > ELD_MAX_SIZE) { +- mutex_unlock(&spec->pcm_lock); + snd_BUG(); +- return -EINVAL; ++ err = -EINVAL; ++ goto unlock; + } + + memset(ucontrol->value.bytes.data, 0, +@@ -350,9 +350,10 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol, + if (eld->eld_valid) + memcpy(ucontrol->value.bytes.data, eld->eld_buffer, + eld->eld_size); +- mutex_unlock(&spec->pcm_lock); + +- return 0; ++ unlock: ++ mutex_unlock(&spec->pcm_lock); ++ return err; + } + + static struct snd_kcontrol_new eld_bytes_ctl = { +@@ -1114,8 +1115,8 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo, + pin_idx = hinfo_to_pin_index(codec, hinfo); + if (!spec->dyn_pcm_assign) { + if (snd_BUG_ON(pin_idx < 0)) { +- mutex_unlock(&spec->pcm_lock); +- return -EINVAL; ++ err = -EINVAL; ++ goto unlock; + } + } else { + /* no pin is assigned to the PCM +@@ -1123,16 +1124,13 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo, + */ + if (pin_idx < 0) { + err = hdmi_pcm_open_no_pin(hinfo, codec, substream); +- mutex_unlock(&spec->pcm_lock); +- return err; ++ goto unlock; + } + } + + err = hdmi_choose_cvt(codec, pin_idx, &cvt_idx); +- if (err < 0) { +- mutex_unlock(&spec->pcm_lock); +- return err; +- } ++ if (err < 0) ++ goto unlock; + + per_cvt = get_cvt(spec, cvt_idx); + /* Claim converter */ +@@ -1168,12 +1166,11 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo, + per_cvt->assigned = 0; + hinfo->nid = 0; + snd_hda_spdif_ctls_unassign(codec, pcm_idx); +- mutex_unlock(&spec->pcm_lock); +- return -ENODEV; ++ err = -ENODEV; ++ goto unlock; + } + } + +- mutex_unlock(&spec->pcm_lock); + /* Store the updated parameters */ + runtime->hw.channels_min = hinfo->channels_min; + runtime->hw.channels_max = hinfo->channels_max; +@@ -1182,7 +1179,9 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo, + + snd_pcm_hw_constraint_step(substream->runtime, 0, + SNDRV_PCM_HW_PARAM_CHANNELS, 2); +- return 0; ++ unlock: ++ mutex_unlock(&spec->pcm_lock); ++ return err; + } + + /* +@@ -1726,7 +1725,7 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo, + struct snd_pcm_runtime *runtime = substream->runtime; + bool non_pcm; + int pinctl; +- int err; ++ int err = 0; + + mutex_lock(&spec->pcm_lock); + pin_idx = hinfo_to_pin_index(codec, hinfo); +@@ -1738,13 +1737,12 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo, + pin_cvt_fixup(codec, NULL, cvt_nid); + snd_hda_codec_setup_stream(codec, cvt_nid, + stream_tag, 0, format); +- mutex_unlock(&spec->pcm_lock); +- return 0; ++ goto unlock; + } + + if (snd_BUG_ON(pin_idx < 0)) { +- mutex_unlock(&spec->pcm_lock); +- return -EINVAL; ++ err = -EINVAL; ++ goto unlock; + } + per_pin = get_pin(spec, pin_idx); + pin_nid = per_pin->pin_nid; +@@ -1781,6 +1779,7 @@ static int 
generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo, + + err = spec->ops.setup_stream(codec, cvt_nid, pin_nid, + stream_tag, format); ++ unlock: + mutex_unlock(&spec->pcm_lock); + return err; + } +@@ -1802,32 +1801,34 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo, + struct hdmi_spec_per_cvt *per_cvt; + struct hdmi_spec_per_pin *per_pin; + int pinctl; ++ int err = 0; + ++ mutex_lock(&spec->pcm_lock); + if (hinfo->nid) { + pcm_idx = hinfo_to_pcm_index(codec, hinfo); +- if (snd_BUG_ON(pcm_idx < 0)) +- return -EINVAL; ++ if (snd_BUG_ON(pcm_idx < 0)) { ++ err = -EINVAL; ++ goto unlock; ++ } + cvt_idx = cvt_nid_to_cvt_index(codec, hinfo->nid); +- if (snd_BUG_ON(cvt_idx < 0)) +- return -EINVAL; ++ if (snd_BUG_ON(cvt_idx < 0)) { ++ err = -EINVAL; ++ goto unlock; ++ } + per_cvt = get_cvt(spec, cvt_idx); +- + snd_BUG_ON(!per_cvt->assigned); + per_cvt->assigned = 0; + hinfo->nid = 0; + +- mutex_lock(&spec->pcm_lock); + snd_hda_spdif_ctls_unassign(codec, pcm_idx); + clear_bit(pcm_idx, &spec->pcm_in_use); + pin_idx = hinfo_to_pin_index(codec, hinfo); +- if (spec->dyn_pcm_assign && pin_idx < 0) { +- mutex_unlock(&spec->pcm_lock); +- return 0; +- } ++ if (spec->dyn_pcm_assign && pin_idx < 0) ++ goto unlock; + + if (snd_BUG_ON(pin_idx < 0)) { +- mutex_unlock(&spec->pcm_lock); +- return -EINVAL; ++ err = -EINVAL; ++ goto unlock; + } + per_pin = get_pin(spec, pin_idx); + +@@ -1846,10 +1847,12 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo, + per_pin->setup = false; + per_pin->channels = 0; + mutex_unlock(&per_pin->lock); +- mutex_unlock(&spec->pcm_lock); + } + +- return 0; ++unlock: ++ mutex_unlock(&spec->pcm_lock); ++ ++ return err; + } + + static const struct hda_pcm_ops generic_ops = { +@@ -2153,6 +2156,18 @@ static void generic_hdmi_free(struct hda_codec *codec) + } + + #ifdef CONFIG_PM ++static int generic_hdmi_suspend(struct hda_codec *codec) ++{ ++ struct hdmi_spec *spec = codec->spec; ++ int pin_idx; ++ ++ for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) { ++ struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx); ++ cancel_delayed_work_sync(&per_pin->work); ++ } ++ return 0; ++} ++ + static int generic_hdmi_resume(struct hda_codec *codec) + { + struct hdmi_spec *spec = codec->spec; +@@ -2176,6 +2191,7 @@ static const struct hda_codec_ops generic_hdmi_patch_ops = { + .build_controls = generic_hdmi_build_controls, + .unsol_event = hdmi_unsol_event, + #ifdef CONFIG_PM ++ .suspend = generic_hdmi_suspend, + .resume = generic_hdmi_resume, + #endif + }; +@@ -3224,6 +3240,7 @@ static int tegra_hdmi_build_pcms(struct hda_codec *codec) + + static int patch_tegra_hdmi(struct hda_codec *codec) + { ++ struct hdmi_spec *spec; + int err; + + err = patch_generic_hdmi(codec); +@@ -3231,6 +3248,10 @@ static int patch_tegra_hdmi(struct hda_codec *codec) + return err; + + codec->patch_ops.build_pcms = tegra_hdmi_build_pcms; ++ spec = codec->spec; ++ spec->chmap.ops.chmap_cea_alloc_validate_get_type = ++ nvhdmi_chmap_cea_alloc_validate_get_type; ++ spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate; + + return 0; + } +@@ -3687,6 +3708,11 @@ HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP", patch_nvhdmi), + HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi), + HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi), + HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi), ++HDA_CODEC_ENTRY(0x10de009a, "GPU 9a HDMI/DP", patch_nvhdmi), ++HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP", patch_nvhdmi), ++HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP", patch_nvhdmi), 
++HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP", patch_nvhdmi), ++HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP", patch_nvhdmi), + HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch), + HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch), + HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi), +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index df6d0211df51..58f03b0bb4c4 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -330,9 +330,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) + case 0x10ec0225: + case 0x10ec0233: + case 0x10ec0235: +- case 0x10ec0236: + case 0x10ec0255: +- case 0x10ec0256: + case 0x10ec0257: + case 0x10ec0282: + case 0x10ec0283: +@@ -343,6 +341,11 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) + case 0x10ec0299: + alc_update_coef_idx(codec, 0x10, 1<<9, 0); + break; ++ case 0x10ec0236: ++ case 0x10ec0256: ++ alc_write_coef_idx(codec, 0x36, 0x5757); ++ alc_update_coef_idx(codec, 0x10, 1<<9, 0); ++ break; + case 0x10ec0285: + case 0x10ec0293: + alc_update_coef_idx(codec, 0xa, 1<<13, 0); +@@ -380,6 +383,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) + alc_update_coef_idx(codec, 0x7, 1<<5, 0); + break; + case 0x10ec0892: ++ case 0x10ec0897: + alc_update_coef_idx(codec, 0x7, 1<<5, 0); + break; + case 0x10ec0899: +@@ -2215,13 +2219,13 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { + ALC882_FIXUP_ACER_ASPIRE_8930G), + SND_PCI_QUIRK(0x1025, 0x0146, "Acer Aspire 6935G", + ALC882_FIXUP_ACER_ASPIRE_8930G), ++ SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G", ++ ALC882_FIXUP_ACER_ASPIRE_4930G), ++ SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210), + SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G", + ALC882_FIXUP_ACER_ASPIRE_4930G), + SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G", + ALC882_FIXUP_ACER_ASPIRE_4930G), +- SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G", +- ALC882_FIXUP_ACER_ASPIRE_4930G), +- SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210), + SND_PCI_QUIRK(0x1025, 0x021e, "Acer Aspire 5739G", + ALC882_FIXUP_ACER_ASPIRE_4930G), + SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE), +@@ -2233,11 +2237,11 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { + SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601), + SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS), + SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3), ++ SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), ++ SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP), + SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), + SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), + SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP), +- SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), +- SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP), + + /* All Apple entries are in codec SSIDs */ + SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF), +@@ -4300,6 +4304,7 @@ static void alc_update_headset_jack_cb(struct hda_codec *codec, + struct alc_spec *spec = codec->spec; + spec->current_headset_type = ALC_HEADSET_TYPE_UNKNOWN; + snd_hda_gen_hp_automute(codec, jack); ++ alc_update_headset_mode(codec); + } + + static void alc_probe_headset_mode(struct 
hda_codec *codec) +@@ -4895,6 +4900,7 @@ enum { + ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, + ALC269_FIXUP_DELL3_MIC_NO_PRESENCE, ++ ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, + ALC269_FIXUP_HEADSET_MODE, + ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, + ALC269_FIXUP_ASPIRE_HEADSET_MIC, +@@ -5198,6 +5204,16 @@ static const struct hda_fixup alc269_fixups[] = { + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC + }, ++ [ALC269_FIXUP_DELL4_MIC_NO_PRESENCE] = { ++ .type = HDA_FIXUP_PINS, ++ .v.pins = (const struct hda_pintbl[]) { ++ { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ ++ { 0x1b, 0x01a1913d }, /* use as headphone mic, without its own jack detect */ ++ { } ++ }, ++ .chained = true, ++ .chain_id = ALC269_FIXUP_HEADSET_MODE ++ }, + [ALC269_FIXUP_HEADSET_MODE] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_headset_mode, +@@ -5805,12 +5821,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101), +- SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), +- SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2), + SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), + SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), + SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX), ++ SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), ++ SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK), + SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT), + SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN), +@@ -5825,9 +5841,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE), + SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE), + SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE), ++ SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK), + SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST), + SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK), +- SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK), + SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK), + SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK), + SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK), +@@ -5859,6 +5875,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), + SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), + SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI), ++ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), + SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), + SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", 
ALC292_FIXUP_TPT440_DOCK), +@@ -5877,7 +5894,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), + SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), +- SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), + SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), + SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */ + +@@ -6182,7 +6198,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { + {0x12, 0x90a60120}, + {0x14, 0x90170110}, + {0x21, 0x0321101f}), +- SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, ++ SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, + {0x12, 0xb7a60130}, + {0x14, 0x90170110}, + {0x21, 0x04211020}), +@@ -6266,6 +6282,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { + {0x17, 0x90170110}, + {0x1a, 0x03011020}, + {0x21, 0x03211030}), ++ SND_HDA_PIN_QUIRK(0x10ec0299, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, ++ ALC225_STANDARD_PINS, ++ {0x12, 0xb7a60130}, ++ {0x17, 0x90170110}), + {} + }; + +@@ -6549,8 +6569,7 @@ static const struct snd_pci_quirk alc861_fixup_tbl[] = { + SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP), + SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F), + SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT), +- SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", ALC861_FIXUP_AMP_VREF_0F), +- SND_PCI_QUIRK(0x1584, 0x0000, "Uniwill ECS M31EI", ALC861_FIXUP_AMP_VREF_0F), ++ SND_PCI_QUIRK_VENDOR(0x1584, "Haier/Uniwill", ALC861_FIXUP_AMP_VREF_0F), + SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", ALC861_FIXUP_FSC_AMILO_PI1505), + {} + }; +@@ -7487,6 +7506,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { + HDA_CODEC_ENTRY(0x10ec0888, "ALC888", patch_alc882), + HDA_CODEC_ENTRY(0x10ec0889, "ALC889", patch_alc882), + HDA_CODEC_ENTRY(0x10ec0892, "ALC892", patch_alc662), ++ HDA_CODEC_ENTRY(0x10ec0897, "ALC897", patch_alc662), + HDA_CODEC_ENTRY(0x10ec0899, "ALC898", patch_alc882), + HDA_CODEC_ENTRY(0x10ec0900, "ALC1150", patch_alc882), + {} /* terminator */ +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c +index d1a6d20ace0d..80b72d0702c5 100644 +--- a/sound/pci/hda/patch_sigmatel.c ++++ b/sound/pci/hda/patch_sigmatel.c +@@ -862,7 +862,7 @@ static int stac_auto_create_beep_ctls(struct hda_codec *codec, + static struct snd_kcontrol_new beep_vol_ctl = + HDA_CODEC_VOLUME(NULL, 0, 0, 0); + +- /* check for mute support for the the amp */ ++ /* check for mute support for the amp */ + if ((caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT) { + const struct snd_kcontrol_new *temp; + if (spec->anabeep_nid == nid) +diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c +index fc30d1e8aa76..9dd104c308e1 100644 +--- a/sound/pci/hda/patch_via.c ++++ b/sound/pci/hda/patch_via.c +@@ -135,6 +135,7 @@ static struct via_spec *via_new_spec(struct hda_codec *codec) + spec->codec_type = VT1708S; + spec->gen.indep_hp = 1; + spec->gen.keep_eapd_on = 1; ++ spec->gen.dac_min_mute = 1; + spec->gen.pcm_playback_hook = via_playback_pcm_hook; + spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO; + codec->power_save_node = 1; +diff --git a/sound/pci/ice1712/prodigy192.c b/sound/pci/ice1712/prodigy192.c 
+index 3919aed39ca0..5e52086d7b98 100644 +--- a/sound/pci/ice1712/prodigy192.c ++++ b/sound/pci/ice1712/prodigy192.c +@@ -31,7 +31,7 @@ + * Experimentally I found out that only a combination of + * OCKS0=1, OCKS1=1 (128fs, 64fs output) and ice1724 - + * VT1724_MT_I2S_MCLK_128X=0 (256fs input) yields correct +- * sampling rate. That means the the FPGA doubles the ++ * sampling rate. That means that the FPGA doubles the + * MCK01 rate. + * + * Copyright (c) 2003 Takashi Iwai +diff --git a/sound/pci/mixart/mixart_core.c b/sound/pci/mixart/mixart_core.c +index dccf3db48fe0..1381f4fa0856 100644 +--- a/sound/pci/mixart/mixart_core.c ++++ b/sound/pci/mixart/mixart_core.c +@@ -83,7 +83,6 @@ static int get_msg(struct mixart_mgr *mgr, struct mixart_msg *resp, + unsigned int i; + #endif + +- mutex_lock(&mgr->msg_lock); + err = 0; + + /* copy message descriptor from miXart to driver */ +@@ -132,8 +131,6 @@ static int get_msg(struct mixart_mgr *mgr, struct mixart_msg *resp, + writel_be(headptr, MIXART_MEM(mgr, MSG_OUTBOUND_FREE_HEAD)); + + _clean_exit: +- mutex_unlock(&mgr->msg_lock); +- + return err; + } + +@@ -271,7 +268,9 @@ int snd_mixart_send_msg(struct mixart_mgr *mgr, struct mixart_msg *request, int + resp.data = resp_data; + resp.size = max_resp_size; + ++ mutex_lock(&mgr->msg_lock); + err = get_msg(mgr, &resp, msg_frame); ++ mutex_unlock(&mgr->msg_lock); + + if( request->message_id != resp.message_id ) + dev_err(&mgr->pci->dev, "RESPONSE ERROR!\n"); +diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c +index 4cf3200e988b..df44135e1b0c 100644 +--- a/sound/pci/oxygen/xonar_dg.c ++++ b/sound/pci/oxygen/xonar_dg.c +@@ -39,7 +39,7 @@ + * GPIO 4 <- headphone detect + * GPIO 5 -> enable ADC analog circuit for the left channel + * GPIO 6 -> enable ADC analog circuit for the right channel +- * GPIO 7 -> switch green rear output jack between CS4245 and and the first ++ * GPIO 7 -> switch green rear output jack between CS4245 and the first + * channel of CS4361 (mechanical relay) + * GPIO 8 -> enable output to speakers + * +diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c +index b044dea3c815..9843954698f4 100644 +--- a/sound/pci/rme9652/hdsp.c ++++ b/sound/pci/rme9652/hdsp.c +@@ -5314,7 +5314,8 @@ static int snd_hdsp_free(struct hdsp *hdsp) + if (hdsp->port) + pci_release_regions(hdsp->pci); + +- pci_disable_device(hdsp->pci); ++ if (pci_is_enabled(hdsp->pci)) ++ pci_disable_device(hdsp->pci); + return 0; + } + +diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c +index 9899ef4c7efa..a88a81fc638a 100644 +--- a/sound/pci/rme9652/hdspm.c ++++ b/sound/pci/rme9652/hdspm.c +@@ -6912,7 +6912,8 @@ static int snd_hdspm_free(struct hdspm * hdspm) + if (hdspm->port) + pci_release_regions(hdspm->pci); + +- pci_disable_device(hdspm->pci); ++ if (pci_is_enabled(hdspm->pci)) ++ pci_disable_device(hdspm->pci); + return 0; + } + +diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c +index a76b1f147660..67bd75fbdc7e 100644 +--- a/sound/pci/rme9652/rme9652.c ++++ b/sound/pci/rme9652/rme9652.c +@@ -1761,7 +1761,8 @@ static int snd_rme9652_free(struct snd_rme9652 *rme9652) + if (rme9652->port) + pci_release_regions(rme9652->pci); + +- pci_disable_device(rme9652->pci); ++ if (pci_is_enabled(rme9652->pci)) ++ pci_disable_device(rme9652->pci); + return 0; + } + +diff --git a/sound/ppc/powermac.c b/sound/ppc/powermac.c +index 33c6be9fb388..7c70ba5e2540 100644 +--- a/sound/ppc/powermac.c ++++ b/sound/ppc/powermac.c +@@ -90,7 +90,11 @@ static int 
snd_pmac_probe(struct platform_device *devptr) + sprintf(card->shortname, "PowerMac %s", name_ext); + sprintf(card->longname, "%s (Dev %d) Sub-frame %d", + card->shortname, chip->device_id, chip->subframe); +- if ( snd_pmac_tumbler_init(chip) < 0 || snd_pmac_tumbler_post_init() < 0) ++ err = snd_pmac_tumbler_init(chip); ++ if (err < 0) ++ goto __error; ++ err = snd_pmac_tumbler_post_init(); ++ if (err < 0) + goto __error; + break; + case PMAC_AWACS: +diff --git a/sound/soc/codecs/cs35l33.c b/sound/soc/codecs/cs35l33.c +index 6df29fa30fb9..9e449dd8da92 100644 +--- a/sound/soc/codecs/cs35l33.c ++++ b/sound/soc/codecs/cs35l33.c +@@ -1209,6 +1209,7 @@ static int cs35l33_i2c_probe(struct i2c_client *i2c_client, + dev_err(&i2c_client->dev, + "CS35L33 Device ID (%X). Expected ID %X\n", + devid, CS35L33_CHIP_ID); ++ ret = -EINVAL; + goto err_enable; + } + +diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c +index 54c1768bc818..a2535a7eb4bb 100644 +--- a/sound/soc/codecs/cs42l56.c ++++ b/sound/soc/codecs/cs42l56.c +@@ -1270,6 +1270,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client, + dev_err(&i2c_client->dev, + "CS42L56 Device ID (%X). Expected %X\n", + devid, CS42L56_DEVID); ++ ret = -EINVAL; + goto err_enable; + } + alpha_rev = reg & CS42L56_AREV_MASK; +@@ -1325,7 +1326,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client, + ret = snd_soc_register_codec(&i2c_client->dev, + &soc_codec_dev_cs42l56, &cs42l56_dai, 1); + if (ret < 0) +- return ret; ++ goto err_enable; + + return 0; + +diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c +index 7899a2cdeb42..a41dd9d1eb82 100644 +--- a/sound/soc/codecs/rt286.c ++++ b/sound/soc/codecs/rt286.c +@@ -174,6 +174,9 @@ static bool rt286_readable_register(struct device *dev, unsigned int reg) + case RT286_PROC_COEF: + case RT286_SET_AMP_GAIN_ADC_IN1: + case RT286_SET_AMP_GAIN_ADC_IN2: ++ case RT286_SET_GPIO_MASK: ++ case RT286_SET_GPIO_DIRECTION: ++ case RT286_SET_GPIO_DATA: + case RT286_SET_POWER(RT286_DAC_OUT1): + case RT286_SET_POWER(RT286_DAC_OUT2): + case RT286_SET_POWER(RT286_ADC_IN1): +@@ -1119,12 +1122,11 @@ static const struct dmi_system_id force_combo_jack_table[] = { + { } + }; + +-static const struct dmi_system_id dmi_dell_dino[] = { ++static const struct dmi_system_id dmi_dell[] = { + { +- .ident = "Dell Dino", ++ .ident = "Dell", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), +- DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343") + } + }, + { } +@@ -1135,7 +1137,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c, + { + struct rt286_platform_data *pdata = dev_get_platdata(&i2c->dev); + struct rt286_priv *rt286; +- int i, ret, val; ++ int i, ret, vendor_id; + + rt286 = devm_kzalloc(&i2c->dev, sizeof(*rt286), + GFP_KERNEL); +@@ -1151,14 +1153,15 @@ static int rt286_i2c_probe(struct i2c_client *i2c, + } + + ret = regmap_read(rt286->regmap, +- RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val); ++ RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &vendor_id); + if (ret != 0) { + dev_err(&i2c->dev, "I2C error %d\n", ret); + return ret; + } +- if (val != RT286_VENDOR_ID && val != RT288_VENDOR_ID) { ++ if (vendor_id != RT286_VENDOR_ID && vendor_id != RT288_VENDOR_ID) { + dev_err(&i2c->dev, +- "Device with ID register %#x is not rt286\n", val); ++ "Device with ID register %#x is not rt286\n", ++ vendor_id); + return -ENODEV; + } + +@@ -1182,8 +1185,8 @@ static int rt286_i2c_probe(struct i2c_client *i2c, + if (pdata) + rt286->pdata = *pdata; + +- if (dmi_check_system(force_combo_jack_table) || 
+- dmi_check_system(dmi_dell_dino)) ++ if ((vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) || ++ dmi_check_system(force_combo_jack_table)) + rt286->pdata.cbj_en = true; + + regmap_write(rt286->regmap, RT286_SET_AUDIO_POWER, AC_PWRST_D3); +@@ -1222,7 +1225,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c, + regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL3, 0xf777, 0x4737); + regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL4, 0x00ff, 0x003f); + +- if (dmi_check_system(dmi_dell_dino)) { ++ if (vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) { + regmap_update_bits(rt286->regmap, + RT286_SET_GPIO_MASK, 0x40, 0x40); + regmap_update_bits(rt286->regmap, +diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c +index 3cc1135fc2cd..81fbbcaf8121 100644 +--- a/sound/soc/codecs/rt5640.c ++++ b/sound/soc/codecs/rt5640.c +@@ -341,9 +341,9 @@ static bool rt5640_readable_register(struct device *dev, unsigned int reg) + } + + static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0); +-static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0); ++static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0); + static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0); +-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0); ++static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000); + static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0); + + /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */ +diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c +index f0c9e2562474..acd5c30694e8 100644 +--- a/sound/soc/codecs/rt5651.c ++++ b/sound/soc/codecs/rt5651.c +@@ -287,9 +287,9 @@ static bool rt5651_readable_register(struct device *dev, unsigned int reg) + } + + static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0); +-static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0); ++static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0); + static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0); +-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0); ++static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000); + static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0); + + /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */ +diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c +index 635818fcda00..21a007c26407 100644 +--- a/sound/soc/codecs/rt5659.c ++++ b/sound/soc/codecs/rt5659.c +@@ -3389,12 +3389,17 @@ static int rt5659_set_dai_sysclk(struct snd_soc_dai *dai, + struct snd_soc_codec *codec = dai->codec; + struct rt5659_priv *rt5659 = snd_soc_codec_get_drvdata(codec); + unsigned int reg_val = 0; ++ int ret; + + if (freq == rt5659->sysclk && clk_id == rt5659->sysclk_src) + return 0; + + switch (clk_id) { + case RT5659_SCLK_S_MCLK: ++ ret = clk_set_rate(rt5659->mclk, freq); ++ if (ret) ++ return ret; ++ + reg_val |= RT5659_SCLK_SRC_MCLK; + break; + case RT5659_SCLK_S_PLL1: +diff --git a/sound/soc/codecs/rt5670.h b/sound/soc/codecs/rt5670.h +index 3f1b0f1df809..e4e31e82311d 100644 +--- a/sound/soc/codecs/rt5670.h ++++ b/sound/soc/codecs/rt5670.h +@@ -760,7 +760,7 @@ + #define RT5670_PWR_VREF2_BIT 4 + #define RT5670_PWR_FV2 (0x1 << 3) + #define RT5670_PWR_FV2_BIT 3 +-#define RT5670_LDO_SEL_MASK (0x3) ++#define RT5670_LDO_SEL_MASK (0x7) + #define RT5670_LDO_SEL_SFT 0 + + /* Power Management for Analog 2 (0x64) */ +diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c +index 0c2a1413a8f5..14e564e38f3c 100644 +--- a/sound/soc/codecs/sgtl5000.c ++++ 
b/sound/soc/codecs/sgtl5000.c +@@ -75,7 +75,7 @@ static const struct reg_default sgtl5000_reg_defaults[] = { + { SGTL5000_DAP_EQ_BASS_BAND4, 0x002f }, + { SGTL5000_DAP_MAIN_CHAN, 0x8000 }, + { SGTL5000_DAP_MIX_CHAN, 0x0000 }, +- { SGTL5000_DAP_AVC_CTRL, 0x0510 }, ++ { SGTL5000_DAP_AVC_CTRL, 0x5100 }, + { SGTL5000_DAP_AVC_THRESHOLD, 0x1473 }, + { SGTL5000_DAP_AVC_ATTACK, 0x0028 }, + { SGTL5000_DAP_AVC_DECAY, 0x0050 }, +diff --git a/sound/soc/codecs/sti-sas.c b/sound/soc/codecs/sti-sas.c +index d6e00c77edcd..7cf76661c3cc 100644 +--- a/sound/soc/codecs/sti-sas.c ++++ b/sound/soc/codecs/sti-sas.c +@@ -542,6 +542,7 @@ static const struct of_device_id sti_sas_dev_match[] = { + }, + {}, + }; ++MODULE_DEVICE_TABLE(of, sti_sas_dev_match); + + static int sti_sas_driver_probe(struct platform_device *pdev) + { +diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c +index 28eb55bc4663..00dd37f10daf 100644 +--- a/sound/soc/codecs/wm_adsp.c ++++ b/sound/soc/codecs/wm_adsp.c +@@ -1156,7 +1156,7 @@ static int wm_adsp_create_control(struct wm_adsp *dsp, + ctl_work = kzalloc(sizeof(*ctl_work), GFP_KERNEL); + if (!ctl_work) { + ret = -ENOMEM; +- goto err_ctl_cache; ++ goto err_list_del; + } + + ctl_work->dsp = dsp; +@@ -1166,7 +1166,8 @@ static int wm_adsp_create_control(struct wm_adsp *dsp, + + return 0; + +-err_ctl_cache: ++err_list_del: ++ list_del(&ctl->list); + kfree(ctl->cache); + err_ctl_name: + kfree(ctl->name); +diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c +index dc30d780f874..3fcf174b99d3 100644 +--- a/sound/soc/fsl/fsl_asrc_dma.c ++++ b/sound/soc/fsl/fsl_asrc_dma.c +@@ -243,6 +243,7 @@ static int fsl_asrc_dma_hw_params(struct snd_pcm_substream *substream, + ret = dmaengine_slave_config(pair->dma_chan[dir], &config_be); + if (ret) { + dev_err(dev, "failed to config DMA channel for Back-End\n"); ++ dma_release_channel(pair->dma_chan[dir]); + return ret; + } + +diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c +index fa64cc2b1729..94bf497092b2 100644 +--- a/sound/soc/fsl/fsl_esai.c ++++ b/sound/soc/fsl/fsl_esai.c +@@ -495,11 +495,13 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream, + ESAI_SAICR_SYNC, esai_priv->synchronous ? 
+ ESAI_SAICR_SYNC : 0); + +- /* Set a default slot number -- 2 */ ++ /* Set slots count */ + regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR, +- ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2)); ++ ESAI_xCCR_xDC_MASK, ++ ESAI_xCCR_xDC(esai_priv->slots)); + regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR, +- ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2)); ++ ESAI_xCCR_xDC_MASK, ++ ESAI_xCCR_xDC(esai_priv->slots)); + } + + return 0; +diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c +index e83e314a76a5..1b6dedfc33e3 100644 +--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c ++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c +@@ -135,7 +135,7 @@ static void sst_fill_alloc_params(struct snd_pcm_substream *substream, + snd_pcm_uframes_t period_size; + ssize_t periodbytes; + ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream); +- u32 buffer_addr = virt_to_phys(substream->dma_buffer.area); ++ u32 buffer_addr = virt_to_phys(substream->runtime->dma_area); + + channels = substream->runtime->channels; + period_size = substream->runtime->period_size; +@@ -241,7 +241,6 @@ static int sst_platform_alloc_stream(struct snd_pcm_substream *substream, + /* set codec params and inform SST driver the same */ + sst_fill_pcm_params(substream, ¶m); + sst_fill_alloc_params(substream, &alloc_params); +- substream->runtime->dma_area = substream->dma_buffer.area; + str_params.sparams = param; + str_params.aparams = alloc_params; + str_params.codec = SST_CODEC_TYPE_PCM; +@@ -339,7 +338,7 @@ static int sst_media_open(struct snd_pcm_substream *substream, + + ret_val = power_up_sst(stream); + if (ret_val < 0) +- return ret_val; ++ goto out_power_up; + + /* Make sure, that the period size is always even */ + snd_pcm_hw_constraint_step(substream->runtime, 0, +@@ -348,8 +347,9 @@ static int sst_media_open(struct snd_pcm_substream *substream, + return snd_pcm_hw_constraint_integer(runtime, + SNDRV_PCM_HW_PARAM_PERIODS); + out_ops: +- kfree(stream); + mutex_unlock(&sst_lock); ++out_power_up: ++ kfree(stream); + return ret_val; + } + +@@ -507,14 +507,14 @@ static struct snd_soc_dai_driver sst_platform_dai[] = { + .channels_min = SST_STEREO, + .channels_max = SST_STEREO, + .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000, +- .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, ++ .formats = SNDRV_PCM_FMTBIT_S16_LE, + }, + .capture = { + .stream_name = "Headset Capture", + .channels_min = 1, + .channels_max = 2, + .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000, +- .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, ++ .formats = SNDRV_PCM_FMTBIT_S16_LE, + }, + }, + { +@@ -525,7 +525,7 @@ static struct snd_soc_dai_driver sst_platform_dai[] = { + .channels_min = SST_STEREO, + .channels_max = SST_STEREO, + .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000, +- .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, ++ .formats = SNDRV_PCM_FMTBIT_S16_LE, + }, + }, + { +diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c +index 11d0cc2b0e39..060da9577041 100644 +--- a/sound/soc/intel/boards/haswell.c ++++ b/sound/soc/intel/boards/haswell.c +@@ -197,6 +197,7 @@ static struct platform_driver haswell_audio = { + .probe = haswell_audio_probe, + .driver = { + .name = "haswell-audio", ++ .pm = &snd_soc_pm_ops, + }, + }; + +diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c +index 0dc1ab48fceb..6440729facaf 100644 +--- a/sound/soc/jz4740/jz4740-i2s.c ++++ b/sound/soc/jz4740/jz4740-i2s.c +@@ -315,10 +315,14 @@ 
static int jz4740_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id, + switch (clk_id) { + case JZ4740_I2S_CLKSRC_EXT: + parent = clk_get(NULL, "ext"); ++ if (IS_ERR(parent)) ++ return PTR_ERR(parent); + clk_set_parent(i2s->clk_i2s, parent); + break; + case JZ4740_I2S_CLKSRC_PLL: + parent = clk_get(NULL, "pll half"); ++ if (IS_ERR(parent)) ++ return PTR_ERR(parent); + clk_set_parent(i2s->clk_i2s, parent); + ret = clk_set_rate(i2s->clk_i2s, freq); + break; +diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c +index dafd22e874e9..e655425e4819 100644 +--- a/sound/soc/kirkwood/kirkwood-dma.c ++++ b/sound/soc/kirkwood/kirkwood-dma.c +@@ -136,7 +136,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream) + err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED, + "kirkwood-i2s", priv); + if (err) +- return -EBUSY; ++ return err; + + /* + * Enable Error interrupts. We're only ack'ing them but +diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c +index 420d200f9a05..eeed53cf325a 100644 +--- a/sound/soc/qcom/lpass-platform.c ++++ b/sound/soc/qcom/lpass-platform.c +@@ -68,7 +68,7 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream) + int ret, dma_ch, dir = substream->stream; + struct lpass_pcm_data *data; + +- data = devm_kzalloc(soc_runtime->dev, sizeof(*data), GFP_KERNEL); ++ data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + +@@ -140,6 +140,7 @@ static int lpass_platform_pcmops_close(struct snd_pcm_substream *substream) + if (v->free_dma_channel) + v->free_dma_channel(drvdata, dma_ch); + ++ kfree(data); + return 0; + } + +diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c +index 4f3d94ce0a98..d2cfebf4c01b 100644 +--- a/sound/soc/soc-core.c ++++ b/sound/soc/soc-core.c +@@ -3677,7 +3677,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, + if (!routes) { + dev_err(card->dev, + "ASoC: Could not allocate DAPM route table\n"); +- return -EINVAL; ++ return -ENOMEM; + } + + for (i = 0; i < num_routes; i++) { +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c +index 70c02b02030b..c3b2168c8f24 100644 +--- a/sound/soc/soc-dapm.c ++++ b/sound/soc/soc-dapm.c +@@ -2366,6 +2366,7 @@ void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w) + enum snd_soc_dapm_direction dir; + + list_del(&w->list); ++ list_del(&w->dirty); + /* + * remove source and sink paths associated to this widget. 
+ * While removing the path, remove reference to it from both +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c +index 50ce608a096f..64e23e702dad 100644 +--- a/sound/soc/soc-pcm.c ++++ b/sound/soc/soc-pcm.c +@@ -2379,6 +2379,7 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: ++ case SNDRV_PCM_TRIGGER_DRAIN: + ret = dpcm_dai_trigger_fe_be(substream, cmd, true); + break; + case SNDRV_PCM_TRIGGER_STOP: +@@ -2396,6 +2397,7 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: ++ case SNDRV_PCM_TRIGGER_DRAIN: + ret = dpcm_dai_trigger_fe_be(substream, cmd, false); + break; + case SNDRV_PCM_TRIGGER_STOP: +diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c +index fef3b9a21a66..e441e23a37e4 100644 +--- a/sound/soc/tegra/tegra30_ahub.c ++++ b/sound/soc/tegra/tegra30_ahub.c +@@ -656,8 +656,10 @@ static int tegra30_ahub_resume(struct device *dev) + int ret; + + ret = pm_runtime_get_sync(dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put(dev); + return ret; ++ } + ret = regcache_sync(ahub->regmap_ahub); + ret |= regcache_sync(ahub->regmap_apbif); + pm_runtime_put(dev); +diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c +index 8e55583aa104..516f37896092 100644 +--- a/sound/soc/tegra/tegra30_i2s.c ++++ b/sound/soc/tegra/tegra30_i2s.c +@@ -552,8 +552,10 @@ static int tegra30_i2s_resume(struct device *dev) + int ret; + + ret = pm_runtime_get_sync(dev); +- if (ret < 0) ++ if (ret < 0) { ++ pm_runtime_put(dev); + return ret; ++ } + ret = regcache_sync(i2s->regmap); + pm_runtime_put(dev); + +diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c +index deb597f7c302..f40657da4db2 100644 +--- a/sound/soc/tegra/tegra_alc5632.c ++++ b/sound/soc/tegra/tegra_alc5632.c +@@ -149,6 +149,7 @@ static struct snd_soc_dai_link tegra_alc5632_dai = { + + static struct snd_soc_card snd_soc_tegra_alc5632 = { + .name = "tegra-alc5632", ++ .driver_name = "tegra", + .owner = THIS_MODULE, + .remove = tegra_alc5632_card_remove, + .dai_link = &tegra_alc5632_dai, +diff --git a/sound/soc/tegra/tegra_max98090.c b/sound/soc/tegra/tegra_max98090.c +index 902da36581d1..f4f238924c76 100644 +--- a/sound/soc/tegra/tegra_max98090.c ++++ b/sound/soc/tegra/tegra_max98090.c +@@ -205,6 +205,7 @@ static struct snd_soc_dai_link tegra_max98090_dai = { + + static struct snd_soc_card snd_soc_tegra_max98090 = { + .name = "tegra-max98090", ++ .driver_name = "tegra", + .owner = THIS_MODULE, + .remove = tegra_max98090_card_remove, + .dai_link = &tegra_max98090_dai, +diff --git a/sound/soc/tegra/tegra_rt5640.c b/sound/soc/tegra/tegra_rt5640.c +index e5ef4e9c4ac5..3088c6f401e3 100644 +--- a/sound/soc/tegra/tegra_rt5640.c ++++ b/sound/soc/tegra/tegra_rt5640.c +@@ -150,6 +150,7 @@ static struct snd_soc_dai_link tegra_rt5640_dai = { + + static struct snd_soc_card snd_soc_tegra_rt5640 = { + .name = "tegra-rt5640", ++ .driver_name = "tegra", + .owner = THIS_MODULE, + .remove = tegra_rt5640_card_remove, + .dai_link = &tegra_rt5640_dai, +diff --git a/sound/soc/tegra/tegra_rt5677.c b/sound/soc/tegra/tegra_rt5677.c +index 1470873ecde6..451e7254e87b 100644 +--- a/sound/soc/tegra/tegra_rt5677.c ++++ b/sound/soc/tegra/tegra_rt5677.c +@@ -198,6 +198,7 @@ static struct snd_soc_dai_link tegra_rt5677_dai = { + + static 
struct snd_soc_card snd_soc_tegra_rt5677 = { + .name = "tegra-rt5677", ++ .driver_name = "tegra", + .owner = THIS_MODULE, + .remove = tegra_rt5677_card_remove, + .dai_link = &tegra_rt5677_dai, +diff --git a/sound/soc/tegra/tegra_sgtl5000.c b/sound/soc/tegra/tegra_sgtl5000.c +index 863e04809a6b..8755a9528d92 100644 +--- a/sound/soc/tegra/tegra_sgtl5000.c ++++ b/sound/soc/tegra/tegra_sgtl5000.c +@@ -103,6 +103,7 @@ static struct snd_soc_dai_link tegra_sgtl5000_dai = { + + static struct snd_soc_card snd_soc_tegra_sgtl5000 = { + .name = "tegra-sgtl5000", ++ .driver_name = "tegra", + .owner = THIS_MODULE, + .dai_link = &tegra_sgtl5000_dai, + .num_links = 1, +diff --git a/sound/soc/tegra/tegra_wm8753.c b/sound/soc/tegra/tegra_wm8753.c +index f0cd01dbfc38..633d26c4811b 100644 +--- a/sound/soc/tegra/tegra_wm8753.c ++++ b/sound/soc/tegra/tegra_wm8753.c +@@ -110,6 +110,7 @@ static struct snd_soc_dai_link tegra_wm8753_dai = { + + static struct snd_soc_card snd_soc_tegra_wm8753 = { + .name = "tegra-wm8753", ++ .driver_name = "tegra", + .owner = THIS_MODULE, + .dai_link = &tegra_wm8753_dai, + .num_links = 1, +diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c +index e485278e027a..20a3ca03ac30 100644 +--- a/sound/soc/tegra/tegra_wm8903.c ++++ b/sound/soc/tegra/tegra_wm8903.c +@@ -228,6 +228,7 @@ static struct snd_soc_dai_link tegra_wm8903_dai = { + + static struct snd_soc_card snd_soc_tegra_wm8903 = { + .name = "tegra-wm8903", ++ .driver_name = "tegra", + .owner = THIS_MODULE, + .dai_link = &tegra_wm8903_dai, + .num_links = 1, +diff --git a/sound/soc/tegra/tegra_wm9712.c b/sound/soc/tegra/tegra_wm9712.c +index 6492f8143ff1..e5bebb473d95 100644 +--- a/sound/soc/tegra/tegra_wm9712.c ++++ b/sound/soc/tegra/tegra_wm9712.c +@@ -59,6 +59,7 @@ static struct snd_soc_dai_link tegra_wm9712_dai = { + + static struct snd_soc_card snd_soc_tegra_wm9712 = { + .name = "tegra-wm9712", ++ .driver_name = "tegra", + .owner = THIS_MODULE, + .dai_link = &tegra_wm9712_dai, + .num_links = 1, +diff --git a/sound/soc/tegra/trimslice.c b/sound/soc/tegra/trimslice.c +index 2cea203c4f5f..90a770968f34 100644 +--- a/sound/soc/tegra/trimslice.c ++++ b/sound/soc/tegra/trimslice.c +@@ -103,6 +103,7 @@ static struct snd_soc_dai_link trimslice_tlv320aic23_dai = { + + static struct snd_soc_card snd_soc_trimslice = { + .name = "tegra-trimslice", ++ .driver_name = "tegra", + .owner = THIS_MODULE, + .dai_link = &trimslice_tlv320aic23_dai, + .num_links = 1, +diff --git a/sound/usb/card.c b/sound/usb/card.c +index 8abe8bd41c35..c3fec43b057e 100644 +--- a/sound/usb/card.c ++++ b/sound/usb/card.c +@@ -249,9 +249,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int + ctrlif, interface); + return -EINVAL; + } +- usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L); +- +- return 0; ++ return usb_driver_claim_interface(&usb_audio_driver, iface, ++ USB_AUDIO_IFACE_UNUSED); + } + + if ((altsd->bInterfaceClass != USB_CLASS_AUDIO && +@@ -271,7 +270,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int + + if (! 
snd_usb_parse_audio_interface(chip, interface)) { + usb_set_interface(dev, interface, 0); /* reset the current interface */ +- usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L); ++ return usb_driver_claim_interface(&usb_audio_driver, iface, ++ USB_AUDIO_IFACE_UNUSED); + } + + return 0; +@@ -763,7 +763,7 @@ static void usb_audio_disconnect(struct usb_interface *intf) + struct snd_card *card; + struct list_head *p; + +- if (chip == (void *)-1L) ++ if (chip == USB_AUDIO_IFACE_UNUSED) + return; + + card = chip->card; +@@ -866,12 +866,9 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) + struct usb_mixer_interface *mixer; + struct list_head *p; + +- if (chip == (void *)-1L) ++ if (chip == USB_AUDIO_IFACE_UNUSED) + return 0; + +- chip->autosuspended = !!PMSG_IS_AUTO(message); +- if (!chip->autosuspended) +- snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); + if (!chip->num_suspended_intf++) { + list_for_each_entry(as, &chip->pcm_list, list) { + snd_pcm_suspend_all(as->pcm); +@@ -884,6 +881,11 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) + snd_usb_mixer_suspend(mixer); + } + ++ if (!PMSG_IS_AUTO(message) && !chip->system_suspend) { ++ snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); ++ chip->system_suspend = chip->num_suspended_intf; ++ } ++ + return 0; + } + +@@ -894,12 +896,13 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) + struct list_head *p; + int err = 0; + +- if (chip == (void *)-1L) +- return 0; +- if (--chip->num_suspended_intf) ++ if (chip == USB_AUDIO_IFACE_UNUSED) + return 0; + + atomic_inc(&chip->active); /* avoid autopm */ ++ if (chip->num_suspended_intf > 1) ++ goto out; ++ + /* + * ALSA leaves material resumption to user space + * we just notify and restart the mixers +@@ -914,9 +917,12 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) + snd_usbmidi_resume(p); + } + +- if (!chip->autosuspended) ++ out: ++ if (chip->num_suspended_intf == chip->system_suspend) { + snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0); +- chip->autosuspended = 0; ++ chip->system_suspend = 0; ++ } ++ chip->num_suspended_intf--; + + err_out: + atomic_dec(&chip->active); /* allow autopm after this point */ +diff --git a/sound/usb/card.h b/sound/usb/card.h +index 25cddcc40f83..511084c3c92a 100644 +--- a/sound/usb/card.h ++++ b/sound/usb/card.h +@@ -125,6 +125,7 @@ struct snd_usb_substream { + unsigned int tx_length_quirk:1; /* add length specifier to transfers */ + unsigned int fmt_type; /* USB audio format type (1-3) */ + unsigned int pkt_offset_adj; /* Bytes to drop from beginning of packets (for non-compliant devices) */ ++ unsigned int stream_offset_adj; /* Bytes to drop from beginning of stream (for non-compliant devices) */ + + unsigned int running: 1; /* running status */ + +diff --git a/sound/usb/clock.c b/sound/usb/clock.c +index 09c6e292a038..1003664e4866 100644 +--- a/sound/usb/clock.c ++++ b/sound/usb/clock.c +@@ -327,6 +327,12 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface, + } + + crate = data[0] | (data[1] << 8) | (data[2] << 16); ++ if (!crate) { ++ dev_info(&dev->dev, "failed to read current rate; disabling the check\n"); ++ chip->sample_rate_read_error = 3; /* three strikes, see above */ ++ return 0; ++ } ++ + if (crate != rate) { + dev_warn(&dev->dev, "current rate %d is different from the runtime rate %d\n", crate, rate); + // runtime->rate = crate; +diff --git a/sound/usb/format.c 
b/sound/usb/format.c +index 2227b4cea338..3e901405f55d 100644 +--- a/sound/usb/format.c ++++ b/sound/usb/format.c +@@ -53,6 +53,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip, + case UAC_VERSION_1: + default: { + struct uac_format_type_i_discrete_descriptor *fmt = _fmt; ++ if (format >= 64) ++ return 0; /* invalid format */ + sample_width = fmt->bBitResolution; + sample_bytes = fmt->bSubframeSize; + format = 1 << format; +@@ -216,9 +218,11 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof + continue; + /* C-Media CM6501 mislabels its 96 kHz altsetting */ + /* Terratec Aureon 7.1 USB C-Media 6206, too */ ++ /* Ozone Z90 USB C-Media, too */ + if (rate == 48000 && nr_rates == 1 && + (chip->usb_id == USB_ID(0x0d8c, 0x0201) || + chip->usb_id == USB_ID(0x0d8c, 0x0102) || ++ chip->usb_id == USB_ID(0x0d8c, 0x0078) || + chip->usb_id == USB_ID(0x0ccd, 0x00b1)) && + fp->altsetting == 5 && fp->maxpacksize == 392) + rate = 96000; +diff --git a/sound/usb/line6/capture.c b/sound/usb/line6/capture.c +index 7c812565f90d..a65a82d5791d 100644 +--- a/sound/usb/line6/capture.c ++++ b/sound/usb/line6/capture.c +@@ -291,6 +291,8 @@ int line6_create_audio_in_urbs(struct snd_line6_pcm *line6pcm) + urb->interval = LINE6_ISO_INTERVAL; + urb->error_count = 0; + urb->complete = audio_in_callback; ++ if (usb_urb_ep_type_check(urb)) ++ return -EINVAL; + } + + return 0; +diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c +index ea3a9bd05e68..0107bbfeb17d 100644 +--- a/sound/usb/line6/driver.c ++++ b/sound/usb/line6/driver.c +@@ -687,6 +687,10 @@ static int line6_init_cap_control(struct usb_line6 *line6) + line6->buffer_message = kmalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL); + if (!line6->buffer_message) + return -ENOMEM; ++ ++ ret = line6_init_midi(line6); ++ if (ret < 0) ++ return ret; + } else { + ret = line6_hwdep_init(line6); + if (ret < 0) +diff --git a/sound/usb/line6/playback.c b/sound/usb/line6/playback.c +index 812d18191e01..1736eb3ee98e 100644 +--- a/sound/usb/line6/playback.c ++++ b/sound/usb/line6/playback.c +@@ -436,6 +436,8 @@ int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm) + urb->interval = LINE6_ISO_INTERVAL; + urb->error_count = 0; + urb->complete = audio_out_callback; ++ if (usb_urb_ep_type_check(urb)) ++ return -EINVAL; + } + + return 0; +diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c +index 17aa616e61f5..aaa192aee883 100644 +--- a/sound/usb/line6/pod.c ++++ b/sound/usb/line6/pod.c +@@ -421,11 +421,6 @@ static int pod_init(struct usb_line6 *line6, + if (err < 0) + return err; + +- /* initialize MIDI subsystem: */ +- err = line6_init_midi(line6); +- if (err < 0) +- return err; +- + /* initialize PCM subsystem: */ + err = line6_init_pcm(line6, &pod_pcm_properties); + if (err < 0) +diff --git a/sound/usb/line6/variax.c b/sound/usb/line6/variax.c +index 0c4512d0382e..a911cff0cec8 100644 +--- a/sound/usb/line6/variax.c ++++ b/sound/usb/line6/variax.c +@@ -217,7 +217,6 @@ static int variax_init(struct usb_line6 *line6, + const struct usb_device_id *id) + { + struct usb_line6_variax *variax = (struct usb_line6_variax *) line6; +- int err; + + line6->process_message = line6_variax_process_message; + line6->disconnect = line6_variax_disconnect; +@@ -233,11 +232,6 @@ static int variax_init(struct usb_line6 *line6, + if (variax->buffer_activate == NULL) + return -ENOMEM; + +- /* initialize MIDI subsystem: */ +- err = line6_init_midi(&variax->line6); +- if (err < 0) +- return err; +- + /* initiate startup procedure: 
*/ + variax_startup1(variax); + return 0; +diff --git a/sound/usb/midi.c b/sound/usb/midi.c +index 7ba92921bf28..f0b41fee7130 100644 +--- a/sound/usb/midi.c ++++ b/sound/usb/midi.c +@@ -1477,6 +1477,8 @@ void snd_usbmidi_disconnect(struct list_head *p) + spin_unlock_irq(&umidi->disc_lock); + up_write(&umidi->disc_rwsem); + ++ del_timer_sync(&umidi->error_timer); ++ + for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) { + struct snd_usb_midi_endpoint *ep = &umidi->endpoints[i]; + if (ep->out) +@@ -1503,7 +1505,6 @@ void snd_usbmidi_disconnect(struct list_head *p) + ep->in = NULL; + } + } +- del_timer_sync(&umidi->error_timer); + } + EXPORT_SYMBOL(snd_usbmidi_disconnect); + +@@ -1804,6 +1805,28 @@ static int snd_usbmidi_create_endpoints(struct snd_usb_midi *umidi, + return 0; + } + ++static struct usb_ms_endpoint_descriptor *find_usb_ms_endpoint_descriptor( ++ struct usb_host_endpoint *hostep) ++{ ++ unsigned char *extra = hostep->extra; ++ int extralen = hostep->extralen; ++ ++ while (extralen > 3) { ++ struct usb_ms_endpoint_descriptor *ms_ep = ++ (struct usb_ms_endpoint_descriptor *)extra; ++ ++ if (ms_ep->bLength > 3 && ++ ms_ep->bDescriptorType == USB_DT_CS_ENDPOINT && ++ ms_ep->bDescriptorSubtype == UAC_MS_GENERAL) ++ return ms_ep; ++ if (!extra[0]) ++ break; ++ extralen -= extra[0]; ++ extra += extra[0]; ++ } ++ return NULL; ++} ++ + /* + * Returns MIDIStreaming device capabilities. + */ +@@ -1841,11 +1864,14 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi, + ep = get_ep_desc(hostep); + if (!usb_endpoint_xfer_bulk(ep) && !usb_endpoint_xfer_int(ep)) + continue; +- ms_ep = (struct usb_ms_endpoint_descriptor *)hostep->extra; +- if (hostep->extralen < 4 || +- ms_ep->bLength < 4 || +- ms_ep->bDescriptorType != USB_DT_CS_ENDPOINT || +- ms_ep->bDescriptorSubtype != UAC_MS_GENERAL) ++ ms_ep = find_usb_ms_endpoint_descriptor(hostep); ++ if (!ms_ep) ++ continue; ++ if (ms_ep->bLength <= sizeof(*ms_ep)) ++ continue; ++ if (ms_ep->bNumEmbMIDIJack > 0x10) ++ continue; ++ if (ms_ep->bLength < sizeof(*ms_ep) + ms_ep->bNumEmbMIDIJack) + continue; + if (usb_endpoint_dir_out(ep)) { + if (endpoints[epidx].out_ep) { +@@ -2099,6 +2125,8 @@ static int snd_usbmidi_detect_roland(struct snd_usb_midi *umidi, + cs_desc[1] == USB_DT_CS_INTERFACE && + cs_desc[2] == 0xf1 && + cs_desc[3] == 0x02) { ++ if (cs_desc[4] > 0x10 || cs_desc[5] > 0x10) ++ continue; + endpoint->in_cables = (1 << cs_desc[4]) - 1; + endpoint->out_cables = (1 << cs_desc[5]) - 1; + return snd_usbmidi_detect_endpoints(umidi, endpoint, 1); +@@ -2260,16 +2288,22 @@ void snd_usbmidi_input_stop(struct list_head *p) + } + EXPORT_SYMBOL(snd_usbmidi_input_stop); + +-static void snd_usbmidi_input_start_ep(struct snd_usb_midi_in_endpoint *ep) ++static void snd_usbmidi_input_start_ep(struct snd_usb_midi *umidi, ++ struct snd_usb_midi_in_endpoint *ep) + { + unsigned int i; ++ unsigned long flags; + + if (!ep) + return; + for (i = 0; i < INPUT_URBS; ++i) { + struct urb *urb = ep->urbs[i]; +- urb->dev = ep->umidi->dev; +- snd_usbmidi_submit_urb(urb, GFP_KERNEL); ++ spin_lock_irqsave(&umidi->disc_lock, flags); ++ if (!atomic_read(&urb->use_count)) { ++ urb->dev = ep->umidi->dev; ++ snd_usbmidi_submit_urb(urb, GFP_ATOMIC); ++ } ++ spin_unlock_irqrestore(&umidi->disc_lock, flags); + } + } + +@@ -2285,7 +2319,7 @@ void snd_usbmidi_input_start(struct list_head *p) + if (umidi->input_running || !umidi->opened[1]) + return; + for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) +- snd_usbmidi_input_start_ep(umidi->endpoints[i].in); ++ snd_usbmidi_input_start_ep(umidi, 
umidi->endpoints[i].in); + umidi->input_running = 1; + } + EXPORT_SYMBOL(snd_usbmidi_input_start); +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c +index 816d6036eb59..ab7422672316 100755 +--- a/sound/usb/mixer.c ++++ b/sound/usb/mixer.c +@@ -597,8 +597,9 @@ static int check_matrix_bitmap(unsigned char *bmap, + * if failed, give up and free the control instance. + */ + +-int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list, +- struct snd_kcontrol *kctl) ++int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list, ++ struct snd_kcontrol *kctl, ++ bool is_std_info) + { + struct usb_mixer_interface *mixer = list->mixer; + int err; +@@ -611,6 +612,7 @@ int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list, + return err; + } + list->kctl = kctl; ++ list->is_std_info = is_std_info; + list->next_id_elem = mixer->id_elems[list->id]; + mixer->id_elems[list->id] = list; + return 0; +@@ -2763,15 +2765,23 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid) + { + struct usb_mixer_elem_list *list; + +- for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem) ++ for_each_mixer_elem(list, mixer, unitid) { ++ struct usb_mixer_elem_info *info; ++ ++ if (!list->is_std_info) ++ continue; ++ info = mixer_elem_list_to_info(list); ++ /* invalidate cache, so the value is read from the device */ ++ info->cached = 0; + snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE, + &list->kctl->id); ++ } + } + + static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer, + struct usb_mixer_elem_list *list) + { +- struct usb_mixer_elem_info *cval = (struct usb_mixer_elem_info *)list; ++ struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list); + static char *val_types[] = {"BOOLEAN", "INV_BOOLEAN", + "S8", "U8", "S16", "U16"}; + snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, " +@@ -2797,8 +2807,7 @@ static void snd_usb_mixer_proc_read(struct snd_info_entry *entry, + mixer->ignore_ctl_error); + snd_iprintf(buffer, "Card: %s\n", chip->card->longname); + for (unitid = 0; unitid < MAX_ID_ELEMS; unitid++) { +- for (list = mixer->id_elems[unitid]; list; +- list = list->next_id_elem) { ++ for_each_mixer_elem(list, mixer, unitid) { + snd_iprintf(buffer, " Unit: %i\n", list->id); + if (list->kctl) + snd_iprintf(buffer, +@@ -2828,19 +2837,21 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer, + return; + } + +- for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem) ++ for_each_mixer_elem(list, mixer, unitid) + count++; + + if (count == 0) + return; + +- for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem) { ++ for_each_mixer_elem(list, mixer, unitid) { + struct usb_mixer_elem_info *info; + + if (!list->kctl) + continue; ++ if (!list->is_std_info) ++ continue; + +- info = (struct usb_mixer_elem_info *)list; ++ info = mixer_elem_list_to_info(list); + if (count > 1 && info->control != control) + continue; + +@@ -3063,7 +3074,7 @@ int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer) + + static int restore_mixer_value(struct usb_mixer_elem_list *list) + { +- struct usb_mixer_elem_info *cval = (struct usb_mixer_elem_info *)list; ++ struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list); + int c, err, idx; + + if (cval->cmask) { +@@ -3099,8 +3110,7 @@ int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume) + if (reset_resume) { + /* restore cached mixer values */ + for (id = 0; id < MAX_ID_ELEMS; id++) { +- for (list = mixer->id_elems[id]; list; 
+- list = list->next_id_elem) { ++ for_each_mixer_elem(list, mixer, id) { + if (list->resume) { + err = list->resume(list); + if (err < 0) +diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h +index 545d99b09706..7d16a9221070 100644 +--- a/sound/usb/mixer.h ++++ b/sound/usb/mixer.h +@@ -48,10 +48,17 @@ struct usb_mixer_elem_list { + struct usb_mixer_elem_list *next_id_elem; /* list of controls with same id */ + struct snd_kcontrol *kctl; + unsigned int id; ++ bool is_std_info; + usb_mixer_elem_dump_func_t dump; + usb_mixer_elem_resume_func_t resume; + }; + ++/* iterate over mixer element list of the given unit id */ ++#define for_each_mixer_elem(list, mixer, id) \ ++ for ((list) = (mixer)->id_elems[id]; (list); (list) = (list)->next_id_elem) ++#define mixer_elem_list_to_info(list) \ ++ container_of(list, struct usb_mixer_elem_info, head) ++ + struct usb_mixer_elem_info { + struct usb_mixer_elem_list head; + unsigned int control; /* CS or ICN (high byte) */ +@@ -79,8 +86,12 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid); + int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval, + int request, int validx, int value_set); + +-int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list, +- struct snd_kcontrol *kctl); ++int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list, ++ struct snd_kcontrol *kctl, ++ bool is_std_info); ++ ++#define snd_usb_mixer_add_control(list, kctl) \ ++ snd_usb_mixer_add_list(list, kctl, true) + + void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list, + struct usb_mixer_interface *mixer, +diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c +index 723b535ca2ec..1f2c69e599d9 100644 +--- a/sound/usb/mixer_quirks.c ++++ b/sound/usb/mixer_quirks.c +@@ -168,7 +168,8 @@ static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer, + return -ENOMEM; + } + kctl->private_free = snd_usb_mixer_elem_free; +- return snd_usb_mixer_add_control(list, kctl); ++ /* don't use snd_usb_mixer_add_control() here, this is a special list element */ ++ return snd_usb_mixer_add_list(list, kctl, false); + } + + /* +@@ -194,6 +195,7 @@ static const struct rc_config { + { USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */ + { USB_ID(0x041e, 0x30df), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ + { USB_ID(0x041e, 0x3237), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ ++ { USB_ID(0x041e, 0x3263), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ + { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */ + }; + +@@ -1170,7 +1172,7 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip, + int unitid = 12; /* SamleRate ExtensionUnit ID */ + + list_for_each_entry(mixer, &chip->mixer_list, list) { +- cval = (struct usb_mixer_elem_info *)mixer->id_elems[unitid]; ++ cval = mixer_elem_list_to_info(mixer->id_elems[unitid]); + if (cval) { + snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, + cval->control << 8, +diff --git a/sound/usb/mixer_scarlett.c b/sound/usb/mixer_scarlett.c +index 7438e7c4a842..2876cd9b35b3 100644 +--- a/sound/usb/mixer_scarlett.c ++++ b/sound/usb/mixer_scarlett.c +@@ -287,8 +287,7 @@ static int scarlett_ctl_switch_put(struct snd_kcontrol *kctl, + + static int scarlett_ctl_resume(struct usb_mixer_elem_list *list) + { +- struct usb_mixer_elem_info *elem = +- container_of(list, struct usb_mixer_elem_info, head); ++ struct usb_mixer_elem_info *elem = mixer_elem_list_to_info(list); + int i; + + for (i = 0; i < elem->channels; i++) +@@ -447,8 +446,7 @@ static int 
scarlett_ctl_enum_put(struct snd_kcontrol *kctl, + + static int scarlett_ctl_enum_resume(struct usb_mixer_elem_list *list) + { +- struct usb_mixer_elem_info *elem = +- container_of(list, struct usb_mixer_elem_info, head); ++ struct usb_mixer_elem_info *elem = mixer_elem_list_to_info(list); + + if (elem->cached) + snd_usb_set_cur_mix_value(elem, 0, 0, *elem->cache_val); +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c +index adee1c0e2bdf..d7d69956855c 100644 +--- a/sound/usb/pcm.c ++++ b/sound/usb/pcm.c +@@ -326,6 +326,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, + struct usb_host_interface *alts; + struct usb_interface *iface; + unsigned int ep; ++ unsigned int ifnum; + + /* Implicit feedback sync EPs consumers are always playback EPs */ + if (subs->direction != SNDRV_PCM_STREAM_PLAYBACK) +@@ -334,45 +335,25 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, + switch (subs->stream->chip->usb_id) { + case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */ + case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */ ++ case USB_ID(0x22f0, 0x0006): /* Allen&Heath Qu-16 */ + ep = 0x81; +- iface = usb_ifnum_to_if(dev, 3); +- +- if (!iface || iface->num_altsetting == 0) +- return -EINVAL; +- +- alts = &iface->altsetting[1]; +- goto add_sync_ep; +- break; ++ ifnum = 3; ++ goto add_sync_ep_from_ifnum; + case USB_ID(0x0763, 0x2080): /* M-Audio FastTrack Ultra */ + case USB_ID(0x0763, 0x2081): + ep = 0x81; +- iface = usb_ifnum_to_if(dev, 2); +- +- if (!iface || iface->num_altsetting == 0) +- return -EINVAL; +- +- alts = &iface->altsetting[1]; +- goto add_sync_ep; +- case USB_ID(0x2466, 0x8003): ++ ifnum = 2; ++ goto add_sync_ep_from_ifnum; ++ case USB_ID(0x2466, 0x8003): /* Fractal Audio Axe-Fx II */ + ep = 0x86; +- iface = usb_ifnum_to_if(dev, 2); +- +- if (!iface || iface->num_altsetting == 0) +- return -EINVAL; +- +- alts = &iface->altsetting[1]; +- goto add_sync_ep; +- case USB_ID(0x1397, 0x0002): ++ ifnum = 2; ++ goto add_sync_ep_from_ifnum; ++ case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */ + ep = 0x81; +- iface = usb_ifnum_to_if(dev, 1); +- +- if (!iface || iface->num_altsetting == 0) +- return -EINVAL; +- +- alts = &iface->altsetting[1]; +- goto add_sync_ep; +- ++ ifnum = 1; ++ goto add_sync_ep_from_ifnum; + } ++ + if (attr == USB_ENDPOINT_SYNC_ASYNC && + altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC && + altsd->bInterfaceProtocol == 2 && +@@ -387,6 +368,14 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, + /* No quirk */ + return 0; + ++add_sync_ep_from_ifnum: ++ iface = usb_ifnum_to_if(dev, ifnum); ++ ++ if (!iface || iface->num_altsetting < 2) ++ return -EINVAL; ++ ++ alts = &iface->altsetting[1]; ++ + add_sync_ep: + subs->sync_endpoint = snd_usb_add_endpoint(subs->stream->chip, + alts, ep, !subs->direction, +@@ -1381,6 +1370,12 @@ static void retire_capture_urb(struct snd_usb_substream *subs, + // continue; + } + bytes = urb->iso_frame_desc[i].actual_length; ++ if (subs->stream_offset_adj > 0) { ++ unsigned int adj = min(subs->stream_offset_adj, bytes); ++ cp += adj; ++ bytes -= adj; ++ subs->stream_offset_adj -= adj; ++ } + frames = bytes / stride; + if (!subs->txfr_quirk) + bytes = frames * stride; +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h +index c892b4d1e733..d3d3e05fe5b8 100644 +--- a/sound/usb/quirks-table.h ++++ b/sound/usb/quirks-table.h +@@ -2479,6 +2479,16 @@ YAMAHA_DEVICE(0x7010, "UB99"), + } + }, + ++{ ++ USB_DEVICE_VENDOR_SPEC(0x0944, 0x0204), ++ .driver_info 
= (unsigned long) & (const struct snd_usb_audio_quirk) { ++ .vendor_name = "KORG, Inc.", ++ /* .product_name = "ToneLab EX", */ ++ .ifnum = 3, ++ .type = QUIRK_MIDI_STANDARD_INTERFACE, ++ } ++}, ++ + /* AKAI devices */ + { + USB_DEVICE(0x09e8, 0x0062), +@@ -3323,4 +3333,118 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), + } + }, + ++/* ++ * MacroSilicon MS2109 based HDMI capture cards ++ * ++ * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch. ++ * They also need QUIRK_AUDIO_ALIGN_TRANSFER, which makes one wonder if ++ * they pretend to be 96kHz mono as a workaround for stereo being broken ++ * by that... ++ * ++ * They also have an issue with initial stream alignment that causes the ++ * channels to be swapped and out of phase, which is dealt with in quirks.c. ++ */ ++{ ++ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | ++ USB_DEVICE_ID_MATCH_INT_CLASS | ++ USB_DEVICE_ID_MATCH_INT_SUBCLASS, ++ .idVendor = 0x534d, ++ .idProduct = 0x2109, ++ .bInterfaceClass = USB_CLASS_AUDIO, ++ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, ++ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { ++ .vendor_name = "MacroSilicon", ++ .product_name = "MS2109", ++ .ifnum = QUIRK_ANY_INTERFACE, ++ .type = QUIRK_COMPOSITE, ++ .data = &(const struct snd_usb_audio_quirk[]) { ++ { ++ .ifnum = 2, ++ .type = QUIRK_AUDIO_ALIGN_TRANSFER, ++ }, ++ { ++ .ifnum = 2, ++ .type = QUIRK_AUDIO_STANDARD_MIXER, ++ }, ++ { ++ .ifnum = 3, ++ .type = QUIRK_AUDIO_FIXED_ENDPOINT, ++ .data = &(const struct audioformat) { ++ .formats = SNDRV_PCM_FMTBIT_S16_LE, ++ .channels = 2, ++ .iface = 3, ++ .altsetting = 1, ++ .altset_idx = 1, ++ .attributes = 0, ++ .endpoint = 0x82, ++ .ep_attr = USB_ENDPOINT_XFER_ISOC | ++ USB_ENDPOINT_SYNC_ASYNC, ++ .rates = SNDRV_PCM_RATE_CONTINUOUS, ++ .rate_min = 48000, ++ .rate_max = 48000, ++ } ++ }, ++ { ++ .ifnum = -1 ++ } ++ } ++ } ++}, ++{ ++ /* ++ * PIONEER DJ DDJ-RB ++ * PCM is 4 channels out, 2 dummy channels in @ 44.1 fixed ++ * The feedback for the output is the dummy input. 
++ */ ++ USB_DEVICE_VENDOR_SPEC(0x2b73, 0x000e), ++ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { ++ .ifnum = QUIRK_ANY_INTERFACE, ++ .type = QUIRK_COMPOSITE, ++ .data = (const struct snd_usb_audio_quirk[]) { ++ { ++ .ifnum = 0, ++ .type = QUIRK_AUDIO_FIXED_ENDPOINT, ++ .data = &(const struct audioformat) { ++ .formats = SNDRV_PCM_FMTBIT_S24_3LE, ++ .channels = 4, ++ .iface = 0, ++ .altsetting = 1, ++ .altset_idx = 1, ++ .endpoint = 0x01, ++ .ep_attr = USB_ENDPOINT_XFER_ISOC| ++ USB_ENDPOINT_SYNC_ASYNC, ++ .rates = SNDRV_PCM_RATE_44100, ++ .rate_min = 44100, ++ .rate_max = 44100, ++ .nr_rates = 1, ++ .rate_table = (unsigned int[]) { 44100 } ++ } ++ }, ++ { ++ .ifnum = 0, ++ .type = QUIRK_AUDIO_FIXED_ENDPOINT, ++ .data = &(const struct audioformat) { ++ .formats = SNDRV_PCM_FMTBIT_S24_3LE, ++ .channels = 2, ++ .iface = 0, ++ .altsetting = 1, ++ .altset_idx = 1, ++ .endpoint = 0x82, ++ .ep_attr = USB_ENDPOINT_XFER_ISOC| ++ USB_ENDPOINT_SYNC_ASYNC| ++ USB_ENDPOINT_USAGE_IMPLICIT_FB, ++ .rates = SNDRV_PCM_RATE_44100, ++ .rate_min = 44100, ++ .rate_max = 44100, ++ .nr_rates = 1, ++ .rate_table = (unsigned int[]) { 44100 } ++ } ++ }, ++ { ++ .ifnum = -1 ++ } ++ } ++ } ++}, ++ + #undef USB_DEVICE_VENDOR_SPEC +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c +index 0484a8d8a5bb..02b4d0638e00 100644 +--- a/sound/usb/quirks.c ++++ b/sound/usb/quirks.c +@@ -66,8 +66,12 @@ static int create_composite_quirk(struct snd_usb_audio *chip, + if (!iface) + continue; + if (quirk->ifnum != probed_ifnum && +- !usb_interface_claimed(iface)) +- usb_driver_claim_interface(driver, iface, (void *)-1L); ++ !usb_interface_claimed(iface)) { ++ err = usb_driver_claim_interface(driver, iface, ++ USB_AUDIO_IFACE_UNUSED); ++ if (err < 0) ++ return err; ++ } + } + + return 0; +@@ -399,8 +403,12 @@ static int create_autodetect_quirks(struct snd_usb_audio *chip, + continue; + + err = create_autodetect_quirk(chip, iface, driver); +- if (err >= 0) +- usb_driver_claim_interface(driver, iface, (void *)-1L); ++ if (err >= 0) { ++ err = usb_driver_claim_interface(driver, iface, ++ USB_AUDIO_IFACE_UNUSED); ++ if (err < 0) ++ return err; ++ } + } + + return 0; +@@ -1121,6 +1129,9 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs, + case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */ + set_format_emu_quirk(subs, fmt); + break; ++ case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */ ++ subs->stream_offset_adj = 2; ++ break; + } + } + +@@ -1150,6 +1161,8 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) + case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */ + case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */ + case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */ ++ case USB_ID(0x413c, 0xa506): /* Dell AE515 sound bar */ ++ case USB_ID(0x046d, 0x084c): /* Logitech ConferenceCam Connect */ + return true; + } + return false; +@@ -1162,6 +1175,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) + static bool is_itf_usb_dsd_2alts_dac(unsigned int id) + { + switch (id) { ++ case USB_ID(0x154e, 0x1002): /* Denon DCD-1500RE */ + case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */ + case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */ + case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */ +@@ -1316,12 +1330,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, + && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) + mdelay(20); + +- /* Zoom R16/24, Logitech H650e, Jabra 550a, Kingston HyperX needs a tiny +- * delay here, otherwise requests like 
get/set frequency return as +- * failed despite actually succeeding. ++ /* Zoom R16/24, Logitech H650e/H570e, Jabra 550a, Kingston HyperX ++ * needs a tiny delay here, otherwise requests like get/set ++ * frequency return as failed despite actually succeeding. + */ + if ((chip->usb_id == USB_ID(0x1686, 0x00dd) || + chip->usb_id == USB_ID(0x046d, 0x0a46) || ++ chip->usb_id == USB_ID(0x046d, 0x0a56) || + chip->usb_id == USB_ID(0x0b0e, 0x0349) || + chip->usb_id == USB_ID(0x0951, 0x16ad)) && + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) +diff --git a/sound/usb/stream.c b/sound/usb/stream.c +index 5bc84b460d72..cc0a656e24e8 100644 +--- a/sound/usb/stream.c ++++ b/sound/usb/stream.c +@@ -101,6 +101,7 @@ static void snd_usb_init_substream(struct snd_usb_stream *as, + subs->tx_length_quirk = as->chip->tx_length_quirk; + subs->speed = snd_usb_get_speed(subs->dev); + subs->pkt_offset_adj = 0; ++ subs->stream_offset_adj = 0; + + snd_usb_set_pcm_ops(as->pcm, stream); + +@@ -190,16 +191,16 @@ static int usb_chmap_ctl_get(struct snd_kcontrol *kcontrol, + struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol); + struct snd_usb_substream *subs = info->private_data; + struct snd_pcm_chmap_elem *chmap = NULL; +- int i; ++ int i = 0; + +- memset(ucontrol->value.integer.value, 0, +- sizeof(ucontrol->value.integer.value)); + if (subs->cur_audiofmt) + chmap = subs->cur_audiofmt->chmap; + if (chmap) { + for (i = 0; i < chmap->channels; i++) + ucontrol->value.integer.value[i] = chmap->map[i]; + } ++ for (; i < subs->channels_max; i++) ++ ucontrol->value.integer.value[i] = 0; + return 0; + } + +diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h +index 93c4bed7b128..320a36dacb3d 100644 +--- a/sound/usb/usbaudio.h ++++ b/sound/usb/usbaudio.h +@@ -37,7 +37,7 @@ struct snd_usb_audio { + struct usb_interface *pm_intf; + u32 usb_id; + struct mutex mutex; +- unsigned int autosuspended:1; ++ unsigned int system_suspend; + atomic_t active; + atomic_t shutdown; + atomic_t usage_count; +@@ -67,6 +67,8 @@ struct snd_usb_audio { + void (*disconnect_cb)(struct snd_usb_audio *chip); + }; + ++#define USB_AUDIO_IFACE_UNUSED ((void *)-1L) ++ + #define usb_audio_err(chip, fmt, args...) \ + dev_err(&(chip)->dev->dev, fmt, ##args) + #define usb_audio_warn(chip, fmt, args...) \ +diff --git a/tools/arch/ia64/include/asm/barrier.h b/tools/arch/ia64/include/asm/barrier.h +index e4422b4b634e..94ae4a333a35 100644 +--- a/tools/arch/ia64/include/asm/barrier.h ++++ b/tools/arch/ia64/include/asm/barrier.h +@@ -38,9 +38,6 @@ + * sequential memory pages only. + */ + +-/* XXX From arch/ia64/include/uapi/asm/gcc_intrin.h */ +-#define ia64_mf() asm volatile ("mf" ::: "memory") +- + #define mb() ia64_mf() + #define rmb() mb() + #define wmb() mb() +diff --git a/tools/gpio/gpio-hammer.c b/tools/gpio/gpio-hammer.c +index 37b3f141053d..85f45800f881 100644 +--- a/tools/gpio/gpio-hammer.c ++++ b/tools/gpio/gpio-hammer.c +@@ -171,7 +171,14 @@ int main(int argc, char **argv) + device_name = optarg; + break; + case 'o': +- lines[i] = strtoul(optarg, NULL, 10); ++ /* ++ * Avoid overflow. 
Do not immediately error, we want to ++ * be able to accurately report on the amount of times ++ * '-o' was given to give an accurate error message ++ */ ++ if (i < GPIOHANDLES_MAX) ++ lines[i] = strtoul(optarg, NULL, 10); ++ + i++; + break; + case '?': +@@ -179,6 +186,14 @@ int main(int argc, char **argv) + return -1; + } + } ++ ++ if (i >= GPIOHANDLES_MAX) { ++ fprintf(stderr, ++ "Only %d occurences of '-o' are allowed, %d were found\n", ++ GPIOHANDLES_MAX, i + 1); ++ return -1; ++ } ++ + nlines = i; + + if (!device_name || !nlines) { +diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat +index 581278c58488..5e5797cc3757 100755 +--- a/tools/kvm/kvm_stat/kvm_stat ++++ b/tools/kvm/kvm_stat/kvm_stat +@@ -1,4 +1,4 @@ +-#!/usr/bin/python ++#!/usr/bin/env python + # + # top-like utility for displaying kvm statistics + # +diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c +index 62f4cacf253a..b9db8739487d 100644 +--- a/tools/lib/traceevent/event-parse.c ++++ b/tools/lib/traceevent/event-parse.c +@@ -2764,6 +2764,7 @@ process_dynamic_array_len(struct event_format *event, struct print_arg *arg, + if (read_expected(EVENT_DELIM, ")") < 0) + goto out_err; + ++ free_token(token); + type = read_token(&token); + *tok = token; + +diff --git a/tools/objtool/arch/x86/include/asm/insn.h b/tools/objtool/arch/x86/include/asm/insn.h +index b3e32b010ab1..b56241a44639 100644 +--- a/tools/objtool/arch/x86/include/asm/insn.h ++++ b/tools/objtool/arch/x86/include/asm/insn.h +@@ -208,4 +208,19 @@ static inline int insn_offset_immediate(struct insn *insn) + return insn_offset_displacement(insn) + insn->displacement.nbytes; + } + ++/** ++ * for_each_insn_prefix() -- Iterate prefixes in the instruction ++ * @insn: Pointer to struct insn. ++ * @idx: Index storage. ++ * @prefix: Prefix byte. ++ * ++ * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix ++ * and the index is stored in @idx (note that this @idx is just for a cursor, ++ * do not change it.) ++ * Since prefixes.nbytes can be bigger than 4 if some prefixes ++ * are repeated, it cannot be used for looping over the prefixes. 
++ */ ++#define for_each_insn_prefix(insn, idx, prefix) \ ++ for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++) ++ + #endif /* _ASM_X86_INSN_H */ +diff --git a/tools/objtool/check.c b/tools/objtool/check.c +index b0b8ba9b800c..31c512f19662 100644 +--- a/tools/objtool/check.c ++++ b/tools/objtool/check.c +@@ -502,7 +502,7 @@ static int add_jump_destinations(struct objtool_file *file) + insn->type != INSN_JUMP_UNCONDITIONAL) + continue; + +- if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET) ++ if (insn->offset == FAKE_JUMP_OFFSET) + continue; + + rela = find_rela_by_dest_range(insn->sec, insn->offset, +@@ -778,6 +778,12 @@ static int add_special_section_alts(struct objtool_file *file) + } + + if (special_alt->group) { ++ if (!special_alt->orig_len) { ++ WARN_FUNC("empty alternative entry", ++ orig_insn->sec, orig_insn->offset); ++ continue; ++ } ++ + ret = handle_group_alt(file, special_alt, orig_insn, + &new_insn); + if (ret) +diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c +index d84c28eac262..0ba5bb51bd93 100644 +--- a/tools/objtool/elf.c ++++ b/tools/objtool/elf.c +@@ -226,8 +226,11 @@ static int read_symbols(struct elf *elf) + + symtab = find_section_by_name(elf, ".symtab"); + if (!symtab) { +- WARN("missing symbol table"); +- return -1; ++ /* ++ * A missing symbol table is actually possible if it's an empty ++ * .o file. This can happen for thunk_64.o. ++ */ ++ return 0; + } + + symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize; +diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt +index 92335193dc33..d443ca3abf27 100644 +--- a/tools/perf/Documentation/perf-record.txt ++++ b/tools/perf/Documentation/perf-record.txt +@@ -33,6 +33,10 @@ OPTIONS + - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a + hexadecimal event descriptor. + ++ - a symbolic or raw PMU event followed by an optional colon ++ and a list of event modifiers, e.g., cpu-cycles:p. See the ++ linkperf:perf-list[1] man page for details on event modifiers. ++ + - a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where + 'param1', 'param2', etc are defined as formats for the PMU in + /sys/bus/event_source/devices//format/*. +diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt +index d96ccd4844df..b099ac1de854 100644 +--- a/tools/perf/Documentation/perf-stat.txt ++++ b/tools/perf/Documentation/perf-stat.txt +@@ -39,6 +39,10 @@ report:: + - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a + hexadecimal event descriptor. + ++ - a symbolic or raw PMU event followed by an optional colon ++ and a list of event modifiers, e.g., cpu-cycles:p. See the ++ linkperf:perf-list[1] man page for details on event modifiers. 
++ + - a symbolically formed event like 'pmu/param1=0x3,param2/' where + param1 and param2 are defined as formats for the PMU in + /sys/bus/event_sources/devices//format/* +diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c +index ce3bfb48b26f..956b0aa916cf 100644 +--- a/tools/perf/builtin-lock.c ++++ b/tools/perf/builtin-lock.c +@@ -616,7 +616,7 @@ static int report_lock_release_event(struct perf_evsel *evsel, + case SEQ_STATE_READ_ACQUIRED: + seq->read_count--; + BUG_ON(seq->read_count < 0); +- if (!seq->read_count) { ++ if (seq->read_count) { + ls->nr_release++; + goto end; + } +diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c +index 2b420e7a92c0..929f0d0ea9da 100644 +--- a/tools/perf/builtin-probe.c ++++ b/tools/perf/builtin-probe.c +@@ -344,6 +344,9 @@ static int perf_add_probe_events(struct perf_probe_event *pevs, int npevs) + + for (k = 0; k < pev->ntevs; k++) { + struct probe_trace_event *tev = &pev->tevs[k]; ++ /* Skipped events have no event name */ ++ if (!tev->event) ++ continue; + + /* We use tev's name for showing new events */ + show_perf_probe_event(tev->group, tev->event, pev, +diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c +index 0abca8783bb3..78485edb9467 100644 +--- a/tools/perf/builtin-report.c ++++ b/tools/perf/builtin-report.c +@@ -341,8 +341,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report + if (evname != NULL) + ret += fprintf(fp, " of event '%s'", evname); + +- if (symbol_conf.show_ref_callgraph && +- strstr(evname, "call-graph=no")) { ++ if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) { + ret += fprintf(fp, ", show reference callgraph"); + } + +diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c +index 7228d141a789..676568286eef 100644 +--- a/tools/perf/builtin-script.c ++++ b/tools/perf/builtin-script.c +@@ -1516,7 +1516,7 @@ static int is_directory(const char *base_path, const struct dirent *dent) + char path[PATH_MAX]; + struct stat st; + +- sprintf(path, "%s/%s", base_path, dent->d_name); ++ scnprintf(path, PATH_MAX, "%s/%s", base_path, dent->d_name); + if (stat(path, &st)) + return 0; + +@@ -1702,8 +1702,8 @@ static int list_available_scripts(const struct option *opt __maybe_unused, + } + + for_each_lang(scripts_path, scripts_dir, lang_dirent) { +- snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path, +- lang_dirent->d_name); ++ scnprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path, ++ lang_dirent->d_name); + lang_dir = opendir(lang_path); + if (!lang_dir) + continue; +@@ -1712,8 +1712,8 @@ static int list_available_scripts(const struct option *opt __maybe_unused, + script_root = get_script_root(script_dirent, REPORT_SUFFIX); + if (script_root) { + desc = script_desc__findnew(script_root); +- snprintf(script_path, MAXPATHLEN, "%s/%s", +- lang_path, script_dirent->d_name); ++ scnprintf(script_path, MAXPATHLEN, "%s/%s", ++ lang_path, script_dirent->d_name); + read_script_info(desc, script_path); + free(script_root); + } +@@ -1749,7 +1749,7 @@ static int check_ev_match(char *dir_name, char *scriptname, + int match, len; + FILE *fp; + +- sprintf(filename, "%s/bin/%s-record", dir_name, scriptname); ++ scnprintf(filename, MAXPATHLEN, "%s/bin/%s-record", dir_name, scriptname); + + fp = fopen(filename, "r"); + if (!fp) +@@ -1825,8 +1825,8 @@ int find_scripts(char **scripts_array, char **scripts_path_array) + } + + for_each_lang(scripts_path, scripts_dir, lang_dirent) { +- snprintf(lang_path, MAXPATHLEN, 
"%s/%s", scripts_path, +- lang_dirent->d_name); ++ scnprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path, ++ lang_dirent->d_name); + #ifdef NO_LIBPERL + if (strstr(lang_path, "perl")) + continue; +@@ -1881,8 +1881,8 @@ static char *get_script_path(const char *script_root, const char *suffix) + return NULL; + + for_each_lang(scripts_path, scripts_dir, lang_dirent) { +- snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path, +- lang_dirent->d_name); ++ scnprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path, ++ lang_dirent->d_name); + lang_dir = opendir(lang_path); + if (!lang_dir) + continue; +@@ -1893,8 +1893,8 @@ static char *get_script_path(const char *script_root, const char *suffix) + free(__script_root); + closedir(lang_dir); + closedir(scripts_dir); +- snprintf(script_path, MAXPATHLEN, "%s/%s", +- lang_path, script_dirent->d_name); ++ scnprintf(script_path, MAXPATHLEN, "%s/%s", ++ lang_path, script_dirent->d_name); + return strdup(script_path); + } + free(__script_root); +diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c +index cd2900ac473f..280448114add 100644 +--- a/tools/perf/builtin-top.c ++++ b/tools/perf/builtin-top.c +@@ -633,7 +633,9 @@ static void *display_thread(void *arg) + delay_msecs = top->delay_secs * MSEC_PER_SEC; + set_term_quiet_input(&save); + /* trash return*/ +- getc(stdin); ++ clearerr(stdin); ++ if (poll(&stdin_poll, 1, 0) > 0) ++ getc(stdin); + + while (!done) { + perf_top__print_sym_table(top); +diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c +index 0619054bd7a0..61fe3ce5862d 100644 +--- a/tools/perf/pmu-events/jevents.c ++++ b/tools/perf/pmu-events/jevents.c +@@ -603,7 +603,7 @@ static int get_maxfds(void) + struct rlimit rlim; + + if (getrlimit(RLIMIT_NOFILE, &rlim) == 0) +- return min((int)rlim.rlim_max / 2, 512); ++ return min(rlim.rlim_max / 2, (rlim_t)512); + + return 512; + } +diff --git a/tools/perf/python/tracepoint.py b/tools/perf/python/tracepoint.py +index eb4dbed57de7..ce273c8b512b 100755 +--- a/tools/perf/python/tracepoint.py ++++ b/tools/perf/python/tracepoint.py +@@ -1,4 +1,4 @@ +-#! /usr/bin/python ++#! /usr/bin/env python + # -*- python -*- + # -*- coding: utf-8 -*- + +diff --git a/tools/perf/python/twatch.py b/tools/perf/python/twatch.py +index c235c22b107a..5a55b25f0b8c 100755 +--- a/tools/perf/python/twatch.py ++++ b/tools/perf/python/twatch.py +@@ -1,4 +1,4 @@ +-#! /usr/bin/python ++#! /usr/bin/env python + # -*- python -*- + # -*- coding: utf-8 -*- + # twatch - Experimental use of the perf python interface +diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py +index de66cb3b72c9..dd3e7ae2a1af 100644 +--- a/tools/perf/scripts/python/sched-migration.py ++++ b/tools/perf/scripts/python/sched-migration.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/python ++#!/usr/bin/env python + # + # Cpu task migration overview toy + # +diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c +index b60a6fd66517..a607d2a851ef 100644 +--- a/tools/perf/tests/attr.c ++++ b/tools/perf/tests/attr.c +@@ -147,8 +147,8 @@ static int run_dir(const char *d, const char *perf) + if (verbose) + vcnt++; + +- snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s", +- d, d, perf, vcnt, v); ++ scnprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s", ++ d, d, perf, vcnt, v); + + return system(cmd) ? 
TEST_FAIL : TEST_OK; + } +diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c +index 2673e86ed50f..900d8c25e9ab 100644 +--- a/tools/perf/tests/bpf.c ++++ b/tools/perf/tests/bpf.c +@@ -1,4 +1,5 @@ + #include ++#include + #include + #include + #include +@@ -231,6 +232,7 @@ static int __test__bpf(int idx) + bpf_testcase_table[idx].target_func, + bpf_testcase_table[idx].expect_result); + out: ++ free(obj_buf); + bpf__clear(); + return ret; + } +diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c +index 1e2ba2602930..e2fab5229ec0 100644 +--- a/tools/perf/tests/pmu.c ++++ b/tools/perf/tests/pmu.c +@@ -95,7 +95,7 @@ static char *test_format_dir_get(void) + struct test_format *format = &test_formats[i]; + FILE *file; + +- snprintf(name, PATH_MAX, "%s/%s", dir, format->name); ++ scnprintf(name, PATH_MAX, "%s/%s", dir, format->name); + + file = fopen(name, "w"); + if (!file) +@@ -169,6 +169,7 @@ int test__pmu(int subtest __maybe_unused) + ret = 0; + } while (0); + ++ perf_pmu__del_formats(&formats); + test_format_dir_put(format); + return ret; + } +diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c +index 5f23710b9fee..60e5348f0a43 100644 +--- a/tools/perf/tests/sample-parsing.c ++++ b/tools/perf/tests/sample-parsing.c +@@ -167,7 +167,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format) + .data = {1, 211, 212, 213}, + }; + u64 regs[64]; +- const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL}; ++ const u32 raw_data[] = {0x12345678, 0x0a0b0c0d, 0x11020304, 0x05060708, 0 }; + const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL}; + struct perf_sample sample = { + .ip = 101, +diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c +index 3336cbc6ec48..1d4807c46efd 100644 +--- a/tools/perf/util/annotate.c ++++ b/tools/perf/util/annotate.c +@@ -1302,7 +1302,7 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil + int symbol__disassemble(struct symbol *sym, struct map *map, size_t privsize) + { + struct dso *dso = map->dso; +- char command[PATH_MAX * 2]; ++ char *command; + FILE *file; + char symfs_filename[PATH_MAX]; + struct kcore_extract kce; +@@ -1364,7 +1364,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map, size_t privsize) + strcpy(symfs_filename, tmp); + } + +- snprintf(command, sizeof(command), ++ err = asprintf(&command, + "%s %s%s --start-address=0x%016" PRIx64 + " --stop-address=0x%016" PRIx64 + " -l -d %s %s -C %s 2>/dev/null|grep -v %s|expand", +@@ -1377,12 +1377,17 @@ int symbol__disassemble(struct symbol *sym, struct map *map, size_t privsize) + symbol_conf.annotate_src ? "-S" : "", + symfs_filename, symfs_filename); + ++ if (err < 0) { ++ pr_err("Failure allocating memory for the command to run\n"); ++ goto out_remove_tmp; ++ } ++ + pr_debug("Executing: %s\n", command); + + err = -1; + if (pipe(stdout_fd) < 0) { + pr_err("Failure creating the pipe to run %s\n", command); +- goto out_remove_tmp; ++ goto out_free_command; + } + + pid = fork(); +@@ -1409,7 +1414,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map, size_t privsize) + * If we were using debug info should retry with + * original binary. 
+ */ +- goto out_remove_tmp; ++ goto out_free_command; + } + + nline = 0; +@@ -1432,6 +1437,8 @@ int symbol__disassemble(struct symbol *sym, struct map *map, size_t privsize) + + fclose(file); + err = 0; ++out_free_command: ++ free(command); + out_remove_tmp: + close(stdout_fd[0]); + +@@ -1445,7 +1452,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map, size_t privsize) + + out_close_stdout: + close(stdout_fd[1]); +- goto out_remove_tmp; ++ goto out_free_command; + } + + static void insert_source_line(struct rb_root *root, struct source_line *src_line) +diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c +index b87221efdf7e..51fdec9273d7 100644 +--- a/tools/perf/util/auxtrace.c ++++ b/tools/perf/util/auxtrace.c +@@ -248,10 +248,6 @@ static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues, + queue->set = true; + queue->tid = buffer->tid; + queue->cpu = buffer->cpu; +- } else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) { +- pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n", +- queue->cpu, queue->tid, buffer->cpu, buffer->tid); +- return -EINVAL; + } + + buffer->buffer_nr = queues->next_buffer_nr++; +diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c +index 8fdee24725a7..5bc2b92ace6d 100644 +--- a/tools/perf/util/cgroup.c ++++ b/tools/perf/util/cgroup.c +@@ -64,7 +64,7 @@ static int open_cgroup(char *name) + if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1)) + return -1; + +- snprintf(path, PATH_MAX, "%s/%s", mnt, name); ++ scnprintf(path, PATH_MAX, "%s/%s", mnt, name); + + fd = open(path, O_RDONLY); + if (fd == -1) +diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c +index 8bec05365aae..9be7c95bd1e1 100644 +--- a/tools/perf/util/dso.c ++++ b/tools/perf/util/dso.c +@@ -19,6 +19,7 @@ char dso__symtab_origin(const struct dso *dso) + [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B', + [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f', + [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u', ++ [DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x', + [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o', + [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b', + [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd', +@@ -77,6 +78,21 @@ int dso__read_binary_type_filename(const struct dso *dso, + snprintf(filename + len, size - len, "%s", dso->long_name); + break; + ++ case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: ++ /* ++ * Ubuntu can mixup /usr/lib with /lib, putting debuginfo in ++ * /usr/lib/debug/lib when it is expected to be in ++ * /usr/lib/debug/usr/lib ++ */ ++ if (strlen(dso->long_name) < 9 || ++ strncmp(dso->long_name, "/usr/lib/", 9)) { ++ ret = -1; ++ break; ++ } ++ len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); ++ snprintf(filename + len, size - len, "%s", dso->long_name + 4); ++ break; ++ + case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: + { + const char *last_slash; +diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h +index ecc4bbd3f82e..b886720ffea0 100644 +--- a/tools/perf/util/dso.h ++++ b/tools/perf/util/dso.h +@@ -22,6 +22,7 @@ enum dso_binary_type { + DSO_BINARY_TYPE__BUILD_ID_CACHE, + DSO_BINARY_TYPE__FEDORA_DEBUGINFO, + DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, ++ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__BUILDID_DEBUGINFO, + DSO_BINARY_TYPE__SYSTEM_PATH_DSO, + DSO_BINARY_TYPE__GUEST_KMODULE, +diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c +index fb4e1d2839c5..cbbacc346749 100644 +--- a/tools/perf/util/dwarf-aux.c ++++ b/tools/perf/util/dwarf-aux.c +@@ -329,6 +329,7 @@ bool 
die_is_func_def(Dwarf_Die *dw_die) + int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr) + { + Dwarf_Addr base, end; ++ Dwarf_Attribute attr; + + if (!addr) + return -EINVAL; +@@ -336,6 +337,13 @@ int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr) + if (dwarf_entrypc(dw_die, addr) == 0) + return 0; + ++ /* ++ * Since the dwarf_ranges() will return 0 if there is no ++ * DW_AT_ranges attribute, we should check it first. ++ */ ++ if (!dwarf_attr(dw_die, DW_AT_ranges, &attr)) ++ return -ENOENT; ++ + return dwarf_ranges(dw_die, 0, &base, addr, &end) < 0 ? -ENOENT : 0; + } + +diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c +index 2d5744d986f0..5742adf4d5e8 100644 +--- a/tools/perf/util/event.c ++++ b/tools/perf/util/event.c +@@ -682,11 +682,13 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, + int err; + union perf_event *event; + +- if (symbol_conf.kptr_restrict) +- return -1; + if (map == NULL) + return -1; + ++ kmap = map__kmap(map); ++ if (!kmap->ref_reloc_sym) ++ return -1; ++ + /* + * We should get this from /sys/kernel/sections/.text, but till that is + * available use this, and after it is use this as a fallback for older +@@ -710,7 +712,6 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, + event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; + } + +- kmap = map__kmap(map); + size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), + "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1; + size = PERF_ALIGN(size, sizeof(u64)); +@@ -1369,6 +1370,8 @@ int machine__resolve(struct machine *machine, struct addr_location *al, + } + + al->sym = map__find_symbol(al->map, al->addr); ++ } else if (symbol_conf.dso_list) { ++ al->filtered |= (1 << HIST_FILTER__DSO); + } + + if (symbol_conf.sym_list && +diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +index 63fa3a95a1d6..7292f73118ed 100644 +--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c ++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +@@ -1508,6 +1508,9 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder) + break; + + case INTEL_PT_CYC: ++ intel_pt_calc_cyc_timestamp(decoder); ++ break; ++ + case INTEL_PT_VMCS: + case INTEL_PT_MNT: + case INTEL_PT_PAD: +diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c +index 24c6621e2d95..54790a09d158 100644 +--- a/tools/perf/util/intel-pt.c ++++ b/tools/perf/util/intel-pt.c +@@ -873,6 +873,8 @@ static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt, + + if (queue->tid == -1 || pt->have_sched_switch) { + ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu); ++ if (ptq->tid == -1) ++ ptq->pid = -1; + thread__zput(ptq->thread); + } + +@@ -1732,10 +1734,8 @@ static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event, + tid = sample->tid; + } + +- if (tid == -1) { +- pr_err("context_switch event has no tid\n"); +- return -EINVAL; +- } ++ if (tid == -1) ++ intel_pt_log("context_switch event has no tid\n"); + + intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n", + cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time, +diff --git a/tools/perf/util/lzma.c b/tools/perf/util/lzma.c +index 9ddea5cecd94..ba12643d2ded 100644 +--- a/tools/perf/util/lzma.c ++++ b/tools/perf/util/lzma.c +@@ -61,7 +61,7 @@ int lzma_decompress_to_file(const char *input, int output_fd) + + if (ferror(infile)) { + pr_err("lzma: read error: %s\n", strerror(errno)); +- goto err_fclose; ++ goto 
err_lzma_end; + } + + if (feof(infile)) +@@ -75,7 +75,7 @@ int lzma_decompress_to_file(const char *input, int output_fd) + + if (writen(output_fd, buf_out, write_size) != write_size) { + pr_err("lzma: write error: %s\n", strerror(errno)); +- goto err_fclose; ++ goto err_lzma_end; + } + + strm.next_out = buf_out; +@@ -87,11 +87,13 @@ int lzma_decompress_to_file(const char *input, int output_fd) + break; + + pr_err("lzma: failed %s\n", lzma_strerror(ret)); +- goto err_fclose; ++ goto err_lzma_end; + } + } + + err = 0; ++err_lzma_end: ++ lzma_end(&strm); + err_fclose: + fclose(infile); + return err; +diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c +index ab8ebfa2159d..2593b7c28de7 100644 +--- a/tools/perf/util/map.c ++++ b/tools/perf/util/map.c +@@ -91,8 +91,7 @@ static inline bool replace_android_lib(const char *filename, char *newfilename) + if (!strncmp(filename, "/system/lib/", 12)) { + char *ndk, *app; + const char *arch; +- size_t ndk_length; +- size_t app_length; ++ int ndk_length, app_length; + + ndk = getenv("NDK_ROOT"); + app = getenv("APP_PLATFORM"); +@@ -120,8 +119,8 @@ static inline bool replace_android_lib(const char *filename, char *newfilename) + if (new_length > PATH_MAX) + return false; + snprintf(newfilename, new_length, +- "%s/platforms/%s/arch-%s/usr/lib/%s", +- ndk, app, arch, libname); ++ "%.*s/platforms/%.*s/arch-%s/usr/lib/%s", ++ ndk_length, ndk, app_length, app, arch, libname); + + return true; + } +diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c +index 6193be6d7639..f9f7e35f47a7 100644 +--- a/tools/perf/util/parse-events.c ++++ b/tools/perf/util/parse-events.c +@@ -195,8 +195,8 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) + + for_each_event(sys_dirent, evt_dir, evt_dirent) { + +- snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, +- evt_dirent->d_name); ++ scnprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, ++ evt_dirent->d_name); + fd = open(evt_path, O_RDONLY); + if (fd < 0) + continue; +diff --git a/tools/perf/util/parse-regs-options.c b/tools/perf/util/parse-regs-options.c +index 646ecf736aad..be2ab1091c2b 100644 +--- a/tools/perf/util/parse-regs-options.c ++++ b/tools/perf/util/parse-regs-options.c +@@ -40,7 +40,7 @@ parse_regs(const struct option *opt, const char *str, int unset) + } + fputc('\n', stderr); + /* just printing available regs */ +- return -1; ++ goto error; + } + for (r = sample_reg_masks; r->name; r++) { + if (!strcasecmp(s, r->name)) +diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c +index c86c1d5ea65c..5f1ba6f84f5f 100644 +--- a/tools/perf/util/pmu.c ++++ b/tools/perf/util/pmu.c +@@ -325,7 +325,7 @@ static int pmu_aliases_parse(char *dir, struct list_head *head) + if (pmu_alias_info_file(name)) + continue; + +- snprintf(path, PATH_MAX, "%s/%s", dir, name); ++ scnprintf(path, PATH_MAX, "%s/%s", dir, name); + + file = fopen(path, "r"); + if (!file) { +@@ -1018,6 +1018,17 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to) + set_bit(b, bits); + } + ++void perf_pmu__del_formats(struct list_head *formats) ++{ ++ struct perf_pmu_format *fmt, *tmp; ++ ++ list_for_each_entry_safe(fmt, tmp, formats, list) { ++ list_del(&fmt->list); ++ free(fmt->name); ++ free(fmt); ++ } ++} ++ + static int sub_non_neg(int a, int b) + { + if (b > a) +diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h +index 25712034c815..fed6c3b56ce7 100644 +--- a/tools/perf/util/pmu.h ++++ b/tools/perf/util/pmu.h +@@ -71,6 +71,7 @@ int perf_pmu__new_format(struct list_head *list, char 
*name, + int config, unsigned long *bits); + void perf_pmu__set_format(unsigned long *bits, long from, long to); + int perf_pmu__format_parse(char *dir, struct list_head *head); ++void perf_pmu__del_formats(struct list_head *formats); + + struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu); + +diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c +index a7452fd3b6ee..0551a02ee17c 100644 +--- a/tools/perf/util/probe-event.c ++++ b/tools/perf/util/probe-event.c +@@ -118,7 +118,7 @@ static struct symbol *__find_kernel_function(u64 addr, struct map **mapp) + return machine__find_kernel_function(host_machine, addr, mapp); + } + +-static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void) ++static struct ref_reloc_sym *kernel_get_ref_reloc_sym(struct map **pmap) + { + /* kmap->ref_reloc_sym should be set if host_machine is initialized */ + struct kmap *kmap; +@@ -130,6 +130,10 @@ static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void) + kmap = map__kmap(map); + if (!kmap) + return NULL; ++ ++ if (pmap) ++ *pmap = map; ++ + return kmap->ref_reloc_sym; + } + +@@ -141,7 +145,7 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr, + struct map *map; + + /* ref_reloc_sym is just a label. Need a special fix*/ +- reloc_sym = kernel_get_ref_reloc_sym(); ++ reloc_sym = kernel_get_ref_reloc_sym(NULL); + if (reloc_sym && strcmp(name, reloc_sym->name) == 0) + *addr = (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr; + else { +@@ -742,6 +746,7 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, + int ntevs) + { + struct ref_reloc_sym *reloc_sym; ++ struct map *map; + char *tmp; + int i, skipped = 0; + +@@ -750,7 +755,7 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, + return post_process_offline_probe_trace_events(tevs, ntevs, + symbol_conf.vmlinux_name); + +- reloc_sym = kernel_get_ref_reloc_sym(); ++ reloc_sym = kernel_get_ref_reloc_sym(&map); + if (!reloc_sym) { + pr_warning("Relocated base symbol is not found!\n"); + return -EINVAL; +@@ -759,9 +764,13 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs, + for (i = 0; i < ntevs; i++) { + if (!tevs[i].point.address || tevs[i].point.retprobe) + continue; +- /* If we found a wrong one, mark it by NULL symbol */ ++ /* ++ * If we found a wrong one, mark it by NULL symbol. ++ * Since addresses in debuginfo is same as objdump, we need ++ * to convert it to addresses on memory. 
++ */ + if (kprobe_warn_out_range(tevs[i].point.symbol, +- tevs[i].point.address)) { ++ map__objdump_2mem(map, tevs[i].point.address))) { + tmp = NULL; + skipped++; + } else { +@@ -2850,7 +2859,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, + + /* Note that the symbols in the kmodule are not relocated */ + if (!pev->uprobes && !pp->retprobe && !pev->target) { +- reloc_sym = kernel_get_ref_reloc_sym(); ++ reloc_sym = kernel_get_ref_reloc_sym(NULL); + if (!reloc_sym) { + pr_warning("Relocated base symbol is not found!\n"); + ret = -EINVAL; +diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c +index b9507a8d0e30..293df9409afa 100644 +--- a/tools/perf/util/probe-file.c ++++ b/tools/perf/util/probe-file.c +@@ -334,11 +334,11 @@ int probe_file__del_events(int fd, struct strfilter *filter) + + ret = probe_file__get_events(fd, filter, namelist); + if (ret < 0) +- return ret; ++ goto out; + + ret = probe_file__del_strlist(fd, namelist); ++out: + strlist__delete(namelist); +- + return ret; + } + +diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c +index 82e4f158c88e..863f668a0735 100644 +--- a/tools/perf/util/probe-finder.c ++++ b/tools/perf/util/probe-finder.c +@@ -111,6 +111,7 @@ enum dso_binary_type distro_dwarf_types[] = { + DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, + DSO_BINARY_TYPE__BUILDID_DEBUGINFO, ++ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__NOT_FOUND, + }; + +@@ -1350,7 +1351,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg, + tf.ntevs = 0; + + ret = debuginfo__find_probes(dbg, &tf.pf); +- if (ret < 0) { ++ if (ret < 0 || tf.ntevs == 0) { + for (i = 0; i < tf.ntevs; i++) + clear_probe_trace_event(&tf.tevs[i]); + zfree(tevs); +diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c +index 7e0573e55a35..9187d8119a75 100644 +--- a/tools/perf/util/session.c ++++ b/tools/perf/util/session.c +@@ -482,6 +482,7 @@ static void perf_event__mmap2_swap(union perf_event *event, + event->mmap2.maj = bswap_32(event->mmap2.maj); + event->mmap2.min = bswap_32(event->mmap2.min); + event->mmap2.ino = bswap_64(event->mmap2.ino); ++ event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation); + + if (sample_id_all) { + void *data = &event->mmap2.filename; +@@ -1426,6 +1427,7 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset, + if (event->header.size < hdr_sz || event->header.size > buf_sz) + return -1; + ++ buf += hdr_sz; + rest = event->header.size - hdr_sz; + + if (readn(fd, buf, rest) != (ssize_t)rest) +diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py +index c8680984d2d6..163f38fbd79c 100644 +--- a/tools/perf/util/setup.py ++++ b/tools/perf/util/setup.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/python2 ++#!/usr/bin/env python2 + + from distutils.core import setup, Extension + from os import getenv +diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c +index 031e64ce7156..013e3f510225 100644 +--- a/tools/perf/util/sort.c ++++ b/tools/perf/util/sort.c +@@ -2532,7 +2532,7 @@ static char *prefix_if_not_in(const char *pre, char *str) + return str; + + if (asprintf(&n, "%s,%s", pre, str) < 0) +- return NULL; ++ n = NULL; + + free(str); + return n; +diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c +index b4db3f48e3b0..2853d4728ab9 100644 +--- a/tools/perf/util/srcline.c ++++ b/tools/perf/util/srcline.c +@@ -86,16 +86,30 @@ static void find_address_in_section(bfd *abfd, asection *section, 
void *data) + bfd_vma pc, vma; + bfd_size_type size; + struct a2l_data *a2l = data; ++ flagword flags; + + if (a2l->found) + return; + +- if ((bfd_get_section_flags(abfd, section) & SEC_ALLOC) == 0) ++#ifdef bfd_get_section_flags ++ flags = bfd_get_section_flags(abfd, section); ++#else ++ flags = bfd_section_flags(section); ++#endif ++ if ((flags & SEC_ALLOC) == 0) + return; + + pc = a2l->addr; ++#ifdef bfd_get_section_vma + vma = bfd_get_section_vma(abfd, section); ++#else ++ vma = bfd_section_vma(section); ++#endif ++#ifdef bfd_get_section_size + size = bfd_get_section_size(section); ++#else ++ size = bfd_section_size(section); ++#endif + + if (pc < vma || pc >= vma + size) + return; +diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c +index d4f872f1750e..8a178b36981a 100644 +--- a/tools/perf/util/stat.c ++++ b/tools/perf/util/stat.c +@@ -341,8 +341,10 @@ int perf_stat_process_counter(struct perf_stat_config *config, + * interval mode, otherwise overall avg running + * averages will be shown for each interval. + */ +- if (config->interval) +- init_stats(ps->res_stats); ++ if (config->interval) { ++ for (i = 0; i < 3; i++) ++ init_stats(&ps->res_stats[i]); ++ } + + if (counter->per_pkg) + zero_per_pkg(counter); +diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c +index 5a50326c8158..e155783c601a 100644 +--- a/tools/perf/util/symbol-elf.c ++++ b/tools/perf/util/symbol-elf.c +@@ -1421,6 +1421,7 @@ struct kcore_copy_info { + u64 first_symbol; + u64 last_symbol; + u64 first_module; ++ u64 first_module_symbol; + u64 last_module_symbol; + struct phdr_data kernel_map; + struct phdr_data modules_map; +@@ -1435,6 +1436,8 @@ static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, + return 0; + + if (strchr(name, '[')) { ++ if (!kci->first_module_symbol || start < kci->first_module_symbol) ++ kci->first_module_symbol = start; + if (start > kci->last_module_symbol) + kci->last_module_symbol = start; + return 0; +@@ -1559,6 +1562,10 @@ static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir, + kci->etext += page_size; + } + ++ if (kci->first_module_symbol && ++ (!kci->first_module || kci->first_module_symbol < kci->first_module)) ++ kci->first_module = kci->first_module_symbol; ++ + kci->first_module = round_down(kci->first_module, page_size); + + if (kci->last_module_symbol) { +diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c +index acde8e489352..4e27b868f774 100644 +--- a/tools/perf/util/symbol.c ++++ b/tools/perf/util/symbol.c +@@ -58,6 +58,7 @@ static enum dso_binary_type binary_type_symtab[] = { + DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, + DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP, + DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, ++ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__NOT_FOUND, + }; + +@@ -1361,6 +1362,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod, + case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: + case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: + case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: ++ case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: + case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: + case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: + return !kmod && dso->kernel == DSO_TYPE_USER; +diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c +index a680bdaa65dc..060957aeb79a 100644 +--- a/tools/perf/util/symbol_fprintf.c ++++ b/tools/perf/util/symbol_fprintf.c +@@ -64,7 +64,7 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso, + + for (nd = 
rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) { + pos = rb_entry(nd, struct symbol_name_rb_node, rb_node); +- fprintf(fp, "%s\n", pos->sym.name); ++ ret += fprintf(fp, "%s\n", pos->sym.name); + } + + return ret; +diff --git a/tools/testing/ktest/compare-ktest-sample.pl b/tools/testing/ktest/compare-ktest-sample.pl +index a373a5bfff68..c488e863d83f 100755 +--- a/tools/testing/ktest/compare-ktest-sample.pl ++++ b/tools/testing/ktest/compare-ktest-sample.pl +@@ -1,4 +1,4 @@ +-#!/usr/bin/perl ++#!/usr/bin/env perl + + open (IN,"ktest.pl"); + while () { +diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk +index 50a93f5f13d6..d8fa6c72b7ca 100644 +--- a/tools/testing/selftests/lib.mk ++++ b/tools/testing/selftests/lib.mk +@@ -1,6 +1,10 @@ + # This mimics the top-level Makefile. We do it explicitly here so that this + # Makefile can operate with or without the kbuild infrastructure. ++ifneq ($(LLVM),) ++CC := clang ++else + CC := $(CROSS_COMPILE)gcc ++endif + + define RUN_TESTS + @for TEST in $(TEST_PROGS); do \ +diff --git a/tools/testing/selftests/networking/timestamping/timestamping.c b/tools/testing/selftests/networking/timestamping/timestamping.c +index 5cdfd743447b..900ed4b47899 100644 +--- a/tools/testing/selftests/networking/timestamping/timestamping.c ++++ b/tools/testing/selftests/networking/timestamping/timestamping.c +@@ -332,10 +332,16 @@ int main(int argc, char **argv) + int val; + socklen_t len; + struct timeval next; ++ size_t if_len; + + if (argc < 2) + usage(0); + interface = argv[1]; ++ if_len = strlen(interface); ++ if (if_len >= IFNAMSIZ) { ++ printf("interface name exceeds IFNAMSIZ\n"); ++ exit(1); ++ } + + for (i = 2; i < argc; i++) { + if (!strcasecmp(argv[i], "SO_TIMESTAMP")) +@@ -369,12 +375,12 @@ int main(int argc, char **argv) + bail("socket"); + + memset(&device, 0, sizeof(device)); +- strncpy(device.ifr_name, interface, sizeof(device.ifr_name)); ++ memcpy(device.ifr_name, interface, if_len + 1); + if (ioctl(sock, SIOCGIFADDR, &device) < 0) + bail("getting interface IP address"); + + memset(&hwtstamp, 0, sizeof(hwtstamp)); +- strncpy(hwtstamp.ifr_name, interface, sizeof(hwtstamp.ifr_name)); ++ memcpy(hwtstamp.ifr_name, interface, if_len + 1); + hwtstamp.ifr_data = (void *)&hwconfig; + memset(&hwconfig, 0, sizeof(hwconfig)); + hwconfig.tx_type = +diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c +index a36883ad48a4..4b4d2ce91256 100644 +--- a/tools/testing/selftests/powerpc/benchmarks/context_switch.c ++++ b/tools/testing/selftests/powerpc/benchmarks/context_switch.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -97,8 +98,9 @@ static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu) + + static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu) + { +- int pid; +- cpu_set_t cpuset; ++ int pid, ncpus; ++ cpu_set_t *cpuset; ++ size_t size; + + pid = fork(); + if (pid == -1) { +@@ -109,14 +111,23 @@ static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu) + if (pid) + return; + +- CPU_ZERO(&cpuset); +- CPU_SET(cpu, &cpuset); ++ ncpus = get_nprocs(); ++ size = CPU_ALLOC_SIZE(ncpus); ++ cpuset = CPU_ALLOC(ncpus); ++ if (!cpuset) { ++ perror("malloc"); ++ exit(1); ++ } ++ CPU_ZERO_S(size, cpuset); ++ CPU_SET_S(cpu, size, cpuset); + +- if (sched_setaffinity(0, sizeof(cpuset), &cpuset)) { ++ if (sched_setaffinity(0, size, cpuset)) { + 
perror("sched_setaffinity"); ++ CPU_FREE(cpuset); + exit(1); + } + ++ CPU_FREE(cpuset); + fn(arg); + + exit(0); +diff --git a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c +index 94110b1dcd3d..031baa43646f 100644 +--- a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c ++++ b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c +@@ -91,8 +91,6 @@ int back_to_back_ebbs(void) + ebb_global_disable(); + ebb_freeze_pmcs(); + +- count_pmc(1, sample_period); +- + dump_ebb_state(); + + event_close(&event); +diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c +index 7c57a8d79535..361e0be9df9a 100644 +--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c ++++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c +@@ -42,8 +42,6 @@ int cycles(void) + ebb_global_disable(); + ebb_freeze_pmcs(); + +- count_pmc(1, sample_period); +- + dump_ebb_state(); + + event_close(&event); +diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c +index ecf5ee3283a3..fe7d0dc2a1a2 100644 +--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c ++++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c +@@ -99,8 +99,6 @@ int cycles_with_freeze(void) + ebb_global_disable(); + ebb_freeze_pmcs(); + +- count_pmc(1, sample_period); +- + dump_ebb_state(); + + printf("EBBs while frozen %d\n", ebbs_while_frozen); +diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c +index c0faba520b35..b9b30f974b5e 100644 +--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c ++++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c +@@ -71,8 +71,6 @@ int cycles_with_mmcr2(void) + ebb_global_disable(); + ebb_freeze_pmcs(); + +- count_pmc(1, sample_period); +- + dump_ebb_state(); + + event_close(&event); +diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c +index 46681fec549b..2694ae161a84 100644 +--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c ++++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c +@@ -396,8 +396,6 @@ int ebb_child(union pipe read_pipe, union pipe write_pipe) + ebb_global_disable(); + ebb_freeze_pmcs(); + +- count_pmc(1, sample_period); +- + dump_ebb_state(); + + event_close(&event); +diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c +index a991d2ea8d0a..174e4f4dae6c 100644 +--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c ++++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c +@@ -38,8 +38,6 @@ static int victim_child(union pipe read_pipe, union pipe write_pipe) + ebb_global_disable(); + ebb_freeze_pmcs(); + +- count_pmc(1, sample_period); +- + dump_ebb_state(); + + FAIL_IF(ebb_state.stats.ebb_count == 0); +diff --git a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c +index eb8acb78bc6c..531083accfca 100644 +--- a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c ++++ b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c +@@ -75,7 +75,6 @@ static int test_body(void) + ebb_freeze_pmcs(); + 
ebb_global_disable(); + +- count_pmc(4, sample_period); + mtspr(SPRN_PMC4, 0xdead); + + dump_summary_ebb_state(); +diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c +index 6ff8c8ff27d6..035c02273cd4 100644 +--- a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c ++++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c +@@ -70,13 +70,6 @@ int multi_counter(void) + ebb_global_disable(); + ebb_freeze_pmcs(); + +- count_pmc(1, sample_period); +- count_pmc(2, sample_period); +- count_pmc(3, sample_period); +- count_pmc(4, sample_period); +- count_pmc(5, sample_period); +- count_pmc(6, sample_period); +- + dump_ebb_state(); + + for (i = 0; i < 6; i++) +diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c +index 037cb6154f36..3e9d4ac965c8 100644 +--- a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c ++++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c +@@ -61,8 +61,6 @@ static int cycles_child(void) + ebb_global_disable(); + ebb_freeze_pmcs(); + +- count_pmc(1, sample_period); +- + dump_summary_ebb_state(); + + event_close(&event); +diff --git a/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c b/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c +index 8341d7778d5e..87630d44fb4c 100644 +--- a/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c ++++ b/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c +@@ -50,8 +50,6 @@ static int no_handler_test(void) + + event_close(&event); + +- dump_ebb_state(); +- + /* The real test is that we never took an EBB at 0x0 */ + + return 0; +diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c +index c5fa64790c22..d90891fe96a3 100644 +--- a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c ++++ b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c +@@ -82,8 +82,6 @@ static int test_body(void) + ebb_global_disable(); + ebb_freeze_pmcs(); + +- count_pmc(1, sample_period); +- + dump_ebb_state(); + + if (mmcr0_mismatch) +diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c +index 30e1ac62e8cb..8ca92b9ee5b0 100644 +--- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c ++++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c +@@ -76,8 +76,6 @@ int pmc56_overflow(void) + ebb_global_disable(); + ebb_freeze_pmcs(); + +- count_pmc(2, sample_period); +- + dump_ebb_state(); + + printf("PMC5/6 overflow %d\n", pmc56_overflowed); +diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c +index dcf74184bfd0..bafb70d0ee26 100644 +--- a/tools/testing/selftests/powerpc/utils.c ++++ b/tools/testing/selftests/powerpc/utils.c +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -62,26 +63,38 @@ void *get_auxv_entry(int type) + + int pick_online_cpu(void) + { +- cpu_set_t mask; +- int cpu; ++ int ncpus, cpu = -1; ++ cpu_set_t *mask; ++ size_t size; ++ ++ ncpus = get_nprocs_conf(); ++ size = CPU_ALLOC_SIZE(ncpus); ++ mask = CPU_ALLOC(ncpus); ++ if (!mask) { ++ perror("malloc"); ++ return -1; ++ } + +- CPU_ZERO(&mask); ++ CPU_ZERO_S(size, mask); + +- if (sched_getaffinity(0, sizeof(mask), &mask)) { ++ if (sched_getaffinity(0, size, mask)) { + 
perror("sched_getaffinity"); +- return -1; ++ goto done; + } + + /* We prefer a primary thread, but skip 0 */ +- for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8) +- if (CPU_ISSET(cpu, &mask)) +- return cpu; ++ for (cpu = 8; cpu < ncpus; cpu += 8) ++ if (CPU_ISSET_S(cpu, size, mask)) ++ goto done; + + /* Search for anything, but in reverse */ +- for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--) +- if (CPU_ISSET(cpu, &mask)) +- return cpu; ++ for (cpu = ncpus - 1; cpu >= 0; cpu--) ++ if (CPU_ISSET_S(cpu, size, mask)) ++ goto done; + + printf("No cpus in affinity mask?!\n"); +- return -1; ++ ++done: ++ CPU_FREE(mask); ++ return cpu; + } +diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c +index 874972ccfc95..d78736845b8e 100644 +--- a/tools/testing/selftests/x86/protection_keys.c ++++ b/tools/testing/selftests/x86/protection_keys.c +@@ -23,6 +23,7 @@ + #define _GNU_SOURCE + #include + #include ++#include + #include + #include + #include +@@ -611,7 +612,6 @@ int alloc_random_pkey(void) + + /* allocate every possible key and make a note of which ones we got */ + max_nr_pkey_allocs = NR_PKEYS; +- max_nr_pkey_allocs = 1; + for (i = 0; i < max_nr_pkey_allocs; i++) { + int new_pkey = alloc_pkey(); + if (new_pkey < 0) +@@ -1386,6 +1386,8 @@ int main(void) + { + int nr_iterations = 22; + ++ srand((unsigned int)time(NULL)); ++ + setup_handlers(); + + printf("has pku: %d\n", cpu_has_pku()); +diff --git a/tools/testing/selftests/x86/syscall_nt.c b/tools/testing/selftests/x86/syscall_nt.c +index 43fcab367fb0..74e6b3fc2d09 100644 +--- a/tools/testing/selftests/x86/syscall_nt.c ++++ b/tools/testing/selftests/x86/syscall_nt.c +@@ -67,6 +67,7 @@ static void do_it(unsigned long extraflags) + set_eflags(get_eflags() | extraflags); + syscall(SYS_getpid); + flags = get_eflags(); ++ set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED); + if ((flags & extraflags) == extraflags) { + printf("[OK]\tThe syscall worked and flags are still set\n"); + } else { +diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c +index 4bb905925b0e..99c26a175beb 100644 +--- a/tools/usb/usbip/libsrc/usbip_host_common.c ++++ b/tools/usb/usbip/libsrc/usbip_host_common.c +@@ -35,7 +35,7 @@ + #include "list.h" + #include "sysfs_utils.h" + +-struct udev *udev_context; ++extern struct udev *udev_context; + + static int32_t read_attr_usbip_status(struct usbip_usb_device *udev) + { +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c +index 4e4bb5dd2dcd..db859b595dba 100644 +--- a/virt/kvm/kvm_main.c ++++ b/virt/kvm/kvm_main.c +@@ -154,6 +154,7 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn) + */ + if (pfn_valid(pfn)) + return PageReserved(pfn_to_page(pfn)) && ++ !is_zero_pfn(pfn) && + !kvm_is_zone_device_pfn(pfn); + + return true; +@@ -381,9 +382,8 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, + */ + kvm->mmu_notifier_count++; + need_tlb_flush = kvm_unmap_hva_range(kvm, start, end); +- need_tlb_flush |= kvm->tlbs_dirty; + /* we've to flush the tlb before the pages can be freed */ +- if (need_tlb_flush) ++ if (need_tlb_flush || kvm->tlbs_dirty) + kvm_flush_remote_tlbs(kvm); + + spin_unlock(&kvm->mmu_lock); +@@ -3639,7 +3639,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, + void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, + struct kvm_io_device *dev) + { +- int i; ++ int i, j; + struct kvm_io_bus *new_bus, *bus; + + bus = kvm->buses[bus_idx]; +@@ -3656,17 +3656,20 @@ void 
kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ 
+ 	new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
+ 			  sizeof(struct kvm_io_range)), GFP_KERNEL);
+-	if (!new_bus) {
++	if (new_bus) {
++		memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
++		new_bus->dev_count--;
++		memcpy(new_bus->range + i, bus->range + i + 1,
++		       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
++	} else {
+ 		pr_err("kvm: failed to shrink bus, removing it completely\n");
+-		goto broken;
++		for (j = 0; j < bus->dev_count; j++) {
++			if (j == i)
++				continue;
++			kvm_iodevice_destructor(bus->range[j].dev);
++		}
+ 	}
+ 
+-	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
+-	new_bus->dev_count--;
+-	memcpy(new_bus->range + i, bus->range + i + 1,
+-		(new_bus->dev_count - i) * sizeof(struct kvm_io_range));
+-
+-broken:
+ 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+ 	synchronize_srcu_expedited(&kvm->srcu);
+ 	kfree(bus);
diff --git a/Scripts/LineageOS-19.1/CVE_Patchers/android_kernel_oneplus_sdm845.sh b/Scripts/LineageOS-19.1/CVE_Patchers/android_kernel_oneplus_sdm845.sh
index d8e756c3..cfedbbad 100644
--- a/Scripts/LineageOS-19.1/CVE_Patchers/android_kernel_oneplus_sdm845.sh
+++ b/Scripts/LineageOS-19.1/CVE_Patchers/android_kernel_oneplus_sdm845.sh
@@ -1,8 +1,7 @@
 #!/bin/bash
 cd "$DOS_BUILD_BASE""kernel/oneplus/sdm845"
-git apply $DOS_PATCHES_LINUX_CVES/0001-LinuxIncrementals/4.9/4.9.0239-0240.patch --exclude=Makefile
-git apply $DOS_PATCHES_LINUX_CVES/0001-LinuxIncrementals/4.9/4.9.0242-0243.patch --exclude=Makefile
 git apply $DOS_PATCHES_LINUX_CVES/0001-LinuxIncrementals/4.9/4.9.0286-0287.patch --exclude=Makefile
+git apply $DOS_PATCHES_LINUX_CVES/0001-LinuxIncrementals/4.9/4.9.0289-0290.patch --exclude=Makefile
 git apply $DOS_PATCHES_LINUX_CVES/0001-LinuxIncrementals/4.9/4.9.0300-0301.patch --exclude=Makefile
 git apply $DOS_PATCHES_LINUX_CVES/0002-Misc_Fixes/4.9/0010.patch
 git apply $DOS_PATCHES_LINUX_CVES/0005-Graphene-Deny_USB/4.9/0002.patch
@@ -42,161 +41,39 @@ git apply $DOS_PATCHES_LINUX_CVES/CVE-2016-3695/ANY/0001.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2017-16USB/ANY/0006.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2017-16USB/ANY/0008.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2017-16USB/ANY/0009.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2017-0605/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2017-7477/4.9/0003.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2017-13693/^4.12.9/0001.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2017-13694/^4.12.9/0001.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2017-1000252/^4.13/0002.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2018-5897/ANY/0001.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2018-8043/4.9/0004.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2018-9415/ANY/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2018-10323/4.9/0006.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2018-13094/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2018-20855/^4.18/0001.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-0145/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-0148/4.9/0007.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-8912/^5.0/0001.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-12378/^5.1.5/0001.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-12455/^5.1.5/0001.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-12456/^5.1.5/0002.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-15291/4.9/0007.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-16921/^4.16/0001.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-16994/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-18808/4.9/0002.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-19051/4.9/0013.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-19054/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-19060/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-19061/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-19073/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-19074/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-19318/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-19448/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-19813/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-19816/4.9/0007.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-19947/4.9/0007.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-20810/4.9/0006.patch
+git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-19068/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2019-20908/^5.2/0001.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-0067/ANY/0001.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-0427/4.9/0003.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-0433/4.9/0007.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-0465/4.9/0011.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-0465/4.9/0012.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-3674/ANY/0001.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-3702/4.9/0031.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-3702/4.9/0032.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-3702/4.9/0033.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-3702/4.9/0034.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-3702/4.9/0035.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-4788/4.9/0015.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-4788/4.9/0016.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-4788/4.9/0017.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-4788/4.9/0018.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-4788/4.9/0019.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-4788/4.9/0020.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-4788/4.9/0021.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-4788/4.9/0022.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-10766/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-12352/ANY/0009.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-12655/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-12656/4.9/0010.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-12656/4.9/0011.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-12771/4.9/0006.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-14314/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-14331/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-14351/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-14356/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-14386/4.9/0003.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-14390/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-15393/4.9/0006.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-15437/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-16119/4.9/0007.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-16119/^5.10/0002.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-16166/4.9/0004.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-24394/^5.8/0001.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-24588/4.9/0007.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-25211/4.9/0003.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-25212/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-25220/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-25284/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-25285/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-25641/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-25643/4.9/0006.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-25645/4.9/0003.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-25668/4.9/0003.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-25669/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-25670/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-25671/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-25672/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-25673/4.9/0006.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-26088/4.9/0006.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-26139/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-26147/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-26555/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-27815/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-27825/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-28915/4.9/0016.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-28915/4.9/0017.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-28915/4.9/0018.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-28974/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-29368/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-29368/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-29371/4.9/0006.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-29568/4.9/0031.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-29568/4.9/0032.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-29568/4.9/0033.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-29568/4.9/0034.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-29568/4.9/0035.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-29569/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-29660/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-35508/4.9/0006.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-35519/4.9/0006.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-36158/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-36312/4.9/0006.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-36516/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-0605/4.9/0006.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-0929/ANY/0003.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-0935/^4.16/0001.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-0936/ANY/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-0936/ANY/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-0936/ANY/0006.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-0936/ANY/0008.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-0936/ANY/0011.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-0936/ANY/0012.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-0937/4.9/0006.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-0941/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-0961/ANY/0003.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3178/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3347/4.9/0036.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3347/4.9/0037.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3347/4.9/0038.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3347/4.9/0039.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3347/4.9/0040.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3347/4.9/0041.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3347/4.9/0042.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3347/4.9/0043.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3347/4.9/0044.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3347/4.9/0045.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3428/4.9/0015.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3428/4.9/0016.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3428/4.9/0017.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3483/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3564/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3573/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3609/4.9/0007.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3612/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3640/4.9/0012.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3640/4.9/0013.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3640/4.9/0014.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3653/4.9/0007.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3655/4.9/0012.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3655/4.9/0013.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3655/4.9/0014.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3655/4.9/0015.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3659/4.9/0006.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3679/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3732/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3752/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3753/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3760/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3772/4.9/0022.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3772/4.9/0023.patch
@@ -204,86 +81,39 @@ git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-3896/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-4002/4.9/0008.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-4149/4.9/0006.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-4155/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-4157/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-4202/4.9/0009.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-4202/4.9/0010.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-20292/4.9/0004.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-20320/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-20320/4.9/0006.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-20322/4.9/0010.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-21781/4.9/0007.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-23133/4.9/0010.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-23134/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-26930/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-26931/4.9/0013.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-26931/4.9/0014.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-26931/4.9/0015.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-26932/4.9/0021.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-26932/4.9/0022.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-26932/4.9/0023.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-26932/4.9/0024.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-26932/4.9/0025.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-27363/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-27365/4.9/0009.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-27365/4.9/0010.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-28038/4.9/0009.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-28038/4.9/0010.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-28660/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-28688/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-28713/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-28964/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-28971/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-28972/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-29154/4.9/0007.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-29647/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-29650/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-30002/4.9/0006.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-31916/4.9/0007.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-32399/4.9/0004.patch
+git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-28714/4.9/0004.patch
+git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-28715/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-33033/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-33034/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-33098/^5.12/0001.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-33909/4.9/0006.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-34693/4.9/0007.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-34981/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-37576/4.9/0007.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-37576/4.9/0008.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-38160/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-38199/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-38204/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-38205/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-38208/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-38209/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-39633/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-39634/4.9/0006.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-39636/ANY/0001.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-39636/ANY/0002.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-39636/ANY/0003.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-39636/ANY/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-39636/ANY/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-39648/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-39656/4.9/0004.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-39685/4.9/0013.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-39685/4.9/0015.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-39714/4.9/0002.patch
+git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-39792/ANY/0001.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-40490/3.9-^5.14/0001.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-42008/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-42739/4.9/0004.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-43976/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-44879/^5.16/0001.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-45095/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-45469/4.9-^5.16/0001.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-45485/4.9/0005.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-45486/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-45868/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-0330/4.9/0005.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-0617/4.9/0009.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-0617/4.9/0010.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-0644/4.9/0004.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-0847/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-0850/4.9/0006.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-1016/4.9/0004.patch
+git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-1195/4.9/0007.patch
+git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-1195/4.9/0008.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-23039/4.9/0003.patch
+git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-23040/4.9/0003.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-23042/4.9/0003.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-23960/4.9/0050.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-23960/4.9/0051.patch
@@ -299,7 +129,6 @@ git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-26966/4.9/0004.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-27223/4.9/0004.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-27950/^5.16/0001.patch
 git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-28356/4.9/0004.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2020-24586/4.9/0009.patch
-git apply $DOS_PATCHES_LINUX_CVES/CVE-2021-37159/4.9/0007.patch
-editKernelLocalversion "-dos.p301"
+git apply $DOS_PATCHES_LINUX_CVES/CVE-2022-GPZ2253/4.9/0007.patch
+editKernelLocalversion "-dos.p130"
 cd "$DOS_BUILD_BASE"
diff --git a/Scripts/LineageOS-19.1/Patch.sh b/Scripts/LineageOS-19.1/Patch.sh
index 45880c1d..fc7f8f6a 100644
--- a/Scripts/LineageOS-19.1/Patch.sh
+++ b/Scripts/LineageOS-19.1/Patch.sh
@@ -393,6 +393,10 @@ if enterAndClear "kernel/google/wahoo"; then
 sed -i 's/asm(SET_PSTATE_UAO(1));/asm(SET_PSTATE_UAO(1)); return 0;/' arch/arm64/mm/fault.c; #fix build with CONFIG_ARM64_UAO
 fi;
 
+#if enterAndClear "kernel/oneplus/sdm845"; then
+#applyPatch "$DOS_PATCHES/android_kernel_oneplus_sdm845/4.9.282-qc.patch"; #4.9.227 -> 4.9.282
+#fi;
+
 #Make changes to all devices
 cd "$DOS_BUILD_BASE";
 if [ "$DOS_LOWRAM_ENABLED" = true ]; then find "device" -maxdepth 2 -mindepth 2 -type d -print0 | xargs -0 -n 1 -P 8 -I {} bash -c 'enableLowRam "{}"'; fi;